#!/usr/bin/env python3
"""
Week 6 Day 5 - Simple Fine-Tuning Script

A basic fine-tuning workflow for the OpenAI gpt-4.1-2025-04-14 model.

Key Features:
- Simple sample-data generation and train/validation/test splitting
- A rough token estimate to help stay under an 800k-token training budget
- Basic keyword-overlap evaluation metrics
- Fine-tuning job monitoring

Usage:
    python w6d5.py

Requirements:
- OPENAI_API_KEY environment variable
- OpenAI API access with fine-tuning permissions
"""
import os
import json
import random
import time
from typing import Dict, Any, Optional

from dotenv import load_dotenv
from openai import OpenAI

# Load environment variables from a local .env file, if present.
# The client is created inside SimpleFineTuner so a missing API key is
# reported with a friendly message in main() instead of an import-time error.
load_dotenv()
class SimpleFineTuner:
    """
    Simple fine-tuning wrapper for the OpenAI gpt-4.1-2025-04-14 model.

    This class implements basic fine-tuning with:
    1. Simple data generation and train/validation/test splitting
    2. A rough token estimate against an 800k-token budget
    3. Basic keyword-overlap evaluation metrics
    4. Fine-tuning job monitoring
    """

    def __init__(self, api_key: str):
        """Initialize the fine-tuner with an OpenAI API key."""
        self.client = OpenAI(api_key=api_key)
        self.train_data = []
        self.test_data = []
        self.validation_data = []
        self.fine_tuned_model = None
        self.results = {}
    def create_sample_data(self, num_items: int = 100) -> None:
        """
        Create sample training data for fine-tuning.

        Args:
            num_items: Number of sample items to create
        """
        print(f"Creating sample dataset with {num_items} items...")

        # Sample product categories
        categories = [
            "Electronics", "Clothing", "Books", "Home & Garden",
            "Sports", "Beauty", "Automotive", "Toys"
        ]

        # Sample brands
        brands = [
            "TechCorp", "StyleCo", "BookWorld", "GardenPro",
            "SportMax", "BeautyPlus", "AutoTech", "ToyLand"
        ]

        all_items = []

        for _ in range(num_items):
            # Generate sample product data
            category = random.choice(categories)
            brand = random.choice(brands)
            price = round(random.uniform(10, 1000), 2)

            # Create a chat-format training example; the quoted price band
            # is clamped at $0 so the low end never goes negative
            item = {
                "messages": [
                    {
                        "role": "system",
                        "content": "You are a helpful assistant that provides product information."
                    },
                    {
                        "role": "user",
                        "content": f"Tell me about {brand} products in the {category} category"
                    },
                    {
                        "role": "assistant",
                        "content": f"{brand} offers high-quality {category.lower()} products. "
                                   f"Our {category.lower()} items range from ${max(0.0, price - 50):.2f} to ${price + 50:.2f}. "
                                   f"We focus on quality and customer satisfaction in the {category} market."
                    }
                ]
            }
            all_items.append(item)

        # Shuffle, then split 80/10/10 into train/validation/test
        random.shuffle(all_items)
        train_size = int(0.8 * len(all_items))
        val_size = int(0.1 * len(all_items))

        self.train_data = all_items[:train_size]
        self.validation_data = all_items[train_size:train_size + val_size]
        self.test_data = all_items[train_size + val_size:]

        print(f"Created {len(all_items)} sample items: {len(self.train_data)} train, "
              f"{len(self.validation_data)} validation, {len(self.test_data)} test")
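    # The header promises keeping the dataset under an 800k-token budget,
    # so here is a minimal sketch of that check. It assumes the optional
    # `tiktoken` package and uses the "o200k_base" encoding (the family
    # used by newer OpenAI models); treat the result as a rough estimate,
    # not a billing-exact count. Call it after create_sample_data(), e.g.
    # fine_tuner.estimate_dataset_tokens().
    def estimate_dataset_tokens(self, budget: int = 800_000) -> int:
        """Roughly estimate total training tokens and warn when over budget."""
        try:
            import tiktoken  # optional dependency, imported lazily
        except ImportError:
            print("tiktoken not installed; skipping token estimate")
            return -1

        enc = tiktoken.get_encoding("o200k_base")
        total = sum(
            len(enc.encode(message["content"]))
            for item in self.train_data
            for message in item["messages"]
        )
        if total > budget:
            print(f"Warning: ~{total} training tokens exceeds the {budget} budget")
        else:
            print(f"Estimated training tokens: ~{total} (budget: {budget})")
        return total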
    def save_training_files(self) -> tuple:
        """
        Save training and validation data to JSONL files and upload them.

        Returns:
            tuple: (train_file_id, validation_file_id)
        """
        # Save training data in the chat-format JSONL that fine-tuning expects
        with open('train_data.jsonl', 'w') as f:
            for item in self.train_data:
                f.write(json.dumps(item) + '\n')

        # Save validation data
        with open('validation_data.jsonl', 'w') as f:
            for item in self.validation_data:
                f.write(json.dumps(item) + '\n')

        # Upload files to OpenAI, closing the handles once each upload finishes
        with open('train_data.jsonl', 'rb') as f:
            train_file = self.client.files.create(file=f, purpose='fine-tune')

        with open('validation_data.jsonl', 'rb') as f:
            validation_file = self.client.files.create(file=f, purpose='fine-tune')

        print(f"Files uploaded: {train_file.id}, {validation_file.id}")
        return train_file.id, validation_file.id
    def start_fine_tuning(self, train_file_id: str, validation_file_id: str) -> str:
        """
        Start the fine-tuning job.

        Args:
            train_file_id: Training file ID
            validation_file_id: Validation file ID

        Returns:
            str: Fine-tuning job ID
        """
        print("Starting fine-tuning job...")

        job = self.client.fine_tuning.jobs.create(
            training_file=train_file_id,
            validation_file=validation_file_id,
            model="gpt-4.1-2025-04-14",
            hyperparameters={
                "n_epochs": 3
            }
        )

        print(f"Fine-tuning job started: {job.id}")
        return job.id
    def monitor_training(self, job_id: str) -> Optional[str]:
        """
        Monitor the fine-tuning job until completion.

        Args:
            job_id: Fine-tuning job ID

        Returns:
            Optional[str]: Model name if successful, None if failed or cancelled
        """
        while True:
            job = self.client.fine_tuning.jobs.retrieve(job_id)
            status = job.status

            print(f"Status: {status}")

            if status == "succeeded":
                model_name = job.fine_tuned_model
                print(f"Training completed! Model: {model_name}")
                return model_name
            elif status in ("failed", "cancelled"):
                print(f"Training did not complete ({status}): {job.error}")
                return None
            elif status in ("running", "validating_files", "queued"):
                print(f"Training in progress... ({status})")
            else:
                print(f"Unknown status: {status}")

            # Wait before polling again
            time.sleep(30)
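    # Optional companion to monitor_training(): a hedged sketch that prints
    # the most recent job events (loss reports, state changes) via the
    # fine-tuning events endpoint. This assumes the openai v1 client's
    # jobs.list_events method; event payloads vary across event types, so
    # only the timestamp and message text are shown here.
    def print_recent_events(self, job_id: str, limit: int = 10) -> None:
        """Print the most recent events for a fine-tuning job."""
        events = self.client.fine_tuning.jobs.list_events(
            fine_tuning_job_id=job_id,
            limit=limit,
        )
        for event in events.data:
            print(f"[{event.created_at}] {event.message}")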
    def evaluate_model(self, model_name: str) -> Dict[str, float]:
        """
        Evaluate the fine-tuned model on the held-out test set.

        Args:
            model_name: Name of the fine-tuned model

        Returns:
            Dict[str, float]: Evaluation metrics
        """
        print("Evaluating fine-tuned model...")

        correct_predictions = 0
        total_predictions = len(self.test_data)

        for item in self.test_data:
            try:
                # Reuse the system prompt the model was trained with, so
                # evaluation matches the training distribution
                system_message = item["messages"][0]["content"]
                user_message = item["messages"][1]["content"]
                expected_response = item["messages"][2]["content"]

                response = self.client.chat.completions.create(
                    model=model_name,
                    messages=[
                        {"role": "system", "content": system_message},
                        {"role": "user", "content": user_message}
                    ],
                    max_tokens=100
                )

                predicted_response = response.choices[0].message.content

                # Crude correctness check: does the response contain any of
                # the first five words of the expected answer?
                if any(word in predicted_response.lower()
                       for word in expected_response.lower().split()[:5]):
                    correct_predictions += 1

            except Exception as e:
                print(f"Prediction error: {e}")
                continue

        accuracy = correct_predictions / total_predictions if total_predictions > 0 else 0

        results = {
            "accuracy": accuracy,
            "correct_predictions": correct_predictions,
            "total_predictions": total_predictions
        }

        return results
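    # The keyword check above is deliberately crude: matching any of the
    # first five expected words counts as correct, so common words like
    # "offers" inflate the score. Below is a hedged alternative sketch that
    # scores Jaccard word-set overlap between the prediction and the full
    # expected answer; counting a response as correct when the overlap is,
    # say, >= 0.3 is an assumption, not a tuned threshold.
    @staticmethod
    def jaccard_overlap(predicted: str, expected: str) -> float:
        """Return the Jaccard word-set overlap between two strings."""
        pred_words = set(predicted.lower().split())
        exp_words = set(expected.lower().split())
        if not pred_words or not exp_words:
            return 0.0
        return len(pred_words & exp_words) / len(pred_words | exp_words)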
    def run_simple_evaluation(self) -> Dict[str, Any]:
        """
        Run a baseline evaluation of the base model, without fine-tuning.

        Returns:
            Dict[str, Any]: Evaluation results
        """
        print("Running baseline evaluation...")

        correct_predictions = 0
        total_predictions = min(10, len(self.test_data))

        for item in self.test_data[:total_predictions]:
            try:
                # Use the same system prompt as the training data so the
                # baseline is directly comparable to the fine-tuned model
                system_message = item["messages"][0]["content"]
                user_message = item["messages"][1]["content"]
                expected_response = item["messages"][2]["content"]

                response = self.client.chat.completions.create(
                    model="gpt-4.1-2025-04-14",
                    messages=[
                        {"role": "system", "content": system_message},
                        {"role": "user", "content": user_message}
                    ],
                    max_tokens=100
                )

                predicted_response = response.choices[0].message.content

                # Same crude keyword check as evaluate_model, for a fair comparison
                if any(word in predicted_response.lower()
                       for word in expected_response.lower().split()[:5]):
                    correct_predictions += 1

            except Exception as e:
                print(f"Prediction error: {e}")
                continue

        accuracy = correct_predictions / total_predictions if total_predictions > 0 else 0

        return {
            "baseline_accuracy": accuracy,
            "correct_predictions": correct_predictions,
            "total_predictions": total_predictions
        }
def main():
    """Main function to run the fine-tuning process"""
    print("Starting Simple Fine-Tuning Process")
    print("=" * 50)

    # Check API key
    api_key = os.getenv('OPENAI_API_KEY')
    if not api_key:
        print("OPENAI_API_KEY not found in environment")
        print("Set your API key: export OPENAI_API_KEY='your-key-here'")
        return

    try:
        # Initialize fine-tuner
        fine_tuner = SimpleFineTuner(api_key)

        print("\nStep 1: Creating sample data...")
        fine_tuner.create_sample_data(50)  # create 50 sample items
        fine_tuner.estimate_dataset_tokens()  # rough token check; skipped when tiktoken is absent

        print("\nStep 2: Saving training files...")
        train_file_id, validation_file_id = fine_tuner.save_training_files()

        print("\nStep 3: Starting fine-tuning...")
        job_id = fine_tuner.start_fine_tuning(train_file_id, validation_file_id)

        print("\nStep 4: Monitoring training...")
        model_name = fine_tuner.monitor_training(job_id)

        if model_name:
            print("\nStep 5: Evaluating model...")
            results = fine_tuner.evaluate_model(model_name)

            print("\nResults:")
            print(f"Accuracy: {results['accuracy']:.2%}")
            print(f"Correct predictions: {results['correct_predictions']}/{results['total_predictions']}")

            print("\nFine-tuning process completed successfully!")
            print("\nKey features implemented:")
            print(" - Simple data generation")
            print(" - Basic token management")
            print(" - Training monitoring")
            print(" - Model evaluation")
        else:
            print("\nFine-tuning failed")

    except Exception as e:
        print(f"\nError during fine-tuning: {e}")


if __name__ == "__main__":
    main()