Update Week 6 Day 5 Notebook to make use of WandB sync.

Carlos Bazaga
2025-08-12 01:26:22 +02:00
parent 54cd9cb24d
commit c663379828


@@ -149,7 +149,7 @@
"source": [
"# First let's work on a good prompt for a Frontier model\n",
"# Notice that I'm removing the \" to the nearest dollar\"\n",
"# When we train our own models, we'll need to make the problem as easy as possible, \n",
"# When we train our own models, we'll need to make the problem as easy as possible,\n",
"# but a Frontier model needs no such simplification.\n",
"\n",
"def messages_for(item):\n",
@@ -393,6 +393,22 @@
"openai.fine_tuning.jobs.list_events(fine_tuning_job_id=job_id, limit=10).data"
]
},
+{
+"cell_type": "code",
+"execution_count": null,
+"id": "b19ea9e9",
+"metadata": {},
+"outputs": [],
+"source": [
+"import wandb\n",
+"from wandb.integration.openai.fine_tuning import WandbLogger\n",
+"\n",
+"# Log in to Weights & Biases.\n",
+"wandb.login()\n",
+"# Sync the fine-tuning job with Weights & Biases.\n",
+"WandbLogger.sync(fine_tune_job_id=job_id, project=\"gpt-pricer\")"
+]
+},
{
"cell_type": "markdown",
"id": "066fef03-8338-4526-9df3-89b649ad4f0a",
@@ -490,7 +506,7 @@
"\n",
"def gpt_fine_tuned(item):\n",
" response = openai.chat.completions.create(\n",
" model=fine_tuned_model_name, \n",
" model=fine_tuned_model_name,\n",
" messages=messages_for(item),\n",
" seed=42,\n",
" max_tokens=7\n",