Merge pull request #865 from joeymaza/jmz/lab1_exercises

Week 1 Exercise - AI Tutor
This commit is contained in:
Ed Donner
2025-11-01 20:26:22 -04:00
committed by GitHub

View File

@@ -0,0 +1,172 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "712506d5",
"metadata": {},
"source": [
"This is my week 1 exercise experiment.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3058139d",
"metadata": {},
"outputs": [],
"source": [
"#Imports\n",
"\n",
"import os\n",
"\n",
"from dotenv import load_dotenv\n",
"from IPython.display import Markdown, display, update_display\n",
"from openai import OpenAI"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dd4d9f32",
"metadata": {},
"outputs": [],
"source": [
"# Constants and client initialization\n",
"\n",
"# Load .env BEFORE constructing the OpenAI client: the client reads\n",
"# OPENAI_API_KEY from the environment at construction time, so on a\n",
"# fresh 'Restart Kernel -> Run All' the key must already be loaded here\n",
"# (the later key-check cell also calls load_dotenv; that is idempotent).\n",
"load_dotenv(override=True)\n",
"\n",
"MODEL_GPT = 'gpt-4o-mini'\n",
"MODEL_LLAMA = 'llama3.2'\n",
"OLLAMA_BASE_URL = \"http://localhost:11434/v1\"\n",
"\n",
"openai = OpenAI()\n",
"# Ollama exposes an OpenAI-compatible endpoint; the api_key value is a\n",
"# placeholder the local server ignores.\n",
"ollama = OpenAI(base_url=OLLAMA_BASE_URL, api_key='ollama')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0199945b",
"metadata": {},
"outputs": [],
"source": [
"# Sanity-check the OpenAI API key loaded from .env\n",
"\n",
"load_dotenv(override=True)\n",
"api_key = os.getenv('OPENAI_API_KEY')\n",
"\n",
"# A project key should start with 'sk-proj-' and be non-trivially long.\n",
"key_looks_valid = bool(api_key) and api_key.startswith('sk-proj-') and len(api_key) > 10\n",
"\n",
"if key_looks_valid:\n",
"    print(\"API key looks good so far\")\n",
"else:\n",
"    print(\"There might be a problem with your API key? Please visit the troubleshooting notebook!\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a671fa0f",
"metadata": {},
"outputs": [],
"source": [
"# Prompts\n",
"\n",
"# System prompt: frames the model as a coding tutor for a beginner.\n",
"system_prompt = \"\"\"\n",
"You are a senior software coding master. \n",
"You will help explain an input of code, check if there are errors and correct them.\n",
"Show how this code works and suggest other ways of writing this code efficiently if there is an alternative.\n",
"Respond to a user who is a beginner. \"\"\"\n",
"\n",
"# User question: the code snippet the tutor is asked to explain.\n",
"question = \"\"\"\n",
"Please explain what this code does and why:\n",
"yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n",
"Show some examples on the use of this code.\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1fbc6aa5",
"metadata": {},
"outputs": [],
"source": [
"# Function to stream a response from the OpenAI API into the notebook\n",
"\n",
"def code_examiner_stream(question):\n",
"    \"\"\"Stream GPT's answer to `question`, rendering it live as Markdown.\n",
"\n",
"    Returns the full accumulated response text once streaming completes,\n",
"    so callers can reuse the answer programmatically.\n",
"    \"\"\"\n",
"    stream = openai.chat.completions.create(\n",
"        model=MODEL_GPT,\n",
"        messages=[\n",
"            {\"role\": \"system\", \"content\": system_prompt},\n",
"            {\"role\": \"user\", \"content\": question}\n",
"        ],\n",
"        stream=True\n",
"    )\n",
"    response = \"\"\n",
"    # display_id lets us repaint the same output area as chunks arrive\n",
"    display_handle = display(Markdown(\"\"), display_id=True)\n",
"    for chunk in stream:\n",
"        # delta.content is None on some chunks (e.g. role-only deltas)\n",
"        response += chunk.choices[0].delta.content or ''\n",
"        update_display(Markdown(response), display_id=display_handle.display_id)\n",
"    return response"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "07d93dba",
"metadata": {},
"outputs": [],
"source": [
"# Ask GPT to explain the snippet (output streams in as Markdown)\n",
"\n",
"code_examiner_stream(question)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "fb7184cb",
"metadata": {},
"outputs": [],
"source": [
"# Function for Ollama (running locally) to respond with output\n",
"\n",
"def code_examiner_ollama(question):\n",
"    \"\"\"Ask the local Ollama model to answer `question` (non-streaming).\n",
"\n",
"    Renders the reply as Markdown and returns the response text so\n",
"    callers can reuse the answer programmatically.\n",
"    \"\"\"\n",
"    response = ollama.chat.completions.create(\n",
"        model=MODEL_LLAMA,\n",
"        messages=[\n",
"            {\"role\": \"system\", \"content\": system_prompt},\n",
"            {\"role\": \"user\", \"content\": question}\n",
"        ],\n",
"    )\n",
"    result = response.choices[0].message.content\n",
"    display(Markdown(result))\n",
"    return result"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Ask the local Ollama model the same question for comparison\n",
"\n",
"code_examiner_ollama(question)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}