Merge pull request #568 from hafizabrarshah/week1_excercise_tutor

Added a new notebook for week 1 Exercise.
This commit is contained in:
Ed Donner
2025-08-09 08:01:10 -04:00
committed by GitHub

View File

@@ -0,0 +1,209 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "a7cb82f0-bcf2-4fca-84e4-67144594ff2e",
"metadata": {},
"source": [
"End of Week 1 Exercise\n",
"\n",
"This notebook demonstrates how to interact with large language models using both OpenAI and Ollama APIs. Based on the user's input and selected model, the notebook routes the message to the corresponding backend and returns the generated response. It's a simple yet flexible interface to explore and compare model behavior across different providers."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "eea15c09-c949-4f30-a23b-02130305ff00",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"import os\n",
"import requests  # used for the raw Ollama HTTP chat endpoint\n",
"from dotenv import load_dotenv\n",
"\n",
"from IPython.display import Markdown, display, update_display\n",
"from openai import OpenAI"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ac06d3f8-215c-4474-a16b-f5a9980f18b5",
"metadata": {},
"outputs": [],
"source": [
"# constants\n",
"\n",
"# Model names for each backend: OpenAI chat model and local Ollama model\n",
"MODEL_GPT = 'gpt-4o-mini'\n",
"MODEL_LLAMA = 'llama3.2'"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0a768729-d38e-489f-b572-2af356e6ae78",
"metadata": {},
"outputs": [],
"source": [
"# set up environment\n",
"load_dotenv(override=True)\n",
"api_key = os.getenv(\"OPENAI_API_KEY\")\n",
"\n",
"# sanity-check the key so a missing .env surfaces here, not on the first API call\n",
"if not api_key:\n",
"    print(\"No OPENAI_API_KEY found - please check your .env file\")\n",
"\n",
"# set up clients\n",
"openai = OpenAI()\n",
"ollama_url = \"http://localhost:11434/api/chat\"\n",
"ollama_headers = {\"Content-Type\": \"application/json\"}\n",
"# ollama = OpenAI(base_url=\"http://localhost:11434/v1\" , api_key=\"ollama\")\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f9572d7b-f51c-47c3-9651-56a4106d6f19",
"metadata": {},
"outputs": [],
"source": [
"# here is the question; type over this to ask something new\n",
"\n",
"# Fallback question used by get_user_prompt when the user supplies nothing\n",
"default_question = \"\"\"\n",
"Please explain what this code does and why:\n",
"yield from {word for s in sentences for word in s.split()}\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "10dabd26-a22f-4f9f-9046-6e76b1c9e5e4",
"metadata": {},
"outputs": [],
"source": [
"# Here is the System Prompt\n",
"\n",
"system_prompt = \"\"\"\n",
"You are an expert tutor who explains complex topics in simple, clear, and engaging ways. Tailor your teaching style to the \n",
"learner's level of knowledge and preferred learning pace. Encourage critical thinking, provide examples, \n",
"and ask occasional questions to check understanding. Avoid giving direct answers when guiding \n",
"problem-solving — instead, offer hints or break the problem into steps. Be friendly, patient, and always supportive. \n",
"Adapt your explanations based on feedback or confusion. When asked for code, equations, or definitions, \n",
"provide them in a structured, easy-to-understand format.\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "83840afa-6445-404a-b922-2acadf228ade",
"metadata": {},
"outputs": [],
"source": [
"# Here is the User Prompt (prefix prepended to the question by get_user_prompt)\n",
"user_prompt = \"Please give a detailed explanation to the following question: \" "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "570917cc-1f4d-4ca7-b846-f8952aa6c4a0",
"metadata": {},
"outputs": [],
"source": [
"# Build the chat messages for a question\n",
"\n",
"def get_user_prompt(question=None):\n",
"    \"\"\"Return the chat messages (system + user) for the given question.\n",
"\n",
"    Falls back to default_question when question is None or empty.\n",
"    \"\"\"\n",
"    if not question:\n",
"        print(f\"No question was provided, so I'm going to ask a default question: {default_question}\")\n",
"        question = default_question\n",
"    return [\n",
"        {\"role\": \"system\", \"content\": system_prompt},\n",
"        {\"role\": \"user\", \"content\": user_prompt + question}\n",
"    ]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "aed48452-3029-482e-8175-e6acfce5b08d",
"metadata": {},
"outputs": [],
"source": [
"# Get Llama 3.2 to answer (non-streaming, via the local Ollama chat API)\n",
"def get_answer_from_ollama(question=None):\n",
"    \"\"\"Send the question to the local Ollama server and display the answer as Markdown.\"\"\"\n",
"    messages = get_user_prompt(question)\n",
"\n",
"    data = {\n",
"        \"model\": MODEL_LLAMA,\n",
"        \"messages\": messages,\n",
"        \"stream\": False\n",
"    }\n",
"\n",
"    response = requests.post(ollama_url, headers=ollama_headers, json=data)\n",
"    # Fail loudly if Ollama is down or errored, rather than a confusing KeyError below\n",
"    response.raise_for_status()\n",
"    display(Markdown(response.json()['message']['content']))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "374b680a-7994-4636-8939-ab591314f8d6",
"metadata": {},
"outputs": [],
"source": [
"# Get gpt-4o-mini to answer, with streaming\n",
"\n",
"def get_answer_from_openai(question=None):\n",
"    \"\"\"Stream the answer from OpenAI and live-update a Markdown display as chunks arrive.\"\"\"\n",
"    messages = get_user_prompt(question)\n",
"    stream = openai.chat.completions.create(model=MODEL_GPT, messages=messages,stream=True)\n",
"    \n",
"    response = \"\"\n",
"    display_handle = display(Markdown(\"\"), display_id=True)\n",
"    for chunk in stream:\n",
"        response += chunk.choices[0].delta.content or ''\n",
"        # Strip code fences so partially streamed markdown renders cleanly.\n",
"        # NOTE(review): this also removes the literal word \"markdown\" anywhere\n",
"        # in the answer text - confirm that is intended\n",
"        response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n",
"        update_display(Markdown(response), display_id=display_handle.display_id)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "69406774-33f1-4c67-a8ef-af92567f29a7",
"metadata": {},
"outputs": [],
"source": [
"# Ask the user for a question and a model, then route to the matching backend\n",
"question = input(\"What's the question you want to ask?\")\n",
"\n",
"# strip() so stray whitespace in the choice doesn't fall through to the error branch\n",
"model_choice = input(\"Please choose a model to use (GPT or Llama): \").strip()\n",
"\n",
"if model_choice.lower() == \"gpt\":\n",
"    get_answer_from_openai(question)\n",
"elif model_choice.lower() == \"llama\":\n",
"    get_answer_from_ollama(question)\n",
"else:\n",
"    print(\"Choose the correct model name (GPT or Llama)\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}