{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Week 1 Exercise Solution - Technical Question Answerer\n",
    "\n",
    "This is my solution to the Week 1 exercise: a tool that takes a technical question and answers it with explanations from both OpenAI and Ollama.\n",
    "\n",
    "## Features Implemented:\n",
    "- OpenAI GPT-4o-mini integration with streaming\n",
    "- Ollama Llama 3.2 integration\n",
    "- Side-by-side comparison of responses\n",
    "- Technical question answering functionality\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Week 1 Exercise Solution - Imports and Setup\n",
    "import os\n",
    "from dotenv import load_dotenv\n",
    "from openai import OpenAI\n",
    "from IPython.display import Markdown, display, update_display\n",
    "import ollama\n",
    "\n",
    "# Load environment variables\n",
    "load_dotenv(override=True)\n",
    "\n",
    "# Initialize OpenAI client\n",
    "openai = OpenAI()\n",
    "\n",
    "# Constants\n",
    "MODEL_GPT = 'gpt-4o-mini'\n",
    "MODEL_LLAMA = 'llama3.2'\n",
    "\n",
    "print(\"Setup complete! Ready to answer technical questions.\")\n"
   ]
  },
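  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A quick optional check before making any API calls: confirm that `load_dotenv` actually found an `OPENAI_API_KEY` in the `.env` file.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional sanity check: verify the OpenAI API key loaded from .env\n",
    "api_key = os.getenv('OPENAI_API_KEY')\n",
    "if api_key:\n",
    "    print(f\"API key found, starting with: {api_key[:8]}...\")\n",
    "else:\n",
    "    print(\"No OPENAI_API_KEY found - check your .env file\")\n"
   ]
  },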
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Technical Question - You can modify this\n",
    "question = \"\"\"\n",
    "Please explain what this code does and why:\n",
    "yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n",
    "\"\"\"\n",
    "\n",
    "print(\"Question to analyze:\")\n",
    "print(question)\n"
   ]
  },
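  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Before sending the question to the models, here is the snippet running on a small `books` list, so their explanations can be checked against actual output. The sample data below is made up purely for illustration.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Run the snippet from the question on made-up sample data\n",
    "sample_books = [\n",
    "    {\"title\": \"Book A\", \"author\": \"Alice\"},\n",
    "    {\"title\": \"Book B\", \"author\": \"Bob\"},\n",
    "    {\"title\": \"Book C\"},                    # no author key\n",
    "    {\"title\": \"Book D\", \"author\": \"Alice\"}  # duplicate author\n",
    "]\n",
    "\n",
    "def unique_authors(books):\n",
    "    # The set comprehension collects each distinct author once,\n",
    "    # skipping books without an author; yield from then yields\n",
    "    # the set's elements one at a time, making this a generator.\n",
    "    yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n",
    "\n",
    "print(sorted(unique_authors(sample_books)))  # ['Alice', 'Bob']\n"
   ]
  },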
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# OpenAI GPT-4o-mini Response with Streaming\n",
    "def get_gpt_response(question):\n",
    "    \"\"\"Get response from GPT-4o-mini with streaming\"\"\"\n",
    "    print(\"🤖 Getting response from GPT-4o-mini...\")\n",
    "\n",
    "    stream = openai.chat.completions.create(\n",
    "        model=MODEL_GPT,\n",
    "        messages=[\n",
    "            {\"role\": \"system\", \"content\": \"You are a helpful programming tutor. Explain code clearly and concisely.\"},\n",
    "            {\"role\": \"user\", \"content\": question}\n",
    "        ],\n",
    "        stream=True\n",
    "    )\n",
    "\n",
    "    response = \"\"\n",
    "    display_handle = display(Markdown(\"\"), display_id=True)\n",
    "\n",
    "    for chunk in stream:\n",
    "        if chunk.choices[0].delta.content:\n",
    "            response += chunk.choices[0].delta.content\n",
    "            update_display(Markdown(f\"## GPT-4o-mini Response:\\n\\n{response}\"), display_id=display_handle.display_id)\n",
    "\n",
    "    return response\n",
    "\n",
    "# Get GPT response\n",
    "gpt_response = get_gpt_response(question)\n"
   ]
  },
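  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Ollama runs locally, so before calling it I check that the server is actually reachable. This assumes the default local setup: the server started with `ollama serve` and the model pulled with `ollama pull llama3.2`.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional: confirm the local Ollama server is up before querying it.\n",
    "# Assumes the default local Ollama installation on this machine.\n",
    "try:\n",
    "    models = ollama.list()\n",
    "    print(\"Ollama server is running. Installed models:\")\n",
    "    print(models)\n",
    "except Exception as e:\n",
    "    print(f\"Could not reach Ollama - is the server running? ({e})\")\n"
   ]
  },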
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Ollama Llama 3.2 Response\n",
    "def get_ollama_response(question):\n",
    "    \"\"\"Get response from Ollama Llama 3.2\"\"\"\n",
    "    print(\"🦙 Getting response from Ollama Llama 3.2...\")\n",
    "\n",
    "    try:\n",
    "        response = ollama.chat(\n",
    "            model=MODEL_LLAMA,\n",
    "            messages=[\n",
    "                {\"role\": \"system\", \"content\": \"You are a helpful programming tutor. Explain code clearly and concisely.\"},\n",
    "                {\"role\": \"user\", \"content\": question}\n",
    "            ]\n",
    "        )\n",
    "\n",
    "        llama_response = response['message']['content']\n",
    "        display(Markdown(f\"## Llama 3.2 Response:\\n\\n{llama_response}\"))\n",
    "        return llama_response\n",
    "\n",
    "    except Exception as e:\n",
    "        error_msg = f\"Error with Ollama: {e}\"\n",
    "        print(error_msg)\n",
    "        display(Markdown(f\"## Llama 3.2 Response:\\n\\n{error_msg}\"))\n",
    "        return error_msg\n",
    "\n",
    "# Get Ollama response\n",
    "llama_response = get_ollama_response(question)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Comparison and Analysis\n",
    "def compare_responses(gpt_response, llama_response):\n",
    "    \"\"\"Compare the responses from both models\"\"\"\n",
    "    print(\"📊 Comparing responses...\")\n",
    "\n",
    "    comparison = f\"\"\"\n",
    "## Response Comparison\n",
    "\n",
    "### GPT-4o-mini Response Length: {len(gpt_response)} characters\n",
    "### Llama 3.2 Response Length: {len(llama_response)} characters\n",
    "\n",
    "### Key Differences (observed in my runs):\n",
    "- **GPT-4o-mini**: more detailed and structured explanation\n",
    "- **Llama 3.2**: more concise and direct approach\n",
    "\n",
    "Both models explained the code correctly, just with different styles and levels of detail.\n",
    "\"\"\"\n",
    "\n",
    "    display(Markdown(comparison))\n",
    "\n",
    "# Compare the responses\n",
    "compare_responses(gpt_response, llama_response)\n"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}