diff --git a/week1/community-contributions/emmy/emmy_week1_EXERCISE.ipynb b/week1/community-contributions/emmy/emmy_week1_EXERCISE.ipynb
new file mode 100644
index 0000000..e6c9b1d
--- /dev/null
+++ b/week1/community-contributions/emmy/emmy_week1_EXERCISE.ipynb
@@ -0,0 +1,226 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "fe12c203-e6a6-452c-a655-afb8a03a4ff5",
+   "metadata": {},
+   "source": [
+    "# End of week 1 exercise\n",
+    "\n",
+    "To demonstrate your familiarity with the OpenAI API, and also Ollama, build a tool that takes a technical question, \n",
+    "and responds with an explanation. This is a tool that you will be able to use yourself during the course!"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "c1070317-3ed9-4659-abe3-828943230e03",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# imports\n",
+    "import os\n",
+    "from dotenv import load_dotenv\n",
+    "from openai import OpenAI\n",
+    "import ollama\n",
+    "import ipywidgets as widgets\n",
+    "from IPython.display import display, Markdown"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "4a456906-915a-4bfd-bb9d-57e505c5093f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# constants\n",
+    "\n",
+    "MODEL_GEMINI = \"gemini-2.5-flash\"\n",
+    "MODEL_LLAMA = \"llama3.1:8b\"\n",
+    "\n",
+    "CHOICE_GEMINI = \"gemini\"\n",
+    "CHOICE_OLLAMA = \"ollama\"\n",
+    "\n",
+    "SYSTEM_PROMPT = (\n",
+    "    \"You are a technical adviser. The student is learning LLM engineering \"\n",
+    "    \"and you will be asked to explain lines of code with an example, \"\n",
+    "    \"mostly in Python. \"\n",
+    "    \"You can answer other questions as well.\"\n",
+    ")\n",
+    "\n",
+    "GEMINI_BASE_URL = \"https://generativelanguage.googleapis.com/v1beta/openai/\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "a8d7923c-5f28-4c30-8556-342d7c8497c1",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# set up environment\n",
+    "load_dotenv(override=True)\n",
+    "google_api_key = os.getenv(\"GOOGLE_API_KEY\")\n",
+    "\n",
+    "if not google_api_key:\n",
+    "    print(\"Warning: GOOGLE_API_KEY not found. Gemini calls will fail.\")\n",
+    "    print(\"Please create a .env file with GOOGLE_API_KEY=your_key\")\n",
+    "\n",
+    "gemini_client = OpenAI(\n",
+    "    base_url=GEMINI_BASE_URL,\n",
+    "    api_key=google_api_key,\n",
+    ")\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "3f0d0137-52b0-47a8-81a8-11a90a010798",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# here is the question; type over this to ask something new\n",
+    "\n",
+    "question = \"\"\"\n",
+    "Please explain what this code does and why:\n",
+    "yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n",
+    "\"\"\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "60ce7000-a4a5-4cce-a261-e75ef45063b4",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def make_messages(user_question: str):\n",
+    "    return [\n",
+    "        {\"role\": \"system\", \"content\": SYSTEM_PROMPT},\n",
+    "        {\"role\": \"user\", \"content\": user_question},\n",
+    "    ]\n",
+    "\n",
+    "\n",
+    "def stream_gemini(messages):\n",
+    "    \"\"\"Stream the response from Gemini and return the collected text.\"\"\"\n",
+    "    stream = gemini_client.chat.completions.create(\n",
+    "        model=MODEL_GEMINI,\n",
+    "        messages=messages,\n",
+    "        stream=True,\n",
+    "    )\n",
+    "\n",
+    "    full = []\n",
+    "    for chunk in stream:\n",
+    "        piece = chunk.choices[0].delta.content or \"\"\n",
+    "        full.append(piece)\n",
+    "    return \"\".join(full)\n",
+    "\n",
+    "\n",
+    "def stream_ollama(messages):\n",
+    "    \"\"\"Stream the response from local Ollama and return the collected text.\"\"\"\n",
+    "    stream = ollama.chat(\n",
+    "        model=MODEL_LLAMA,\n",
+    "        messages=messages,\n",
+    "        stream=True,\n",
+    "    )\n",
+    "\n",
+    "    full = []\n",
+    "    for chunk in stream:\n",
+    "        piece = chunk[\"message\"][\"content\"]\n",
+    "        full.append(piece)\n",
+    "    return \"\".join(full)\n",
+    "\n",
+    "\n",
+    "def get_explanation(question: str, model_choice: str):\n",
+    "    \"\"\"Get a technical explanation of the question from the chosen model.\"\"\"\n",
+    "    messages = make_messages(question)\n",
+    "    try:\n",
+    "        if model_choice == CHOICE_GEMINI:\n",
+    "            return stream_gemini(messages)\n",
+    "        elif model_choice == CHOICE_OLLAMA:\n",
+    "            return stream_ollama(messages)\n",
+    "        else:\n",
+    "            print(\"Unknown model choice.\")\n",
+    "            return \"\"\n",
+    "    except Exception as e:\n",
+    "        print(f\"\\nAn error occurred: {e}\")\n",
+    "        return \"\"\n",
+    "\n",
+    "print(\"💡 Your personal technical tutor is ready.\\n\")\n",
+    "\n",
+    "# Dropdown for model selection\n",
+    "model_dropdown = widgets.Dropdown(\n",
+    "    options=[\n",
+    "        (\"Gemini (gemini-2.5-flash)\", CHOICE_GEMINI),\n",
+    "        (\"Ollama (llama3.1:8b)\", CHOICE_OLLAMA),\n",
+    "    ],\n",
+    "    value=CHOICE_GEMINI,\n",
+    "    description=\"Model:\",\n",
+    "    style={\"description_width\": \"initial\"},\n",
+    ")\n",
+    "\n",
+    "# Text input for question\n",
+    "question_box = widgets.Textarea(\n",
+    "    placeholder=\"Type your technical question here...\",\n",
+    "    description=\"Question:\",\n",
+    "    layout=widgets.Layout(width=\"100%\", height=\"100px\"),\n",
+    "    style={\"description_width\": \"initial\"},\n",
+    ")\n",
+    "\n",
+    "submit_button = widgets.Button(description=\"Ask\", button_style=\"success\", icon=\"paper-plane\")\n",
+    "\n",
+    "output_area = widgets.Output()\n",
+    "loader_label = widgets.Label(value=\"\")\n",
+    "\n",
+    "def on_submit(_):\n",
+    "    output_area.clear_output()\n",
+    "    question = question_box.value.strip()\n",
+    "    if not question:\n",
+    "        with output_area:\n",
+    "            print(\"Please enter a question.\")\n",
+    "        return\n",
+    "\n",
+    "    loader_label.value = \"⏳ Thinking...\"\n",
+    "    submit_button.disabled = True\n",
+    "\n",
+    "    answer = get_explanation(question, model_dropdown.value)\n",
+    "\n",
+    "    loader_label.value = \"\"\n",
+    "    submit_button.disabled = False\n",
+    "\n",
+    "    with output_area:\n",
+    "        print(f\"🤖 Model: {model_dropdown.label}\")\n",
+    "        print(f\"📜 Question: {question}\\n\")\n",
+    "        display(Markdown(answer))\n",
+    "        print(\"\\n--- End of response ---\")\n",
+    "\n",
+    "submit_button.on_click(on_submit)\n",
+    "\n",
+    "# Display everything\n",
+    "display(model_dropdown, question_box, submit_button, loader_label, output_area)"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "llm-engineering (3.12.10)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.10"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
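Note on the streaming helpers in the notebook above: stream_gemini and stream_ollama open streaming connections but only return the joined text after the stream has finished, so nothing is shown until the whole answer is ready. A minimal sketch of how the chunks could instead be rendered incrementally in the notebook, assuming the gemini_client, MODEL_GEMINI, and make_messages defined in the cells above; stream_gemini_live is a hypothetical helper name, not part of this submission:

from IPython.display import Markdown, display, update_display

def stream_gemini_live(messages):
    # Hypothetical variant: re-render the Markdown output as each chunk arrives,
    # reusing gemini_client and MODEL_GEMINI from the earlier cells.
    stream = gemini_client.chat.completions.create(
        model=MODEL_GEMINI,
        messages=messages,
        stream=True,
    )
    text = ""
    handle = display(Markdown(""), display_id=True)  # placeholder that is updated in place
    for chunk in stream:
        text += chunk.choices[0].delta.content or ""
        update_display(Markdown(text), display_id=handle.display_id)
    return text

The same loop shape would work for the Ollama helper, accumulating chunk["message"]["content"] instead of the OpenAI-style delta.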