diff --git a/week2/community-contributions/kachaje/week2-exercise.ipynb b/week2/community-contributions/kachaje/week2-exercise.ipynb new file mode 100644 index 0000000..67259fc --- /dev/null +++ b/week2/community-contributions/kachaje/week2-exercise.ipynb @@ -0,0 +1,218 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "4df365ad", + "metadata": {}, + "source": [ + "# Week 2 Exercise\n", + "\n", + "## Objective:\n", + "\n", + "Demonstrate what has been learnt in week 2 by upgrading the week 1 project to have a UI using Gradio. Expected to include streaming and use of system prompts to add expertise and ability to switch between models. \n", + "Bonus points if use of a tool can also be demonstrated.\n", + "Audio input with audio output is also a bonus." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9ac344b4", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "from dotenv import load_dotenv\n", + "import gradio as gr\n", + "import anthropic\n", + "import google.generativeai as genai\n", + "from openai import OpenAI" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cf272f10", + "metadata": {}, + "outputs": [], + "source": [ + "load_dotenv(override=True)\n", + "\n", + "# Set up the Anthropic API key\n", + "anthropic_api_key = os.getenv(\"ANTHROPIC_API_KEY\")\n", + "if anthropic_api_key:\n", + "    print(f\"Anthropic API key set and begins with: {anthropic_api_key[:6]}...\")\n", + "\n", + "# Set up the Google API key\n", + "google_api_key = os.getenv(\"GOOGLE_API_KEY\")\n", + "if google_api_key:\n", + "    print(f\"Google API key set and begins with: {google_api_key[:6]}...\")\n", + "\n", + "openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')\n", + "\n", + "anthropic_url = \"https://api.anthropic.com/v1/\"\n", + "gemini_url = \"https://generativelanguage.googleapis.com/v1beta/openai/\"\n", + "\n", + "anthropic = OpenAI(api_key=anthropic_api_key, 
base_url=anthropic_url)\n", + "gemini = OpenAI(api_key=google_api_key, base_url=gemini_url)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "77b67726", + "metadata": {}, + "outputs": [], + "source": [ + "# models\n", + "\n", + "MODEL_LLAMA=\"llama3.2\"\n", + "MODEL_ANTHROPIC=\"claude-sonnet-4-5-20250929\"\n", + "MODEL_GOOGLE=\"gemini-2.5-flash\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9fe4a2f3", + "metadata": {}, + "outputs": [], + "source": [ + "system_message = \"\"\"\n", + "You are an expert software engineer.\n", + "You are given a technical question and you need to explain what the code does and why.\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9afdce10", + "metadata": {}, + "outputs": [], + "source": [ + "MODEL=MODEL_LLAMA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "62d0135e", + "metadata": {}, + "outputs": [], + "source": [ + "def stream_llama(message):\n", + " history = []\n", + " history = [{\"role\":h[\"role\"], \"content\":h[\"content\"]} for h in history]\n", + " \n", + " messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n", + "\n", + " stream = openai.chat.completions.create(model=MODEL_LLAMA, messages=messages, stream=True)\n", + "\n", + " response = \"\"\n", + " for chunk in stream:\n", + " response += chunk.choices[0].delta.content or ''\n", + " yield response\n", + "\n", + "def stream_claude(message):\n", + " history = []\n", + " history = [{\"role\":h[\"role\"], \"content\":h[\"content\"]} for h in history]\n", + " \n", + " messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n", + "\n", + " stream = anthropic.chat.completions.create(model=MODEL_ANTHROPIC, messages=messages, stream=True)\n", + "\n", + " response = \"\"\n", + " for chunk in stream:\n", + " response += 
chunk.choices[0].delta.content or ''\n", + " yield response\n", + " \n", + "def stream_gemini(message):\n", + " history = []\n", + " history = [{\"role\":h[\"role\"], \"content\":h[\"content\"]} for h in history]\n", + " \n", + " messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n", + "\n", + " stream = gemini.chat.completions.create(model=MODEL_GOOGLE, messages=messages, stream=True)\n", + "\n", + " response = \"\"\n", + " for chunk in stream:\n", + " response += chunk.choices[0].delta.content or ''\n", + " yield response\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3fec5ce3", + "metadata": {}, + "outputs": [], + "source": [ + "def stream_model(prompt, model):\n", + " print(f\"Prompt: {prompt}, Model: {model}\")\n", + "\n", + " if model==\"Llama\":\n", + " result = stream_llama(prompt)\n", + " elif model==\"Claude\":\n", + " result = stream_claude(prompt)\n", + " elif model==\"Gemini\":\n", + " result = stream_gemini(prompt)\n", + " else:\n", + " raise ValueError(\"Unknown model\")\n", + " yield from result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8f3db610", + "metadata": {}, + "outputs": [], + "source": [ + "question_input = gr.Textbox(label=\"Your message:\", info=\"Enter a question\", lines=7)\n", + "model_selector = gr.Dropdown(choices=[\"Llama\", \"Claude\", \"Gemini\"], value=\"Llama\", label=\"Model\") \n", + "message_output = gr.Markdown(label=\"Response:\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1428a4a8", + "metadata": {}, + "outputs": [], + "source": [ + "view = gr.Interface(\n", + " fn=stream_model, \n", + " inputs=[question_input, model_selector], \n", + " outputs=message_output,\n", + " flagging_mode=\"never\"\n", + " )\n", + "\n", + "view.launch(inbrowser=True)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + 
"language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.10" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +}