diff --git a/week1/community-contributions/week1_exercise_gpt_llama_teachers.ipynb b/week1/community-contributions/week1_exercise_gpt_llama_teachers.ipynb
new file mode 100644
index 0000000..9b122be
--- /dev/null
+++ b/week1/community-contributions/week1_exercise_gpt_llama_teachers.ipynb
@@ -0,0 +1,202 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "fe12c203-e6a6-452c-a655-afb8a03a4ff5",
+   "metadata": {},
+   "source": [
+    "# End of week 1 exercise\n",
+    "\n",
+    "To demonstrate your familiarity with the OpenAI API, and also Ollama, build a tool that takes a technical question \n",
+    "and responds with an explanation. This is a tool that you will be able to use yourself during the course!"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "51d1bbb7-d56a-4483-935f-480f8e22546f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# imports\n",
+    "import os\n",
+    "from dotenv import load_dotenv\n",
+    "from IPython.display import Markdown, display, update_display\n",
+    "from openai import OpenAI\n",
+    "import ollama"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "4a456906-915a-4bfd-bb9d-57e505c5093f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# constants\n",
+    "\n",
+    "MODEL_GPT = 'gpt-4o-mini'\n",
+    "MODEL_LLAMA = 'llama3.2'"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "a8d7923c-5f28-4c30-8556-342d7c8497c1",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# set up environment\n",
+    "load_dotenv(override=True)\n",
+    "api_key = os.getenv('OPENAI_API_KEY')\n",
+    "\n",
+    "if api_key and api_key.startswith('sk-proj-') and len(api_key) > 10:\n",
+    "    print(\"API key looks good so far\")\n",
+    "else:\n",
+    "    print(\"There might be a problem with your API key. Please visit the troubleshooting notebook!\")\n",
+    "\n",
+    "openai = OpenAI()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "bd61eb75-be6a-46d6-8aeb-84c1eeeac04f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# read the user question\n",
+    "def user_question_reader() -> str:\n",
+    "    input_text = \"Hello! I’m your AI Teacher, ready to help you explore any topic you’re curious about.\\n\"\n",
+    "    input_text += \"I have access to a vast amount of knowledge and will do my best to explain things clearly, no matter your experience level.\\n\\n\"\n",
+    "    input_text += \"What would you like to learn about today?\\n\"\n",
+    "\n",
+    "    question = input(input_text)\n",
+    "\n",
+    "    return question"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "6bd9b592-e398-4637-9188-bfdf8dd6bf75",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# generate the user prompt\n",
+    "def user_prompt_generator() -> str:\n",
+    "    question = user_question_reader()\n",
+    "    user_prompt = f\"I need you to answer this question: {question}.\\n\"\n",
+    "    user_prompt += \" Take into account that I don't have prior knowledge about my question, \\\n",
+    "    so I want the answer to be as complete as possible. Also, please provide it in Markdown.\"\n",
+    "\n",
+    "    return user_prompt"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9ffda047-fec3-4d9e-97b0-46f428ac9313",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# define the system prompt\n",
+    "system_prompt = \"Your job is to be a teacher. You have access to all the knowledge \\\n",
+    "    on the internet. You will be thankful for any question given to you and \\\n",
+    "    will try to answer it the best you can. Your students might know little to nothing \\\n",
+    "    about what they ask and may make mistakes, so you will have to think about the meaning of their question \\\n",
+    "    first and provide an answer according to the meaning behind it.\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "60ce7000-a4a5-4cce-a261-e75ef45063b4",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# define gpt-4o-mini function to answer, with streaming\n",
+    "def gpt_teacher():\n",
+    "    stream = openai.chat.completions.create(\n",
+    "        model=MODEL_GPT,\n",
+    "        messages=[\n",
+    "            {\"role\": \"system\", \"content\": system_prompt},\n",
+    "            {\"role\": \"user\", \"content\": user_prompt_generator()}\n",
+    "        ],\n",
+    "        stream=True\n",
+    "    )\n",
+    "\n",
+    "    response = \"\"\n",
+    "    display_handle = display(Markdown(\"\"), display_id=True)\n",
+    "\n",
+    "    for chunk in stream:\n",
+    "        response += chunk.choices[0].delta.content or ''\n",
+    "        response = response.replace(\"```\", \"\").replace(\"markdown\", \"\")\n",
+    "        update_display(Markdown(response), display_id=display_handle.display_id)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "8f7c8ea8-4082-4ad0-8751-3301adcf6538",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# define Llama 3.2 function to answer\n",
+    "def llama_teacher():\n",
+    "    response = ollama.chat(\n",
+    "        model=MODEL_LLAMA,\n",
+    "        messages=[\n",
+    "            {\"role\": \"system\", \"content\": system_prompt},\n",
+    "            {\"role\": \"user\", \"content\": user_prompt_generator()}\n",
+    "        ]\n",
+    "    )\n",
+    "\n",
+    "    return display(Markdown(response['message']['content']))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "20d963d4-f4ce-4979-b8c7-0db6ebcec96c",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# try the gpt teacher\n",
+    "gpt_teacher()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "d62ca06f-c808-43ee-9ecd-5a704ffcd5c1",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# try the Ollama teacher\n",
+    "llama_teacher()"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.11.13"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}