diff --git a/week1/community-contributions/WEEK-1 EXERCISE - Hope Ogbons/week1 EXERCISE.ipynb b/week1/community-contributions/WEEK-1 EXERCISE - Hope Ogbons/week1 EXERCISE.ipynb
new file mode 100644
index 0000000..dede835
--- /dev/null
+++ b/week1/community-contributions/WEEK-1 EXERCISE - Hope Ogbons/week1 EXERCISE.ipynb
@@ -0,0 +1,319 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "fe12c203-e6a6-452c-a655-afb8a03a4ff5",
+   "metadata": {},
+   "source": [
+    "# Instant LLM Term Explanations with a Live LLM\n",
+    "### Non-LLM Detection & Color-Coded Answers\n",
+    "\n",
+    "As an LLM student, I wanted a lightweight tool to speed up learning: one that explains technical terms concisely, flags whether a question falls within the LLM domain, and says so plainly when it does not. The interface color-codes answers (green for LLM topics, orange otherwise) and adds a short note so you can quickly confirm or correct your assumptions."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 21,
+   "id": "c1070317-3ed9-4659-abe3-828943230e03",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "True"
+      ]
+     },
+     "execution_count": 21,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "# imports\n",
+    "\n",
+    "import os\n",
+    "import json\n",
+    "from openai import OpenAI\n",
+    "from dotenv import load_dotenv\n",
+    "from IPython.display import display, HTML\n",
+    "\n",
+    "# Load env (override=True lets values in .env replace variables already set in the environment)\n",
+    "load_dotenv(override=True)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 22,
+   "id": "4a456906-915a-4bfd-bb9d-57e505c5093f",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "✅ llama3.2 already exists\n"
+     ]
+    }
+   ],
+   "source": [
+    "# constants\n",
+    "\n",
+    "MODEL_GPT = 'gpt-4o-mini'\n",
+    "MODEL_LLAMA = 'llama3.2'  # Ollama cloud models also work out of the box, e.g. 'gpt-oss:120b-cloud'\n",
+    "\n",
+    "# Pull the model if it is not already available locally\n",
+    "!ollama list | grep -q \"$MODEL_LLAMA\" && echo \"✅ $MODEL_LLAMA already exists\" || (echo \"🚀 Pulling $MODEL_LLAMA model...\" && echo \"⏳ This may take a few minutes depending on size...\" && ollama pull \"$MODEL_LLAMA\" && echo \"✅ Download completed! $MODEL_LLAMA is ready.\")\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 23,
+   "id": "acb15ca5",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# environment\n",
+    "\n",
+    "OPENAI_API_KEY = os.getenv(\"OPENAI_API_KEY\")\n",
+    "OLLAMA_API_KEY = os.getenv(\"OLLAMA_API_KEY\")\n",
+    "\n",
+    "# An OpenAI-compatible endpoint, e.g. http://localhost:11434/v1 for a local Ollama server\n",
+    "OLLAMA_BASE_URL = os.getenv(\"OLLAMA_BASE_URL\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 24,
+   "id": "0d571e52",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# clients\n",
+    "\n",
+    "openai_client = OpenAI(api_key=OPENAI_API_KEY)\n",
+    "ollama_client = OpenAI(base_url=OLLAMA_BASE_URL, api_key=OLLAMA_API_KEY)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 25,
+   "id": "3f0d0137-52b0-47a8-81a8-11a90a010798",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# system prompt: classify the question as LLM-related (or not), then answer it as strict JSON\n",
+    "\n",
+    "system_prompt = (\n",
+    "    \"You are an assistant whose job is to (1) decide whether the user's question is about \"\n",
+    "    \"large language models (LLMs) or any topic within the LLM domain (architecture, training, \"\n",
+    "    \"fine-tuning, distillation, quantization, prompting, prompt engineering, tokenization, \"\n",
+    "    \"embeddings, inference, evaluation, safety, deployment, retrieval, adapters, LoRA, etc.), \"\n",
+    "    \"and (2) answer the question. Use your judgment — do not output your internal chain-of-thought. \"\n",
+    "    \"Return ONLY a single valid JSON object with these fields:\\n\"\n",
+    "    ' { \"is_llm_related\": true|false, \"color\": \"green\"|\"orange\", '\n",
+    "    '\"answer\": \"<concise answer>\", \"note\": \"<short note>\" }\\n'\n",
+    "    \"If the question is LLM-related, set is_llm_related=true and color='green'. \"\n",
+    "    \"If not, set is_llm_related=false and color='orange' and include a short note. \"\n",
+    "    \"Do not include any extra text before or after the JSON.\"\n",
+    ")\n",
+    "\n",
+    "def create_prompt(user_prompt):\n",
+    "    return [\n",
+    "        {\"role\": \"system\", \"content\": system_prompt},\n",
+    "        {\"role\": \"user\", \"content\": user_prompt}\n",
+    "    ]\n"
+   ]
+  },
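+  {
+   "cell_type": "markdown",
+   "id": "b51a2f77",
+   "metadata": {},
+   "source": [
+    "Models do not always honor the \"JSON only\" instruction; smaller ones in particular sometimes wrap the object in a Markdown code fence or add stray text. The next cell is a minimal sketch of a more tolerant parser (`safe_parse` is a hypothetical helper, not used by the cells below, which stick with plain `json.loads`): it strips a surrounding fence and, if parsing still fails, falls back to an orange payload instead of raising. If you hit `json.JSONDecodeError`, swap `json.loads` for `safe_parse` inside `display_response`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "d4f8a9c1",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Sketch of a tolerant parser (hypothetical helper, not part of the original flow)\n",
+    "\n",
+    "def safe_parse(content):\n",
+    "    text = content.strip()\n",
+    "    if text.startswith(\"```\"):\n",
+    "        # keep only what sits between the opening and closing fences\n",
+    "        text = text.split(\"```\")[1]\n",
+    "        if text.startswith(\"json\"):\n",
+    "            text = text[len(\"json\"):]  # drop the language tag\n",
+    "    try:\n",
+    "        return json.loads(text.strip())\n",
+    "    except json.JSONDecodeError:\n",
+    "        # degrade gracefully: surface the raw reply, flagged orange\n",
+    "        return {\n",
+    "            \"is_llm_related\": False,\n",
+    "            \"color\": \"orange\",\n",
+    "            \"answer\": content,\n",
+    "            \"note\": \"Model reply was not valid JSON; showing raw text.\"\n",
+    "        }"
+   ]
+  },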
+  {
+   "cell_type": "code",
+   "execution_count": 26,
+   "id": "e223f413",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def display_response(content):\n",
+    "    c = json.loads(content)\n",
+    "    color = c['color']  # 'green' for LLM topics, 'orange' otherwise\n",
+    "    html = f\"<span style='color:{color}'><b>Answer:</b></span><br><br>{c['answer']}<br><br>\"\n",
+    "    if not c['is_llm_related']:\n",
+    "        html += f\"<br><span style='color:{color}'><b>Out of scope:</b></span> {c['note']}\"\n",
+    "    return display(HTML(html))\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "2a0c1330",
+   "metadata": {},
+   "source": [
+    "### OpenAI `gpt-4o-mini` Model"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "ec13ce08",
+   "metadata": {},
+   "source": [
+    "- ##### Example of a prompt within the LLM domain"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 27,
+   "id": "cf165787",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "<span style='color:green'><b>Answer:</b></span><br><br>Amnesia in large language models refers to the phenomenon where a model forgets previously learned information when it is updated or fine-tuned with new data. This can lead to a degradation in performance on tasks for which the model was previously competent, as it may overwrite old knowledge with new training. Managing this aspect involves careful training strategies to retain useful information while integrating new data.<br><br>"
+      ],
+      "text/plain": [
+       "<IPython.core.display.HTML object>"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "good_response = openai_client.chat.completions.create(\n",
+    "    model=MODEL_GPT,\n",
+    "    messages=create_prompt(\"What is amnesia in large language models?\")\n",
+    ")\n",
+    "display_response(good_response.choices[0].message.content)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "969b5ccf",
+   "metadata": {},
+   "source": [
+    "- ##### Example of a prompt outside the LLM domain"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 28,
+   "id": "60ce7000-a4a5-4cce-a261-e75ef45063b4",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "<span style='color:orange'><b>Answer:</b></span><br><br>The Eiffel Tower was designed by the engineer Gustave Eiffel and his company.<br><br><br><span style='color:orange'><b>Out of scope:</b></span> This question is about historical architecture, not LLMs."
+      ],
+      "text/plain": [
+       "<IPython.core.display.HTML object>"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "bad_response = openai_client.chat.completions.create(\n",
+    "    model=MODEL_GPT,\n",
+    "    messages=create_prompt(\"Who built the Eiffel Tower?\")\n",
+    ")\n",
+    "display_response(bad_response.choices[0].message.content)"
+   ]
+  },
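+  {
+   "cell_type": "markdown",
+   "id": "c3a9e5b2",
+   "metadata": {},
+   "source": [
+    "For a stronger guarantee on the OpenAI side, the Chat Completions API offers JSON mode: `response_format={\"type\": \"json_object\"}` constrains the model to emit syntactically valid JSON (the prompt must mention JSON, which our system prompt already does). A quick sketch; the question and the variable name `json_mode_response` are just for illustration:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f7b3d2e8",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Sketch: JSON mode guarantees well-formed JSON output from the OpenAI model\n",
+    "json_mode_response = openai_client.chat.completions.create(\n",
+    "    model=MODEL_GPT,\n",
+    "    messages=create_prompt(\"What is quantization in large language models?\"),\n",
+    "    response_format={\"type\": \"json_object\"}\n",
+    ")\n",
+    "display_response(json_mode_response.choices[0].message.content)"
+   ]
+  },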
+  {
+   "cell_type": "markdown",
+   "id": "42ba3599",
+   "metadata": {},
+   "source": [
+    "### Ollama `llama3.2` Model"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "e1c95d06",
+   "metadata": {},
+   "source": [
+    "- ##### Example of a prompt within the LLM domain"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 29,
+   "id": "8f7c8ea8-4082-4ad0-8751-3301adcf6538",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "<span style='color:green'><b>Answer:</b></span><br><br>In large language models, hydration refers to the process of adjusting the model's parameters and biases to better handle input data that may contain typos, missing information, or other forms of inaccuracies. This can involve modifying the model's weights, adding noise to the training data, or using different optimization algorithms.<br><br>"
+      ],
+      "text/plain": [
+       "<IPython.core.display.HTML object>"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "good_response = ollama_client.chat.completions.create(\n",
+    "    model=MODEL_LLAMA,\n",
+    "    messages=create_prompt(\"What is hydration in large language models?\")\n",
+    ")\n",
+    "display_response(good_response.choices[0].message.content)"
+   ]
+  },
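+  {
+   "cell_type": "markdown",
+   "id": "9d1c4e6a",
+   "metadata": {},
+   "source": [
+    "Small local models break the JSON contract more often than hosted ones. Below is a rough retry sketch (assuming `create_prompt` and `display_response` from above; `ask_with_retry` is a hypothetical convenience, not used by the recorded examples):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "5e8b7a3f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Sketch: re-ask a local model until it returns parseable JSON\n",
+    "def ask_with_retry(question, client=ollama_client, model=MODEL_LLAMA, attempts=3):\n",
+    "    for _ in range(attempts):\n",
+    "        response = client.chat.completions.create(\n",
+    "            model=model,\n",
+    "            messages=create_prompt(question)\n",
+    "        )\n",
+    "        content = response.choices[0].message.content\n",
+    "        try:\n",
+    "            json.loads(content)  # contract satisfied: render it\n",
+    "            return display_response(content)\n",
+    "        except json.JSONDecodeError:\n",
+    "            continue  # malformed reply: ask again\n",
+    "    print(f\"No valid JSON after {attempts} attempts; last raw reply:\\n{content}\")\n",
+    "\n",
+    "# e.g. ask_with_retry(\"What is temperature in LLM sampling?\")"
+   ]
+  },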
+  {
+   "cell_type": "markdown",
+   "id": "e2a249ba",
+   "metadata": {},
+   "source": [
+    "- ##### Example of a prompt outside the LLM domain"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 30,
+   "id": "ac8c33a2",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "<span style='color:orange'><b>Answer:</b></span><br><br>The current President of the United States is Joe Biden.<br><br><br><span style='color:orange'><b>Out of scope:</b></span> No relation to large language models."
+      ],
+      "text/plain": [
+       "<IPython.core.display.HTML object>"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "bad_response = ollama_client.chat.completions.create(\n",
+    "    model=MODEL_LLAMA,\n",
+    "    messages=create_prompt(\"Who is the president of the United States?\")\n",
+    ")\n",
+    "display_response(bad_response.choices[0].message.content)"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": ".venv",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.12"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}