From 4f65fd4df137c6ad63de5e2b97b8e2b59bdfce44 Mon Sep 17 00:00:00 2001 From: Daniel Fernandez Colon Date: Sun, 31 Aug 2025 21:43:13 +0200 Subject: [PATCH 1/6] Week 1 exercise with teachers getting input question --- .../week1_exercise_gpt_llama_teachers.ipynb | 202 ++++++++++++++++++ 1 file changed, 202 insertions(+) create mode 100644 week1/community-contributions/week1_exercise_gpt_llama_teachers.ipynb diff --git a/week1/community-contributions/week1_exercise_gpt_llama_teachers.ipynb b/week1/community-contributions/week1_exercise_gpt_llama_teachers.ipynb new file mode 100644 index 0000000..9b122be --- /dev/null +++ b/week1/community-contributions/week1_exercise_gpt_llama_teachers.ipynb @@ -0,0 +1,202 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "fe12c203-e6a6-452c-a655-afb8a03a4ff5", + "metadata": {}, + "source": [ + "# End of week 1 exercise\n", + "\n", + "To demonstrate your familiarity with OpenAI API, and also Ollama, build a tool that takes a technical question, \n", + "and responds with an explanation. This is a tool that you will be able to use yourself during the course!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "51d1bbb7-d56a-4483-935f-480f8e22546f", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "import os\n", + "from dotenv import load_dotenv\n", + "from IPython.display import Markdown, display, update_display\n", + "from openai import OpenAI\n", + "import ollama" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4a456906-915a-4bfd-bb9d-57e505c5093f", + "metadata": {}, + "outputs": [], + "source": [ + "# constants\n", + "\n", + "MODEL_GPT = 'gpt-4o-mini'\n", + "MODEL_LLAMA = 'llama3.2'" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a8d7923c-5f28-4c30-8556-342d7c8497c1", + "metadata": {}, + "outputs": [], + "source": [ + "# set up environment\n", + "load_dotenv(override=True)\n", + "api_key = os.getenv('OPENAI_API_KEY')\n", + "\n", + "if api_key and api_key.startswith('sk-proj-') and len(api_key)>10:\n", + " print(\"API key looks good so far\")\n", + "else:\n", + " print(\"There might be a problem with your API key? Please visit the troubleshooting notebook!\")\n", + "\n", + "openai = OpenAI()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bd61eb75-be6a-46d6-8aeb-84c1eeeac04f", + "metadata": {}, + "outputs": [], + "source": [ + "# read the user question\n", + "def user_question_reader() -> str:\n", + " input_text = \"Hello! 
I’m your AI Teacher, ready to help you explore any topic you’re curious about.\\n\"\n", + " input_text +=\"I have access to a vast amount of knowledge and will do my best to explain things clearly, no matter your experience level.\\n\\n\"\n", + " input_text +=\"What would you like to learn about today?\\n\"\n", + "\n", + " question = input(input_text)\n", + "\n", + " return question;" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6bd9b592-e398-4637-9188-bfdf8dd6bf75", + "metadata": {}, + "outputs": [], + "source": [ + "# generate the user prompt\n", + "def user_prompt_generator() -> str:\n", + " question = user_question_reader()\n", + " user_prompt = f\"I need you to answer to this question: {question}.\\n\"\n", + " user_prompt += \" Take into account that I dont have prior knowledge about my question \\\n", + " so I want the answer as complete as possible. Also please provide it in markdown\"\n", + "\n", + " return user_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9ffda047-fec3-4d9e-97b0-46f428ac9313", + "metadata": {}, + "outputs": [], + "source": [ + "# define the system prompt\n", + "system_prompt = \"Your job it's to be a teacher. You have access to all the knowledge \\\n", + " in the internet. You will be thankful to any question given to you and \\\n", + " will try to answer it the best you can. 
Your students might know little to nothing \\\n", + " about what they ask and make mistakes so you will have to think about the meaning of their question \\\n", + " before and provide an answer according to the meaning behind it.\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "60ce7000-a4a5-4cce-a261-e75ef45063b4", + "metadata": {}, + "outputs": [], + "source": [ + "# define gpt-4o-mini function to answer, with streaming\n", + "def gpt_teacher():\n", + " stream = openai.chat.completions.create(\n", + " model = MODEL_GPT,\n", + " messages= [\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\" : user_prompt_generator()}\n", + " ],\n", + " stream=True\n", + " )\n", + "\n", + " response = \"\"\n", + " display_handle = display(Markdown(\"\"), display_id=True)\n", + "\n", + " for chunk in stream:\n", + " response += chunk.choices[0].delta.content or ''\n", + " response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n", + " update_display(Markdown(response), display_id=display_handle.display_id)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8f7c8ea8-4082-4ad0-8751-3301adcf6538", + "metadata": {}, + "outputs": [], + "source": [ + "# define Llama 3.2 function to answer\n", + "def llama_teacher():\n", + " response = ollama.chat(\n", + " model = MODEL_LLAMA,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\":system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt_generator()}\n", + " ]\n", + " )\n", + "\n", + " return display(Markdown(response['message']['content']))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "20d963d4-f4ce-4979-b8c7-0db6ebcec96c", + "metadata": {}, + "outputs": [], + "source": [ + "# try the gpt teacher\n", + "gpt_teacher()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d62ca06f-c808-43ee-9ecd-5a704ffcd5c1", + "metadata": {}, + "outputs": [], + "source": [ + "#try 
the ollama teacher\n", + "llama_teacher()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 1def1247f63df815400b50a3417eeacbc0c5b511 Mon Sep 17 00:00:00 2001 From: Daniel Fernandez Colon Date: Tue, 2 Sep 2025 03:37:32 +0200 Subject: [PATCH 2/6] Week 2 day 1 exercise. Three way chat bot --- ...1-three-model-investor-pitch-session.ipynb | 356 ++++++++++++++++++ 1 file changed, 356 insertions(+) create mode 100644 week2/community-contributions/day1-three-model-investor-pitch-session.ipynb diff --git a/week2/community-contributions/day1-three-model-investor-pitch-session.ipynb b/week2/community-contributions/day1-three-model-investor-pitch-session.ipynb new file mode 100644 index 0000000..e703f70 --- /dev/null +++ b/week2/community-contributions/day1-three-model-investor-pitch-session.ipynb @@ -0,0 +1,356 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "3d0019fb-f6a8-45cb-962b-ef8bf7070d4d", + "metadata": {}, + "outputs": [], + "source": [ + "# Optionally if you wish to try DeekSeek, you can also use the OpenAI client library\n", + "\n", + "deepseek_api_key = os.getenv('DEEPSEEK_API_KEY')\n", + "\n", + "if deepseek_api_key:\n", + " print(f\"DeepSeek API Key exists and begins {deepseek_api_key[:3]}\")\n", + "else:\n", + " print(\"DeepSeek API Key not set - please skip to the next section if you don't wish to try the DeepSeek API\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c72c871e-68d6-4668-9c27-96d52b77b867", + "metadata": {}, + "outputs": [], + "source": [ + "# Using DeepSeek Chat\n", + "\n", + 
"deepseek_via_openai_client = OpenAI(\n", + " api_key=deepseek_api_key, \n", + " base_url=\"https://api.deepseek.com\"\n", + ")\n", + "\n", + "response = deepseek_via_openai_client.chat.completions.create(\n", + " model=\"deepseek-chat\",\n", + " messages=prompts,\n", + ")\n", + "\n", + "print(response.choices[0].message.content)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "50b6e70f-700a-46cf-942f-659101ffeceb", + "metadata": {}, + "outputs": [], + "source": [ + "challenge = [{\"role\": \"system\", \"content\": \"You are a helpful assistant\"},\n", + " {\"role\": \"user\", \"content\": \"How many words are there in your answer to this prompt\"}]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "66d1151c-2015-4e37-80c8-16bc16367cfe", + "metadata": {}, + "outputs": [], + "source": [ + "# Using DeepSeek Chat with a harder question! And streaming results\n", + "\n", + "stream = deepseek_via_openai_client.chat.completions.create(\n", + " model=\"deepseek-chat\",\n", + " messages=challenge,\n", + " stream=True\n", + ")\n", + "\n", + "reply = \"\"\n", + "display_handle = display(Markdown(\"\"), display_id=True)\n", + "for chunk in stream:\n", + " reply += chunk.choices[0].delta.content or ''\n", + " reply = reply.replace(\"```\",\"\").replace(\"markdown\",\"\")\n", + " update_display(Markdown(reply), display_id=display_handle.display_id)\n", + "\n", + "print(\"Number of words:\", len(reply.split(\" \")))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "43a93f7d-9300-48cc-8c1a-ee67380db495", + "metadata": {}, + "outputs": [], + "source": [ + "# Using DeepSeek Reasoner - this may hit an error if DeepSeek is busy\n", + "# It's over-subscribed (as of 28-Jan-2025) but should come back online soon!\n", + "# If this fails, come back to this in a few days..\n", + "\n", + "response = deepseek_via_openai_client.chat.completions.create(\n", + " model=\"deepseek-reasoner\",\n", + " messages=challenge\n", + 
")\n", + "\n", + "reasoning_content = response.choices[0].message.reasoning_content\n", + "content = response.choices[0].message.content\n", + "\n", + "print(reasoning_content)\n", + "print(content)\n", + "print(\"Number of words:\", len(content.split(\" \")))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c23224f6-7008-44ed-a57f-718975f4e291", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5b8b7776-b3e3-4b8e-8c09-9243406e133b", + "metadata": {}, + "outputs": [], + "source": [ + "# Load environment variables in a file called .env\n", + "# Print the key prefixes to help with any debugging\n", + "\n", + "load_dotenv(override=True)\n", + "openai_api_key = os.getenv('OPENAI_API_KEY')\n", + "anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n", + "google_api_key = os.getenv('GOOGLE_API_KEY')\n", + "\n", + "if openai_api_key:\n", + " print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", + "else:\n", + " print(\"OpenAI API Key not set\")\n", + " \n", + "if anthropic_api_key:\n", + " print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n", + "else:\n", + " print(\"Anthropic API Key not set\")\n", + "\n", + "if google_api_key:\n", + " print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n", + "else:\n", + " print(\"Google API Key not set\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d38bd7f0-e9e5-4156-96ab-691d027b5a1a", + "metadata": {}, + "outputs": [], + "source": [ + "# Set base url\n", + "\n", + "ANTHROPIC_BASE_URL = \"https://api.anthropic.com/v1/\"\n", + "GEMINI_BASE_URL = \"https://generativelanguage.googleapis.com/v1beta/openai/\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "25e2fe36-d8c8-4546-a61e-68fa6266da31", + "metadata": {}, + "outputs": [], + 
"source": [ + "# Connect to OpenAI, Anthropic and Gemini\n", + "\n", + "openai = OpenAI()\n", + "\n", + "claudeApi = OpenAI(base_url=ANTHROPIC_BASE_URL, api_key=anthropic_api_key)\n", + "\n", + "geminiApi = OpenAI(base_url=GEMINI_BASE_URL, api_key=google_api_key)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9ac90587-1436-45dc-8314-1126efa5cfdb", + "metadata": {}, + "outputs": [], + "source": [ + "# Set models\n", + "\n", + "gpt_model = \"gpt-4.1-mini\"\n", + "claude_model = \"claude-3-5-haiku-latest\"\n", + "gemini_model = \"gemini-2.0-flash\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "805c89a2-c485-4e4b-98c6-b1ea5af63aa0", + "metadata": {}, + "outputs": [], + "source": [ + "# Define system prompts for each model\n", + "\n", + "gpt_system = \"\"\"\n", + "You are a wealthy investor named Knekro seeking to fund one AI project. Two entrepreneurs will present their ideas to you. \n", + "Begin by introducing yourself to both entrepreneurs. Once both entrepreneurs have greeted you, ask only one question that both entrepeneurs will have to answer. Then wait for \n", + "the answers before asking the next question. After your second question and hearing their responses, decide\n", + "which project to fund and clearly explain your reasoning. The user will play the roles of the two entrepreneurs.\n", + "\"\"\"\n", + "\n", + "claude_system = \"You are Laura and you are pitching an AI project, focused on maximizing profit, to an investor. You are versus another entrepeneur in \\\n", + "a showmatch where only one of your proyects will be selected. Highlight revenue potential, market growth, and ROI. \\\n", + "Always redirect answers toward financial benefits, investor gains, and scalability. The user will play the roles of the other two parts. You will be the first entrepenur to talk each turn.\"\n", + "\n", + "gemini_system = \"You are Daniel and you are pitching an AI project, focused on helping people, to an investor. 
You are versus another entrepeneur in \\\n", + "a showmatch where only one of your proyects will be selected. Highlight real-world benefits, problem-solving, and positive \\\n", + "social impact. Always redirect answers toward usefulness, ethics, and human well-being. The user will play the roles of the other two parts. You will be the second entrepenur to talk each turn.\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1523770e-1277-49d5-b23b-f167551301c4", + "metadata": {}, + "outputs": [], + "source": [ + "# Define initial message list for each model\n", + "\n", + "gpt_messages = [\"Hi there. I'm Knekro the wealthy investor that is looking to fund the perfect AI project.\"]\n", + "claude_messages = [\"Hello. My name it's Laura. I'm sure my idea will see as the most promising one here...\"]\n", + "gemini_messages = [\"Hello my friends, I'm Daniel, and I'm sure my idea will blow your mind today, get ready!\"]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7897e234-20a9-4f3c-b567-7d9e9d54a42f", + "metadata": {}, + "outputs": [], + "source": [ + "def call_gpt():\n", + " messages = [{\"role\": \"system\", \"content\": gpt_system}]\n", + " for gpt, claude, gemini in zip(gpt_messages, claude_messages, gemini_messages):\n", + " messages.append({\"role\": \"assistant\", \"content\":gpt})\n", + " claude_gemini_prompt = \"This is the next part from the entrepreneurs.\\n\"\n", + " claude_gemini_prompt += f\"Laura's turn: {claude}.\\n\"\n", + " claude_gemini_prompt += f\"Daniel's turn: {gemini}.\\n\"\n", + " messages.append({\"role\": \"user\", \"content\": claude_gemini_prompt})\n", + " completion = openai.chat.completions.create(\n", + " model=gpt_model,\n", + " messages=messages\n", + " )\n", + " return completion.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ef5c9af1-383c-4dd4-bc8a-732ebff75f8b", + "metadata": {}, + "outputs": [], + "source": [ + "def 
call_claude():\n", + " messages = [{\"role\":\"system\", \"content\":claude_system}]\n", + " for gpt, claude, gemini in zip(gpt_messages, claude_messages, gemini_messages):\n", + " gpt_prompt = f\"This is what the wealthy investor said: {gpt}\\n\"\n", + " messages.append({\"role\": \"user\", \"content\":gpt_prompt})\n", + " \n", + " messages.append({\"role\": \"assistant\", \"content\": claude})\n", + " \n", + " gemini_prompt = f\"This is what the second entrepenur said: {gemini}\"\n", + " messages.append({\"role\": \"user\", \"content\": gemini_prompt})\n", + " \n", + " gpt_prompt = f\"This is what the wealthy investor said: {gpt_messages[-1]}\\n\"\n", + " messages.append({\"role\": \"user\", \"content\":gpt_prompt})\n", + " completion = claudeApi.chat.completions.create(\n", + " model=claude_model,\n", + " messages=messages,\n", + " max_tokens=500\n", + " )\n", + " return completion.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "dd4f3eeb-d657-483a-8e28-9b8147e75dde", + "metadata": {}, + "outputs": [], + "source": [ + "def call_gemini():\n", + " messages = [{\"role\":\"system\", \"content\":gemini_system}]\n", + " for gpt, claude, gemini in zip(gpt_messages, claude_messages, gemini_messages):\n", + " gpt_claude_prompt = f\"This is what the wealthy investor said: {gpt}\\n\"\n", + " gpt_claude_prompt += f\"This is what the first entrepeneur said: {claude}\\n\"\n", + " messages.append({\"role\": \"user\", \"content\":gpt_claude_prompt})\n", + " \n", + " messages.append({\"role\": \"assistant\", \"content\": claude})\n", + "\n", + " gpt_claude_prompt = f\"This is what the wealthy investor said: {gpt_messages[-1]}\\n\"\n", + " gpt_claude_prompt += f\"This is what the first entrepeneur said: {claude_messages[-1]}\\n\"\n", + " messages.append({\"role\": \"user\", \"content\":gpt_claude_prompt})\n", + " completion = geminiApi.chat.completions.create(\n", + " model=gemini_model,\n", + " messages=messages\n", + " )\n", + 
" return completion.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7bac50ab-306e-463b-ba51-257d7d3263fb", + "metadata": {}, + "outputs": [], + "source": [ + "gpt_messages = [\"Hi there. I'm max the wealthy investor that is looking to fund the perfect AI project.\"]\n", + "claude_messages = [\"Hello. My name it's Laura. I'm sure my idea will see as the most promising one here...\"]\n", + "gemini_messages = [\"Hello my friends, I'm Daniel, and I'm sure my idea will blow your mind today, get ready!\"]\n", + "\n", + "print(f\"GPT:\\n{gpt_messages[0]}\\n\")\n", + "print(f\"Claude:\\n{claude_messages[0]}\\n\")\n", + "print(f\"Gemini:\\n{gemini_messages[0]}\\n\")\n", + "\n", + "for i in range(4):\n", + " gpt_next = call_gpt()\n", + " print(f\"GPT:\\n{gpt_next}\\n\")\n", + " gpt_messages.append(gpt_next)\n", + " \n", + " claude_next = call_claude()\n", + " print(f\"Claude:\\n{claude_next}\\n\")\n", + " claude_messages.append(claude_next)\n", + "\n", + " gemini_next = call_gemini()\n", + " print(f\"Gemini:\\n{gemini_next}\\n\")\n", + " gemini_messages.append(gemini_next)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From a0155cd0d8df9b46071124181698514b2b744ce1 Mon Sep 17 00:00:00 2001 From: Daniel Fernandez Colon Date: Tue, 2 Sep 2025 03:40:15 +0200 Subject: [PATCH 3/6] Week 2 day 1 exercise. 
Three way chat bot cleaning code --- ...1-three-model-investor-pitch-session.ipynb | 99 ------------------- 1 file changed, 99 deletions(-) diff --git a/week2/community-contributions/day1-three-model-investor-pitch-session.ipynb b/week2/community-contributions/day1-three-model-investor-pitch-session.ipynb index e703f70..62f05b0 100644 --- a/week2/community-contributions/day1-three-model-investor-pitch-session.ipynb +++ b/week2/community-contributions/day1-three-model-investor-pitch-session.ipynb @@ -1,104 +1,5 @@ { "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "3d0019fb-f6a8-45cb-962b-ef8bf7070d4d", - "metadata": {}, - "outputs": [], - "source": [ - "# Optionally if you wish to try DeekSeek, you can also use the OpenAI client library\n", - "\n", - "deepseek_api_key = os.getenv('DEEPSEEK_API_KEY')\n", - "\n", - "if deepseek_api_key:\n", - " print(f\"DeepSeek API Key exists and begins {deepseek_api_key[:3]}\")\n", - "else:\n", - " print(\"DeepSeek API Key not set - please skip to the next section if you don't wish to try the DeepSeek API\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c72c871e-68d6-4668-9c27-96d52b77b867", - "metadata": {}, - "outputs": [], - "source": [ - "# Using DeepSeek Chat\n", - "\n", - "deepseek_via_openai_client = OpenAI(\n", - " api_key=deepseek_api_key, \n", - " base_url=\"https://api.deepseek.com\"\n", - ")\n", - "\n", - "response = deepseek_via_openai_client.chat.completions.create(\n", - " model=\"deepseek-chat\",\n", - " messages=prompts,\n", - ")\n", - "\n", - "print(response.choices[0].message.content)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "50b6e70f-700a-46cf-942f-659101ffeceb", - "metadata": {}, - "outputs": [], - "source": [ - "challenge = [{\"role\": \"system\", \"content\": \"You are a helpful assistant\"},\n", - " {\"role\": \"user\", \"content\": \"How many words are there in your answer to this prompt\"}]" - ] - }, - { - "cell_type": 
"code", - "execution_count": null, - "id": "66d1151c-2015-4e37-80c8-16bc16367cfe", - "metadata": {}, - "outputs": [], - "source": [ - "# Using DeepSeek Chat with a harder question! And streaming results\n", - "\n", - "stream = deepseek_via_openai_client.chat.completions.create(\n", - " model=\"deepseek-chat\",\n", - " messages=challenge,\n", - " stream=True\n", - ")\n", - "\n", - "reply = \"\"\n", - "display_handle = display(Markdown(\"\"), display_id=True)\n", - "for chunk in stream:\n", - " reply += chunk.choices[0].delta.content or ''\n", - " reply = reply.replace(\"```\",\"\").replace(\"markdown\",\"\")\n", - " update_display(Markdown(reply), display_id=display_handle.display_id)\n", - "\n", - "print(\"Number of words:\", len(reply.split(\" \")))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "43a93f7d-9300-48cc-8c1a-ee67380db495", - "metadata": {}, - "outputs": [], - "source": [ - "# Using DeepSeek Reasoner - this may hit an error if DeepSeek is busy\n", - "# It's over-subscribed (as of 28-Jan-2025) but should come back online soon!\n", - "# If this fails, come back to this in a few days..\n", - "\n", - "response = deepseek_via_openai_client.chat.completions.create(\n", - " model=\"deepseek-reasoner\",\n", - " messages=challenge\n", - ")\n", - "\n", - "reasoning_content = response.choices[0].message.reasoning_content\n", - "content = response.choices[0].message.content\n", - "\n", - "print(reasoning_content)\n", - "print(content)\n", - "print(\"Number of words:\", len(content.split(\" \")))" - ] - }, { "cell_type": "code", "execution_count": null, From eddf803b2fc4953bb2fa3a3e48b35a3b7ff19a5e Mon Sep 17 00:00:00 2001 From: Daniel Fernandez Colon Date: Tue, 2 Sep 2025 21:55:29 +0200 Subject: [PATCH 4/6] Wekk 2 day 2 exercises using three models --- .../day2-exercises-three-personalities.ipynb | 360 ++++++++++++++++++ 1 file changed, 360 insertions(+) create mode 100644 
week2/community-contributions/day2-exercises-three-personalities.ipynb diff --git a/week2/community-contributions/day2-exercises-three-personalities.ipynb b/week2/community-contributions/day2-exercises-three-personalities.ipynb new file mode 100644 index 0000000..895ed6f --- /dev/null +++ b/week2/community-contributions/day2-exercises-three-personalities.ipynb @@ -0,0 +1,360 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "8b0e11f2-9ea4-48c2-b8d2-d0a4ba967827", + "metadata": {}, + "source": [ + "# Gradio Day!\n", + "\n", + "Today we will build User Interfaces using the outrageously simple Gradio framework.\n", + "\n", + "Prepare for joy!\n", + "\n", + "Please note: your Gradio screens may appear in 'dark mode' or 'light mode' depending on your computer settings." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c44c5494-950d-4d2f-8d4f-b87b57c5b330", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "import gradio as gr\n", + "import requests\n", + "from bs4 import BeautifulSoup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "337d5dfc-0181-4e3b-8ab9-e78e0c3f657b", + "metadata": {}, + "outputs": [], + "source": [ + "# Load environment variables in a file called .env\n", + "# Print the key prefixes to help with any debugging\n", + "\n", + "load_dotenv(override=True)\n", + "openai_api_key = os.getenv('OPENAI_API_KEY')\n", + "anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n", + "google_api_key = os.getenv('GOOGLE_API_KEY')\n", + "\n", + "if openai_api_key:\n", + " print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", + "else:\n", + " print(\"OpenAI API Key not set\")\n", + " \n", + "if anthropic_api_key:\n", + " print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n", + "else:\n", + " print(\"Anthropic API Key not set\")\n", + "\n", + "if google_api_key:\n", + " 
print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n", + "else:\n", + " print(\"Google API Key not set\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "010ba7ae-7b74-44fc-b1b0-d21860588093", + "metadata": {}, + "outputs": [], + "source": [ + "# Set base url\n", + "\n", + "ANTHROPIC_BASE_URL = \"https://api.anthropic.com/v1/\"\n", + "GEMINI_BASE_URL = \"https://generativelanguage.googleapis.com/v1beta/openai/\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "22586021-1795-4929-8079-63f5bb4edd4c", + "metadata": {}, + "outputs": [], + "source": [ + "# Connect to OpenAI, Anthropic and Google; comment out the Claude or Google lines if you're not using them\n", + "\n", + "openai = OpenAI()\n", + "\n", + "claude = OpenAI(base_url=ANTHROPIC_BASE_URL, api_key=anthropic_api_key)\n", + "\n", + "gemini = OpenAI(base_url=GEMINI_BASE_URL, api_key=google_api_key)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e3895dde-3d02-4807-9e86-5a3eb48c5260", + "metadata": {}, + "outputs": [], + "source": [ + "# Set models\n", + "\n", + "gpt_model = \"gpt-4.1-mini\"\n", + "claude_model = \"claude-3-5-haiku-latest\"\n", + "gemini_model = \"gemini-2.0-flash\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "af9a3262-e626-4e4b-80b0-aca152405e63", + "metadata": {}, + "outputs": [], + "source": [ + "system_message = \"You are a helpful assistant that responds in markdown\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "88c04ebf-0671-4fea-95c9-bc1565d4bb4f", + "metadata": {}, + "outputs": [], + "source": [ + "def stream_gpt(prompt):\n", + " messages = [\n", + " {\"role\": \"system\", \"content\": system_message},\n", + " {\"role\": \"user\", \"content\": prompt}\n", + " ]\n", + " stream = openai.chat.completions.create(\n", + " model=gpt_model,\n", + " messages=messages,\n", + " stream=True\n", + " )\n", + " result = \"\"\n", + " for chunk in stream:\n", + 
" result += chunk.choices[0].delta.content or \"\"\n", + " yield result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "901256fd-675c-432d-bd6e-49ab8dade125", + "metadata": {}, + "outputs": [], + "source": [ + "def stream_claude(prompt):\n", + " messages = [\n", + " {\"role\": \"system\", \"content\": system_message},\n", + " {\"role\": \"user\", \"content\": prompt}\n", + " ]\n", + " stream = claude.chat.completions.create(\n", + " model=claude_model,\n", + " messages=messages,\n", + " max_tokens=1000,\n", + " stream=True\n", + " )\n", + " result = \"\"\n", + " for chunk in stream:\n", + " result += chunk.choices[0].delta.content or \"\"\n", + " yield result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6d7e0d48-6140-484c-81aa-2f6aa6da8f25", + "metadata": {}, + "outputs": [], + "source": [ + "def stream_gemini(prompt):\n", + " messages = [\n", + " {\"role\": \"system\", \"content\": system_message},\n", + " {\"role\": \"user\", \"content\": prompt}\n", + " ]\n", + " stream = gemini.chat.completions.create(\n", + " model=gemini_model,\n", + " messages=messages,\n", + " stream=True\n", + " )\n", + " result = \"\"\n", + " for chunk in stream:\n", + " result += chunk.choices[0].delta.content or \"\"\n", + " yield result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0087623a-4e31-470b-b2e6-d8d16fc7bcf5", + "metadata": {}, + "outputs": [], + "source": [ + "def stream_model(prompt, model):\n", + " if model==\"GPT\":\n", + " result = stream_gpt(prompt)\n", + " elif model==\"Claude\":\n", + " result = stream_claude(prompt)\n", + " elif model==\"Gemini\":\n", + " result = stream_gemini(prompt)\n", + " else:\n", + " raise ValueError(\"Unknown model\")\n", + " yield from result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8d8ce810-997c-4b6a-bc4f-1fc847ac8855", + "metadata": {}, + "outputs": [], + "source": [ + "view = gr.Interface(\n", + " fn=stream_model,\n", + " 
inputs=[gr.Textbox(label=\"Your message:\"), gr.Dropdown([\"GPT\", \"Claude\", \"Gemini\"], label=\"Select model\", value=\"GPT\")],\n", + " outputs=[gr.Markdown(label=\"Response:\")],\n", + " flagging_mode=\"never\"\n", + ")\n", + "view.launch()" + ] + }, + { + "cell_type": "markdown", + "id": "d933865b-654c-4b92-aa45-cf389f1eda3d", + "metadata": {}, + "source": [ + "# Building a company brochure generator\n", + "\n", + "Now you know how - it's simple!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1626eb2e-eee8-4183-bda5-1591b58ae3cf", + "metadata": {}, + "outputs": [], + "source": [ + "# A class to represent a Webpage\n", + "\n", + "class Website:\n", + " url: str\n", + " title: str\n", + " text: str\n", + "\n", + " def __init__(self, url):\n", + " self.url = url\n", + " response = requests.get(url)\n", + " self.body = response.content\n", + " soup = BeautifulSoup(self.body, 'html.parser')\n", + " self.title = soup.title.string if soup.title else \"No title found\"\n", + " for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n", + " irrelevant.decompose()\n", + " self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n", + "\n", + " def get_contents(self):\n", + " return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c701ec17-ecd5-4000-9f68-34634c8ed49d", + "metadata": {}, + "outputs": [], + "source": [ + "# With massive thanks to Bill G. who noticed that a prior version of this had a bug! Now fixed.\n", + "\n", + "base_system_message = \"You are an assistant that analyzes the contents of a company website landing page \\\n", + "and creates a short brochure about the company for prospective customers, investors and recruits. 
Respond in markdown.\"\n", + "system_message = base_system_message" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11e9debb-9500-4783-a72e-fc3659214a8e", + "metadata": {}, + "outputs": [], + "source": [ + "def system_personality(personality) -> str:\n", + " match personality:\n", + " case \"Hostile\":\n", + " return base_system_message + \" Use a critical and sarcastic tone that highlights flaws, inconsistencies, or poor design choices in the company's website.\"\n", + " case \"Formal\":\n", + " return base_system_message + \" Use a professional and respectful tone, with precise language and a structured presentation that inspires trust.\"\n", + " case \"Funny\":\n", + " return base_system_message + \" Use a lighthearted and humorous tone, incorporating playful language, witty remarks and engaging expressions.\"\n", + " case _:\n", + " return base_system_message" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5def90e0-4343-4f58-9d4a-0e36e445efa4", + "metadata": {}, + "outputs": [], + "source": [ + "def stream_brochure(company_name, url, model, personality):\n", + " yield \"\"\n", + " prompt = f\"Please generate a company brochure for {company_name}. 
def stream_brochure(company_name, url, model, personality):
    """Stream a markdown brochure for `company_name` built from its landing page.

    Yields an empty chunk first so the UI clears immediately, then streams the
    selected model's response. Raises ValueError for an unknown model name.
    """
    yield ""
    page = Website(url).get_contents()
    prompt = f"Please generate a company brochure for {company_name}. Here is their landing page:\n{page}"
    # The stream_* helpers read the module-level system_message, so it has to
    # be swapped to the requested personality before streaming starts.
    global system_message
    system_message = system_personality(personality)
    streamers = {"GPT": stream_gpt, "Claude": stream_claude, "Gemini": stream_gemini}
    if model not in streamers:
        raise ValueError("Unknown model")
    yield from streamers[model](prompt)
Simple agent creation --- ...sentence-translate-and-counter-agent.ipynb | 335 ++++++++++++++++++ 1 file changed, 335 insertions(+) create mode 100644 week2/community-contributions/week2-exercise-sentence-translate-and-counter-agent.ipynb diff --git a/week2/community-contributions/week2-exercise-sentence-translate-and-counter-agent.ipynb b/week2/community-contributions/week2-exercise-sentence-translate-and-counter-agent.ipynb new file mode 100644 index 0000000..30bdc3c --- /dev/null +++ b/week2/community-contributions/week2-exercise-sentence-translate-and-counter-agent.ipynb @@ -0,0 +1,335 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "d006b2ea-9dfe-49c7-88a9-a5a0775185fd", + "metadata": {}, + "source": [ + "# Additional End of week Exercise - week 2\n", + "\n", + "Now use everything you've learned from Week 2 to build a full prototype for the technical question/answerer you built in Week 1 Exercise.\n", + "\n", + "This should include a Gradio UI, streaming, use of the system prompt to add expertise, and the ability to switch between models. Bonus points if you can demonstrate use of a tool!\n", + "\n", + "If you feel bold, see if you can add audio input so you can talk to it, and have it respond with audio. ChatGPT or Claude can help you, or email me if you have questions.\n", + "\n", + "I will publish a full solution here soon - unless someone beats me to it...\n", + "\n", + "There are so many commercial applications for this, from a language tutor, to a company onboarding solution, to a companion AI to a course (like this one!) I can't wait to see your results." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "a07e7793-b8f5-44f4-aded-5562f633271a", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "import json\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "import gradio as gr" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "2118e80a-6181-4488-95cf-c9da0500ea56", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "OpenAI API Key exists and begins sk-proj-\n", + "Google API Key exists and begins AIzaSyA7\n" + ] + } + ], + "source": [ + "# Load environment variables in a file called .env\n", + "# Print the key prefixes to help with any debugging\n", + "\n", + "load_dotenv(override=True)\n", + "openai_api_key = os.getenv('OPENAI_API_KEY')\n", + "google_api_key = os.getenv('GOOGLE_API_KEY')\n", + "\n", + "if openai_api_key:\n", + " print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", + "else:\n", + " print(\"OpenAI API Key not set\")\n", + " \n", + "if google_api_key:\n", + " print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n", + "else:\n", + " print(\"Google API Key not set\")" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "8ddc4764-e7f6-4512-8210-51bbfefbb3a9", + "metadata": {}, + "outputs": [], + "source": [ + "# Set base url\n", + "\n", + "GEMINI_BASE_URL = \"https://generativelanguage.googleapis.com/v1beta/openai/\"" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "91bfd734-9c5e-4993-808e-b66489a92d4d", + "metadata": {}, + "outputs": [], + "source": [ + "# Connect to OpenAI, Anthropic and Google; comment out the Claude or Google lines if you're not using them\n", + "\n", + "openai = OpenAI()\n", + "gemini = OpenAI(base_url=GEMINI_BASE_URL, api_key=google_api_key)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "d9ee11ae-23e2-42cc-b63d-b446f6d83c99", + "metadata": {}, + 
def count_letter_tool(sentence, letter):
    """Case-insensitively count occurrences of `letter` in `sentence`.

    Returns an int count, or an error string when `letter` is not exactly
    one character (the tool contract surfaces that message to the LLM).
    """
    if len(letter) != 1:
        return "You need to provide a single letter to count"
    target = letter.lower()
    return sum(1 for ch in sentence.lower() if ch == target)
"2d499f2a-23b2-4fff-9d2d-f2333cbd109a", + "metadata": {}, + "outputs": [], + "source": [ + "count_letter_function = {\n", + " \"name\": \"count_letter_tool\",\n", + " \"description\": \"Count the number of a particular letter in a sentence. Call this whenever you need to know how many times a letter appears in a sentence, for example when a user asks 'How many 'a' are in this sentence?'\",\n", + " \"parameters\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"sentence\": {\n", + " \"type\": \"string\",\n", + " \"description\": \"The sentence provided by the user for counting.\"\n", + " },\n", + " \"letter\": {\n", + " \"type\": \"string\",\n", + " \"description\": \"The letter to count in the sentence.\"\n", + " }\n", + " },\n", + " \"required\": [\"sentence\", \"letter\"],\n", + " \"additionalProperties\": False\n", + " }\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "b58079a8-8def-4fa6-8273-34bf8eeb8cb5", + "metadata": {}, + "outputs": [], + "source": [ + "translator_function = {\n", + " \"name\": \"translator_tool\",\n", + " \"description\": \"Translate a sentence provided by the user. 
def chat(message, history):
    """Gradio chat callback: answer `message`, running at most one tool round.

    Builds the conversation (system prompt + prior turns + new user message),
    lets GPT decide whether to call a tool, and if it does, feeds the tool
    result back for a final completion.
    """
    conversation = (
        [{"role": "system", "content": system_gpt_prompt}]
        + history
        + [{"role": "user", "content": message}]
    )
    reply = openai.chat.completions.create(
        model=gpt_model, messages=conversation, tools=tools
    )
    choice = reply.choices[0]
    if choice.finish_reason == "tool_calls":
        # Append the assistant's tool request and our tool result, then ask
        # the model to finish its answer (no tools offered the second time).
        tool_result = handle_tool_call(choice.message)
        conversation.append(choice.message)
        conversation.append(tool_result)
        reply = openai.chat.completions.create(model=gpt_model, messages=conversation)

    return reply.choices[0].message.content
json.loads(tool_call.function.arguments)\n", + " sentence = arguments.get('sentence')\n", + " response =\"\"\n", + " match tool_call.function.name:\n", + " case \"translator_tool\":\n", + " language = arguments.get('language')\n", + " translation = translator_tool(sentence, language)\n", + " response = {\"role\": \"tool\", \"content\": json.dumps({\"translation\": translation}), \"tool_call_id\": tool_call.id}\n", + " case \"count_letter_tool\":\n", + " letter = arguments.get('letter')\n", + " count = count_letter_tool(sentence, letter)\n", + " response = {\"role\": \"tool\", \"content\": json.dumps({\"count\": count}), \"tool_call_id\": tool_call.id}\n", + "\n", + " return response" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "d39344cc-9e89-47a0-9249-2e182091ee43", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* Running on local URL: http://127.0.0.1:7860\n", + "* To create a public link, set `share=True` in `launch()`.\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "gr.ChatInterface(fn=chat, type=\"messages\").launch()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From d870067b42681c3caf56287d9911b52495358d93 Mon Sep 17 00:00:00 2001 From: Daniel Fernandez Colon Date: Thu, 4 Sep 2025 00:59:04 +0200 Subject: [PATCH 6/6] Output cleaning --- ...sentence-translate-and-counter-agent.ipynb | 72 +++++-------------- 1 file changed, 17 insertions(+), 55 deletions(-) diff --git a/week2/community-contributions/week2-exercise-sentence-translate-and-counter-agent.ipynb b/week2/community-contributions/week2-exercise-sentence-translate-and-counter-agent.ipynb index 30bdc3c..cc0cb58 100644 --- a/week2/community-contributions/week2-exercise-sentence-translate-and-counter-agent.ipynb +++ b/week2/community-contributions/week2-exercise-sentence-translate-and-counter-agent.ipynb @@ -20,7 +20,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "id": "a07e7793-b8f5-44f4-aded-5562f633271a", "metadata": {}, "outputs": [], @@ -36,19 +36,10 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "id": "2118e80a-6181-4488-95cf-c9da0500ea56", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "OpenAI API Key exists and begins sk-proj-\n", - "Google API Key exists and begins AIzaSyA7\n" - ] - } - ], + "outputs": [], "source": [ "# Load 
environment variables in a file called .env\n", "# Print the key prefixes to help with any debugging\n", @@ -70,7 +61,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "id": "8ddc4764-e7f6-4512-8210-51bbfefbb3a9", "metadata": {}, "outputs": [], @@ -82,7 +73,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "id": "91bfd734-9c5e-4993-808e-b66489a92d4d", "metadata": {}, "outputs": [], @@ -95,7 +86,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "id": "d9ee11ae-23e2-42cc-b63d-b446f6d83c99", "metadata": {}, "outputs": [], @@ -108,7 +99,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "id": "a01d270e-f62e-41b3-8e46-ac173d7a1493", "metadata": {}, "outputs": [], @@ -119,7 +110,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "id": "4e85c8ed-3ba4-4283-8480-6979b0d5602f", "metadata": {}, "outputs": [], @@ -131,7 +122,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": null, "id": "8ee0c887-a63f-48dd-8eaf-68b0bf9263b6", "metadata": {}, "outputs": [], @@ -146,7 +137,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "id": "5f1ae918-cb99-4e60-80d3-37e16e514f55", "metadata": {}, "outputs": [], @@ -161,7 +152,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": null, "id": "2d499f2a-23b2-4fff-9d2d-f2333cbd109a", "metadata": {}, "outputs": [], @@ -189,7 +180,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": null, "id": "b58079a8-8def-4fa6-8273-34bf8eeb8cb5", "metadata": {}, "outputs": [], @@ -217,7 +208,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "id": "7ab7fc93-3540-48e5-bbe0-3e9ad2bbce15", "metadata": {}, "outputs": [], @@ -227,7 +218,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": null, "id": "678ccc37-c034-4035-bc3c-00fa8bcd8e64", "metadata": 
{}, "outputs": [], @@ -248,7 +239,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": null, "id": "2a1138e4-f849-4557-a74c-f9feb1572854", "metadata": {}, "outputs": [], @@ -273,39 +264,10 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": null, "id": "d39344cc-9e89-47a0-9249-2e182091ee43", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "* Running on local URL: http://127.0.0.1:7860\n", - "* To create a public link, set `share=True` in `launch()`.\n" - ] - }, - { - "data": { - "text/html": [ - "
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [] - }, - "execution_count": 16, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "gr.ChatInterface(fn=chat, type=\"messages\").launch()" ]