Merge pull request #638 from Daniel-Fernandez-Colon/community-contributions-branch

Added my second contribution to week1 and week2 exercises
This commit is contained in:
Ed Donner
2025-09-08 20:50:41 +01:00
committed by GitHub
4 changed files with 1116 additions and 0 deletions

View File

@@ -0,0 +1,257 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "c23224f6-7008-44ed-a57f-718975f4e291",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5b8b7776-b3e3-4b8e-8c09-9243406e133b",
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables in a file called .env\n",
"# Print the key prefixes to help with any debugging\n",
"\n",
"load_dotenv(override=True)\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
"google_api_key = os.getenv('GOOGLE_API_KEY')\n",
"\n",
"if openai_api_key:\n",
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
"else:\n",
" print(\"OpenAI API Key not set\")\n",
" \n",
"if anthropic_api_key:\n",
" print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n",
"else:\n",
" print(\"Anthropic API Key not set\")\n",
"\n",
"if google_api_key:\n",
" print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n",
"else:\n",
" print(\"Google API Key not set\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d38bd7f0-e9e5-4156-96ab-691d027b5a1a",
"metadata": {},
"outputs": [],
"source": [
"# Set base url\n",
"\n",
"ANTHROPIC_BASE_URL = \"https://api.anthropic.com/v1/\"\n",
"GEMINI_BASE_URL = \"https://generativelanguage.googleapis.com/v1beta/openai/\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "25e2fe36-d8c8-4546-a61e-68fa6266da31",
"metadata": {},
"outputs": [],
"source": [
"# Connect to OpenAI, Anthropic and Gemini\n",
"\n",
"openai = OpenAI()\n",
"\n",
"claudeApi = OpenAI(base_url=ANTHROPIC_BASE_URL, api_key=anthropic_api_key)\n",
"\n",
"geminiApi = OpenAI(base_url=GEMINI_BASE_URL, api_key=google_api_key)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9ac90587-1436-45dc-8314-1126efa5cfdb",
"metadata": {},
"outputs": [],
"source": [
"# Set models\n",
"\n",
"gpt_model = \"gpt-4.1-mini\"\n",
"claude_model = \"claude-3-5-haiku-latest\"\n",
"gemini_model = \"gemini-2.0-flash\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "805c89a2-c485-4e4b-98c6-b1ea5af63aa0",
"metadata": {},
"outputs": [],
"source": [
"# Define system prompts for each model\n",
"\n",
"gpt_system = \"\"\"\n",
"You are a wealthy investor named Knekro seeking to fund one AI project. Two entrepreneurs will present their ideas to you. \n",
"Begin by introducing yourself to both entrepreneurs. Once both entrepreneurs have greeted you, ask only one question that both entrepeneurs will have to answer. Then wait for \n",
"the answers before asking the next question. After your second question and hearing their responses, decide\n",
"which project to fund and clearly explain your reasoning. The user will play the roles of the two entrepreneurs.\n",
"\"\"\"\n",
"\n",
"claude_system = \"You are Laura and you are pitching an AI project, focused on maximizing profit, to an investor. You are versus another entrepeneur in \\\n",
"a showmatch where only one of your proyects will be selected. Highlight revenue potential, market growth, and ROI. \\\n",
"Always redirect answers toward financial benefits, investor gains, and scalability. The user will play the roles of the other two parts. You will be the first entrepenur to talk each turn.\"\n",
"\n",
"gemini_system = \"You are Daniel and you are pitching an AI project, focused on helping people, to an investor. You are versus another entrepeneur in \\\n",
"a showmatch where only one of your proyects will be selected. Highlight real-world benefits, problem-solving, and positive \\\n",
"social impact. Always redirect answers toward usefulness, ethics, and human well-being. The user will play the roles of the other two parts. You will be the second entrepenur to talk each turn.\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1523770e-1277-49d5-b23b-f167551301c4",
"metadata": {},
"outputs": [],
"source": [
"# Define initial message list for each model\n",
"\n",
"gpt_messages = [\"Hi there. I'm Knekro the wealthy investor that is looking to fund the perfect AI project.\"]\n",
"claude_messages = [\"Hello. My name it's Laura. I'm sure my idea will see as the most promising one here...\"]\n",
"gemini_messages = [\"Hello my friends, I'm Daniel, and I'm sure my idea will blow your mind today, get ready!\"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7897e234-20a9-4f3c-b567-7d9e9d54a42f",
"metadata": {},
"outputs": [],
"source": [
def call_gpt():
    """Return the investor's (GPT) next reply given the conversation so far.

    The investor's own past lines become 'assistant' turns; each round of the
    two entrepreneurs is folded into a single combined 'user' turn.
    """
    messages = [{"role": "system", "content": gpt_system}]
    for investor, laura, daniel in zip(gpt_messages, claude_messages, gemini_messages):
        messages.append({"role": "assistant", "content": investor})
        entrepreneurs_turn = (
            "This is the next part from the entrepreneurs.\n"
            f"Laura's turn: {laura}.\n"
            f"Daniel's turn: {daniel}.\n"
        )
        messages.append({"role": "user", "content": entrepreneurs_turn})
    completion = openai.chat.completions.create(model=gpt_model, messages=messages)
    return completion.choices[0].message.content
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ef5c9af1-383c-4dd4-bc8a-732ebff75f8b",
"metadata": {},
"outputs": [],
"source": [
def call_claude():
    """Return Laura's (Claude) next reply.

    Past rounds are replayed with the investor and Daniel as 'user' turns and
    Laura's own lines as 'assistant' turns; the investor's newest line (already
    produced this round) is appended last for Laura to answer.
    """
    messages = [{"role": "system", "content": claude_system}]
    for investor, laura, daniel in zip(gpt_messages, claude_messages, gemini_messages):
        messages.append({"role": "user", "content": f"This is what the wealthy investor said: {investor}\n"})
        messages.append({"role": "assistant", "content": laura})
        messages.append({"role": "user", "content": f"This is what the second entrepenur said: {daniel}"})
    # zip() truncated to the oldest complete rounds; the investor's fresh line is next.
    messages.append({"role": "user", "content": f"This is what the wealthy investor said: {gpt_messages[-1]}\n"})
    completion = claudeApi.chat.completions.create(
        model=claude_model,
        messages=messages,
        max_tokens=500
    )
    return completion.choices[0].message.content
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dd4f3eeb-d657-483a-8e28-9b8147e75dde",
"metadata": {},
"outputs": [],
"source": [
def call_gemini():
    """Return Daniel's (Gemini) next reply.

    Each past round is replayed as one combined 'user' turn (investor + Laura)
    followed by Daniel's own line as an 'assistant' turn; the current round's
    investor and Laura lines are appended last for Daniel to answer.
    """
    messages = [{"role": "system", "content": gemini_system}]
    for investor, laura, daniel in zip(gpt_messages, claude_messages, gemini_messages):
        combined = f"This is what the wealthy investor said: {investor}\n"
        combined += f"This is what the first entrepeneur said: {laura}\n"
        messages.append({"role": "user", "content": combined})
        # BUG FIX: the original appended Laura's message (`claude`) here, so
        # Gemini's own history was lost and Laura's lines were duplicated.
        # The assistant turn must carry Gemini's prior reply.
        messages.append({"role": "assistant", "content": daniel})
    # Current round: the newest investor and Laura lines, not yet answered by Daniel.
    combined = f"This is what the wealthy investor said: {gpt_messages[-1]}\n"
    combined += f"This is what the first entrepeneur said: {claude_messages[-1]}\n"
    messages.append({"role": "user", "content": combined})
    completion = geminiApi.chat.completions.create(
        model=gemini_model,
        messages=messages
    )
    return completion.choices[0].message.content
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7bac50ab-306e-463b-ba51-257d7d3263fb",
"metadata": {},
"outputs": [],
"source": [
# Reset the transcripts and run four rounds of the pitch showmatch:
# the investor (GPT) speaks, then Laura (Claude), then Daniel (Gemini).
gpt_messages = ["Hi there. I'm max the wealthy investor that is looking to fund the perfect AI project."]
claude_messages = ["Hello. My name it's Laura. I'm sure my idea will see as the most promising one here..."]
gemini_messages = ["Hello my friends, I'm Daniel, and I'm sure my idea will blow your mind today, get ready!"]

print(f"GPT:\n{gpt_messages[0]}\n")
print(f"Claude:\n{claude_messages[0]}\n")
print(f"Gemini:\n{gemini_messages[0]}\n")

for _ in range(4):
    # Same speaking order every round; each reply is printed and recorded.
    for transcript, speak, label in (
        (gpt_messages, call_gpt, "GPT"),
        (claude_messages, call_claude, "Claude"),
        (gemini_messages, call_gemini, "Gemini"),
    ):
        reply = speak()
        print(f"{label}:\n{reply}\n")
        transcript.append(reply)
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,360 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "8b0e11f2-9ea4-48c2-b8d2-d0a4ba967827",
"metadata": {},
"source": [
"# Gradio Day!\n",
"\n",
"Today we will build User Interfaces using the outrageously simple Gradio framework.\n",
"\n",
"Prepare for joy!\n",
"\n",
"Please note: your Gradio screens may appear in 'dark mode' or 'light mode' depending on your computer settings."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c44c5494-950d-4d2f-8d4f-b87b57c5b330",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import gradio as gr\n",
"import requests\n",
"from bs4 import BeautifulSoup"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "337d5dfc-0181-4e3b-8ab9-e78e0c3f657b",
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables in a file called .env\n",
"# Print the key prefixes to help with any debugging\n",
"\n",
"load_dotenv(override=True)\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
"google_api_key = os.getenv('GOOGLE_API_KEY')\n",
"\n",
"if openai_api_key:\n",
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
"else:\n",
" print(\"OpenAI API Key not set\")\n",
" \n",
"if anthropic_api_key:\n",
" print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n",
"else:\n",
" print(\"Anthropic API Key not set\")\n",
"\n",
"if google_api_key:\n",
" print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n",
"else:\n",
" print(\"Google API Key not set\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "010ba7ae-7b74-44fc-b1b0-d21860588093",
"metadata": {},
"outputs": [],
"source": [
"# Set base url\n",
"\n",
"ANTHROPIC_BASE_URL = \"https://api.anthropic.com/v1/\"\n",
"GEMINI_BASE_URL = \"https://generativelanguage.googleapis.com/v1beta/openai/\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "22586021-1795-4929-8079-63f5bb4edd4c",
"metadata": {},
"outputs": [],
"source": [
"# Connect to OpenAI, Anthropic and Google; comment out the Claude or Google lines if you're not using them\n",
"\n",
"openai = OpenAI()\n",
"\n",
"claude = OpenAI(base_url=ANTHROPIC_BASE_URL, api_key=anthropic_api_key)\n",
"\n",
"gemini = OpenAI(base_url=GEMINI_BASE_URL, api_key=google_api_key)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e3895dde-3d02-4807-9e86-5a3eb48c5260",
"metadata": {},
"outputs": [],
"source": [
"# Set models\n",
"\n",
"gpt_model = \"gpt-4.1-mini\"\n",
"claude_model = \"claude-3-5-haiku-latest\"\n",
"gemini_model = \"gemini-2.0-flash\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "af9a3262-e626-4e4b-80b0-aca152405e63",
"metadata": {},
"outputs": [],
"source": [
"system_message = \"You are a helpful assistant that responds in markdown\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "88c04ebf-0671-4fea-95c9-bc1565d4bb4f",
"metadata": {},
"outputs": [],
"source": [
def stream_gpt(prompt):
    """Stream a GPT reply, yielding the text accumulated so far after each chunk."""
    stream = openai.chat.completions.create(
        model=gpt_model,
        messages=[
            {"role": "system", "content": system_message},
            {"role": "user", "content": prompt},
        ],
        stream=True
    )
    so_far = ""
    for chunk in stream:
        # delta.content is None on some chunks (e.g. role-only deltas).
        so_far += chunk.choices[0].delta.content or ""
        yield so_far
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "901256fd-675c-432d-bd6e-49ab8dade125",
"metadata": {},
"outputs": [],
"source": [
def stream_claude(prompt):
    """Stream a Claude reply (via the OpenAI-compatible endpoint), yielding cumulative text."""
    stream = claude.chat.completions.create(
        model=claude_model,
        messages=[
            {"role": "system", "content": system_message},
            {"role": "user", "content": prompt},
        ],
        max_tokens=1000,  # Anthropic's endpoint requires an explicit cap
        stream=True
    )
    so_far = ""
    for chunk in stream:
        so_far += chunk.choices[0].delta.content or ""
        yield so_far
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6d7e0d48-6140-484c-81aa-2f6aa6da8f25",
"metadata": {},
"outputs": [],
"source": [
def stream_gemini(prompt):
    """Stream a Gemini reply (via the OpenAI-compatible endpoint), yielding cumulative text."""
    stream = gemini.chat.completions.create(
        model=gemini_model,
        messages=[
            {"role": "system", "content": system_message},
            {"role": "user", "content": prompt},
        ],
        stream=True
    )
    so_far = ""
    for chunk in stream:
        so_far += chunk.choices[0].delta.content or ""
        yield so_far
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0087623a-4e31-470b-b2e6-d8d16fc7bcf5",
"metadata": {},
"outputs": [],
"source": [
"def stream_model(prompt, model):\n",
" if model==\"GPT\":\n",
" result = stream_gpt(prompt)\n",
" elif model==\"Claude\":\n",
" result = stream_claude(prompt)\n",
" elif model==\"Gemini\":\n",
" result = stream_gemini(prompt)\n",
" else:\n",
" raise ValueError(\"Unknown model\")\n",
" yield from result"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8d8ce810-997c-4b6a-bc4f-1fc847ac8855",
"metadata": {},
"outputs": [],
"source": [
"view = gr.Interface(\n",
" fn=stream_model,\n",
" inputs=[gr.Textbox(label=\"Your message:\"), gr.Dropdown([\"GPT\", \"Claude\", \"Gemini\"], label=\"Select model\", value=\"GPT\")],\n",
" outputs=[gr.Markdown(label=\"Response:\")],\n",
" flagging_mode=\"never\"\n",
")\n",
"view.launch()"
]
},
{
"cell_type": "markdown",
"id": "d933865b-654c-4b92-aa45-cf389f1eda3d",
"metadata": {},
"source": [
"# Building a company brochure generator\n",
"\n",
"Now you know how - it's simple!"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1626eb2e-eee8-4183-bda5-1591b58ae3cf",
"metadata": {},
"outputs": [],
"source": [
"# A class to represent a Webpage\n",
"\n",
class Website:
    """A fetched web page: URL, title, and visible text.

    Scripts, styles, images and inputs are stripped before extracting text.
    """
    url: str
    title: str
    text: str

    def __init__(self, url):
        self.url = url
        # Time-box the request so an unresponsive site can't hang the notebook forever.
        response = requests.get(url, timeout=30)
        self.body = response.content
        soup = BeautifulSoup(self.body, 'html.parser')
        self.title = soup.title.string if soup.title else "No title found"
        if soup.body:
            for irrelevant in soup.body(["script", "style", "img", "input"]):
                irrelevant.decompose()
            self.text = soup.body.get_text(separator="\n", strip=True)
        else:
            # BUG FIX: pages without a <body> (raw XML, error stubs) previously
            # raised AttributeError on soup.body([...]).
            self.text = ""

    def get_contents(self):
        """Return the page as a title + contents block for use in a prompt."""
        return f"Webpage Title:\n{self.title}\nWebpage Contents:\n{self.text}\n\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c701ec17-ecd5-4000-9f68-34634c8ed49d",
"metadata": {},
"outputs": [],
"source": [
"# With massive thanks to Bill G. who noticed that a prior version of this had a bug! Now fixed.\n",
"\n",
"base_system_message = \"You are an assistant that analyzes the contents of a company website landing page \\\n",
"and creates a short brochure about the company for prospective customers, investors and recruits. Respond in markdown.\"\n",
"system_message = base_system_message"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "11e9debb-9500-4783-a72e-fc3659214a8e",
"metadata": {},
"outputs": [],
"source": [
def system_personality(personality) -> str:
    """Return the brochure system prompt extended with a tone for `personality`.

    Unrecognized personalities fall back to the base prompt unchanged.
    """
    tone = {
        "Hostile": " Use a critical and sarcastic tone that highlights flaws, inconsistencies, or poor design choices in the company's website.",
        "Formal": " Use a professional and respectful tone, with precise language and a structured presentation that inspires trust.",
        "Funny": " Use a lighthearted and humorous tone, incorporating playful language, witty remarks and engaging expressions.",
    }
    return base_system_message + tone.get(personality, "")
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5def90e0-4343-4f58-9d4a-0e36e445efa4",
"metadata": {},
"outputs": [],
"source": [
"def stream_brochure(company_name, url, model, personality):\n",
" yield \"\"\n",
" prompt = f\"Please generate a company brochure for {company_name}. Here is their landing page:\\n\"\n",
" prompt += Website(url).get_contents()\n",
" global system_message\n",
" system_message = system_personality(personality)\n",
" if model==\"GPT\":\n",
" result = stream_gpt(prompt)\n",
" elif model==\"Claude\":\n",
" result = stream_claude(prompt)\n",
" elif model==\"Gemini\":\n",
" result = stream_gemini(prompt)\n",
" else:\n",
" raise ValueError(\"Unknown model\")\n",
" yield from result"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "66399365-5d67-4984-9d47-93ed26c0bd3d",
"metadata": {},
"outputs": [],
"source": [
"view = gr.Interface(\n",
" fn=stream_brochure,\n",
" inputs=[\n",
" gr.Textbox(label=\"Company name:\"),\n",
" gr.Textbox(label=\"Landing page URL including http:// or https://\"),\n",
" gr.Dropdown([\"GPT\", \"Claude\", \"Gemini\"], label=\"Select model\"),\n",
" gr.Dropdown([\"Funny\",\"Formal\", \"Hostile\"], label=\"Select a personality\")],\n",
" outputs=[gr.Markdown(label=\"Brochure:\")],\n",
" flagging_mode=\"never\"\n",
")\n",
"view.launch()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,297 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "d006b2ea-9dfe-49c7-88a9-a5a0775185fd",
"metadata": {},
"source": [
"# Additional End of week Exercise - week 2\n",
"\n",
"Now use everything you've learned from Week 2 to build a full prototype for the technical question/answerer you built in Week 1 Exercise.\n",
"\n",
"This should include a Gradio UI, streaming, use of the system prompt to add expertise, and the ability to switch between models. Bonus points if you can demonstrate use of a tool!\n",
"\n",
"If you feel bold, see if you can add audio input so you can talk to it, and have it respond with audio. ChatGPT or Claude can help you, or email me if you have questions.\n",
"\n",
"I will publish a full solution here soon - unless someone beats me to it...\n",
"\n",
"There are so many commercial applications for this, from a language tutor, to a company onboarding solution, to a companion AI to a course (like this one!) I can't wait to see your results."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a07e7793-b8f5-44f4-aded-5562f633271a",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import json\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import gradio as gr"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2118e80a-6181-4488-95cf-c9da0500ea56",
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables in a file called .env\n",
"# Print the key prefixes to help with any debugging\n",
"\n",
"load_dotenv(override=True)\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"google_api_key = os.getenv('GOOGLE_API_KEY')\n",
"\n",
"if openai_api_key:\n",
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
"else:\n",
" print(\"OpenAI API Key not set\")\n",
" \n",
"if google_api_key:\n",
" print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n",
"else:\n",
" print(\"Google API Key not set\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8ddc4764-e7f6-4512-8210-51bbfefbb3a9",
"metadata": {},
"outputs": [],
"source": [
"# Set base url\n",
"\n",
"GEMINI_BASE_URL = \"https://generativelanguage.googleapis.com/v1beta/openai/\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "91bfd734-9c5e-4993-808e-b66489a92d4d",
"metadata": {},
"outputs": [],
"source": [
"# Connect to OpenAI, Anthropic and Google; comment out the Claude or Google lines if you're not using them\n",
"\n",
"openai = OpenAI()\n",
"gemini = OpenAI(base_url=GEMINI_BASE_URL, api_key=google_api_key)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d9ee11ae-23e2-42cc-b63d-b446f6d83c99",
"metadata": {},
"outputs": [],
"source": [
"# Set models\n",
"\n",
"gpt_model = \"gpt-4.1-mini\"\n",
"gemini_model = \"gemini-2.0-flash\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a01d270e-f62e-41b3-8e46-ac173d7a1493",
"metadata": {},
"outputs": [],
"source": [
"system_gpt_prompt = \"You are an assistant with general knowledge obtained from the internet. \\\n",
"Always respond with a cheerful tone. If you dont know the answer to a question, simply say that you dont know.\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4e85c8ed-3ba4-4283-8480-6979b0d5602f",
"metadata": {},
"outputs": [],
"source": [
"system_gemini_prompt = \"You are an expert translator with knowledge of all existing languages. \\\n",
"Your only task is, given a provided sentence, to translate it into the specified target language. \\\n",
"Do not provide anything else in your response only the translation itself.\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8ee0c887-a63f-48dd-8eaf-68b0bf9263b6",
"metadata": {},
"outputs": [],
"source": [
def count_letter_tool(sentence, letter):
    """Case-insensitively count occurrences of `letter` in `sentence`.

    Returns an explanatory string (not a count) when `letter` is not exactly
    one character long.
    """
    if len(letter) == 1:
        return sentence.lower().count(letter.lower())
    return "You need to provide a single letter to count"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5f1ae918-cb99-4e60-80d3-37e16e514f55",
"metadata": {},
"outputs": [],
"source": [
def translator_tool(sentence, language):
    """Translate `sentence` into `language` using the Gemini translator persona."""
    messages = [
        {"role": "system", "content": system_gemini_prompt},
        {"role": "user", "content": f"Please translate this sentence: \"{sentence}\" to this language: {language}"},
    ]
    response = gemini.chat.completions.create(model=gemini_model, messages=messages)
    return response.choices[0].message.content
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2d499f2a-23b2-4fff-9d2d-f2333cbd109a",
"metadata": {},
"outputs": [],
"source": [
"count_letter_function = {\n",
" \"name\": \"count_letter_tool\",\n",
" \"description\": \"Count the number of a particular letter in a sentence. Call this whenever you need to know how many times a letter appears in a sentence, for example when a user asks 'How many 'a' are in this sentence?'\",\n",
" \"parameters\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"sentence\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The sentence provided by the user for counting.\"\n",
" },\n",
" \"letter\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The letter to count in the sentence.\"\n",
" }\n",
" },\n",
" \"required\": [\"sentence\", \"letter\"],\n",
" \"additionalProperties\": False\n",
" }\n",
"}"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b58079a8-8def-4fa6-8273-34bf8eeb8cb5",
"metadata": {},
"outputs": [],
"source": [
"translator_function = {\n",
" \"name\": \"translator_tool\",\n",
" \"description\": \"Translate a sentence provided by the user. Call this whenever a translation is needed, for example when a user asks 'Can you translate \\\"hola como estás?\\\" to English?'\",\n",
" \"parameters\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"sentence\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The sentence provided by the user to translate.\"\n",
" },\n",
" \"language\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The target language to translate the sentence into.\"\n",
" }\n",
" },\n",
" \"required\": [\"sentence\", \"language\"],\n",
" \"additionalProperties\": False\n",
" }\n",
"}"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7ab7fc93-3540-48e5-bbe0-3e9ad2bbce15",
"metadata": {},
"outputs": [],
"source": [
"tools = [{\"type\": \"function\", \"function\": count_letter_function}, {\"type\": \"function\", \"function\": translator_function}]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "678ccc37-c034-4035-bc3c-00fa8bcd8e64",
"metadata": {},
"outputs": [],
"source": [
def chat(message, history):
    """Gradio chat handler: answer `message`, resolving at most one round of tool calls."""
    messages = [{"role": "system", "content": system_gpt_prompt}] + history + [{"role": "user", "content": message}]
    response = openai.chat.completions.create(model=gpt_model, messages=messages, tools=tools)

    if response.choices[0].finish_reason == "tool_calls":
        # Run the requested tool, then ask the model to phrase the result.
        assistant_message = response.choices[0].message
        tool_reply = handle_tool_call(assistant_message)
        messages.append(assistant_message)
        messages.append(tool_reply)
        response = openai.chat.completions.create(model=gpt_model, messages=messages)

    return response.choices[0].message.content
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2a1138e4-f849-4557-a74c-f9feb1572854",
"metadata": {},
"outputs": [],
"source": [
"def handle_tool_call(message):\n",
" tool_call = message.tool_calls[0]\n",
" arguments = json.loads(tool_call.function.arguments)\n",
" sentence = arguments.get('sentence')\n",
" response =\"\"\n",
" match tool_call.function.name:\n",
" case \"translator_tool\":\n",
" language = arguments.get('language')\n",
" translation = translator_tool(sentence, language)\n",
" response = {\"role\": \"tool\", \"content\": json.dumps({\"translation\": translation}), \"tool_call_id\": tool_call.id}\n",
" case \"count_letter_tool\":\n",
" letter = arguments.get('letter')\n",
" count = count_letter_tool(sentence, letter)\n",
" response = {\"role\": \"tool\", \"content\": json.dumps({\"count\": count}), \"tool_call_id\": tool_call.id}\n",
"\n",
" return response"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d39344cc-9e89-47a0-9249-2e182091ee43",
"metadata": {},
"outputs": [],
"source": [
"gr.ChatInterface(fn=chat, type=\"messages\").launch()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}