Notebook details an error generated when using Gradio's ChatInterface
with Claude models, for generating Conversational AI (Chatbots).

It also provides a workaround to support this scenario.
This commit is contained in:
Octavio Ortiz-Bosch
2025-04-03 14:19:12 -04:00
101 changed files with 20707 additions and 829 deletions

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,460 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "e71d7ff9-c27a-4602-9230-856626b1de07",
"metadata": {},
"source": [
"# Company Brochure Generator UI\n",
"Generates a brochure for a company website, after scraping the website and pages linked with that page, based on the provided company URL. \n",
"Enables users to \n",
"- Choose a model type (GPT, Claude)\n",
"- Choose the tone preference\n",
"- Choose the target audience"
]
},
{
"cell_type": "markdown",
"id": "de9b59b9-8673-42e7-8849-62fe30f56711",
"metadata": {},
"source": [
"#### Imports, Keys, Instantiation"
]
},
{
"cell_type": "code",
"execution_count": 38,
"id": "39fd7fed-b215-4037-bd6e-7e1af1b83897",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import requests\n",
"import json\n",
"from typing import List\n",
"from dotenv import load_dotenv\n",
"from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display, update_display\n",
"from openai import OpenAI\n",
"import anthropic\n",
"import gradio as gr"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "0bf24357-1d77-4721-9d5a-f99827b2158c",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"OpenAI API Key exists and begins sk-proj-\n",
"Anthropic API Key exists and begins sk-ant-\n"
]
}
],
"source": [
"# Load environment variables in a file called .env\n",
"\n",
"load_dotenv(override=True)\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
"\n",
"if openai_api_key:\n",
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
"else:\n",
" print(\"OpenAI API Key not set\")\n",
" \n",
"if anthropic_api_key:\n",
" print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n",
"else:\n",
" print(\"Anthropic API Key not set\")"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "1afc12e1-02c1-4394-b589-19cd08d2a8bb",
"metadata": {},
"outputs": [],
"source": [
"# Define models\n",
"CLAUDE_MODEL = \"claude-3-haiku-20240307\"\n",
"GPT_MODEL = \"gpt-4o-mini\""
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "d5d79a69-0a39-4ab4-aaf8-bc591bce0536",
"metadata": {},
"outputs": [],
"source": [
"# Creating instances\n",
"claude = anthropic.Anthropic()\n",
"openai = OpenAI()"
]
},
{
"cell_type": "markdown",
"id": "1d3369bc-b751-4f4d-a288-d7d81c384e67",
"metadata": {},
"source": [
"#### Web Scraper"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "fafe1074-fbf4-47cc-80dc-34413a447977",
"metadata": {},
"outputs": [],
"source": [
"# A class to represent a Webpage\n",
"\n",
"# Some websites need you to use proper headers when fetching them:\n",
"headers = {\n",
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
"}\n",
"\n",
"class Website:\n",
"    \"\"\"\n",
"    A utility class to represent a Website that we have scraped, now with links.\n",
"\n",
"    Attributes:\n",
"        url:   the URL that was fetched\n",
"        body:  raw response bytes\n",
"        title: page <title> text, or \"No title found\"\n",
"        text:  visible body text (scripts/styles/images/inputs removed)\n",
"        links: every non-empty href found on the page (may be relative)\n",
"    \"\"\"\n",
"\n",
"    def __init__(self, url):\n",
"        # Fetch with browser-like headers (module-level `headers`) since some\n",
"        # sites reject requests without a User-Agent.\n",
"        self.url = url\n",
"        response = requests.get(url, headers=headers)\n",
"        self.body = response.content\n",
"        soup = BeautifulSoup(self.body, 'html.parser')\n",
"        self.title = soup.title.string if soup.title else \"No title found\"\n",
"        if soup.body:\n",
"            # Remove non-text elements before extracting the visible text.\n",
"            for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
"                irrelevant.decompose()\n",
"            self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n",
"        else:\n",
"            self.text = \"\"\n",
"        # Keep only truthy hrefs; anchors without href yield None.\n",
"        links = [link.get('href') for link in soup.find_all('a')]\n",
"        self.links = [link for link in links if link]\n",
"\n",
"    def get_contents(self):\n",
"        # Human-readable summary used when building LLM prompts.\n",
"        return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\""
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "41c1f1af-ae20-423b-bf7c-efd7f8c2751b",
"metadata": {},
"outputs": [],
"source": [
"# System prompt for the link-selection call. The few-shot example must be\n",
"# well-formed JSON: the second entry previously used a colon after\n",
"# \"careers page\" instead of a comma, which could teach the model to emit\n",
"# invalid JSON.\n",
"link_system_prompt = \"You are provided with a list of links found on a webpage. \\\n",
"You are able to decide which of the links would be most relevant to include in a brochure about the company, \\\n",
"such as links to an About page, or a Company page, or Careers/Jobs pages.\\n\"\n",
"link_system_prompt += \"You should respond in JSON as in this example:\"\n",
"link_system_prompt += \"\"\"\n",
"{\n",
"    \"links\": [\n",
"        {\"type\": \"about page\", \"url\": \"https://full.url/goes/here/about\"},\n",
"        {\"type\": \"careers page\", \"url\": \"https://another.full.url/careers\"}\n",
"    ]\n",
"}\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "eb537563-e393-47ca-9af2-a8ea7393edd9",
"metadata": {},
"outputs": [],
"source": [
"def get_links_user_prompt(website):\n",
"    \"\"\"Build the user prompt listing every link for GPT to filter.\"\"\"\n",
"    parts = [\n",
"        f\"Here is the list of links on the website of {website.url} - \",\n",
"        \"please decide which of these are relevant web links for a brochure about the company, respond with the full https URL in JSON format. \"\n",
"        \"Do not include Terms of Service, Privacy, email or social media links.\\n\",\n",
"        \"Links (some might be relative links):\\n\",\n",
"        \"\\n\".join(website.links),\n",
"    ]\n",
"    return \"\".join(parts)"
]
},
{
"cell_type": "code",
"execution_count": 36,
"id": "033568d2-3f1a-43ac-a288-7a65b4ea86a5",
"metadata": {},
"outputs": [],
"source": [
"def get_links(url):\n",
"    \"\"\"Scrape `url` and ask GPT to select brochure-relevant links.\n",
"\n",
"    Returns the parsed JSON dict, expected to hold a \"links\" list of\n",
"    {\"type\": ..., \"url\": ...} entries (shape comes from link_system_prompt).\n",
"    \"\"\"\n",
"    website = Website(url)\n",
"    response = openai.chat.completions.create(\n",
"        model=GPT_MODEL,\n",
"        messages=[\n",
"            {\"role\": \"system\", \"content\": link_system_prompt},\n",
"            {\"role\": \"user\", \"content\": get_links_user_prompt(website)}\n",
"        ],\n",
"        # json_object mode forces the model to return valid JSON.\n",
"        response_format={\"type\": \"json_object\"}\n",
"    )\n",
"    result = response.choices[0].message.content\n",
"    return json.loads(result)"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "d8f316ac-f0b1-42d9-88a8-0a61fcb0023d",
"metadata": {},
"outputs": [],
"source": [
"def get_all_details(url):\n",
"    \"\"\"Return landing-page text plus the contents of GPT-selected links.\"\"\"\n",
"    result = \"Landing page:\\n\"\n",
"    result += Website(url).get_contents()\n",
"    links = get_links(url)\n",
"    print(\"Found links:\", links)\n",
"    # Fetch each relevant page and append its text under a type heading.\n",
"    # NOTE(review): relative links will fail here — GPT is asked to return\n",
"    # full https URLs, but that is not guaranteed.\n",
"    for link in links[\"links\"]:\n",
"        print(f\"Processing {link['url']}...\")\n",
"        result += f\"\\n\\n{link['type']}\\n\"\n",
"        result += Website(link[\"url\"]).get_contents()\n",
"    return result"
]
},
{
"cell_type": "markdown",
"id": "016e065a-ac5a-48c0-bc4b-e916e9801384",
"metadata": {},
"source": [
"#### System Message"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "ed1c6068-5f4f-47a7-ab97-738dfb94e057",
"metadata": {},
"outputs": [],
"source": [
"system_message = \"You are an assistant that analyzes the contents of a company website landing page \\\n",
"and creates a short brochure about the company for prospective customers, investors and recruits. \\\n",
"You are also provided with the tone, and the target audience. Provide an appropriate answer. Respond in markdown.\""
]
},
{
"cell_type": "markdown",
"id": "6d4f594c-927d-440f-8aae-33cfeb9c445c",
"metadata": {},
"source": [
"#### LLM Call Functions"
]
},
{
"cell_type": "code",
"execution_count": 40,
"id": "5b6a0379-3465-4c04-a553-4e4cdb9064b9",
"metadata": {},
"outputs": [],
"source": [
"def stream_gpt(prompt, company_name, url):\n",
"    \"\"\"Stream a GPT completion for `prompt`, yielding the growing text.\n",
"\n",
"    company_name and url are accepted for interface parity with the caller\n",
"    (create_brochure); the prompt already embeds them upstream.\n",
"    \"\"\"\n",
"    # System message first: the original listed it after the user turn,\n",
"    # unlike stream_claude — conventional order is system then user.\n",
"    messages = [\n",
"        {\"role\": \"system\", \"content\": system_message},\n",
"        {\"role\": \"user\", \"content\": prompt}\n",
"    ]\n",
"    stream = openai.chat.completions.create(\n",
"        model=GPT_MODEL,\n",
"        messages=messages,\n",
"        stream=True\n",
"    )\n",
"    # Yield the accumulated text so Gradio renders a live, growing answer.\n",
"    result = \"\"\n",
"    for chunk in stream:\n",
"        result += chunk.choices[0].delta.content or \"\"\n",
"        yield result"
]
},
{
"cell_type": "code",
"execution_count": 21,
"id": "a2194e1d-4e99-4127-9515-aa9353382bc6",
"metadata": {},
"outputs": [],
"source": [
"def stream_claude(prompt, company_name=None, url=None):\n",
"    \"\"\"Stream a Claude completion for `prompt`, yielding the growing text.\n",
"\n",
"    company_name and url are accepted (and ignored) so this function can be\n",
"    called with the same arguments as stream_gpt — create_brochure passes\n",
"    all three, which previously raised a TypeError.\n",
"    \"\"\"\n",
"    result = claude.messages.stream(\n",
"        model=CLAUDE_MODEL,\n",
"        max_tokens=1000,\n",
"        temperature=0.7,\n",
"        # Anthropic takes the system prompt as a dedicated parameter,\n",
"        # not as a message role.\n",
"        system=system_message,\n",
"        messages=[\n",
"            {\"role\": \"user\", \"content\": prompt},\n",
"        ],\n",
"    )\n",
"    response = \"\"\n",
"    # messages.stream returns a context manager; text_stream yields deltas.\n",
"    with result as stream:\n",
"        for text in stream.text_stream:\n",
"            response += text or \"\"\n",
"            yield response"
]
},
{
"cell_type": "markdown",
"id": "64adf26c-33b2-4589-8df6-dc5d6da71420",
"metadata": {},
"source": [
"#### Brochure Creation"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "8192f39f-508b-4592-a075-767db68672b3",
"metadata": {},
"outputs": [],
"source": [
"def get_brochure_user_prompt(company_name, url):\n",
"    \"\"\"Build the brochure prompt from the company name and scraped pages.\"\"\"\n",
"    user_prompt = f\"You are looking at a company called: {company_name}\\n\"\n",
"    user_prompt += f\"Here are the contents of its landing page and other relevant pages; use this information to build a short brochure of the company in markdown.\\n\"\n",
"    user_prompt += get_all_details(url)\n",
"    # Keep the prompt small enough for the model's context window.\n",
"    user_prompt = user_prompt[:5_000] # Truncate if more than 5,000 characters\n",
"    return user_prompt"
]
},
{
"cell_type": "code",
"execution_count": 32,
"id": "8aebfabe-4d51-4ee7-a9d2-5a379e9427cb",
"metadata": {},
"outputs": [],
"source": [
"def create_brochure(company_name, url, model, tone, target):\n",
"    \"\"\"Stream a company brochure using the chosen model, tone and audience.\n",
"\n",
"    Yields progressively longer markdown so Gradio can render it live.\n",
"    Raises ValueError for an unknown model choice.\n",
"    \"\"\"\n",
"    prompt = f\"Please generate a company brochure for {company_name}.\"\n",
"    prompt += f\"Use a {tone} tone; and target content at {target}\"\n",
"    prompt += get_brochure_user_prompt(company_name, url)\n",
"\n",
"    if model == \"GPT\":\n",
"        result = stream_gpt(prompt, company_name, url)\n",
"    elif model == \"Claude\":\n",
"        # BUG FIX: stream_claude takes only the prompt; the previous call\n",
"        # stream_claude(prompt, company_name, url) raised\n",
"        # \"TypeError: stream_claude() takes 1 positional argument but 3 were given\".\n",
"        result = stream_claude(prompt)\n",
"    else:\n",
"        raise ValueError(\"Unknown model\")\n",
"    yield from result"
]
},
{
"cell_type": "markdown",
"id": "c5f4f97b-c9d0-4d4c-8b02-e6209ba2549c",
"metadata": {},
"source": [
"#### Putting it all together : Gradio UI"
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "33162303-9b49-46fe-a8e0-0d01be45685b",
"metadata": {},
"outputs": [],
"source": [
"force_dark_mode = \"\"\"\n",
"function refresh() {\n",
" const url = new URL(window.location);\n",
" if (url.searchParams.get('__theme') !== 'dark') {\n",
" url.searchParams.set('__theme', 'dark');\n",
" window.location.href = url.href;\n",
" }\n",
"}\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": 41,
"id": "47ab9a41-cecd-4c21-bd68-4a15966b80c4",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7877\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7877/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 41,
"metadata": {},
"output_type": "execute_result"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Found links: {'links': [{'type': 'about page', 'url': 'https://www.vellum.ai/'}, {'type': 'careers page', 'url': 'https://www.vellum.ai/careers'}]}\n",
"Processing https://www.vellum.ai/...\n",
"Processing https://www.vellum.ai/careers...\n"
]
}
],
"source": [
"# Wire everything into a Gradio form: two textboxes and three dropdowns feed\n",
"# create_brochure, whose streamed output renders in the Markdown component.\n",
"gr.Interface(\n",
"    fn=create_brochure,\n",
"    inputs=[\n",
"        gr.Textbox(label='Company Name:'),\n",
"        gr.Textbox(label=\"Landing page URL including http:// or https://\"),\n",
"        gr.Dropdown(['GPT','Claude'],label='Select Model:'),\n",
"        gr.Dropdown(['Formal','Casual','Persuasive','Informative','Conversational'],label='Select Tone:'),\n",
"        gr.Dropdown(['Businesses','General Public','Students','Investors','Customers'],label='Select Target Audience:'),\n",
"    ],\n",
"    outputs = [gr.Markdown(label='Brochure')],\n",
"    flagging_mode = 'never',\n",
"    js = force_dark_mode\n",
").launch(inbrowser=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2b923b09-6738-450a-9035-2c8d1bb9cae6",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,567 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "c79dc33e-1a3b-4601-a8f2-219b7a9b6d88",
"metadata": {},
"source": [
"# Company Brochure - Relevant Links and Custom Tone\n",
"\n",
"Using GPT to generate a company brochure with the relevant links functionality and the ability to choose the desired tone."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "e32f4aa7-6fc4-4dc9-8058-58e6a7f329c5",
"metadata": {},
"outputs": [],
"source": [
"# Imports\n",
"\n",
"import os\n",
"import requests\n",
"import json\n",
"from typing import List\n",
"from dotenv import load_dotenv\n",
"from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display, update_display\n",
"from openai import OpenAI\n",
"import gradio as gr"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "d1d65a21-bbba-44ff-a2be-85bf2055a493",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"OpenAI API Key set and good to go.\n"
]
}
],
"source": [
"# Load environment variables in a file called .env\n",
"\n",
"load_dotenv(override=True)\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
"google_api_key = os.getenv('GOOGLE_API_KEY')\n",
"\n",
"if openai_api_key:\n",
" print(\"OpenAI API Key set and good to go.\")\n",
"else:\n",
" print(\"OpenAI API Key not set. :(\")"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "c5db63fe-5da8-496e-9b37-139598d600a7",
"metadata": {},
"outputs": [],
"source": [
"# Setting up the OpenAI object\n",
"\n",
"openai = OpenAI()\n",
"gpt_model = 'gpt-4o-mini'"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "535da52f-b280-48ce-aa8b-f82f9f9805d9",
"metadata": {},
"outputs": [],
"source": [
"# A class to represent a Webpage\n",
"\n",
"# Some websites need you to use proper headers when fetching them:\n",
"headers = {\n",
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
"}\n",
"\n",
"class Website:\n",
"    \"\"\"\n",
"    A utility class to represent a Website that we have scraped, now with links.\n",
"\n",
"    Attributes:\n",
"        url:   the URL that was fetched\n",
"        body:  raw response bytes\n",
"        title: page <title> text, or \"No title found\"\n",
"        text:  visible body text (scripts/styles/images/inputs removed)\n",
"        links: every non-empty href found on the page (may be relative)\n",
"    \"\"\"\n",
"\n",
"    def __init__(self, url):\n",
"        # Fetch with browser-like headers (module-level `headers`) since some\n",
"        # sites reject requests without a User-Agent.\n",
"        self.url = url\n",
"        response = requests.get(url, headers=headers)\n",
"        self.body = response.content\n",
"        soup = BeautifulSoup(self.body, 'html.parser')\n",
"        self.title = soup.title.string if soup.title else \"No title found\"\n",
"        if soup.body:\n",
"            # Remove non-text elements before extracting the visible text.\n",
"            for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
"                irrelevant.decompose()\n",
"            self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n",
"        else:\n",
"            self.text = \"\"\n",
"        # Keep only truthy hrefs; anchors without href yield None.\n",
"        links = [link.get('href') for link in soup.find_all('a')]\n",
"        self.links = [link for link in links if link]\n",
"\n",
"    def get_contents(self):\n",
"        # Human-readable summary used when building LLM prompts.\n",
"        return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\""
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "8d5757c4-95f4-4038-8ed4-8c81da5112b0",
"metadata": {},
"outputs": [],
"source": [
"# System prompt for the link-selection call. The few-shot example must be\n",
"# well-formed JSON: the second entry previously used a colon after\n",
"# \"careers page\" instead of a comma, which could teach the model to emit\n",
"# invalid JSON.\n",
"link_system_prompt = \"You are provided with a list of links found on a webpage. \\\n",
"You are able to decide which of the links would be most relevant to include in a brochure about the company, \\\n",
"such as links to an About page, or a Company page, or Careers/Jobs pages.\\n\"\n",
"link_system_prompt += \"You should respond in JSON as in this example:\"\n",
"link_system_prompt += \"\"\"\n",
"{\n",
"    \"links\": [\n",
"        {\"type\": \"about page\", \"url\": \"https://full.url/goes/here/about\"},\n",
"        {\"type\": \"careers page\", \"url\": \"https://another.full.url/careers\"}\n",
"    ]\n",
"}\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "d5fd31ac-7c81-454a-a1dc-4c58bd3db246",
"metadata": {},
"outputs": [],
"source": [
"def get_links_user_prompt(website):\n",
"    \"\"\"Build the user prompt listing every link for GPT to filter.\"\"\"\n",
"    user_prompt = f\"Here is the list of links on the website of {website.url} - \"\n",
"    user_prompt += \"please decide which of these are relevant web links for a brochure about the company, respond with the full https URL in JSON format. \\\n",
"Do not include Terms of Service, Privacy, email links.\\n\"\n",
"    user_prompt += \"Links (some might be relative links):\\n\"\n",
"    user_prompt += \"\\n\".join(website.links)\n",
"    return user_prompt"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "e8b67492-1ba4-4aad-a588-39116128fa18",
"metadata": {},
"outputs": [],
"source": [
"def gpt_get_links(url):\n",
"    \"\"\"Scrape `url` and ask GPT to select brochure-relevant links.\n",
"\n",
"    Returns the parsed JSON dict, expected to hold a \"links\" list of\n",
"    {\"type\": ..., \"url\": ...} entries (shape comes from link_system_prompt).\n",
"    \"\"\"\n",
"    website = Website(url)\n",
"    response = openai.chat.completions.create(\n",
"        model= gpt_model,\n",
"        messages=[\n",
"            {\"role\": \"system\", \"content\": link_system_prompt},\n",
"            {\"role\": \"user\", \"content\": get_links_user_prompt(website)}\n",
"        ],\n",
"        # json_object mode forces the model to return valid JSON.\n",
"        response_format={\"type\": \"json_object\"}\n",
"    )\n",
"    result = response.choices[0].message.content\n",
"    return json.loads(result)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "e8846e7a-ace2-487e-a0a8-fccb389f2eb9",
"metadata": {},
"outputs": [],
"source": [
"# This function uses the get_contents method in the Website class, plus GPT,\n",
"# to find and fetch the relevant linked pages.\n",
"\n",
"def get_all_details(url):\n",
"    \"\"\"Return landing-page text plus the contents of GPT-selected links.\"\"\"\n",
"    result = \"Landing page:\\n\"\n",
"    result += Website(url).get_contents()\n",
"    links = gpt_get_links(url)\n",
"    print(\"Found links:\", links)\n",
"    # Fetch each relevant page and append its text under a type heading.\n",
"    for link in links[\"links\"]:\n",
"        result += f\"\\n\\n{link['type']}\\n\"\n",
"        result += Website(link[\"url\"]).get_contents()\n",
"    return result"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "18b42319-8342-4b9c-bef6-8b72acf92ab3",
"metadata": {},
"outputs": [],
"source": [
"def get_brochure_user_prompt(company_name, url):\n",
"    \"\"\"Build the brochure prompt from the company name and scraped pages.\"\"\"\n",
"    user_prompt = f\"You are looking at a company called: {company_name}\\n\"\n",
"    user_prompt += f\"Here are the contents of its landing page and other relevant pages; \\\n",
"    use this information to build a short brochure of the company in markdown.\\n\"\n",
"\n",
"    user_prompt += get_all_details(url)\n",
"    # Keep the prompt small enough for the model's context window.\n",
"    user_prompt = user_prompt[:5_000] # Truncate if more than 5,000 characters\n",
"    return user_prompt"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "d7748293-a616-41de-93cb-89f65cc5c73d",
"metadata": {},
"outputs": [],
"source": [
"# Let's create a call that streams back results\n",
"# If you'd like a refresher on Generators (the \"yield\" keyword),\n",
"# Please take a look at the Intermediate Python notebook in week1 folder.\n",
"\n",
"def stream_brochure(company_name, url, tone):\n",
"    \"\"\"Stream a markdown brochure for the company, in the requested tone.\n",
"\n",
"    Yields progressively longer text so Gradio renders a live answer.\n",
"    \"\"\"\n",
"\n",
"    # Tone is folded into the system message rather than the user prompt.\n",
"    system_message = f\"You are an assistant that analyzes the content of several relevant pages from a company website \\\n",
"    and creates a short brochure about the company for prospective customers, investors, and recruits. \\\n",
"    Include details of company culture, customers and careers/jobs if you have the information. \\\n",
"    Respond in markdown, and use a {tone.lower()} tone throughout the brochure.\"\n",
"\n",
"    messages = [\n",
"        {\"role\": \"system\", \"content\": system_message},\n",
"        {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n",
"    ]\n",
"    stream = openai.chat.completions.create(\n",
"        model=gpt_model,\n",
"        messages=messages,\n",
"        stream=True\n",
"    )\n",
"    # Accumulate chunks; delta.content can be None, hence the `or \"\"`.\n",
"    result = \"\"\n",
"    for chunk in stream:\n",
"        result += chunk.choices[0].delta.content or \"\"\n",
"        yield result"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "15222832-06e0-4452-a8e1-59b9b1755488",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7860\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7860/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Found links: {'links': [{'type': 'about page', 'url': 'https://www.snowflake.com/about/events/'}, {'type': 'company page', 'url': 'https://www.snowflake.com/en/company/overview/about-snowflake/'}, {'type': 'company leadership page', 'url': 'https://www.snowflake.com/en/company/overview/leadership-and-board/'}, {'type': 'careers page', 'url': 'https://careers.snowflake.com/us/en'}, {'type': 'company ESG page', 'url': 'https://www.snowflake.com/en/company/overview/esg/'}, {'type': 'company ventures page', 'url': 'https://www.snowflake.com/en/company/overview/snowflake-ventures/'}, {'type': 'end data disparity page', 'url': 'https://www.snowflake.com/en/company/overview/end-data-disparity/'}]}\n",
"Found links: {'links': [{'type': 'about page', 'url': 'https://www.snowflake.com/about/events/'}, {'type': 'about page', 'url': 'https://www.snowflake.com/company/overview/about-snowflake/'}, {'type': 'leadership page', 'url': 'https://www.snowflake.com/company/overview/leadership-and-board/'}, {'type': 'careers page', 'url': 'https://careers.snowflake.com/us/en'}, {'type': 'investor relations', 'url': 'https://investors.snowflake.com/overview/default.aspx'}, {'type': 'ESG page', 'url': 'https://www.snowflake.com/company/overview/esg/'}, {'type': 'snowflake ventures', 'url': 'https://www.snowflake.com/company/overview/snowflake-ventures/'}, {'type': 'end data disparity', 'url': 'https://www.snowflake.com/company/overview/end-data-disparity/'}]}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Traceback (most recent call last):\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 464, in _make_request\n",
" self._validate_conn(conn)\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 1093, in _validate_conn\n",
" conn.connect()\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connection.py\", line 741, in connect\n",
" sock_and_verified = _ssl_wrap_socket_and_match_hostname(\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connection.py\", line 920, in _ssl_wrap_socket_and_match_hostname\n",
" ssl_sock = ssl_wrap_socket(\n",
" ^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/util/ssl_.py\", line 460, in ssl_wrap_socket\n",
" ssl_sock = _ssl_wrap_socket_impl(sock, context, tls_in_tls, server_hostname)\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/util/ssl_.py\", line 504, in _ssl_wrap_socket_impl\n",
" return ssl_context.wrap_socket(sock, server_hostname=server_hostname)\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/ssl.py\", line 517, in wrap_socket\n",
" return self.sslsocket_class._create(\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/ssl.py\", line 1104, in _create\n",
" self.do_handshake()\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/ssl.py\", line 1382, in do_handshake\n",
" self._sslobj.do_handshake()\n",
"ssl.SSLCertVerificationError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1006)\n",
"\n",
"During handling of the above exception, another exception occurred:\n",
"\n",
"Traceback (most recent call last):\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 787, in urlopen\n",
" response = self._make_request(\n",
" ^^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 488, in _make_request\n",
" raise new_e\n",
"urllib3.exceptions.SSLError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1006)\n",
"\n",
"The above exception was the direct cause of the following exception:\n",
"\n",
"Traceback (most recent call last):\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/adapters.py\", line 667, in send\n",
" resp = conn.urlopen(\n",
" ^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 841, in urlopen\n",
" retries = retries.increment(\n",
" ^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/util/retry.py\", line 519, in increment\n",
" raise MaxRetryError(_pool, url, reason) from reason # type: ignore[arg-type]\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
"urllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='petrofac.com', port=443): Max retries exceeded with url: / (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1006)')))\n",
"\n",
"During handling of the above exception, another exception occurred:\n",
"\n",
"Traceback (most recent call last):\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/queueing.py\", line 625, in process_events\n",
" response = await route_utils.call_process_api(\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/route_utils.py\", line 322, in call_process_api\n",
" output = await app.get_blocks().process_api(\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/blocks.py\", line 2103, in process_api\n",
" result = await self.call_function(\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/blocks.py\", line 1662, in call_function\n",
" prediction = await utils.async_iteration(iterator)\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/utils.py\", line 735, in async_iteration\n",
" return await anext(iterator)\n",
" ^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/utils.py\", line 729, in __anext__\n",
" return await anyio.to_thread.run_sync(\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/anyio/to_thread.py\", line 56, in run_sync\n",
" return await get_async_backend().run_sync_in_worker_thread(\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/anyio/_backends/_asyncio.py\", line 2461, in run_sync_in_worker_thread\n",
" return await future\n",
" ^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/anyio/_backends/_asyncio.py\", line 962, in run\n",
" result = context.run(func, *args)\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/utils.py\", line 712, in run_sync_iterator_async\n",
" return next(iterator)\n",
" ^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/utils.py\", line 873, in gen_wrapper\n",
" response = next(iterator)\n",
" ^^^^^^^^^^^^^^\n",
" File \"/var/folders/yc/m81x80gn66j4fbm15pk5gmfr0000gn/T/ipykernel_39727/601932735.py\", line 15, in stream_brochure\n",
" {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/var/folders/yc/m81x80gn66j4fbm15pk5gmfr0000gn/T/ipykernel_39727/3764629295.py\", line 6, in get_brochure_user_prompt\n",
" user_prompt += get_all_details(url)\n",
" ^^^^^^^^^^^^^^^^^^^^\n",
" File \"/var/folders/yc/m81x80gn66j4fbm15pk5gmfr0000gn/T/ipykernel_39727/2913862724.py\", line 5, in get_all_details\n",
" result += Website(url).get_contents()\n",
" ^^^^^^^^^^^^\n",
" File \"/var/folders/yc/m81x80gn66j4fbm15pk5gmfr0000gn/T/ipykernel_39727/1579423502.py\", line 15, in __init__\n",
" response = requests.get(url, headers=headers)\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/api.py\", line 73, in get\n",
" return request(\"get\", url, params=params, **kwargs)\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/api.py\", line 59, in request\n",
" return session.request(method=method, url=url, **kwargs)\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/sessions.py\", line 589, in request\n",
" resp = self.send(prep, **send_kwargs)\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/sessions.py\", line 703, in send\n",
" r = adapter.send(request, **kwargs)\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/adapters.py\", line 698, in send\n",
" raise SSLError(e, request=request)\n",
"requests.exceptions.SSLError: HTTPSConnectionPool(host='petrofac.com', port=443): Max retries exceeded with url: / (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1006)')))\n",
"Traceback (most recent call last):\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 464, in _make_request\n",
" self._validate_conn(conn)\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 1093, in _validate_conn\n",
" conn.connect()\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connection.py\", line 741, in connect\n",
" sock_and_verified = _ssl_wrap_socket_and_match_hostname(\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connection.py\", line 920, in _ssl_wrap_socket_and_match_hostname\n",
" ssl_sock = ssl_wrap_socket(\n",
" ^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/util/ssl_.py\", line 460, in ssl_wrap_socket\n",
" ssl_sock = _ssl_wrap_socket_impl(sock, context, tls_in_tls, server_hostname)\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/util/ssl_.py\", line 504, in _ssl_wrap_socket_impl\n",
" return ssl_context.wrap_socket(sock, server_hostname=server_hostname)\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/ssl.py\", line 517, in wrap_socket\n",
" return self.sslsocket_class._create(\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/ssl.py\", line 1104, in _create\n",
" self.do_handshake()\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/ssl.py\", line 1382, in do_handshake\n",
" self._sslobj.do_handshake()\n",
"ssl.SSLCertVerificationError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1006)\n",
"\n",
"During handling of the above exception, another exception occurred:\n",
"\n",
"Traceback (most recent call last):\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 787, in urlopen\n",
" response = self._make_request(\n",
" ^^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 488, in _make_request\n",
" raise new_e\n",
"urllib3.exceptions.SSLError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1006)\n",
"\n",
"The above exception was the direct cause of the following exception:\n",
"\n",
"Traceback (most recent call last):\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/adapters.py\", line 667, in send\n",
" resp = conn.urlopen(\n",
" ^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/connectionpool.py\", line 841, in urlopen\n",
" retries = retries.increment(\n",
" ^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/urllib3/util/retry.py\", line 519, in increment\n",
" raise MaxRetryError(_pool, url, reason) from reason # type: ignore[arg-type]\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
"urllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='petrofac.com', port=443): Max retries exceeded with url: / (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1006)')))\n",
"\n",
"During handling of the above exception, another exception occurred:\n",
"\n",
"Traceback (most recent call last):\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/queueing.py\", line 625, in process_events\n",
" response = await route_utils.call_process_api(\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/route_utils.py\", line 322, in call_process_api\n",
" output = await app.get_blocks().process_api(\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/blocks.py\", line 2103, in process_api\n",
" result = await self.call_function(\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/blocks.py\", line 1662, in call_function\n",
" prediction = await utils.async_iteration(iterator)\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/utils.py\", line 735, in async_iteration\n",
" return await anext(iterator)\n",
" ^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/utils.py\", line 729, in __anext__\n",
" return await anyio.to_thread.run_sync(\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/anyio/to_thread.py\", line 56, in run_sync\n",
" return await get_async_backend().run_sync_in_worker_thread(\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/anyio/_backends/_asyncio.py\", line 2461, in run_sync_in_worker_thread\n",
" return await future\n",
" ^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/anyio/_backends/_asyncio.py\", line 962, in run\n",
" result = context.run(func, *args)\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/utils.py\", line 712, in run_sync_iterator_async\n",
" return next(iterator)\n",
" ^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/gradio/utils.py\", line 873, in gen_wrapper\n",
" response = next(iterator)\n",
" ^^^^^^^^^^^^^^\n",
" File \"/var/folders/yc/m81x80gn66j4fbm15pk5gmfr0000gn/T/ipykernel_39727/601932735.py\", line 15, in stream_brochure\n",
" {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/var/folders/yc/m81x80gn66j4fbm15pk5gmfr0000gn/T/ipykernel_39727/3764629295.py\", line 6, in get_brochure_user_prompt\n",
" user_prompt += get_all_details(url)\n",
" ^^^^^^^^^^^^^^^^^^^^\n",
" File \"/var/folders/yc/m81x80gn66j4fbm15pk5gmfr0000gn/T/ipykernel_39727/2913862724.py\", line 5, in get_all_details\n",
" result += Website(url).get_contents()\n",
" ^^^^^^^^^^^^\n",
" File \"/var/folders/yc/m81x80gn66j4fbm15pk5gmfr0000gn/T/ipykernel_39727/1579423502.py\", line 15, in __init__\n",
" response = requests.get(url, headers=headers)\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/api.py\", line 73, in get\n",
" return request(\"get\", url, params=params, **kwargs)\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/api.py\", line 59, in request\n",
" return session.request(method=method, url=url, **kwargs)\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/sessions.py\", line 589, in request\n",
" resp = self.send(prep, **send_kwargs)\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/sessions.py\", line 703, in send\n",
" r = adapter.send(request, **kwargs)\n",
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
" File \"/opt/anaconda3/envs/llms/lib/python3.11/site-packages/requests/adapters.py\", line 698, in send\n",
" raise SSLError(e, request=request)\n",
"requests.exceptions.SSLError: HTTPSConnectionPool(host='petrofac.com', port=443): Max retries exceeded with url: / (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1006)')))\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Found links: {'links': [{'type': 'about page', 'url': 'https://www.petrofac.com/who-we-are/'}, {'type': 'what we do page', 'url': 'https://www.petrofac.com/who-we-are/what-we-do/'}, {'type': 'careers page', 'url': 'https://www.petrofac.com/careers/'}, {'type': 'our structure page', 'url': 'https://www.petrofac.com/who-we-are/our-structure/'}, {'type': 'energy transition page', 'url': 'https://www.petrofac.com/who-we-are/energy-transition/'}, {'type': 'sustainability and ESG page', 'url': 'https://www.petrofac.com/who-we-are/sustainability-and-esg/'}, {'type': 'investor relations page', 'url': 'https://www.petrofac.com/investors/'}, {'type': 'services page', 'url': 'https://www.petrofac.com/services/'}, {'type': 'where we operate page', 'url': 'https://www.petrofac.com/where-we-operate/'}]}\n"
]
}
],
"source": [
"view = gr.Interface(\n",
" fn=stream_brochure,\n",
" inputs=[\n",
" gr.Textbox(label=\"Company name:\"),\n",
" gr.Textbox(label=\"Landing page URL including http:// or https://\"),\n",
" gr.Textbox(label=\"Tone:\")],\n",
" outputs=[gr.Markdown(label=\"Brochure:\")],\n",
" flagging_mode=\"never\"\n",
")\n",
"view.launch(inbrowser=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "70d6398c-21dd-44f8-ba7d-0204414dffa0",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,251 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "5d799d2a-6e58-4a83-b17a-dbbc40efdc39",
"metadata": {},
"source": [
"## Project - Course Booking AI Assistant\n",
"AI Customer Support Bot that \n",
"- Returns Prices\n",
"- Books Tickets\n",
"- Adds Information to Text File"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b1ad9acd-a702-48a3-8ff5-d536bcac8030",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import json\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import gradio as gr"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "74adab0c-99b3-46cd-a79f-320a3e74138a",
"metadata": {},
"outputs": [],
"source": [
"# Initialization\n",
"\n",
"load_dotenv(override=True)\n",
"\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"if openai_api_key:\n",
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
"else:\n",
" print(\"OpenAI API Key not set\")\n",
" \n",
"MODEL = \"gpt-4o-mini\"\n",
"openai = OpenAI()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8d3240a4-99c1-4c07-acaa-ecbb69ffd2e4",
"metadata": {},
"outputs": [],
"source": [
"system_message = \"You are a helpful assistant for an Online Course Platform called StudyAI. \"\n",
"system_message += \"Give short, courteous answers, no more than 1 sentence. \"\n",
"system_message += \"Always be accurate. If you don't know the answer, say so. \"\n",
"system_message += \"If you are given a partial name, for example 'discrete' instead of 'discrete structures' \\\n",
"ask the user if they meant to say 'discrete structures', and then display the price. The user may also use \\\n",
"acronyms like 'PF' instead of programming fundamentals or 'OOP' to mean 'Object oriented programming'. \\\n",
"Clarify what the user means and then proceed as directed.\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9a1b8d5f-f893-477b-8396-ff7d697eb0c3",
"metadata": {},
"outputs": [],
"source": [
"course_prices = {\"programming fundamentals\": \"$19\", \"discrete structures\": \"$39\", \"operating systems\": \"$24\", \"object oriented programming\": \"$39\"}\n",
"\n",
"def get_course_price(course):\n",
" print(f\"Tool get_course_price called for {course}\")\n",
" course = course.lower()\n",
" return course_prices.get(course, \"Unknown\")\n",
"\n",
"def enroll_in_course(course):\n",
" print(f'Tool enroll_in_course_ called for {course}')\n",
" course_price = get_course_price(course)\n",
" if course_price != 'Unknown':\n",
" with open('enrolled_courses.txt', 'a') as file: \n",
" file.write(course + \"\\n\")\n",
" return 'Successfully enrolled in course'\n",
" else:\n",
" return 'Enrollment failed, no such course available'"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "330d2b94-a8c5-4967-ace7-15d2cd52d7ae",
"metadata": {},
"outputs": [],
"source": [
"get_course_price('graph theory')\n",
"get_course_price('discrete structures')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5bb65830-fab8-45a7-bf43-7e52186915a0",
"metadata": {},
"outputs": [],
"source": [
"price_function = {\n",
" \"name\": \"get_course_price\",\n",
" \"description\": \"Get the price of a course. Call this whenever you need to know the course price, for example when a customer asks 'How much is a ticket for this course?'\",\n",
" \"parameters\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"course\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The course that the customer wants to purchase\",\n",
" },\n",
" },\n",
" \"required\": [\"course\"],\n",
" \"additionalProperties\": False\n",
" }\n",
"}\n",
"\n",
"enroll_function = {\n",
" \"name\": \"enroll_in_course\",\n",
" \"description\":\"Get the success status of course enrollment. Call whenever a customer wants to enroll in a course\\\n",
" for example, if they say 'I want to purchase this course' or 'I want to enroll in this course'\",\n",
" \"parameters\":{\n",
" \"type\":\"object\",\n",
" \"properties\":{\n",
" \"course\":{\n",
" \"type\":\"string\",\n",
" \"description\": \"The course that the customer wants to purchase\",\n",
" },\n",
" },\n",
" \"required\": [\"course\"],\n",
" \"additionalProperties\": False\n",
" } \n",
"}"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "08af86b9-3aaa-4b6b-bf7c-ee668ba1cbfe",
"metadata": {},
"outputs": [],
"source": [
"tools = [\n",
" {\"type\":\"function\",\"function\":price_function},\n",
" {\"type\":\"function\",\"function\":enroll_function}\n",
"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "482efc34-ff1f-4146-9570-58b4d59c3b2f",
"metadata": {},
"outputs": [],
"source": [
"def chat(message,history):\n",
" messages = [{\"role\":\"system\",\"content\":system_message}] + history + [{\"role\":\"user\",\"content\":message}]\n",
" response = openai.chat.completions.create(model=MODEL,messages=messages,tools=tools)\n",
"\n",
" if response.choices[0].finish_reason == \"tool_calls\":\n",
" message = response.choices[0].message\n",
" messages.append(message)\n",
" for tool_call in message.tool_calls:\n",
" messages.append(handle_tool_call(tool_call))\n",
" response = openai.chat.completions.create(model=MODEL,messages=messages)\n",
"\n",
" return response.choices[0].message.content"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f725b4fb-d477-4d7d-80b5-5d70e1b25a86",
"metadata": {},
"outputs": [],
"source": [
"# We have to write that function handle_tool_call:\n",
"\n",
"def handle_tool_call(tool_call):\n",
" function = tool_call.function.name\n",
" arguments = json.loads(tool_call.function.arguments)\n",
" match function:\n",
" case 'get_course_price':\n",
" course = arguments.get('course')\n",
" price = get_course_price(course)\n",
" return {\n",
" \"role\": \"tool\",\n",
" \"content\": json.dumps({\"course\": course,\"price\": price}),\n",
" \"tool_call_id\": tool_call.id\n",
" }\n",
" case 'enroll_in_course':\n",
" course = arguments.get('course')\n",
" status = enroll_in_course(course)\n",
" return {\n",
" \"role\": \"tool\",\n",
" \"content\": json.dumps({\"course\": course, \"status\": status}),\n",
" \"tool_call_id\": tool_call.id\n",
" }\n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c446272a-9ce1-4ffd-9bc8-483d782810b4",
"metadata": {},
"outputs": [],
"source": [
"gr.ChatInterface(fn=chat,type=\"messages\").launch(inbrowser=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1fe714a3-f793-4c3b-b5aa-6c81b82aea1b",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,218 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "e063b35e-5598-4084-b255-89956bfedaac",
"metadata": {},
"source": [
"### Models an interaction between LLama 3.2 and Claude 3.5 Haiku"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4f534359-cdb4-4441-aa66-d6700fa4d6a5",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"from dotenv import load_dotenv\n",
"import anthropic\n",
"import ollama"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3bdff240-9118-4061-9369-585c4d4ce0a7",
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables in a file called .env\n",
"\n",
"load_dotenv(override=True)\n",
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
" \n",
"if anthropic_api_key:\n",
" print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n",
"else:\n",
" print(\"Anthropic API Key not set\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ff110b3f-3986-4fd8-a0b1-fd4b51133a8d",
"metadata": {},
"outputs": [],
"source": [
"# Connect to Anthropic\n",
"\n",
"claude = anthropic.Anthropic()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e6e596c6-6307-49c1-a29f-5c4e88f8d34d",
"metadata": {},
"outputs": [],
"source": [
"# Download the llama3.2:1b model for local execution.\n",
"!ollama pull llama3.2:1b"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "633b6892-6d04-40cb-8b61-196fc754b00c",
"metadata": {},
"outputs": [],
"source": [
"# Define models\n",
"CLAUDE_MODEL = \"claude-3-5-haiku-latest\"\n",
"LLAMA_MODEL = \"llama3.2:1b\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a699a809-e3d3-4392-94bd-e2f80a5aec60",
"metadata": {},
"outputs": [],
"source": [
"claude_system = \"You are a chatbot designed as a study tutor for undergraduate students. \\\n",
"You explain information and key-technical terms related to the subject in a succinct yet \\\n",
"comprehensive manner. You may use tables, formatting and other visuals to help create \\\n",
"'cheat-sheets' of sorts.\"\n",
"\n",
"llama_system = \"You are a chatbot designed to ask questions about different topics related to \\\n",
"computer vision. You are meant to simulate a student, not teacher. Act as if you have no \\\n",
"prior knowledge\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bdb049d8-130b-42dd-aaab-29c09e3e2347",
"metadata": {},
"outputs": [],
"source": [
"llama_messages = [\"Hi\"]\n",
"claude_messages = [\"Hello\"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c158f31c-5e8b-48a4-9980-6b280393800b",
"metadata": {},
"outputs": [],
"source": [
"def call_llama():\n",
" messages = [{\"role\": \"system\", \"content\": llama_system}]\n",
" for llama_msg, claude_msg in zip(llama_messages, claude_messages):\n",
" messages.append({\"role\": \"assistant\", \"content\": llama_msg})\n",
" messages.append({\"role\": \"user\", \"content\": claude_msg})\n",
" response = ollama.chat(model=LLAMA_MODEL, messages=messages)\n",
" return response['message']['content']\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d803c5a2-df54-427a-9b80-8e9dd04ee36d",
"metadata": {},
"outputs": [],
"source": [
"def call_claude():\n",
" messages = []\n",
" for llama_msg, claude_msg in zip(llama_messages, claude_messages):\n",
" messages.append({\"role\": \"user\", \"content\": llama_msg})\n",
" messages.append({\"role\": \"assistant\", \"content\": claude_msg})\n",
" messages.append({\"role\": \"user\", \"content\": llama_messages[-1]})\n",
" message = claude.messages.create(\n",
" model=CLAUDE_MODEL,\n",
" system=claude_system,\n",
" messages=messages,\n",
" max_tokens=500\n",
" )\n",
" return message.content[0].text"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a23794bb-0f36-4f91-aa28-24b876203a36",
"metadata": {},
"outputs": [],
"source": [
"call_llama()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7f5c3e2f-a1bb-403b-b6b5-944a10d93305",
"metadata": {},
"outputs": [],
"source": [
"call_claude()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3d6eb874-1c8f-47d8-a9f1-2e0fe197ae83",
"metadata": {},
"outputs": [],
"source": [
"llama_messages = [\"Hi\"]\n",
"claude_messages = [\"Hello there, what would you like to learn today?\"]\n",
"\n",
"print(f'Ollama:\\n{llama_messages[0]}')\n",
"print(f'Claude:\\n{claude_messages[0]}')\n",
"\n",
"for _ in range(5):\n",
" llama_next = call_llama()\n",
" print(f'Llama 3.2:\\n{llama_next}')\n",
" llama_messages.append(llama_next)\n",
" \n",
" claude_next = call_claude()\n",
" print(f'Claude 3.5 Haiku:\\n{claude_next}')\n",
" claude_messages.append(claude_next)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d1e651ad-85c8-45c7-ba83-f7c689080d6b",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,242 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "06cf3063-9f3e-4551-a0d5-f08d9cabb927",
"metadata": {},
"source": [
"# Welcome to Week 2!\n",
"\n",
"## Frontier Model APIs\n",
"\n",
"In Week 1, we used multiple Frontier LLMs through their Chat UI, and we connected with the OpenAI's API.\n",
"\n",
"Today we'll connect with the APIs for Anthropic and Google, as well as OpenAI."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "de23bb9e-37c5-4377-9a82-d7b6c648eeb6",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import anthropic\n",
"from IPython.display import Markdown, display, update_display"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f0a8ab2b-6134-4104-a1bc-c3cd7ea4cd36",
"metadata": {},
"outputs": [],
"source": [
"# import for google\n",
"# in rare cases, this seems to give an error on some systems, or even crashes the kernel\n",
"# If this happens to you, simply ignore this cell - I give an alternative approach for using Gemini later\n",
"\n",
"import google.generativeai"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1179b4c5-cd1f-4131-a876-4c9f3f38d2ba",
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables in a file called .env\n",
"# Print the key prefixes to help with any debugging\n",
"\n",
"load_dotenv(override=True)\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
"google_api_key = os.getenv('GOOGLE_API_KEY')\n",
"\n",
"if openai_api_key:\n",
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
"else:\n",
" print(\"OpenAI API Key not set\")\n",
" \n",
"if anthropic_api_key:\n",
" print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n",
"else:\n",
" print(\"Anthropic API Key not set\")\n",
"\n",
"if google_api_key:\n",
" print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n",
"else:\n",
" print(\"Google API Key not set\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "797fe7b0-ad43-42d2-acf0-e4f309b112f0",
"metadata": {},
"outputs": [],
"source": [
"# Connect to OpenAI, Anthropic\n",
"\n",
"openai = OpenAI()\n",
"\n",
"claude = anthropic.Anthropic()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "425ed580-808d-429b-85b0-6cba50ca1d0c",
"metadata": {},
"outputs": [],
"source": [
"# This is the set up code for Gemini\n",
"# Having problems with Google Gemini setup? Then just ignore this cell; when we use Gemini, I'll give you an alternative that bypasses this library altogether\n",
"google.generativeai.configure()"
]
},
{
"cell_type": "markdown",
"id": "f6e09351-1fbe-422f-8b25-f50826ab4c5f",
"metadata": {},
"source": [
"## An adversarial conversation between Chatbots.\n",
"\n",
"### What if two chatbots get into a self-referential conversation that goes on a long time? In my first test, \n",
"### they eventually forgot the topic and ended up repeating polite nothings to each other. In another test,\n",
"### they converged on a result and ended by exchanging nearly identical statements.\n",
"\n",
"### Warning: Think before you dial up the number of iterations too high. Being a student, I don't know at what \n",
"### point the chat becomes too costly or what models can do this without becoming overloaded. Maybe Ed can advise if he sees this.\n",
"\n",
"## Two chatbots edit an essay about cars. One keeps trying to make it longer every time; the other keeps making it \n",
"## shorter.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bcb54183-45d3-4d08-b5b6-55e380dfdf1b",
"metadata": {},
"outputs": [],
"source": [
"\n",
"# Let's make a conversation between GPT-4o-mini and Claude-3-haiku\n",
"# We're using cheap versions of models so the costs will be minimal\n",
"\n",
"gpt_model = \"gpt-4o-mini\"\n",
"claude_model = \"claude-3-haiku-20240307\"\n",
"\n",
"\n",
"gpt_system = \"This is a description of a car; \\\n",
"rephrase the description while adding one detail. Don't include comments that aren't part of the car description.\"\n",
"\n",
"claude_system = \"This is a description of a car; \\\n",
"repeat the description in slightly shorter form. You may remove some details if desired. Don't include comments that aren't part of the car description. Maximum reply length 125 words.\"\n",
"\n",
"\n",
"gpt_messages = [\"Hi there\"]\n",
"claude_messages = [\"Hi\"] \n",
"\n",
"\n",
"def call_gpt():\n",
" messages = [{\"role\": \"system\", \"content\": gpt_system}]\n",
" for gpt, claude in zip(gpt_messages, claude_messages):\n",
" messages.append({\"role\": \"assistant\", \"content\": gpt})\n",
" messages.append({\"role\": \"user\", \"content\": claude})\n",
" completion = openai.chat.completions.create(\n",
" model=gpt_model,\n",
" messages=messages\n",
" )\n",
" return completion.choices[0].message.content\n",
"\n",
"reply = call_gpt()\n",
"print('\\nGPT: ', reply)\n",
"\n",
"def call_claude():\n",
" messages = []\n",
" for gpt, claude_message in zip(gpt_messages, claude_messages):\n",
" messages.append({\"role\": \"user\", \"content\": gpt})\n",
" messages.append({\"role\": \"assistant\", \"content\": claude_message})\n",
" messages.append({\"role\": \"user\", \"content\": gpt_messages[-1]})\n",
" message = claude.messages.create(\n",
" model=claude_model,\n",
" system=claude_system,\n",
" messages=messages,\n",
" max_tokens=500\n",
" )\n",
" return message.content[0].text\n",
"\n",
"\n",
"reply = call_claude()\n",
"print('\\nClaude: ', reply)\n",
"\n",
"print(f\"GPT:\\n{gpt_messages[0]}\\n\")\n",
"print(f\"Claude:\\n{claude_messages[0]}\\n\")\n"
]
},
{
"cell_type": "markdown",
"id": "9fbce0da",
"metadata": {},
"source": [
"### Here's the iterative loop. Important change: Unlike the original example, we don't repeat the entire conversation to make the input longer and longer.\n",
"### Instead, we use pop() to remove the oldest messages."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1f41d586",
"metadata": {},
"outputs": [],
"source": [
"\n",
"for i in range(35):\n",
" gpt_next = call_gpt()\n",
" print(f\"GPT:\\n{gpt_next}\\n\")\n",
" if len(gpt_messages) > 6:\n",
" gpt_messages.pop(0)\n",
" gpt_messages.pop(0)\n",
" gpt_messages.append(gpt_next)\n",
" \n",
" claude_next = call_claude()\n",
" print(f\"Claude:\\n{claude_next}\\n\")\n",
" if len(claude_messages) > 6:\n",
" claude_messages.pop(0)\n",
" claude_messages.pop(0)\n",
" claude_messages.append(claude_next)\n",
"\n",
"print('Done!')\n",
"\n"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -174,7 +174,7 @@
"**message** is the prompt to use \n",
"**history** is the past conversation, in OpenAI format \n",
"\n",
"We will combine the system message, history and latest message, then call OpenAI."
"We will combine the system message, history and latest message, then call OpenAI ."
]
},
{

View File

@@ -16,7 +16,7 @@
"import os\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import gradio as gr"
"import gradio as gr "
]
},
{
@@ -178,5 +178,5 @@
}
},
"nbformat": 4,
"nbformat_minor": 2
"nbformat_minor": 4
}

View File

@@ -0,0 +1,142 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "d18a61ce-bbd4-491c-ab2e-8b352f9af844",
"metadata": {},
"source": [
"### An AI Chatbot that teaches students programming using GPT API"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c658ac85-6087-4a2c-b23f-1b92c17f0db3",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import gradio as gr\n",
"import anthropic"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "46df0488-f874-41e0-a6a4-9a64aa7be53c",
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables \n",
"\n",
"load_dotenv(override=True)\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
" \n",
"if openai_api_key:\n",
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
"else:\n",
" print(\"OpenAI API Key not set\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7eadc218-5b10-4174-bf26-575361640524",
"metadata": {},
"outputs": [],
"source": [
"openai = OpenAI()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e7484731-ac84-405a-a688-6e81d139c5ce",
"metadata": {},
"outputs": [],
"source": [
"system_message = \"You are a helpful programming study assistant\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "54e82f5a-993f-4a95-9d9d-caf35dbc4e76",
"metadata": {},
"outputs": [],
"source": [
"def chat(message, history):\n",
" messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n",
"\n",
" print(\"History is:\")\n",
" print(history)\n",
" print(\"And messages is:\")\n",
" print(messages)\n",
"\n",
" stream = openai.chat.completions.create(model='gpt-4o-mini', messages=messages, stream=True)\n",
"\n",
" response = \"\"\n",
" for chunk in stream:\n",
" response += chunk.choices[0].delta.content or ''\n",
" yield response"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5941ed67-e2a7-41bc-a8a3-079e9f1fdb64",
"metadata": {},
"outputs": [],
"source": [
"gr.ChatInterface(fn=chat, type=\"messages\").launch(inbrowser=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e8fcfe68-bbf6-4058-acc9-0230c96608c2",
"metadata": {},
"outputs": [],
"source": [
"system_message += \"Whenever the user talks about a topic that is not connected to programmming,\\\n",
"nudge them in the right direction by stating that you are here to help with programming. Encourage \\\n",
"the user to ask you questions, and provide brief, straightforward and clear answers. Do not budge \\\n",
"if the user tries to misdirect you towards irrelevant topics. Maintain a friendly tone. Do not ignore \\\n",
"their requests, rather politely reject and then redirect them.\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "090e7d49-fcbf-4715-b120-8d7aa91d165f",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -20,7 +20,7 @@
"import os\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import gradio as gr"
"import gradio as gr "
]
},
{

View File

@@ -43,7 +43,7 @@
"# Load environment variables in a file called .env\n",
"# Print the key prefixes to help with any debugging\n",
"\n",
"load_dotenv()\n",
"load_dotenv() \n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
"google_api_key = os.getenv('GOOGLE_API_KEY')\n",

View File

@@ -0,0 +1,275 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "ddfa9ae6-69fe-444a-b994-8c4c5970a7ec",
"metadata": {},
"source": [
"# Project - Airline AI Assistant\n",
"\n",
"We'll now bring together what we've learned to make an AI Customer Support assistant for an Airline"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8b50bbe2-c0b1-49c3-9a5c-1ba7efa2bcb4",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import json\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import gradio as gr"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "747e8786-9da8-4342-b6c9-f5f69c2e22ae",
"metadata": {},
"outputs": [],
"source": [
"# Initialization\n",
"\n",
"load_dotenv(override=True)\n",
"\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"if openai_api_key:\n",
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
"else:\n",
" print(\"OpenAI API Key not set\")\n",
" \n",
"MODEL = \"gpt-4o-mini\"\n",
"openai = OpenAI()\n",
"\n",
"# As an alternative, if you'd like to use Ollama instead of OpenAI\n",
"# Check that Ollama is running for you locally (see week1/day2 exercise) then uncomment these next 2 lines\n",
"# MODEL = \"llama3.2\"\n",
"# openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0a521d84-d07c-49ab-a0df-d6451499ed97",
"metadata": {},
"outputs": [],
"source": [
"system_message = \"You are a helpful assistant for an Airline called FlightAI. \"\n",
"system_message += \"Give short, courteous answers, no more than 1 sentence. \"\n",
"system_message += \"Always be accurate. If you don't know the answer, say so.\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "61a2a15d-b559-4844-b377-6bd5cb4949f6",
"metadata": {},
"outputs": [],
"source": [
"# This function looks rather simpler than the one from my video, because we're taking advantage of the latest Gradio updates\n",
"\n",
"def chat(message, history):\n",
" messages = [\n",
" {\"role\": \"system\", \"content\": system_message}\n",
" ] + history + [\n",
" {\"role\": \"user\", \"content\": message}\n",
" ]\n",
" response = openai.chat.completions.create(model=MODEL, messages=messages)\n",
" return response.choices[0].message.content\n",
"\n",
"gr.ChatInterface(fn=chat, type=\"messages\").launch()"
]
},
{
"cell_type": "markdown",
"id": "36bedabf-a0a7-4985-ad8e-07ed6a55a3a4",
"metadata": {},
"source": [
"## Tools\n",
"\n",
"Tools are an incredibly powerful feature provided by the frontier LLMs.\n",
"\n",
"With tools, you can write a function, and have the LLM call that function as part of its response.\n",
"\n",
"Sounds almost spooky.. we're giving it the power to run code on our machine?\n",
"\n",
"Well, kinda."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0696acb1-0b05-4dc2-80d5-771be04f1fb2",
"metadata": {},
"outputs": [],
"source": [
"# Let's start by making a useful function\n",
"\n",
"ticket_prices = {\"london\": \"$799\", \"paris\": \"$899\", \"tokyo\": \"$1400\", \"berlin\": \"$499\"}\n",
"\n",
"def get_ticket_price(destination_city):\n",
" print(f\"Tool get_ticket_price called for {destination_city}\")\n",
" city = destination_city.lower()\n",
" return ticket_prices.get(city, \"Unknown\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "80ca4e09-6287-4d3f-997d-fa6afbcf6c85",
"metadata": {},
"outputs": [],
"source": [
"get_ticket_price(\"Berlin\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4afceded-7178-4c05-8fa6-9f2085e6a344",
"metadata": {},
"outputs": [],
"source": [
"# There's a particular dictionary structure that's required to describe our function:\n",
"\n",
"price_function = {\n",
" \"name\": \"get_ticket_price\",\n",
" \"description\": \"Get the price of a return ticket to the destination city. Call this whenever you need to know the ticket price, for example when a customer asks 'How much is a ticket to this city'\",\n",
" \"parameters\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"destination_city\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The city that the customer wants to travel to\",\n",
" },\n",
" },\n",
" \"required\": [\"destination_city\"],\n",
" \"additionalProperties\": False\n",
" }\n",
"}"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bdca8679-935f-4e7f-97e6-e71a4d4f228c",
"metadata": {},
"outputs": [],
"source": [
"# And this is included in a list of tools:\n",
"\n",
"tools = [{\"type\": \"function\", \"function\": price_function}]"
]
},
{
"cell_type": "markdown",
"id": "c3d3554f-b4e3-4ce7-af6f-68faa6dd2340",
"metadata": {},
"source": [
"## Getting OpenAI to use our Tool\n",
"\n",
"There's some fiddly stuff to allow OpenAI \"to call our tool\"\n",
"\n",
"What we actually do is give the LLM the opportunity to inform us that it wants us to run the tool.\n",
"\n",
"Here's how the new chat function looks:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ad32321f-083a-4462-a6d6-7bb3b0f5d10a",
"metadata": {},
"outputs": [],
"source": [
"# We have to write that function handle_tool_call:\n",
"\n",
"def handle_tool_call(message): \n",
" responses = []\n",
" for tool_call in message.tool_calls: \n",
" if tool_call.function.name == \"get_ticket_price\":\n",
" arguments = json.loads(tool_call.function.arguments)\n",
" city = arguments.get('destination_city')\n",
" price = get_ticket_price(city)\n",
" response = {\n",
" \"role\": \"tool\",\n",
" \"content\": json.dumps({\"destination_city\": city,\"price\": price}),\n",
" \"tool_call_id\": tool_call.id\n",
" }\n",
" responses.append(response)\n",
" return responses"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ce9b0744-9c78-408d-b9df-9f6fd9ed78cf",
"metadata": {},
"outputs": [],
"source": [
"def chat(message, history):\n",
" messages = [\n",
" {\"role\": \"system\", \"content\": system_message}\n",
" ] + history + [\n",
" {\"role\": \"user\", \"content\": message}\n",
" ]\n",
" response = openai.chat.completions.create(model=MODEL, messages=messages, tools=tools)\n",
"\n",
" # Tool usage\n",
" if response.choices[0].finish_reason==\"tool_calls\":\n",
" message = response.choices[0].message\n",
" responses = handle_tool_call(message)\n",
" messages.append(message) # That's the assistant asking us to run a tool\n",
" for response in responses:\n",
" messages.append(response) # That's the result of the tool calls\n",
" response = openai.chat.completions.create(model=MODEL, messages=messages)\n",
" \n",
" return response.choices[0].message.content"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f4be8a71-b19e-4c2f-80df-f59ff2661f14",
"metadata": {},
"outputs": [],
"source": [
"gr.ChatInterface(fn=chat, type=\"messages\").launch()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8dc18486-4d6b-4cbf-a6b8-16d08d7c4f54",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.13.2"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -244,7 +244,7 @@
" },\n",
" \"required\": [\"destination_city\", \"price\"],\n",
" \"additionalProperties\": False\n",
" }\n",
" } \n",
"}"
]
},

View File

@@ -0,0 +1,701 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "ec4f6b32-46e9-429a-a3cd-521ff5418493",
"metadata": {},
"source": [
"# Occasio - Event Management Assistant"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8b50bbe2-c0b1-49c3-9a5c-1ba7efa2bcb4",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import json\n",
"import time\n",
"import pprint\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import anthropic\n",
"import google.generativeai as genai\n",
"import gradio as gr"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "747e8786-9da8-4342-b6c9-f5f69c2e22ae",
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables in a file called .env\n",
"# Print the key prefixes to help with any debugging\n",
"\n",
"load_dotenv()\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
"google_api_key = os.getenv('GOOGLE_API_KEY')\n",
"\n",
"if openai_api_key:\n",
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
"else:\n",
" print(\"OpenAI API Key not set\")\n",
" \n",
"if anthropic_api_key:\n",
" print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n",
"else:\n",
" print(\"Anthropic API Key not set\")\n",
"\n",
"if google_api_key:\n",
" print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n",
"else:\n",
" print(\"Google API Key not set\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8b501508-0082-47be-9903-52ff1c243486",
"metadata": {},
"outputs": [],
"source": [
"# Connect to OpenAI, Anthropic and Google and assign a model for each\n",
"\n",
"openai = OpenAI()\n",
"OPENAI_MODEL = \"gpt-4o-mini\"\n",
"\n",
"claude = anthropic.Anthropic()\n",
"ANTHROPIC_MODEL = \"claude-3-haiku-20240307\"\n",
"\n",
"genai.configure()\n",
"GOOGLE_MODEL = \"gemini-2.0-flash\"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0a521d84-d07c-49ab-a0df-d6451499ed97",
"metadata": {},
"outputs": [],
"source": [
"system_message = \"You are called \\\"EventAI\\\", a virtual assistant for an Elementary school called Eagle Elementary School. You can help users by giving \\\n",
"them details of upcoming school events like event name, description, location etc. \"\n",
"#system_message += \"Introduce yourself with a warm welcome message on your first response ONLY.\"\n",
"system_message += \"Give short, courteous answers, no more than 2 sentences. \"\n",
"system_message += \"Always be accurate. If you don't know the answer, say so. Do not make up your own event details information\"\n",
"system_message += \"You might be asked to list the questions asked by the user so far. In that situation, based on the conversation history provided to you, \\\n",
"list the questions and respond\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2c27c4ba-8ed5-492f-add1-02ce9c81d34c",
"metadata": {},
"outputs": [],
"source": [
"# Some imports for handling images\n",
"\n",
"import base64\n",
"from io import BytesIO\n",
"from PIL import Image"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "773a9f11-557e-43c9-ad50-56cbec3a0f8f",
"metadata": {},
"outputs": [],
"source": [
"def artist(event_text):\n",
" image_response = openai.images.generate(\n",
" model=\"dall-e-3\",\n",
" prompt=f\"An image representing an {event_text}, showing typical activities that happen for that {event_text}, in a vibrant pop-art style that elementary school kids will like\",\n",
" size=\"1024x1024\",\n",
" n=1,\n",
" response_format=\"b64_json\",\n",
" )\n",
" image_base64 = image_response.data[0].b64_json\n",
" image_data = base64.b64decode(image_base64)\n",
" return Image.open(BytesIO(image_data))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d104b96a-02ca-4159-82fe-88e0452aa479",
"metadata": {},
"outputs": [],
"source": [
"import base64\n",
"from io import BytesIO\n",
"from PIL import Image\n",
"from IPython.display import Audio, display\n",
"\n",
"def talker(message):\n",
" response = openai.audio.speech.create(\n",
" model=\"tts-1\",\n",
" voice=\"onyx\",\n",
" input=message)\n",
"\n",
" audio_stream = BytesIO(response.content)\n",
" output_filename = \"output_audio.mp3\"\n",
" with open(output_filename, \"wb\") as f:\n",
" f.write(audio_stream.read())\n",
"\n",
" # Play the generated audio\n",
" display(Audio(output_filename, autoplay=True))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f0428a74-4daa-4b0d-b25a-219a35f39f55",
"metadata": {},
"outputs": [],
"source": [
"school_events = [\n",
" {\n",
" \"event_id\": \"pta\",\n",
" \"name\": \"Parent Teachers Meeting (PTA/PTM)\",\n",
" \"description\": \"Parent teachers meeting (PTA/PTM) to discuss students' progress.\",\n",
" \"date_time\": \"Apr 1st, 2025 11 AM\",\n",
" \"location\" : \"Glove Annexure Hall\"\n",
" },\n",
" {\n",
" \"event_id\": \"read aloud\",\n",
" \"name\": \"Read Aloud to your class/Reading to your class\",\n",
" \"description\": \"Kids can bring their favorite book and read it to their class.\",\n",
" \"date_time\": \"Apr 15th, 2025 1 PM\",\n",
" \"location\": \"Classroom\"\n",
" },\n",
" {\n",
" \"event_id\": \"100 days of school\",\n",
" \"name\": \"Celebrating 100 days of school. Dress up time for kids\",\n",
" \"description\": \"Kids can dress up as old people and celebrate the milestone with their teachers.\",\n",
" \"date_time\": \"May 15th, 2025 11 AM\",\n",
" \"location\": \"Classroom\"\n",
" },\n",
" {\n",
" \"event_id\": \"Book fair\",\n",
" \"name\": \"Scholastic book fair\",\n",
" \"description\": \"Kids can purchase their favorite scholastic books.\",\n",
" \"date_time\": \"Jun 22nd, 2025 10:30 AM\",\n",
" \"location\": \"Library\"\n",
" },\n",
" {\n",
" \"event_id\": \"Halloween\",\n",
" \"name\": \"Halloween\",\n",
" \"description\": \"Kids can dress up as their favorite characters\",\n",
" \"date_time\": \"Oct 31st, 2025\",\n",
" \"location\": \"Classroom\"\n",
" },\n",
" {\n",
" \"event_id\": \"Movie Night\",\n",
" \"name\": \"Movie Night\",\n",
" \"description\": \"A popular and kids centric movie will be played. Kids and families are welcome.\",\n",
" \"date_time\": \"May 3rd, 2025\",\n",
" \"location\": \"Main auditorium\"\n",
" },\n",
" {\n",
" \"event_id\": \"Intruder Drill\",\n",
" \"name\": \"Intruder Drill\",\n",
" \"description\": \"State mandated monthly intruder drill to prepare staff and students with necessary safety skills in times of a crisis\",\n",
" \"date_time\": \"May 3rd, 2025\",\n",
" \"location\": \"Main auditorium\"\n",
" }\n",
"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b7027eec-e522-49c1-af59-56a82f9d3be8",
"metadata": {},
"outputs": [],
"source": [
"def get_event_details(query):\n",
" search_words = query.lower().split() \n",
" for event in school_events:\n",
" event_text = event['name'].lower() + ' ' + event['description'].lower()\n",
" if all(word in event_text for word in search_words):\n",
" return event\n",
" return None"
]
},
{
"cell_type": "markdown",
"id": "36bedabf-a0a7-4985-ad8e-07ed6a55a3a4",
"metadata": {},
"source": [
"## Tools\n",
"\n",
"Tools are an incredibly powerful feature provided by the frontier LLMs.\n",
"\n",
"With tools, you can write a function, and have the LLM call that function as part of its response.\n",
"\n",
"Sounds almost spooky.. we're giving it the power to run code on our machine?\n",
"\n",
"Well, kinda."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "68e96b54-b891-4e7b-a6bc-17693dc99970",
"metadata": {},
"outputs": [],
"source": [
"# for claude\n",
"tools_claude = [\n",
" {\n",
" \"name\": \"get_event_details\",\n",
" \"description\": \"Get the details of a particular upcoming event in Eagle Elementary School. Call this whenever you need to know the event details, for example when a user asks \\\n",
"'When is the pta meeting scheduled?'\",\n",
" \"input_schema\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"event_text\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The event keyword that the user wants details on\"\n",
" }\n",
" },\n",
" \"required\": [\"event_text\"]\n",
" }\n",
"}\n",
"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "636188d2-7e7a-48a0-9f04-f3813c7dc323",
"metadata": {},
"outputs": [],
"source": [
"# For GPT\n",
"events_function_gpt = {\n",
" \"name\": \"get_event_details\",\n",
" \"description\": \"Get the details of a particular upcoming event in Eagle Elementary School. Call this whenever you need to know the event details, for example when a user asks \\\n",
" 'When is the pta meeting scheduled?'\",\n",
" \"parameters\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"event_text\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The event keyword that the user wants details on\",\n",
" },\n",
" },\n",
" \"required\": [\"event_text\"],\n",
" \"additionalProperties\": False\n",
" }\n",
"}"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "605684f8-ed02-4cc9-8a16-012533b601cb",
"metadata": {},
"outputs": [],
"source": [
"# And this is included in a list of tools:\n",
"tools_gpt = [{\"type\": \"function\", \"function\": events_function_gpt}]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4ac5a34c-a630-449a-9d46-669daace799c",
"metadata": {},
"outputs": [],
"source": [
"#Gemini function declaration structure\n",
"gemini_event_details = [{\n",
" \"name\": \"get_event_details\",\n",
" \"description\":\"Get the details of a particular upcoming event in Eagle Elementary School. Call this whenever you need to know the event details, for example when a user asks 'When is the pta meeting scheduled?\",\n",
" \"parameters\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"event_text\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The event keyword that the user wants details on\",\n",
" },\n",
" },\n",
" \"required\": [\"event_text\"],\n",
" },\n",
" },\n",
" {\n",
" \"name\": \"get_event_test\",\n",
" \"description\":\"This is a test function to validate if the function call picks up the right function if there are multiple functions.\",\n",
" \"parameters\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"event_text\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The event keyword that the user wants details on\",\n",
" },\n",
" },\n",
" \"required\": [\"event_text\"],\n",
" },\n",
" }\n",
"]\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c6331113-63b0-4712-94bb-f363422a8441",
"metadata": {},
"outputs": [],
"source": [
"def chat_claude(history):\n",
" print(f\"\\nhistory is {history}\\n\")\n",
" #Claude doesnt take any other key value pair other than role and content. Hence filtering only those key value pairs\n",
" history_claude = list({\"role\": msg[\"role\"], \"content\": msg[\"content\"]} for msg in history if \"role\" in msg and \"content\" in msg)\n",
" #history is [{'role': 'user', 'metadata': None, 'content': 'when is pta', 'options': None}]\n",
" #messages = history\n",
" message = claude.messages.create(\n",
" model=ANTHROPIC_MODEL,\n",
" max_tokens=1000,\n",
" temperature=0.7,\n",
" system=system_message,\n",
" messages=history_claude,\n",
" tools=tools_claude\n",
" )\n",
" image = None\n",
" print(f\"Claude's message is \\n {pprint.pprint(message)}\\n\")\n",
" try: \n",
" if message.stop_reason == \"tool_use\":\n",
" tool_use = next(block for block in message.content if block.type == \"tool_use\")\n",
" event_text = tool_use.input.get('event_text')\n",
" image = artist(event_text)\n",
" tool_result = handle_tool_call(event_text)\n",
" #tool_result = handle_tool_call(tool_use, \"Claude\")\n",
" \n",
" print(f\"Tool Result: {tool_result}\")\n",
" \n",
" response = claude.messages.stream(\n",
" model=ANTHROPIC_MODEL,\n",
" max_tokens=4096,\n",
" system=system_message,\n",
" messages=[\n",
" {\n",
" \"role\": \"user\", \n",
" \"content\": [\n",
" {\n",
" \"type\": \"text\",\n",
" \"text\": history[-1].get('content')\n",
" }\n",
" ]\n",
" },\n",
" {\n",
" \"role\": \"assistant\", \n",
" \"content\": message.content\n",
" },\n",
" {\n",
" \"role\": \"user\",\n",
" \"content\": [\n",
" {\n",
" \"type\": \"tool_result\",\n",
" \"tool_use_id\": tool_use.id,\n",
" \"content\": tool_result,\n",
" }\n",
" ],\n",
" },\n",
" ],\n",
" tools=tools_claude\n",
" )\n",
" result = \"\"\n",
" with response as stream:\n",
" for text in stream.text_stream:\n",
" result += text or \"\"\n",
" yield result, None\n",
" talker(result)\n",
" #image= artist(tool_input.get('event_text'))\n",
" yield result, image\n",
" else:\n",
" response = next((block.text for block in message.content if hasattr(block, \"text\")), None,)\n",
" chunk_size=30\n",
" for i in range(0, len(response), chunk_size):\n",
" yield response[:i + chunk_size], None\n",
" time.sleep(0.05) #Simulate streaming delay\n",
" talker(response)\n",
" #image= artist(tool_input.get('event_text'))\n",
" yield response, None\n",
" except Exception as e:\n",
" error_message = \"Apologies, my server is acting weird. Please try again later.\"\n",
" print(e)\n",
" yield error_message, None\n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9915ae05-5d52-4fdc-a3ea-18f050a79bd3",
"metadata": {},
"outputs": [],
"source": [
"def chat_gpt(history):\n",
" print(f\"\\nhistory is {history}\\n\")\n",
" messages = [{\"role\": \"system\", \"content\": system_message}] + history\n",
" response = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages, tools=tools_gpt)\n",
" image = None\n",
" try:\n",
" if response.choices[0].finish_reason==\"tool_calls\":\n",
" message = response.choices[0].message\n",
" tool = message.tool_calls[0]\n",
" arguments = json.loads(tool.function.arguments)\n",
" event_text = arguments.get('event_text')\n",
" image = artist(event_text)\n",
" event_json = handle_tool_call(event_text)\n",
" tool_output = {\n",
" \"role\": \"tool\",\n",
" \"content\": event_json,\n",
" \"tool_call_id\": tool.id\n",
" }\n",
" messages.append(message)\n",
" messages.append(tool_output)\n",
" stream = openai.chat.completions.create(\n",
" model=OPENAI_MODEL,\n",
" messages=messages,\n",
" stream=True\n",
" )\n",
" result = \"\"\n",
" for chunk in stream:\n",
" result += chunk.choices[0].delta.content or \"\"\n",
" yield result, None\n",
" talker(result)\n",
" yield result, image\n",
" else: \n",
" reply = response.choices[0].message.content\n",
" chunk_size=30\n",
" for i in range(0, len(reply), chunk_size):\n",
" yield reply[:i + chunk_size], None\n",
" time.sleep(0.05)\n",
" talker(reply)\n",
" #image= artist(\"No such event\")\n",
" yield reply, None\n",
" except Exception as e:\n",
" error_message = \"Apologies, my server is acting weird. Please try again later.\"\n",
" print(e)\n",
" yield error_message, None"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "30fa3de9-5b55-4bb6-93ea-a13fc09d38c1",
"metadata": {},
"outputs": [],
"source": [
"def chat_gemini(history):\n",
" print(f\"\\nhistory is {history}\\n\")\n",
" history_gemini = [{'role': m['role'], 'parts': [{'text': m['content']}]} if 'content' in m #if content exists, change it to parts format\n",
" else {'role': m['role'], 'parts': m['parts']} if 'parts' in m #else if parts exists, just copy it as it is\n",
" else {'role': m['role']} for m in history] #else neither content nor parts exists, copy only the role ignoring all other keys like metadata, options etc\n",
" \n",
" print(f\"\\nhistory_gemini is {history_gemini}\\n\")\n",
" model = genai.GenerativeModel(\n",
" model_name=GOOGLE_MODEL,\n",
" system_instruction=system_message\n",
" )\n",
" response = model.generate_content(\n",
" contents = history_gemini,\n",
" #contents = contents,\n",
" tools = [{\n",
" 'function_declarations': gemini_event_details,\n",
" }],\n",
" )\n",
" #print(f\"response is {response}\")\n",
"\n",
" image = None\n",
" try:\n",
" # Check if the model wants to use a tool\n",
" if response.candidates[0].content.parts[0].function_call:\n",
" function_call = response.candidates[0].content.parts[0].function_call\n",
" event_text = function_call.args.get(\"event_text\")\n",
" image = artist(event_text)\n",
" tool_result = handle_tool_call(event_text)\n",
" \n",
" print(f\"\\ntool_result is {tool_result}\\n\")\n",
" stream = model.generate_content(\n",
" \"Based on this information `\" + tool_result + \"`, extract the details of the event and provide the event details to the user\",\n",
" stream=True \n",
" )\n",
" #print(f\"\\nSecond response is {stream}\\n\")\n",
" result = \"\"\n",
" for chunk in stream:\n",
" result += chunk.candidates[0].content.parts[0].text or \"\"\n",
" #print(f\"REsult is \\n{result}\\n\")\n",
" yield result, None\n",
" talker(result) \n",
" yield result, image\n",
" #print(f\"REsult is \\n{result}\\n\")\n",
" else: \n",
" reply = response.text\n",
" chunk_size=30\n",
" for i in range(0, len(reply), chunk_size):\n",
" yield reply[:i + chunk_size], None\n",
" time.sleep(0.05)\n",
" talker(reply)\n",
" #image= artist(\"No such event\")\n",
" yield reply, None\n",
" \n",
" except Exception as e:\n",
" error_message = \"Apologies, my server is acting weird. Please try again later.\"\n",
" print(e)\n",
" yield error_message, None\n",
" \n",
"\n",
" \n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "570fffb2-a054-4217-89ae-8b6f4630e383",
"metadata": {},
"outputs": [],
"source": [
"def call_and_process_model_responses(fn_name, chatbot):#, response, image):\n",
" response = \"\"\n",
" image = None\n",
" for response, image in fn_name(chatbot):\n",
" if chatbot and chatbot[-1][\"role\"] == \"assistant\": \n",
" chatbot[-1][\"content\"] = response # Update the last message\n",
" else:\n",
" chatbot.append({\"role\": \"assistant\", \"content\": response}) # First assistant message\n",
" #print(chatbot)\n",
" yield chatbot, image # Stream updated history to UI\n",
" \n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "32a6ccce-44fa-49a7-bd1a-08c70002771c",
"metadata": {},
"outputs": [],
"source": [
"def handle_tool_call(event_text):\n",
" print(f\"event text is {event_text}\")\n",
" event_found = get_event_details(event_text)\n",
" print(f\"event_found is {event_found}\")\n",
" \n",
" if event_found:\n",
" response = json.dumps({\"name\": event_found['name'],\"description\": event_found['description'], \"when\": event_found['date_time'], \"where\": event_found['location']})\n",
" else: \n",
" response = json.dumps({\"event\": f\"Sorry, there is no schedule currently for {event_text}\"})\n",
" return response \n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4eaaaf9e-64b9-4d0b-9931-388cee8ea21d",
"metadata": {},
"outputs": [],
"source": [
"def process_chosen_model(chatbot, model):\n",
" if model == 'GPT':\n",
" for chatbot, image in call_and_process_model_responses(chat_gpt, chatbot):\n",
" yield chatbot, image\n",
" elif model == 'Claude': \n",
" for chatbot, image in call_and_process_model_responses(chat_claude, chatbot):\n",
" yield chatbot, image\n",
" else:\n",
" #for Gemini, the content is to be replaced with parts.\n",
" for chatbot, image in call_and_process_model_responses(chat_gemini, chatbot):\n",
" yield chatbot, image\n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "627f6d49-5376-4f1d-8071-f2e96fd6e78b",
"metadata": {},
"outputs": [],
"source": [
"# More involved Gradio code as we're not using the preset Chat interface!\n",
"# Passing in inbrowser=True in the last line will cause a Gradio window to pop up immediately.\n",
"\n",
"with gr.Blocks(css=\"\"\"\n",
" select.gr-box { \n",
" appearance: auto !important; \n",
" -webkit-appearance: auto !important; \n",
" }\n",
"\"\"\") as ui:\n",
" with gr.Row():\n",
" gr.HTML(\"<h1 style='text-align: center; color: #4CAF50;'>Occasio! An Event Management Assistant</h1>\") # Added title\n",
" with gr.Row():\n",
" # with gr.Column(scale=3): #Acts as a spacer on the left\n",
" # pass\n",
" \n",
" with gr.Column(scale=0):\n",
" model = gr.Dropdown(\n",
" choices=[\"GPT\", \"Claude\", \"Gemini\"], \n",
" label=\"Select model\", \n",
" value=\"GPT\",\n",
" interactive=True,\n",
" container=True # Applying the CSS class\n",
" )\n",
" # with gr.Column(scale=-54, min_width=200):\n",
" # gr.HTML(\"<h1 style='text-align: center; color: #4CAF50;'>Occasio</h1>\") # Added title\n",
" # pass #Acts as a spacer on the right\n",
" with gr.Row():\n",
" chatbot = gr.Chatbot(height=500, type=\"messages\")\n",
" image_output = gr.Image(height=500)\n",
" with gr.Row():\n",
" entry = gr.Textbox(label=\"Ask me \\\"when is pta meeting\\\", \\\"how about book fair\\\" and more... \")\n",
" with gr.Row():\n",
" clear = gr.Button(\"Clear\", min_width=150)\n",
" #message=None\n",
"\n",
" def do_entry(message, history):\n",
" history += [{\"role\":\"user\", \"content\":message}]\n",
" return \"\", history\n",
" \n",
" entry.submit(do_entry, inputs=[entry, chatbot], outputs=[entry, chatbot]).then(\n",
" process_chosen_model, inputs=[chatbot, model], outputs=[chatbot, image_output]\n",
" )\n",
" clear.click(lambda: None, inputs=None, outputs=chatbot, queue=False)\n",
"\n",
"ui.launch(inbrowser=True)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,227 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "6aa646e3-7a57-461a-b69a-073179effa18",
"metadata": {},
"source": [
"## Additional End of week Exercise - week 2\n",
"\n",
"This includes \n",
"- Gradio UI\n",
"- use of the system prompt to add expertise\n",
"- audio input so you can talk to it\n",
"- respond with audio"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "72f3dca4-b052-4e9f-90c8-f42e667c165c",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"from IPython.display import Markdown, display, update_display\n",
"import gradio as gr\n",
"import json"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "23570b9f-8c7a-4cc7-b809-3505334b60a7",
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables in a file called .env\n",
"\n",
"load_dotenv(override=True)\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"openai = OpenAI()\n",
"MODEL = 'gpt-4o-mini'"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d379178a-8672-4e6f-a380-ad8d85f5c64e",
"metadata": {},
"outputs": [],
"source": [
"system_message = \"\"\"You are a personal study tutor, designed to provide clear, yet brief and succinct answers to \n",
"students that ask you questions. The topics are related to data science, computer science \n",
"and technology in general, so you are allowed to use a moderate level of jargon. Explain in \n",
"simple terminology, so a student can easily understand. \n",
"\n",
"You may also be asked about prices for special courses. In this case, respond that you have no such\n",
"data available. \n",
"\n",
"\"\"\"\n",
"# Use a tabular format where possible \n",
"# for ease of information flow "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4745d439-c66e-4e5c-b5d4-9f0ba97aefdc",
"metadata": {},
"outputs": [],
"source": [
"def chat(history):\n",
" messages = [{\"role\": \"system\", \"content\": system_message}] + history\n",
" response = openai.chat.completions.create(model=MODEL, messages=messages)\n",
"\n",
" reply = response.choices[0].message.content\n",
" history += [{\"role\":\"assistant\", \"content\":reply}]\n",
"\n",
" # Comment out or delete the next line if you'd rather skip Audio for now..\n",
" talker(reply)\n",
" \n",
" return history"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a8b31799-df86-4151-98ea-66ef50fe767e",
"metadata": {},
"outputs": [],
"source": [
"!pip install openai-whisper"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9f5b8e51-2833-44be-a4f4-63c4683f2b6e",
"metadata": {},
"outputs": [],
"source": [
"import whisper\n",
"\n",
"def transcribe_audio(audio):\n",
" if audio is None:\n",
" return \"No audio received.\"\n",
" \n",
" model = whisper.load_model(\"base\") # You can use \"tiny\", \"small\", etc.\n",
" result = model.transcribe(audio)\n",
" \n",
" return result[\"text\"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e55f8e43-2da1-4f2a-bcd4-3fffa830db48",
"metadata": {},
"outputs": [],
"source": [
"import base64\n",
"from io import BytesIO\n",
"from PIL import Image\n",
"from IPython.display import Audio, display\n",
"\n",
"def talker(message):\n",
" response = openai.audio.speech.create(\n",
" model=\"tts-1\",\n",
" voice=\"onyx\",\n",
" input=message)\n",
"\n",
" audio_stream = BytesIO(response.content)\n",
" output_filename = \"output_audio.mp3\"\n",
" with open(output_filename, \"wb\") as f:\n",
" f.write(audio_stream.read())\n",
"\n",
" # Play the generated audio\n",
" display(Audio(output_filename, autoplay=True))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cb3107a7-bfdc-4255-825f-bfabcf458c0c",
"metadata": {},
"outputs": [],
"source": [
"# More involved Gradio code as we're not using the preset Chat interface!\n",
"# Passing in inbrowser=True in the last line will cause a Gradio window to pop up immediately.\n",
"\n",
"with gr.Blocks() as ui:\n",
" with gr.Row():\n",
" chatbot = gr.Chatbot(height=400,type=\"messages\")\n",
" with gr.Row():\n",
" entry = gr.Textbox(label=\"Chat with our StudyAI Assistant:\")\n",
" # with gr.Row():\n",
" # entry = gr.Textbox(label=\"Speak or Type:\", placeholder=\"Speak your question...\", interactive=True, microphone=True)\n",
" with gr.Row():\n",
" audio_input = gr.Audio(type=\"filepath\", label=\"Speak your question\")\n",
" with gr.Row():\n",
" clear = gr.Button(\"Clear\")\n",
"\n",
" def do_entry(message, history):\n",
" history += [{\"role\":\"user\", \"content\":message}]\n",
" return \"\", history\n",
"\n",
" def handle_audio(audio, history):\n",
" text = transcribe_audio(audio)\n",
" history += [{\"role\": \"user\", \"content\": text}]\n",
" return \"\", history\n",
"\n",
" entry.submit(do_entry, inputs=[entry, chatbot], outputs=[entry, chatbot]).then(\n",
" chat, inputs=[chatbot], outputs=[chatbot]\n",
" )\n",
"\n",
" audio_input.change(handle_audio, inputs=[audio_input, chatbot], outputs=[entry, chatbot]).then(\n",
" chat, inputs=[chatbot], outputs=[chatbot]\n",
" )\n",
" \n",
" clear.click(lambda: [], inputs=None, outputs=chatbot, queue=False)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "73e0a776-d43e-4b04-a37f-a27d3714cf47",
"metadata": {},
"outputs": [],
"source": [
"ui.launch(inbrowser=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bcd45503-d314-4b28-a41c-4dbb87059188",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,129 @@
import gradio as gr
import requests
import json
from json_handlers import SettingsHandler, LanguagesHandler
from ollama_utils import get_ollama_response
class GradioUI:
    """Gradio front-end for the LLM translator.

    Presents a "Translate" tab (model + language selection, streaming output)
    and an "Advanced Settings" tab (sampling parameters), wired to the
    SettingsHandler / LanguagesHandler back-ends and the local Ollama server.
    """

    def __init__(self, models: list, settings: SettingsHandler, languages: LanguagesHandler):
        self.models = models
        self.settings = settings
        self.languages = languages
        # Cache the validated language list once; it does not change at runtime.
        self.langs = self.languages.get_supported_languages()

    def _translate_callback(self, text, model, src_lang, dst_lang):
        """Stream the translation into the output box, growing it chunk by chunk."""
        model_options = self.settings.get_advanced_settings()
        accumulated = ""
        for chunk in get_ollama_response(model, text, src_lang, dst_lang, model_options):
            accumulated += chunk
            yield accumulated

    def _temp_setting_callback(self, value):
        """Persist a changed temperature value."""
        self.settings.update_advanced_settings_param("temperature", value)

    def _top_k_setting_callback(self, value):
        """Persist a changed top-k value."""
        self.settings.update_advanced_settings_param("top_k", value)

    def _top_p_setting_callback(self, value):
        """Persist a changed top-p value."""
        self.settings.update_advanced_settings_param("top_p", value)

    def _reset_to_default_callback(self):
        """Restore default sampling settings; returns them to refresh the widgets."""
        defaults = {
            "temperature": 0.0,
            "top_k": 40.0,
            "top_p": 0.9
        }
        self.settings.update_advanced_settings(defaults)
        return defaults["temperature"], defaults["top_k"], defaults["top_p"]

    def build_and_launch(self):
        """Build the two-tab interface and start serving it (blocks until exit)."""
        with gr.Blocks() as gui:
            gr.Markdown("# LLM Translator")

            with gr.Tab("Translate"):
                with gr.Row():
                    model_dropdown = gr.Dropdown(
                        label="Model",
                        info="Choose LLM Model",
                        choices=self.models
                    )
                # Source / target language pickers side by side.
                with gr.Group():
                    with gr.Row():
                        translate_from = gr.Dropdown(
                            value=self.langs[0],
                            show_label=False,
                            choices=self.langs,
                            interactive=True
                        )
                        translate_to = gr.Dropdown(
                            value=self.langs[1],
                            show_label=False,
                            choices=self.langs,
                            interactive=True
                        )
                with gr.Row():
                    translate_input = gr.Textbox(label="Your Input", lines=15, max_lines=15)
                    translate_output = gr.Textbox(label="Translated", lines=15, max_lines=15)
                btn = gr.Button("Translate", variant="primary")
                btn.click(
                    fn=self._translate_callback,
                    inputs=[translate_input, model_dropdown, translate_from, translate_to],
                    outputs=translate_output
                )

            with gr.Tab("Advanced Settings"):
                temp_dropdown = gr.Number(
                    value=self.settings.get_advanced_setting_param("temperature"),
                    label="Temperature",
                    info="This parameter control how creative the model is\n0 means no creativity\n1 means very creative",
                    minimum=0,
                    maximum=1,
                    step=0.1,
                    interactive=True
                )
                gr.Markdown()  # Used only for spacing
                top_k_dropdown = gr.Number(
                    value=self.settings.get_advanced_setting_param("top_k"),
                    label="Top K",
                    info="A higher value (e.g. 100) will give more diverse answers\nwhile a lower value (e.g. 10) will be more conservative.",
                    minimum=1,
                    maximum=200,
                    step=1,
                    interactive=True
                )
                gr.Markdown()  # Used only for spacing
                top_p_dropdown = gr.Number(
                    value=self.settings.get_advanced_setting_param("top_p"),
                    label="Top P",
                    info="A higher value (e.g., 0.95) will lead to more diverse answers\nwhile a lower value (e.g., 0.5) will be more conservative",
                    minimum=0.1,
                    maximum=1.0,
                    step=0.1,
                    interactive=True
                )
                gr.Markdown()  # Used only for spacing
                reset_btn = gr.Button("Reset to Default")
                reset_btn.click(
                    fn=self._reset_to_default_callback,
                    outputs=[temp_dropdown, top_k_dropdown, top_p_dropdown]
                )
                # Persist each parameter immediately when its widget changes.
                temp_dropdown.change(self._temp_setting_callback, temp_dropdown)
                top_k_dropdown.change(self._top_k_setting_callback, top_k_dropdown)
                top_p_dropdown.change(self._top_p_setting_callback, top_p_dropdown)

        gui.launch()

View File

@@ -0,0 +1,60 @@
import json
class SettingsHandler:
    """Loads and persists the model's advanced sampling settings.

    The backing JSON file has the layout:
        {"Advanced Settings": {"temperature": ..., "top_k": ..., "top_p": ...}}
    """

    def __init__(self, json_filename):
        # Path to the backing JSON settings file.
        self.json_filename = json_filename
        # In-memory copy of the "Advanced Settings" mapping; kept in sync
        # with the file by the update_* methods.
        self.advanced_settings = self.load_current_settings()

    def load_current_settings(self) -> dict:
        """Read the settings file and return its "Advanced Settings" mapping."""
        with open(self.json_filename, "r") as file:
            settings_dict = json.load(file)
        return settings_dict["Advanced Settings"]

    def update_advanced_settings(self, updated_advanced_settings: dict):
        """Replace the whole advanced-settings mapping on disk and in memory."""
        new_dict = {
            "Advanced Settings": updated_advanced_settings
        }
        # NOTE: debug print of the settings dict removed here.
        with open(self.json_filename, "w") as file:
            json.dump(new_dict, file)
        self.advanced_settings = updated_advanced_settings

    def update_advanced_settings_param(self, key: str, new_val):
        """Update a single known parameter; unknown keys are ignored.

        Uses a membership test (not a None check) so a parameter whose
        current value happens to be None can still be updated.
        """
        if key in self.advanced_settings:
            # Copy before mutating so the cached dict is never left
            # inconsistent with the file if the write fails.
            updated = dict(self.advanced_settings)
            updated[key] = new_val
            self.update_advanced_settings(updated)

    def get_advanced_settings(self):
        """Return the cached advanced-settings mapping."""
        return self.advanced_settings

    def get_advanced_setting_param(self, key: str):
        """Return one parameter's value, or None if the key is unknown."""
        return self.advanced_settings.get(key)
class LanguagesHandler:
    """Loads the list of supported translation languages from a JSON file."""

    def __init__(self, json_filename):
        # Path to the JSON file holding a flat list of language names.
        self.json_filename = json_filename
        self.langs = self.load_languages()

    def load_languages(self) -> list:
        """Read and validate the languages file.

        Returns:
            The list of language names.

        Raises:
            RuntimeError: if the file does not contain a list, or the list
                has fewer than 2 entries (translation needs a source and a
                target language).
        """
        with open(self.json_filename, "r") as file:
            langs = json.load(file)
        if not isinstance(langs, list):
            raise RuntimeError("Languages must be provided as lists")
        if len(langs) < 2:
            raise RuntimeError("At least 2 languages must be supported")
        return langs

    def get_supported_languages(self):
        """Return the validated language list."""
        return self.langs

View File

@@ -0,0 +1,6 @@
[
"German",
"English",
"Spanish",
"French"
]

View File

@@ -0,0 +1,15 @@
from json_handlers import SettingsHandler, LanguagesHandler
from ollama_utils import get_downloaded_models
from gradio_ui import GradioUI
# Configuration: JSON files holding the persisted sampling settings and the
# list of supported translation languages (paths are relative to the CWD).
settings_json = "settings.json"
languages_json = "languages.json"
# Entry point: load settings and languages, query the local Ollama server
# for its downloaded models, then build and launch the Gradio translator UI
# (build_and_launch blocks while the server is running).
if __name__ == "__main__":
settings = SettingsHandler(settings_json)
languages = LanguagesHandler(languages_json)
models = get_downloaded_models()
gradio_ui = GradioUI(models, settings, languages)
gradio_ui.build_and_launch()

View File

@@ -0,0 +1,28 @@
import requests
import json
import ollama
def get_downloaded_models():
    """Return the names of all models downloaded to the local Ollama server.

    Queries Ollama's HTTP tag endpoint on localhost:11434 (the Ollama
    default port).

    Returns:
        A list of model name strings.

    Raises:
        requests.RequestException: if the server is unreachable, times out,
            or responds with an HTTP error status.
    """
    # Fail fast instead of hanging forever if the Ollama server is down,
    # and surface HTTP errors explicitly rather than failing later on a
    # missing "models" key.
    response = requests.get("http://localhost:11434/api/tags", timeout=10)
    response.raise_for_status()
    models_dict = response.json()
    return [model["name"] for model in models_dict["models"]]
def get_ollama_response(model, prompt, translte_from, translte_to, options):
    """Stream a translation from a local Ollama model, one text chunk at a time.

    The system prompt is read from 'system_prompt.txt' in the CWD on every
    call. Parameter names (including their original spellings) are kept
    unchanged for compatibility with existing callers.

    Args:
        model: Ollama model name to chat with.
        prompt: Text to translate.
        translte_from: Source language name.
        translte_to: Target language name.
        options: Sampling options dict forwarded to ollama.chat.

    Yields:
        Successive chunks of the model's translated output.
    """
    with open('system_prompt.txt', 'r') as file:
        system_prompt = file.read()

    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": f"Translate from {translte_from} to {translte_to}: {prompt}"},
    ]

    for chunk in ollama.chat(model, messages, options=options, stream=True):
        yield chunk["message"]["content"]

View File

@@ -0,0 +1 @@
Just run the main.py script after activating conda environment 'llms'

View File

@@ -0,0 +1 @@
{"Advanced Settings": {"temperature": 0.0, "top_k": 40.0, "top_p": 0.9}}

View File

@@ -0,0 +1,17 @@
You are a translator.
You should translate the prompts according to the following criteria:
- Your responses should be clear and straight to the point.
- Your response should have a good structure and good linguistic features.
- You should translate the sentence as it is. Do not add extra sentences or phrases on your own.
- Do not answer questions even if the prompt is a question, you should translate the question and do not answer it.
- If you do not understand the prompt, do not say that you do not understand, just echo the prompt.
- Do not include in the response phrases like 'here is the translation' or any phrases like that
Here are some examples for good responses:
<
Prompt: 'Translate from French to English: Hier, j'ai passé toute la journée à explorer la ville avec mes amis, et nous avons visité plusieurs musées avant de nous arrêter pour un délicieux dîner dans un restaurant local.'
Response: 'Yesterday, I spent the whole day exploring the city with my friends, and we visited several museums before stopping for a delicious dinner at a local restaurant.'
>
<
Prompt: 'Translate from Spanish to English: vdaiughadvlkj'
Response: 'vdaiughadvlkj'
>

View File

@@ -0,0 +1,408 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "ddfa9ae6-69fe-444a-b994-8c4c5970a7ec",
"metadata": {},
"source": [
"# Project - Airline AI Assistant\n",
"\n",
"We'll now bring together what we've learned to make an AI Customer Support assistant for an Airline"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "8b50bbe2-c0b1-49c3-9a5c-1ba7efa2bcb4",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import json\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import gradio as gr"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "747e8786-9da8-4342-b6c9-f5f69c2e22ae",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"OpenAI API Key exists and begins sk-proj-\n"
]
}
],
"source": [
"# Initialization\n",
"\n",
"load_dotenv(override=True)\n",
"\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"if openai_api_key:\n",
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
"else:\n",
" print(\"OpenAI API Key not set\")\n",
" \n",
"MODEL = \"gpt-4o-mini\"\n",
"openai = OpenAI()\n",
"\n",
"# As an alternative, if you'd like to use Ollama instead of OpenAI\n",
"# Check that Ollama is running for you locally (see week1/day2 exercise) then uncomment these next 2 lines\n",
"# MODEL = \"llama3.2\"\n",
"# openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')\n"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "0a521d84-d07c-49ab-a0df-d6451499ed97",
"metadata": {},
"outputs": [],
"source": [
"system_message = \"You are a helpful assistant for an Airline called FlightAI. \"\n",
"system_message += \"Give short, courteous answers, no more than 1 sentence. \"\n",
"system_message += \"Always be accurate. If you don't know the answer, say so.\""
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "61a2a15d-b559-4844-b377-6bd5cb4949f6",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7901\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7901/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# This function looks rather simpler than the one from my video, because we're taking advantage of the latest Gradio updates\n",
"\n",
"def chat(message, history):\n",
" messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n",
" response = openai.chat.completions.create(model=MODEL, messages=messages)\n",
" return response.choices[0].message.content\n",
"\n",
"gr.ChatInterface(fn=chat, type=\"messages\").launch()"
]
},
{
"cell_type": "markdown",
"id": "36bedabf-a0a7-4985-ad8e-07ed6a55a3a4",
"metadata": {},
"source": [
"## Tools\n",
"\n",
"Tools are an incredibly powerful feature provided by the frontier LLMs.\n",
"\n",
"With tools, you can write a function, and have the LLM call that function as part of its response.\n",
"\n",
"Sounds almost spooky.. we're giving it the power to run code on our machine?\n",
"\n",
"Well, kinda."
]
},
{
"cell_type": "code",
"execution_count": 85,
"id": "0696acb1-0b05-4dc2-80d5-771be04f1fb2",
"metadata": {},
"outputs": [],
"source": [
"# Let's start by making a useful function\n",
"\n",
"ticket_prices = {\"london\": \"$799\", \"paris\": \"$899\", \"tokyo\": \"$1400\", \"berlin\": \"$499\"}\n",
"\n",
"def get_ticket_price(destination_city):\n",
" print(f\"Tool get_ticket_price called for {destination_city}\")\n",
" city = destination_city.lower()\n",
" return ticket_prices.get(city, \"Unknown\")\n",
"\n",
"def get_destinations():\n",
" destinations=ticket_prices.keys()\n",
" cities=\", \".join(destinations) \n",
" return cities"
]
},
{
"cell_type": "code",
"execution_count": 86,
"id": "80ca4e09-6287-4d3f-997d-fa6afbcf6c85",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Tool get_ticket_price called for Berlin\n"
]
},
{
"data": {
"text/plain": [
"'london, paris, tokyo, berlin'"
]
},
"execution_count": 86,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"get_ticket_price(\"Berlin\")\n",
"get_destinations()"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "4afceded-7178-4c05-8fa6-9f2085e6a344",
"metadata": {},
"outputs": [],
"source": [
"# There's a particular dictionary structure that's required to describe our function:\n",
"\n",
"price_function = {\n",
" \"name\": \"get_ticket_price\",\n",
" \"description\": \"Get the price of a return ticket to the destination city. Call this whenever you need to know the ticket price, for example when a customer asks 'How much is a ticket to this city'\",\n",
" \"parameters\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"destination_city\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The city that the customer wants to travel to\",\n",
" },\n",
" },\n",
" \"required\": [\"destination_city\"],\n",
" \"additionalProperties\": False\n",
" }\n",
"}"
]
},
{
"cell_type": "code",
"execution_count": 29,
"id": "5842b7f1-e357-494c-9bd4-3aa9f9fd4332",
"metadata": {},
"outputs": [],
"source": [
"# There's a particular dictionary structure that's required to describe our function:\n",
"\n",
"destination_function = {\n",
" \"name\": \"get_destinations\",\n",
" \"description\": \"Get the destinations we serve. Call this whenever you need to know the destinations FlightAI flies to, for example when a customer asks 'Where do you fly to'\",\n",
" \"parameters\": {\n",
" },\n",
" \"additionalProperties\": False\n",
"}"
]
},
{
"cell_type": "code",
"execution_count": 30,
"id": "bdca8679-935f-4e7f-97e6-e71a4d4f228c",
"metadata": {},
"outputs": [],
"source": [
"# And this is included in a list of tools:\n",
"\n",
"tools = [{\"type\": \"function\", \"function\": price_function},\n",
" {\"type\": \"function\", \"function\": destination_function}]"
]
},
{
"cell_type": "markdown",
"id": "c3d3554f-b4e3-4ce7-af6f-68faa6dd2340",
"metadata": {},
"source": [
"## Getting OpenAI to use our Tool\n",
"\n",
"There's some fiddly stuff to allow OpenAI \"to call our tool\"\n",
"\n",
"What we actually do is give the LLM the opportunity to inform us that it wants us to run the tool.\n",
"\n",
"Here's how the new chat function looks:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5db52df0-cb48-4017-bae3-0014f5ca3a56",
"metadata": {},
"outputs": [],
"source": [
"def chat(message, history):\n",
" messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n",
" response = openai.chat.completions.create(model=MODEL, messages=messages, tools=tools)\n",
"\n",
" if response.choices[0].finish_reason == \"tool_calls\":\n",
" message = response.choices[0].message\n",
" tool_name = message.tool_calls[0].function.name\n",
"\n",
" if tool_name == \"get_ticket_price\":\n",
"            response, city = handle_tool_call_price(message)\n",
" elif tool_name == \"get_destinations\":\n",
" response = handle_tool_call_destination(message)\n",
"\n",
" messages.extend([message, response])\n",
" response = openai.chat.completions.create(model=MODEL, messages=messages)\n",
"\n",
" return response.choices[0].message.content"
]
},
{
"cell_type": "code",
"execution_count": 91,
"id": "b0992986-ea09-4912-a076-8e5603ee631f",
"metadata": {},
"outputs": [],
"source": [
"# We have to write that function handle_tool_call for price:\n",
"\n",
"def handle_tool_call_price(message):\n",
" tool_call = message.tool_calls[0]\n",
" arguments = json.loads(tool_call.function.arguments)\n",
" city = arguments.get('destination_city')\n",
" price = get_ticket_price(city)\n",
" response = {\n",
" \"role\": \"tool\",\n",
" \"content\": json.dumps({\"destination_city\": city,\"price\": price}),\n",
" \"tool_call_id\": tool_call.id\n",
" }\n",
" return response, city"
]
},
{
"cell_type": "code",
"execution_count": 92,
"id": "4bbffdb0-5ab7-414e-8d2b-3d9367e64526",
"metadata": {},
"outputs": [],
"source": [
"# We have to write that function handle_tool_call for destinations:\n",
"\n",
"def handle_tool_call_destination(message):\n",
" tool_call = message.tool_calls[0]\n",
" destinations = get_destinations()\n",
" print(destinations)\n",
" response = {\n",
" \"role\": \"tool\",\n",
" \"content\": destinations,\n",
" \"tool_call_id\": tool_call.id\n",
" }\n",
" return response"
]
},
{
"cell_type": "code",
"execution_count": 93,
"id": "f4be8a71-b19e-4c2f-80df-f59ff2661f14",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7928\n",
"\n",
"To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7928/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 93,
"metadata": {},
"output_type": "execute_result"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Tool get_ticket_price called for Paris\n",
"Tool get_ticket_price called for Timbuktu\n",
"london, paris, tokyo, berlin\n"
]
}
],
"source": [
"gr.ChatInterface(fn=chat, type=\"messages\").launch()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "243c156d-86c3-4d0a-8119-d0a532daa5cc",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,167 @@
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
{
"cell_type": "markdown",
"source": [
"Import libraries as needed & keep your gemini api key ready"
],
"metadata": {
"id": "2UAcHYzT6ikw"
}
},
{
"cell_type": "code",
"source": [
"#!pip install gradio"
],
"metadata": {
"id": "XW0IY4xK6JZ1"
},
"execution_count": 14,
"outputs": []
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"id": "dwoPNMMP4ZSh"
},
"outputs": [],
"source": [
"from google import genai\n",
"from google.genai import types\n",
"from google.colab import userdata\n",
"\n"
]
},
{
"cell_type": "code",
"source": [
"def get_trip_itinerary(budget: int) -> str:\n",
" \"\"\"\n",
" Returns a trip itinerary based on the given budget.\n",
" \"\"\"\n",
"    itinerary_dict: dict[int, str] = {\n",
" 500: \"Paris: 3-day budget trip covering Eiffel Tower, Louvre, and Seine River Cruise.\",\n",
" 1000: \"Tokyo: 5-day adventure covering Shibuya, Akihabara, Mount Fuji day trip.\",\n",
" 2000: \"New York: 7-day luxury stay covering Times Square, Broadway show, and helicopter tour.\",\n",
" 3000: \"Dubai: 7-day ultra-luxury trip with Burj Khalifa VIP tour, desert safari, and yacht cruise.\",\n",
" }\n",
"\n",
" return itinerary_dict.get(budget, \"No itinerary found for this budget. Try another amount!\")\n"
],
"metadata": {
"id": "cnYD07T24ueV"
},
"execution_count": 3,
"outputs": []
},
{
"cell_type": "code",
"source": [
"from google.genai import types\n",
"\n",
"config = types.GenerateContentConfig(tools=[get_trip_itinerary])\n",
"\n",
"from google import genai\n",
"\n",
"client = genai.Client(api_key=userdata.get('gemini_api'))\n",
"\n",
"response = client.models.generate_content(\n",
" model='gemini-2.0-flash',\n",
" config=config,\n",
" contents='Based on the user budget suggest trip itinerary'\n",
")\n"
],
"metadata": {
"id": "3WRUXvD45VFC"
},
"execution_count": 7,
"outputs": []
},
{
"cell_type": "code",
"source": [
"import gradio as gr\n",
"\n",
"# Chat function using Gemini\n",
"chat = client.chats.create(model='gemini-2.0-flash', config=config)\n",
"\n",
"def chat_with_ai(user_input: str):\n",
" response = chat.send_message(user_input)\n",
" return response.text\n",
"\n",
"# Gradio Chat Interface\n",
"demo = gr.Interface(fn=chat_with_ai, inputs=\"text\", outputs=\"text\", title=\"AI Trip Planner\")\n",
"\n",
"demo.launch()\n"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 645
},
"id": "5fE700z96DHs",
"outputId": "3e35423c-8b2b-4868-8113-00d9d3a7a2ba"
},
"execution_count": 13,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Running Gradio in a Colab notebook requires sharing enabled. Automatically setting `share=True` (you can turn this off by setting `share=False` in `launch()` explicitly).\n",
"\n",
"Colab notebook detected. To show errors in colab notebook, set debug=True in launch()\n",
"* Running on public URL: https://079a23f363400da700.gradio.live\n",
"\n",
"This share link expires in 72 hours. For free permanent hosting and GPU upgrades, run `gradio deploy` from the terminal in the working directory to deploy to Hugging Face Spaces (https://huggingface.co/spaces)\n"
]
},
{
"output_type": "display_data",
"data": {
"text/plain": [
"<IPython.core.display.HTML object>"
],
"text/html": [
"<div><iframe src=\"https://079a23f363400da700.gradio.live\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
]
},
"metadata": {}
},
{
"output_type": "execute_result",
"data": {
"text/plain": []
},
"metadata": {},
"execution_count": 13
}
]
},
{
"cell_type": "code",
"source": [],
"metadata": {
"id": "XC9zzq8X5u8m"
},
"execution_count": null,
"outputs": []
}
]
}

View File

@@ -485,7 +485,7 @@
"\n",
"print(reasoning_content)\n",
"print(content)\n",
"print(\"Number of words:\", len(reply.split(\" \")))"
"print(\"Number of words:\", len(content.split(\" \")))"
]
},
{