This commit is contained in:
milan0lazic
2025-09-24 21:49:26 +02:00
38 changed files with 13437 additions and 121 deletions

View File

@@ -0,0 +1,249 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "87c471b2-6a46-47f6-9da9-81d2652dd1b6",
"metadata": {},
"source": [
"# The code given by the tutor results in an error when more than one city name is entered."
]
},
{
"cell_type": "markdown",
"id": "d4c3cdc4-3af9-4b9e-a5d2-80cee3b120be",
"metadata": {},
"source": [
"# This code aims to solve that by giving proper prices for all the given cities"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "292b5152-8932-4341-b2c4-850f16a89e5e",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import json\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import gradio as gr\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "92d35c3d-cb2d-4ce8-a6da-3907ce3ce8b8",
"metadata": {},
"outputs": [],
"source": [
"# Initialization\n",
"\n",
"load_dotenv(override=True)\n",
"\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"if openai_api_key:\n",
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
"else:\n",
" print(\"OpenAI API Key not set\")\n",
" \n",
"MODEL = \"gpt-4o-mini\"\n",
"openai = OpenAI()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "54e11038-795c-4451-ad3b-f797abb57728",
"metadata": {},
"outputs": [],
"source": [
"system_message = \"You are a helpful assistant for an Airline called FlightAI. \"\n",
"system_message += \"Give short, courteous answers, no more than 1 sentence. \"\n",
"system_message += \"Always be accurate. If you don't know the answer, say so.\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e06c982f-59f1-4e33-a1c1-2f56415efbde",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"# This function looks rather simpler than the one from my video, because we're taking advantage of the latest Gradio updates\n",
"\n",
"def chat(message, history):\n",
" messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n",
" response = openai.chat.completions.create(model=MODEL, messages=messages)\n",
" return response.choices[0].message.content\n",
"\n",
"gr.ChatInterface(fn=chat, type=\"messages\").launch()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d895e0ff-c47f-4b01-b987-4a236c452ba6",
"metadata": {},
"outputs": [],
"source": [
"# we'll try to implement methods to handle multiple city inputs in the query;\n",
"# each tool call prices exactly one city, so multi-city queries become multiple tool calls\n",
"ticket_prices = {\"london\": \"$799\", \"paris\": \"$899\", \"tokyo\": \"$1400\", \"berlin\": \"$499\"}\n",
"\n",
"def get_ticket_price(destination_city):\n",
"    \"\"\"Return the ticket price for a single destination city, or 'Unknown' if not served.\"\"\"\n",
"    print(f\"Tool get_ticket_price called for {destination_city}\")\n",
"    city = destination_city.lower()\n",
"    return ticket_prices.get(city, \"Unknown\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e2387fe7-a7ac-4192-ad46-9ec2a9bc49fa",
"metadata": {},
"outputs": [],
"source": [
"get_ticket_price(\"paris\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b63e229e-08c9-49b4-b7af-1883736f12cd",
"metadata": {},
"outputs": [],
"source": [
"# There's a particular dictionary structure that's required to describe our function:\n",
"# NOTE: the parameter is a single string, so the description must ask for ONE city;\n",
"# describing it as a \"List of cities\" made the model pass comma-joined strings that\n",
"# miss the ticket_prices lookup. The model should instead issue one tool call per city.\n",
"\n",
"price_function = {\n",
"    \"name\": \"get_ticket_price\",\n",
"    \"description\": \"Get the price of a return ticket to the destination city. Call this whenever you need to know the ticket price, for example when a customer asks 'How much is a ticket to this city'. If the customer mentions several cities, call this once per city.\",\n",
"    \"parameters\": {\n",
"        \"type\": \"object\",\n",
"        \"properties\": {\n",
"            \"destination_city\": {\n",
"                \"type\": \"string\",\n",
"                \"description\": \"A single city that the customer wants to travel to\",\n",
"            },\n",
"        },\n",
"        \"required\": [\"destination_city\"],\n",
"        \"additionalProperties\": False\n",
"    }\n",
"}"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0162af66-2ea4-4221-93df-dd22f0ad92f7",
"metadata": {},
"outputs": [],
"source": [
"# And this is included in a list of tools:\n",
"\n",
"tools = [{\"type\": \"function\", \"function\": price_function}]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8b2a5434-63d0-4519-907e-bce21852d48f",
"metadata": {},
"outputs": [],
"source": [
"def chat(message, history):\n",
"    \"\"\"Gradio chat handler: resolves any tool calls, then returns the final reply text.\"\"\"\n",
"    messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n",
"    response = openai.chat.completions.create(model=MODEL, messages=messages, tools=tools)\n",
"    if response.choices[0].finish_reason == \"tool_calls\":\n",
"        message = response.choices[0].message\n",
"        # handle_tool_call returns one tool-role message per tool call, plus the cities\n",
"        tool_responses, cities = handle_tool_call(message)\n",
"        messages.append(message)\n",
"        messages.extend(tool_responses)\n",
"        # second round-trip so the model can phrase an answer using the tool results\n",
"        response = openai.chat.completions.create(model=MODEL, messages=messages)\n",
"    return response.choices[0].message.content"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d7dfa28c-95f8-4d25-8f3c-cd677bb4a4d1",
"metadata": {},
"outputs": [],
"source": [
"# We have to write that function handle_tool_call:\n",
"\n",
"def handle_tool_call(message):\n",
"    \"\"\"Build one tool-role response per tool call in the assistant message.\n",
"\n",
"    Returns (responses, cities): the list of tool-role messages to append to the\n",
"    conversation, and the list of cities that were priced.\n",
"    \"\"\"\n",
"    responses = []\n",
"    all_cities = []\n",
"    for tool_call in message.tool_calls:\n",
"        arguments = json.loads(tool_call.function.arguments)\n",
"        city = arguments.get('destination_city')\n",
"        price = get_ticket_price(city)\n",
"        responses.append({\n",
"            \"role\": \"tool\",\n",
"            \"content\": json.dumps({\"destination_city\": city, \"price\": price}),\n",
"            \"tool_call_id\": tool_call.id\n",
"        })\n",
"        all_cities.append(city)\n",
"    return responses, all_cities"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "15a4152d-6455-4116-bb63-6700eedf0626",
"metadata": {},
"outputs": [],
"source": [
"gr.ChatInterface(fn=chat, type=\"messages\").launch()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c6b0fcfa-38b7-4063-933e-1c8177bf55f1",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -3,7 +3,7 @@ name: Run Python script
on:
push:
branches:
- figma_assistance
- Figma_AI_Assistant
jobs:
build:

View File

@@ -1,5 +1,5 @@
---
title: Figma_assistance
title: Figma_AI_Assistant
app_file: day_5_figma_assistance.py
sdk: gradio
sdk_version: 5.38.2

View File

@@ -292,7 +292,6 @@ custom_css = """
background: linear-gradient(135deg, #4facfe 0%, #00f2fe 100%);
padding: 15px 20px;
border-radius: 10px;
margin: 20px 0;
}
.quickstart-title {
@@ -315,7 +314,7 @@ with gr.Blocks(title="Figma Onboarding Assistant", theme=gr.themes.Soft(), css=c
gr.HTML(
"""
<div class="header-container">
<h1 class="header-title">🎨 Figma Onboarding Assistant</h1>
<h1 class="header-title">🎨 Figma AI Assistant</h1>
<p class="header-subtitle">Your AI-powered Figma learning companion</p>
</div>
@@ -351,26 +350,6 @@ with gr.Blocks(title="Figma Onboarding Assistant", theme=gr.themes.Soft(), css=c
"""
)
# Model selection dropdown
model_dropdown = gr.Dropdown(
choices=["OpenAI (GPT-3.5)", "Google Gemini (2.0 Flash)", "Claude (Sonnet 4)"],
value="OpenAI (GPT-3.5)",
label="Select AI Model",
info="Choose which AI model to use for responses"
)
with gr.Row():
msg = gr.Textbox(
placeholder="Type your Figma question here...",
container=False,
scale=4
)
submit_btn = gr.Button("Ask", scale=1, variant="primary")
clear_btn = gr.Button("Clear Chat", scale=1)
audio_btn = gr.Button("🔊 Play Audio", scale=1, variant="secondary")
clear_audio_btn = gr.Button("🔇 Clear Audio", scale=1, variant="secondary")
# Example questions
gr.HTML(
"""
@@ -380,7 +359,7 @@ with gr.Blocks(title="Figma Onboarding Assistant", theme=gr.themes.Soft(), css=c
</div>
"""
)
with gr.Row():
example_btns = [
gr.Button(
@@ -405,6 +384,24 @@ with gr.Blocks(title="Figma Onboarding Assistant", theme=gr.themes.Soft(), css=c
)
]
# Model selection dropdown
model_dropdown = gr.Dropdown(
choices=["OpenAI (GPT-3.5)", "Google Gemini (2.0 Flash)", "Claude (Sonnet 4)"],
value="OpenAI (GPT-3.5)",
label="Select AI Model",
info="Choose which AI model to use for responses"
)
with gr.Row():
msg = gr.Textbox(
placeholder="Type your Figma question here...",
container=False,
scale=4
)
submit_btn = gr.Button("Ask", scale=1, variant="primary")
clear_btn = gr.Button("Clear Chat", scale=1)
# Your components with simple styling
chatbot = gr.Chatbot(
type="messages",
@@ -412,6 +409,9 @@ with gr.Blocks(title="Figma Onboarding Assistant", theme=gr.themes.Soft(), css=c
placeholder="Ask me anything about Figma! For example: 'How do I create a component?' or 'What are frames in Figma?'",
elem_classes=["styled-chat"]
)
with gr.Row():
audio_btn = gr.Button("🔊 Text To Audio", scale=1, variant="primary")
clear_audio_btn = gr.Button("🔇 Clear Audio", scale=1, variant="secondary")
audio_output = gr.Audio(
label="Audio Response",

View File

@@ -0,0 +1,171 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "4076637d",
"metadata": {},
"source": [
"Here we have 3 bots - Gpt, Gemini and Claude. They are trying to find the top 3 stocks on the NYSE, which all 3 bots identify, review and finalize when in consensus. The call to the model is wrapped in a for loop with a range of 100 to mimic an infinite loop, since a breaking condition is set for when the 3 bots reach consensus on the top 3 stocks.\n",
"I would like to invite the reader to go through the code and share any feedback that could help me improve further. Any suggestions and feedback are most welcome. You could send your feedback to - srbmisc@gmail.com.\n",
"\n",
"Thank You"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e8c8a1f2",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import anthropic\n",
"from IPython.display import Markdown, display, update_display"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "24b06e47",
"metadata": {},
"outputs": [],
"source": [
"load_dotenv(override=True)\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
"google_api_key = os.getenv('GOOGLE_API_KEY')\n",
"\n",
"if openai_api_key:\n",
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
"else:\n",
" print(\"OpenAI API Key not set\")\n",
" \n",
"if anthropic_api_key:\n",
" print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n",
"else:\n",
" print(\"Anthropic API Key not set\")\n",
"\n",
"if google_api_key:\n",
" print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n",
"else:\n",
" print(\"Google API Key not set\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d687412b",
"metadata": {},
"outputs": [],
"source": [
"openai = OpenAI()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6c5d430e",
"metadata": {},
"outputs": [],
"source": [
"sp_gpt = '''You are a bot Gpt. You are in a conversation with 2 other bots Gemini and Claude. All 3 of you are trying to figure out the top 3 stocks that have performed so far in NYSE. \n",
"At every turn, you propose a stock, share its performance and fundamentals, and ask for the others for their review on this stock. Similarly, when its their turn and they share their stock pick, you review their pick.\n",
"If you think thier pick was better, accept it and in your next turn share that same stock otherwise ask them to accept your pick. The goal is to come up with 3 stocks at the end that all 3 participants consider the best.\n",
"If there is a concensus on 3 top stocks and its your turn, just output like this CONSENSUS REACHED : stock 1, stock 2, stock 3\n",
"Prefix your response with Gpt: in bold and respond in Markdown'''\n",
"\n",
"sp_gemini = '''You are a bot Gemini. You are in a conversation with 2 other bots Gpt and Claude. All 3 of you are trying to figure out the top 3 stocks that have performed so far in NYSE. \n",
"At every turn, you propose a stock, share its performance and fundamentals, and ask for the others for their review on this stock. Similarly, when its their turn and they share their stock pick, you review their pick.\n",
"If you think thier pick was better, accept it and in your next turn share that same stock otherwise ask them to accept your pick. The goal is to come up with 3 stocks at the end that all 3 participants consider the best.\n",
"If there is a concensus on 3 top stocks and its your turn, just output like this CONSENSUS REACHED : stock 1, stock 2, stock 3\n",
"Prefix your response with Gemini: in bold and respond in Markdown'''\n",
"\n",
"sp_claude = '''You are a bot Claude. You are in a conversation with 2 other bots Gemini and Gpt. All 3 of you are trying to figure out the top 3 stocks that have performed so far in NYSE. \n",
"At every turn, you propose a stock, share its performance and fundamentals, and ask for the others for their review on this stock. Similarly, when its their turn and they share their stock pick, you review their pick.\n",
"If you think thier pick was better, accept it and in your next turn share that same stock otherwise ask them to accept your pick. The goal is to come up with 3 stocks at the end that all 3 participants consider the best.\n",
"If there is a concensus on 3 top stocks and its your turn, just output like this CONSENSUS REACHED : stock 1, stock 2, stock 3\n",
"Prefix your response with Claude: in bold and respond in Markdown'''\n",
"\n",
"talk = \"Gpt: Hello Gemini, Hello Claude. I want to discuss with you a good stock on the NYSE with you.<br> Gemini: Hello Gpt, Hello Claude. Sure go ahead, give us the best stock you think is there on the NYSE ?<br> Claude: Hello Gpt, Hello Gemini. Sure Gpt, lets discuss on some stocks. What stock do you have on mind ?<br>\"\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8eb58eae",
"metadata": {},
"outputs": [],
"source": [
"def callBot(mode):\n",
"\n",
" global talk\n",
" talk = talk + \"<br><br><br>\"\n",
" messages = [{\"role\": \"system\", \"content\": sp_gpt if mode==0 else (sp_gemini if mode==1 else sp_claude)},\n",
" {\"role\":\"user\", \"content\":talk}]\n",
"\n",
" if mode==0:\n",
" model = 'gpt-4.1-mini'\n",
" client = OpenAI()\n",
" elif mode==1:\n",
" model = 'gemini-2.5-flash'\n",
" client = OpenAI(api_key=google_api_key, base_url=\"https://generativelanguage.googleapis.com/v1beta/openai/\")\n",
" else:\n",
" model = 'claude-3-5-haiku-latest'\n",
" client = OpenAI(api_key=anthropic_api_key, base_url=\"https://api.anthropic.com/v1/\")\n",
"\n",
" stream = client.chat.completions.create(\n",
" model=model,\n",
" messages=messages,\n",
" stream=True\n",
" )\n",
" for chunk in stream:\n",
" talk += (chunk.choices[0].delta.content or '')\n",
" talk = talk.replace(\"```\",\"\").replace(\"markdown\",\"\")\n",
" update_display(Markdown(talk), display_id=display_handle.display_id)\n",
"\n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7a3e9ebc",
"metadata": {},
"outputs": [],
"source": [
"display_handle = display(Markdown(\"\"), display_id=True) \n",
"\n",
"for i in range(100):\n",
" callBot(i%3)\n",
" if 'CONSENSUS REACHED :' in talk or 'CONSENSUS REACHED:' in talk:\n",
" break\n",
"\n",
" "
]
}
],
"metadata": {
"kernelspec": {
"display_name": "venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,359 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "ddfa9ae6-69fe-444a-b994-8c4c5970a7ec",
"metadata": {},
"source": [
"# Project - Airline AI Assistant\n",
"\n",
"We'll now bring together what we've learned to make an AI Customer Support assistant for an Airline"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8b50bbe2-c0b1-49c3-9a5c-1ba7efa2bcb4",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import json\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import gradio as gr"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "747e8786-9da8-4342-b6c9-f5f69c2e22ae",
"metadata": {},
"outputs": [],
"source": [
"# Initialization\n",
"\n",
"load_dotenv(override=True)\n",
"\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"if openai_api_key:\n",
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
"else:\n",
" print(\"OpenAI API Key not set\")\n",
" \n",
"MODEL = \"gpt-4o-mini\"\n",
"openai = OpenAI()\n",
"\n",
"# As an alternative, if you'd like to use Ollama instead of OpenAI\n",
"# Check that Ollama is running for you locally (see week1/day2 exercise) then uncomment these next 2 lines\n",
"# MODEL = \"llama3.2\"\n",
"# openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0a521d84-d07c-49ab-a0df-d6451499ed97",
"metadata": {},
"outputs": [],
"source": [
"system_message = \"You are a helpful assistant for an Airline called FlightAI. \"\n",
"system_message += \"Give short, courteous answers, no more than 1 sentence. \"\n",
"system_message += \"Always be accurate. If you don't know the answer, say so.\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "61a2a15d-b559-4844-b377-6bd5cb4949f6",
"metadata": {},
"outputs": [],
"source": [
"# This function looks rather simpler than the one from my video, because we're taking advantage of the latest Gradio updates\n",
"\n",
"def chat(message, history):\n",
" messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n",
" response = openai.chat.completions.create(model=MODEL, messages=messages)\n",
" return response.choices[0].message.content\n",
"\n",
"gr.ChatInterface(fn=chat, type=\"messages\").launch()"
]
},
{
"cell_type": "markdown",
"id": "36bedabf-a0a7-4985-ad8e-07ed6a55a3a4",
"metadata": {},
"source": [
"## Tools\n",
"\n",
"Tools are an incredibly powerful feature provided by the frontier LLMs.\n",
"\n",
"With tools, you can write a function, and have the LLM call that function as part of its response.\n",
"\n",
"Sounds almost spooky.. we're giving it the power to run code on our machine?\n",
"\n",
"Well, kinda."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0696acb1-0b05-4dc2-80d5-771be04f1fb2",
"metadata": {},
"outputs": [],
"source": [
"# Let's start by making a useful function\n",
"\n",
"ticket_prices = {\"london\": \"$799\", \"paris\": \"$899\", \"tokyo\": \"$1400\", \"berlin\": \"$499\"}\n",
"flight_schedules = {\"london\": [\"08:00\", \"15:00\"], \"paris\": [\"09:00\", \"16:00\"], \"tokyo\": [\"12:00\"], \"berlin\": [\"07:00\", \"13:00\"]}\n",
"\n",
"def get_ticket_price(destination_city):\n",
" print(f\"Tool get_ticket_price called for {destination_city}\")\n",
" city = destination_city.lower()\n",
" return ticket_prices.get(city, \"Unknown\")\n",
"\n",
"def get_flight_schedules(destination_city):\n",
" print(f\"Tool get_flight_hours called for {destination_city}\")\n",
" city = destination_city.lower()\n",
" return flight_schedules.get(city, \"Unknown\")\n",
"\n",
"def flight_confirmation_number(destination_city, date, hour):\n",
" import random\n",
" number = destination_city[:3].upper() + ''.join(random.choices('ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789', k=6))\n",
" print(f\"Tool flight_confirmation_number called for {destination_city} on {date} at {hour}, returning {number}\")\n",
" return number"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "80ca4e09-6287-4d3f-997d-fa6afbcf6c85",
"metadata": {},
"outputs": [],
"source": [
"print(get_ticket_price(\"London\"))\n",
"print(get_flight_schedules(\"London\"))\n",
"print(flight_confirmation_number(\"London\", \"2024-10-01\", \"15:00\"))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4afceded-7178-4c05-8fa6-9f2085e6a344",
"metadata": {},
"outputs": [],
"source": [
"# There's a particular dictionary structure that's required to describe our function:\n",
"\n",
"price_function = {\n",
" \"name\": \"get_ticket_price\",\n",
" \"description\": \"Get the price of a return ticket to the destination city. Call this whenever you need \\\n",
" to know the ticket price, for example when a customer asks 'How much is a ticket to this city'\",\n",
" \"parameters\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"destination_city\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The city that the customer wants to travel to\",\n",
" },\n",
" },\n",
" \"required\": [\"destination_city\"],\n",
" \"additionalProperties\": False\n",
" }\n",
"}\n",
"\n",
"schedule_function = {\n",
" \"name\": \"get_flight_schedules\",\n",
" \"description\": \"Get the daily flight schedules (departure times) to the destination city. Call this \\\n",
" whenever you need to know the flight times, for example when a customer asks 'What time \\\n",
" are the flights to this city?'\",\n",
" \"parameters\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"destination_city\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The city that the customer wants to travel to\",\n",
" },\n",
" },\n",
" \"required\": [\"destination_city\"],\n",
" \"additionalProperties\": False\n",
" }\n",
"}\n",
"\n",
"confirmation_function = {\n",
" \"name\": \"flight_confirmation_number\",\n",
" \"description\": \"Get a flight confirmation number for a booking. Call this whenever you need to \\\n",
" provide a confirmation number, after a customer has selected a destination city, a flight \\\n",
" date and a departure time, and also asked for the price. For example when a customer says \\\n",
" 'I'd like to book that flight'\",\n",
" \"parameters\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"destination_city\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The city that the customer wants to travel to\",\n",
" },\n",
" \"date\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The date of the flight, in YYYY-MM-DD format\",\n",
" },\n",
" \"hour\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The departure time of the flight, in HH:MM format\",\n",
" },\n",
" },\n",
" \"required\": [\"destination_city\", \"date\", \"hour\"],\n",
" \"additionalProperties\": False\n",
" }\n",
"}"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bdca8679-935f-4e7f-97e6-e71a4d4f228c",
"metadata": {},
"outputs": [],
"source": [
"# And this is included in a list of tools:\n",
"\n",
"tools = [{\"type\": \"function\", \"function\": price_function},\n",
" {\"type\": \"function\", \"function\": schedule_function},\n",
" {\"type\": \"function\", \"function\": confirmation_function}]"
]
},
{
"cell_type": "markdown",
"id": "c3d3554f-b4e3-4ce7-af6f-68faa6dd2340",
"metadata": {},
"source": [
"## Getting OpenAI to use our Tool\n",
"\n",
"There's some fiddly stuff to allow OpenAI \"to call our tool\"\n",
"\n",
"What we actually do is give the LLM the opportunity to inform us that it wants us to run the tool.\n",
"\n",
"Here's how the new chat function looks:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ce9b0744-9c78-408d-b9df-9f6fd9ed78cf",
"metadata": {},
"outputs": [],
"source": [
"def chat(message, history):\n",
" messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n",
" response = openai.chat.completions.create(model=MODEL, messages=messages, tools=tools)\n",
"\n",
" if response.choices[0].finish_reason==\"tool_calls\":\n",
" message = response.choices[0].message\n",
" response, city = handle_tool_call(message)\n",
" messages.append(message)\n",
" messages.append(response)\n",
" response = openai.chat.completions.create(model=MODEL, messages=messages)\n",
" \n",
" return response.choices[0].message.content"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b0992986-ea09-4912-a076-8e5603ee631f",
"metadata": {},
"outputs": [],
"source": [
"# We have to write that function handle_tool_call:\n",
"def handle_price_tool_call(tool_call_id, arguments):\n",
" city = arguments.get('destination_city')\n",
" price = get_ticket_price(city)\n",
" response = {\n",
" \"role\": \"tool\",\n",
" \"content\": json.dumps({\"destination_city\": city,\"price\": price}),\n",
" \"tool_call_id\": tool_call_id\n",
" }\n",
" return response, city\n",
" \n",
"def handle_schedule_tool_call(tool_call_id, arguments):\n",
" city = arguments.get('destination_city')\n",
" schedules = get_flight_schedules(city)\n",
" response = {\n",
" \"role\": \"tool\",\n",
" \"content\": json.dumps({\"destination_city\": city,\"schedules\": schedules}),\n",
" \"tool_call_id\": tool_call_id\n",
" }\n",
" return response, city\n",
"\n",
"def handle_confirmation_tool_call(tool_call_id, arguments):\n",
" city = arguments.get('destination_city')\n",
" date = arguments.get('date')\n",
" hour = arguments.get('hour')\n",
" confirmation = flight_confirmation_number(city, date, hour)\n",
" response = {\n",
" \"role\": \"tool\",\n",
" \"content\": json.dumps({\"destination_city\": city,\"date\": date,\"hour\": hour,\"confirmation_number\": confirmation}),\n",
" \"tool_call_id\": tool_call_id\n",
" }\n",
" return response, city\n",
"\n",
"def handle_tool_call(message):\n",
"    \"\"\"Dispatch the first tool call in the message to its handler.\n",
"\n",
"    Returns (tool_response, city) from the matching handler. Raises ValueError\n",
"    for an unrecognized tool name (previously this fell through and raised a\n",
"    confusing NameError on the unbound 'response'/'city' variables).\n",
"    \"\"\"\n",
"    print(\"Number of tool calls:\", len(message.tool_calls))\n",
"    # NOTE(review): only the first tool call is handled; parallel tool calls are dropped\n",
"    tool_call = message.tool_calls[0]\n",
"    print(\"Tool call is for function:\", tool_call.function.name)\n",
"    arguments = json.loads(tool_call.function.arguments)\n",
"    handlers = {\n",
"        \"get_ticket_price\": handle_price_tool_call,\n",
"        \"get_flight_schedules\": handle_schedule_tool_call,\n",
"        \"flight_confirmation_number\": handle_confirmation_tool_call,\n",
"    }\n",
"    handler = handlers.get(tool_call.function.name)\n",
"    if handler is None:\n",
"        raise ValueError(f\"Unknown tool: {tool_call.function.name}\")\n",
"    return handler(tool_call.id, arguments)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f4be8a71-b19e-4c2f-80df-f59ff2661f14",
"metadata": {},
"outputs": [],
"source": [
"gr.ChatInterface(fn=chat, type=\"messages\").launch()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "530e4bef",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -301,7 +301,6 @@
" background: linear-gradient(135deg, #4facfe 0%, #00f2fe 100%);\n",
" padding: 15px 20px;\n",
" border-radius: 10px;\n",
" margin: 20px 0;\n",
"}\n",
"\n",
".quickstart-title {\n",
@@ -324,7 +323,7 @@
" gr.HTML(\n",
" \"\"\"\n",
" <div class=\"header-container\">\n",
" <h1 class=\"header-title\">🎨 Figma Onboarding Assistant</h1>\n",
" <h1 class=\"header-title\">🎨 Figma AI Assistant</h1>\n",
" <p class=\"header-subtitle\">Your AI-powered Figma learning companion</p>\n",
" </div>\n",
" \n",
@@ -360,26 +359,6 @@
" \"\"\"\n",
" )\n",
" \n",
" # Model selection dropdown\n",
" model_dropdown = gr.Dropdown(\n",
" choices=[\"OpenAI (GPT-3.5)\", \"Google Gemini (2.0 Flash)\", \"Claude (Sonnet 4)\"],\n",
" value=\"OpenAI (GPT-3.5)\",\n",
" label=\"Select AI Model\",\n",
" info=\"Choose which AI model to use for responses\"\n",
" )\n",
" \n",
" with gr.Row():\n",
" msg = gr.Textbox(\n",
" placeholder=\"Type your Figma question here...\",\n",
" container=False,\n",
" scale=4\n",
" )\n",
" submit_btn = gr.Button(\"Ask\", scale=1, variant=\"primary\")\n",
" clear_btn = gr.Button(\"Clear Chat\", scale=1)\n",
" audio_btn = gr.Button(\"🔊 Play Audio\", scale=1, variant=\"secondary\")\n",
" clear_audio_btn = gr.Button(\"🔇 Clear Audio\", scale=1, variant=\"secondary\")\n",
" \n",
"\n",
" # Example questions\n",
" gr.HTML(\n",
" \"\"\"\n",
@@ -389,7 +368,7 @@
" </div>\n",
" \"\"\"\n",
" )\n",
" \n",
"\n",
" with gr.Row():\n",
" example_btns = [\n",
" gr.Button(\n",
@@ -414,6 +393,24 @@
" )\n",
" ]\n",
"\n",
" # Model selection dropdown\n",
" model_dropdown = gr.Dropdown(\n",
" choices=[\"OpenAI (GPT-3.5)\", \"Google Gemini (2.0 Flash)\", \"Claude (Sonnet 4)\"],\n",
" value=\"OpenAI (GPT-3.5)\",\n",
" label=\"Select AI Model\",\n",
" info=\"Choose which AI model to use for responses\"\n",
" )\n",
" \n",
" with gr.Row():\n",
" msg = gr.Textbox(\n",
" placeholder=\"Type your Figma question here...\",\n",
" container=False,\n",
" scale=4\n",
" )\n",
" submit_btn = gr.Button(\"Ask\", scale=1, variant=\"primary\")\n",
" clear_btn = gr.Button(\"Clear Chat\", scale=1)\n",
"\n",
"\n",
" # Your components with simple styling\n",
" chatbot = gr.Chatbot(\n",
" type=\"messages\",\n",
@@ -421,6 +418,9 @@
" placeholder=\"Ask me anything about Figma! For example: 'How do I create a component?' or 'What are frames in Figma?'\",\n",
" elem_classes=[\"styled-chat\"]\n",
" )\n",
" with gr.Row():\n",
" audio_btn = gr.Button(\"🔊 Text To Audio\", scale=1, variant=\"primary\")\n",
" clear_audio_btn = gr.Button(\"🔇 Clear Audio\", scale=1, variant=\"secondary\")\n",
"\n",
" audio_output = gr.Audio(\n",
" label=\"Audio Response\",\n",

View File

@@ -0,0 +1,324 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "d006b2ea-9dfe-49c7-88a9-a5a0775185fd",
"metadata": {},
"source": [
"# Additional End of week Exercise - week 2\n",
"\n",
"Now use everything you've learned from Week 2 to build a full prototype for the technical question/answerer you built in Week 1 Exercise.\n",
"\n",
"This should include a Gradio UI, streaming, use of the system prompt to add expertise, and the ability to switch between models. Bonus points if you can demonstrate use of a tool!\n",
"\n",
"If you feel bold, see if you can add audio input so you can talk to it, and have it respond with audio. ChatGPT or Claude can help you, or email me if you have questions.\n",
"\n",
"I will publish a full solution here soon - unless someone beats me to it...\n",
"\n",
"There are so many commercial applications for this, from a language tutor, to a company onboarding solution, to a companion AI to a course (like this one!) I can't wait to see your results."
]
},
{
"cell_type": "markdown",
"id": "87f483d5-dc85-41d1-bb34-5b49c6eeb30c",
"metadata": {},
"source": [
"**I built a coding expert tutor with 2 models: Gemini and GPT.\n",
"It works with streamining and tools simultaneously.\n",
"If a user asks a mathematical question, the Dalle 3 will generate an image of that equation.**"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a07e7793-b8f5-44f4-aded-5562f633271a",
"metadata": {},
"outputs": [],
"source": [
"import gradio\n",
"from openai import OpenAI\n",
"import os\n",
"from dotenv import load_dotenv\n",
"import math\n",
"import json\n",
"import base64\n",
"from io import BytesIO\n",
"from PIL import Image"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "436819d1-8a09-43e2-9429-35189cc92317",
"metadata": {},
"outputs": [],
"source": [
"load_dotenv(override=True)\n",
"\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"google_api_key = os.getenv('GOOGLE_API_KEY')\n",
"\n",
"if openai_api_key:\n",
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
"else:\n",
" print(\"OpenAI API Key not set\")\n",
" \n",
"if google_api_key:\n",
" print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n",
"else:\n",
" print(\"Google API Key not set\")\n",
" \n",
" \n",
"GPT_MODEL = \"gpt-5-nano\"\n",
"GEMINI_MODEL = \"gemini-2.5-flash\"\n",
"openai = OpenAI()\n",
"gemini = OpenAI(\n",
" api_key=google_api_key, \n",
" base_url=\"https://generativelanguage.googleapis.com/v1beta/openai/\"\n",
")\n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e154015c-0c16-41a5-9518-163a9ae3ea0c",
"metadata": {},
"outputs": [],
"source": [
"system_message = \"You are an expert coding tutor. \\n\" \\\n",
"\"You explain the answers in a friendly and easy to understand way.\\n\" \\\n",
"\"However, if the input from the user feels too vague, ask them to provide more details before answering.\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "937dc916-fc0b-47a4-b963-4d689cec4f60",
"metadata": {},
"outputs": [],
"source": [
"def calculate_math(math_equation):\n",
" print(\"Math calculator tool has been run...\")\n",
" \n",
" allowed = {\"__builtins__\": None}\n",
" allowed.update({k: getattr(math, k) for k in dir(math) if not k.startswith(\"_\")})\n",
" \n",
" result = eval(math_equation, allowed, {})\n",
" return result\n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "37a74256-fbf6-4539-8481-87bf73abefd4",
"metadata": {},
"outputs": [],
"source": [
"calculate_math(\"sqrt(25)\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c858d63d-c90f-4ab9-bf03-2047622ed151",
"metadata": {},
"outputs": [],
"source": [
"calculate_math_function = {\n",
" \"name\": \"calculate_math\",\n",
" \"description\": \"Calculate math requested by the user. You should run this tool when a user asks to know the result of ANY equation. For example: 'What is ther result of this: sqrt(25)'\",\n",
" \"parameters\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"math_equation\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The math question the user wants to calculate. You should pass only the math equation, not text. For example: sqrt(25)\",\n",
" },\n",
" },\n",
" \"required\": [\"math_equation\"],\n",
" \"additionalProperties\": False\n",
" }\n",
"}"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1c32ef1f-909c-4646-b39f-006d26a44d10",
"metadata": {},
"outputs": [],
"source": [
"tools = [{\"type\": \"function\", \"function\": calculate_math_function}]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "edcea23f-769c-4d40-b07c-ac2fc89d2af9",
"metadata": {},
"outputs": [],
"source": [
"def generate_math_result_image(equation, result):\n",
" image_response = openai.images.generate(\n",
" model=\"dall-e-3\",\n",
" prompt=f\"Generate a realistic image of a math equation: '{equation}={result}' on a school chalk board with.\",\n",
" size=\"1024x1024\",\n",
" n=1,\n",
" response_format=\"b64_json\",\n",
" )\n",
" image_base64 = image_response.data[0].b64_json\n",
" image_data = base64.b64decode(image_base64)\n",
" return Image.open(BytesIO(image_data))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ea0fa17b-069e-4080-9cfc-a0674a2bcca6",
"metadata": {},
"outputs": [],
"source": [
"def chat(history, model=\"GPT\"):\n",
" messages = [{\"role\": \"system\", \"content\": system_message}] + history\n",
" if model == \"GPT\": \n",
" response = openai.chat.completions.create(model=GPT_MODEL, messages=messages, stream=True, tools=tools)\n",
" else:\n",
" response = gemini.chat.completions.create(model=GEMINI_MODEL, messages=messages, stream=True, tools=tools)\n",
" \n",
" buffer = {\"role\": \"assistant\", \"content\": \"\", \"tool_calls\": []}\n",
" tool_answer = \"\"\n",
" image = None\n",
" \n",
" for chunk in response:\n",
" delta = chunk.choices[0].delta\n",
" if delta.content:\n",
" buffer[\"content\"] += delta.content or \"\"\n",
" yield history + [buffer], image\n",
"\n",
" if delta.tool_calls:\n",
" if delta.tool_calls[0].function.name:\n",
" buffer[\"tool_calls\"].append(delta.tool_calls[0])\n",
" for call in delta.tool_calls:\n",
" if call.function and model == \"GPT\":\n",
" buffer[\"tool_calls\"][0].function.arguments += call.function.arguments\n",
" \n",
" if chunk.choices[0].finish_reason == \"tool_calls\":\n",
" tool_call = buffer[\"tool_calls\"][0]\n",
" response, result, math_equation = handle_calculate_tool_call(tool_call)\n",
" messages.append(buffer)\n",
" messages.append(response)\n",
" image = generate_math_result_image(math_equation, result)\n",
" if model == \"GPT\": \n",
" next_response = openai.chat.completions.create(model=GPT_MODEL, messages=messages, stream=True)\n",
" else:\n",
" next_response = gemini.chat.completions.create(model=GEMINI_MODEL, messages=messages, stream=True)\n",
" for next_chunk in next_response:\n",
" tool_answer += next_chunk.choices[0].delta.content or \"\"\n",
" yield history + [{\"role\": \"assistant\", \"content\": tool_answer}], image"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5605e90c-1ccb-4222-b15e-9be35fd58168",
"metadata": {},
"outputs": [],
"source": [
"def handle_calculate_tool_call(tool_call):\n",
" arguments = json.loads(tool_call.function.arguments)\n",
" math_equation = arguments.get('math_equation')\n",
" result = calculate_math(math_equation)\n",
" response = {\n",
" \"role\": \"tool\",\n",
" \"content\": json.dumps({\"math_equation\": math_equation, \"result\": result}),\n",
" \"tool_call_id\": tool_call.id\n",
" }\n",
" return response, result, math_equation"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "89da6939-f38f-4584-9413-85ff843d9b32",
"metadata": {},
"outputs": [],
"source": [
"def transcribe(audio_file):\n",
" if audio_file is None:\n",
" return \"\"\n",
" with open(audio_file, \"rb\") as f:\n",
" transcription = openai.audio.transcriptions.create(\n",
" model=\"gpt-4o-mini-transcribe\", \n",
" file=f\n",
" )\n",
" return transcription.text"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6b9ba370-6014-4f66-8f57-824465b7fe41",
"metadata": {},
"outputs": [],
"source": [
"with gradio.Blocks() as ui:\n",
" with gradio.Row():\n",
" chatbot = gradio.Chatbot(height=500, type=\"messages\")\n",
" image_output = gradio.Image(height=500)\n",
" with gradio.Row():\n",
" entry = gradio.Textbox(label=\"Chat with our code expert:\")\n",
" microphone = gradio.Audio(sources=\"microphone\", type=\"filepath\")\n",
" with gradio.Row():\n",
" ai_model = gradio.Dropdown([\"GPT\", \"Gemini\"], label=\"Select Model\")\n",
" clear = gradio.Button(\"Clear\")\n",
"\n",
" def do_entry(message, history):\n",
" history += [{\"role\":\"user\", \"content\":message}]\n",
" return \"\", history, None\n",
"\n",
" entry.submit(do_entry, inputs=[entry, chatbot], outputs=[entry, chatbot, microphone]).then(\n",
" chat, inputs=[chatbot, ai_model], outputs=[chatbot, image_output]\n",
" )\n",
" microphone.change(\n",
" transcribe,\n",
" inputs=[microphone],\n",
" outputs=[entry] \n",
" )\n",
" clear.click(lambda: None, inputs=None, outputs=chatbot, queue=False)\n",
"\n",
"ui.launch()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "53abd8ac-a7de-42d1-91bf-741a93e2347b",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

File diff suppressed because one or more lines are too long

View File

@@ -246,7 +246,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
"version": "3.11.13"
}
},
"nbformat": 4,

View File

@@ -748,7 +748,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.12"
"version": "3.11.13"
}
},
"nbformat": 4,

View File

@@ -43,7 +43,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
"version": "3.11.13"
}
},
"nbformat": 4,