Merge pull request #678 from tylin7111095022/pr

Simulate a ticket-reservation function with a mock API and make the LLM use it as a tool.
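The core pattern is the OpenAI function-calling round trip. A minimal sketch (run as a notebook cell with top-level await; `tools` and the async `booking_flight` helper are the definitions from the first file below):

    import json
    from openai import OpenAI

    client = OpenAI()
    messages = [{"role": "user", "content": "Book a flight from Taipei to Tokyo on 2025/12/03"}]
    res = client.chat.completions.create(model="gpt-4.1-mini", messages=messages, tools=tools)
    if res.choices[0].finish_reason == "tool_calls":
        call = res.choices[0].message.tool_calls[0]
        args = json.loads(call.function.arguments)     # arguments the model filled in
        result = await booking_flight(**args)          # POSTs to the mock FastAPI endpoint
        messages.append(res.choices[0].message)
        messages.append({"role": "tool", "content": result, "tool_call_id": call.id})
        res = client.chat.completions.create(model="gpt-4.1-mini", messages=messages)
    print(res.choices[0].message.content)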
This commit is contained in:
Ed Donner
2025-10-07 15:39:25 -04:00
committed by GitHub
4 changed files with 610 additions and 0 deletions

View File

@@ -0,0 +1,173 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "27fa33cf",
"metadata": {},
"outputs": [],
"source": [
"import openai\n",
"from dotenv import load_dotenv\n",
"import gradio as gr\n",
"import os\n",
"import json\n",
"from datetime import datetime\n",
"\n",
"import httpx\n",
"from fastapi import FastAPI\n",
"import uvicorn\n",
"import threading\n",
"\n",
"load_dotenv('.env',override=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e9407192",
"metadata": {},
"outputs": [],
"source": [
"app = FastAPI()\n",
"\n",
"@app.post('/mock/ticket')\n",
"def booking(payload:dict):\n",
" dt = datetime.strptime(payload.get('date'), \"%Y/%m/%d\") \n",
" isoStr = dt.date().isoformat()\n",
" return {\"status\": \"success\", \"order_id\": f\"MOCK-FLIGHT-{isoStr}-001\"}\n",
"\n",
"# start server\n",
"def run():\n",
" uvicorn.run(app, host=\"127.0.0.1\", port=8000)\n",
"\n",
"thread = threading.Thread(target=run, daemon=True)\n",
"thread.start()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2229b6db",
"metadata": {},
"outputs": [],
"source": [
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"llm = openai.OpenAI(api_key=openai_api_key)\n",
"\n",
"system_message = \"You are a helpful assistant for an Airline called FlightAI. \"\n",
"system_message += \"Give short, courteous answers, no more than 1 sentence. \"\n",
"system_message += \"Always be accurate. If you don't know the answer, say so.\"\n",
"\n",
"async def booking_flight(departure, destination, date):\n",
" print(f\"Book the Flight Automatically, {departure} to {destination} at {date}.\")\n",
" reqBody = {\n",
" \"departure\": departure,\n",
" \"destination\": destination,\n",
" \"date\": date\n",
" }\n",
" async with httpx.AsyncClient() as client:\n",
" res = await client.post('http://127.0.0.1:8000/mock/ticket', json=reqBody)\n",
" print(res.text)\n",
" return res.text\n",
" \n",
"book_function = {\n",
" \"name\": \"booking_flight\",\n",
" \"description\": \"async function for booking the flight ticket for customers and it will return the status and id of flight. Call this function whenever you were asked to book the flight, and you will automatically tell the status of the order and the book number! if customers don't provide their departure or destination or date you should inquire them courteous. Note that the date format you should keep them with %Y/%m/%d. for example when a customer asks 'Please help me book the ticket from <departure> to <destination>'\",\n",
" \"parameters\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"departure\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The city where the customer departure\",\n",
" },\n",
" \"destination\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The city that the customer wants to travel to\",\n",
" },\n",
" \"date\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The date of the flight \",\n",
" },\n",
" },\n",
" \"required\": [\"destination\", \"departure\", \"date\"],\n",
" \"additionalProperties\": False\n",
" }\n",
"}\n",
"\n",
"async def handle_tool_call(message):\n",
" tool_call = message.tool_calls[0]\n",
" arguments = json.loads(tool_call.function.arguments)\n",
" departure = arguments.get('departure')\n",
" destination = arguments.get('destination')\n",
" date = arguments.get('date')\n",
" res = await booking_flight(departure, destination, date)\n",
" response = {\n",
" \"role\": \"tool\",\n",
" \"content\": json.dumps(res),\n",
" \"tool_call_id\": tool_call.id\n",
" }\n",
" return response\n",
"\n",
"tools = [{\"type\": \"function\", \"function\": book_function}]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5bf9656f",
"metadata": {},
"outputs": [],
"source": [
"res = await booking_flight('Taiwan', \"NewYork\", \"2025/12/03\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d2924055",
"metadata": {},
"outputs": [],
"source": [
"async def chat(message, history):\n",
" messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n",
" res = llm.chat.completions.create(messages=messages,\n",
" model=\"gpt-4.1-mini\",\n",
" tools=tools)\n",
" \n",
" if res.choices[0].finish_reason == 'tool_calls':\n",
" message = res.choices[0].message\n",
" toolResponse = await handle_tool_call(message)\n",
" messages.append(message)\n",
" messages.append(toolResponse)\n",
" res = llm.chat.completions.create(messages=messages,\n",
" model=\"gpt-4.1-mini\")\n",
"\n",
" return res.choices[0].message.content\n",
"\n",
"gr.ChatInterface(fn=chat,type=\"messages\").launch()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "3.10.15",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.15"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,175 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "d006b2ea-9dfe-49c7-88a9-a5a0775185fd",
"metadata": {},
"source": [
"# Additional End of week Exercise - week 2\n",
"\n",
"Now use everything you've learned from Week 2 to build a full prototype for the technical question/answerer you built in Week 1 Exercise.\n",
"\n",
"This should include a Gradio UI, streaming, use of the system prompt to add expertise, and the ability to switch between models. Bonus points if you can demonstrate use of a tool!\n",
"\n",
"If you feel bold, see if you can add audio input so you can talk to it, and have it respond with audio. ChatGPT or Claude can help you, or email me if you have questions.\n",
"\n",
"I will publish a full solution here soon - unless someone beats me to it...\n",
"\n",
"There are so many commercial applications for this, from a language tutor, to a company onboarding solution, to a companion AI to a course (like this one!) I can't wait to see your results."
]
},
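{
"cell_type": "markdown",
"id": "streaming-sketch",
"metadata": {},
"source": [
"A quick sketch of the provider-agnostic streaming pattern the solution below builds on (the `unpack` and `stream_reply` helpers here are illustrative, not the exact implementation):\n",
"\n",
"```python\n",
"# Both SDKs stream with stream=True but emit different event shapes;\n",
"# normalise each chunk to plain text before accumulating.\n",
"def unpack(chunk, publisher):\n",
"    if publisher == 'openai':\n",
"        return chunk.choices[0].delta.content or ''\n",
"    if publisher == 'claude' and chunk.type == 'content_block_delta':\n",
"        return chunk.delta.text or ''\n",
"    return ''\n",
"\n",
"def stream_reply(stream, publisher):\n",
"    reply = ''\n",
"    for chunk in stream:\n",
"        reply += unpack(chunk, publisher)\n",
"        yield reply  # Gradio re-renders the growing reply on each yield\n",
"```"
]
},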
{
"cell_type": "code",
"execution_count": null,
"id": "05fc552b",
"metadata": {},
"outputs": [],
"source": [
"import openai\n",
"import anthropic\n",
"import gradio as gr\n",
"import dotenv\n",
"import os"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a07e7793-b8f5-44f4-aded-5562f633271a",
"metadata": {},
"outputs": [],
"source": [
"class Chatbot:\n",
" def __init__(self, apiKey, publisher='openai'):\n",
" if publisher not in ['openai', 'claude']:\n",
" raise ValueError(f\"publisher must be openai or claude, but got {publisher}\")\n",
" self.publisher = publisher\n",
" self.systemPrompt = None\n",
" self.historyPrompt = []\n",
" self.llm = openai.OpenAI(api_key=apiKey) if publisher == 'openai' else anthropic.Anthropic(api_key=apiKey)\n",
" \n",
" def setSystemPrompt(self, systemPrompt:str):\n",
" self.systemPrompt = systemPrompt.strip()\n",
" if len(self.historyPrompt) == 0:\n",
" self.historyPrompt.append({\"role\": \"system\", \"content\": f\"{systemPrompt}\"})\n",
" else:\n",
" self.historyPrompt[0] = {\"role\": \"system\", \"content\": f\"{systemPrompt}\"}\n",
" \n",
" def _prompt2obj(self, role:str, prompt:str):\n",
" return {\n",
" \"role\": role,\n",
" \"content\": prompt.strip()\n",
" }\n",
" \n",
" def unpackText(self, chunk):\n",
" text = ''\n",
" if self.publisher == 'openai':\n",
" text = chunk.choices[0].delta.content or ''\n",
" elif self.publisher == 'claude':\n",
" if chunk.type == \"content_block_delta\":\n",
" text = chunk.delta.text or ''\n",
" \n",
" return text\n",
" \n",
" def chat(self, message):\n",
" self.historyPrompt.append(self._prompt2obj(\"user\", message))\n",
" completeReply = \"\"\n",
"\n",
" if self.publisher == 'openai':\n",
" stream = self.llm.chat.completions.create(model='gpt-4o-mini',\n",
" messages=self.historyPrompt,\n",
" stream=True)\n",
" elif self.publisher == 'claude':\n",
" stream = self.llm.messages.create(system=self.historyPrompt[0][\"content\"],\n",
" model=\"claude-sonnet-4-20250514\",\n",
" max_tokens=200,\n",
" messages=self.historyPrompt[1:],\n",
" stream=True)\n",
" \n",
" for chunk in stream:\n",
" completeReply += self.unpackText(chunk)\n",
" yield completeReply\n",
" \n",
" \n",
" self.historyPrompt.append(self._prompt2obj(\"assistant\", completeReply))\n",
" \n",
" def _gradioChatWrapper(self):\n",
" def gradioChatFn(message, history):\n",
" for partial_reply in self.chat(message):\n",
" yield partial_reply\n",
" return gradioChatFn\n",
" \n",
" def getAllPrompt(self):\n",
" return self.historyPrompt\n",
" \n",
" def run(self):\n",
" gradioFn = self._gradioChatWrapper()\n",
" gr.ChatInterface(fn=gradioFn, type=\"messages\").launch()\n",
" \n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1fca53e8",
"metadata": {},
"outputs": [],
"source": [
"# initial env\n",
"dotenv.load_dotenv(\".env\", override=True)\n",
"openaiKey = os.getenv(\"OPENAI_API_KEY\")\n",
"claudeKey = os.getenv(\"ANTHROPIC_API_KEY\")\n",
"openaiInfo = {\n",
" 'apiKey': openaiKey,\n",
" 'publisher': 'openai'\n",
"}\n",
"claudeInfo = {\n",
" 'apiKey': claudeKey,\n",
" 'publisher': 'claude'\n",
"}\n",
"\n",
"SYSTEM_PROMPT = \"\"\"\n",
"You are a technical experts and responds every question I asked with an explanation.\n",
"\"\"\"\n",
"\n",
"openaiChatbot = Chatbot(**openaiInfo)\n",
"openaiChatbot.setSystemPrompt(SYSTEM_PROMPT)\n",
"openaiChatbot.run()\n",
"\n",
"# claudeChatbot = Chatbot(**claudeInfo)\n",
"# claudeChatbot.setSystemPrompt(SYSTEM_PROMPT)\n",
"# claudeChatbot.run()\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "59a2ac0f",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "3.10.15",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.15"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,39 @@
OPENAI_MODEL = "gpt-4o-mini"
CLAUDE_MODEL = "claude-3-5-haiku-20241022"
OUTPUT_MAX_TOKEN = 2000

PYTHON_CODE = '''
import math

def pairwise_distance(points_a, points_b):
    """
    Compute the pairwise Euclidean distance between two sets of 3D points.

    Args:
        points_a: list of (x, y, z)
        points_b: list of (x, y, z)

    Returns:
        A 2D list of shape (len(points_a), len(points_b)) representing distances
    """
    distances = []
    for i in range(len(points_a)):
        row = []
        for j in range(len(points_b)):
            dx = points_a[i][0] - points_b[j][0]
            dy = points_a[i][1] - points_b[j][1]
            dz = points_a[i][2] - points_b[j][2]
            d = math.sqrt(dx * dx + dy * dy + dz * dz)
            row.append(d)
        distances.append(row)
    return distances

# Example usage
if __name__ == "__main__":
    import random
    points_a = [(random.random(), random.random(), random.random()) for _ in range(100)]
    points_b = [(random.random(), random.random(), random.random()) for _ in range(100)]
    dists = pairwise_distance(points_a, points_b)
    print(f"Distance[0][0] = {dists[0][0]:.4f}")
'''
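
# Usage sketch (assumption: this file lives as config.py next to the app script,
# which currently has its config import commented out):
#     from config import OPENAI_MODEL, CLAUDE_MODEL, OUTPUT_MAX_TOKEN, PYTHON_CODE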

View File

@@ -0,0 +1,223 @@
import dotenv
import os
from openai import OpenAI
from anthropic import Anthropic
import gradio as gr
# from .config import *
OPENAI_MODEL = "gpt-4o-mini"
CLAUDE_MODEL = "claude-3-5-haiku-20241022"
OUTPUT_MAX_TOKEN = 2000
CSS = """
body {
background: #f4f6fa;
font-family: 'Segoe UI', Roboto, sans-serif;
}
.raw textarea {
border: 1.5px solid #00FFBF !important;
box-shadow: 0 0 10px rgba(229, 115, 115, 0.3);
color: #00FFBF !important;
font-size: 24px;
}
.optimize textarea {
border: 1.5px solid #FFBF00 !important;
box-shadow: 0 0 10px rgba(129, 199, 132, 0.3);
color: #FFBF00 !important;
font-size: 24px
}
button {
background: linear-gradient(90deg, #2196f3, #00BFFF);
color: white !important;
font-weight: bold;
border-radius: 8px !important;
transition: all 0.2s ease-in-out;
}
button:hover {
background: linear-gradient(90deg, #21cbf3, #2196f3);
transform: scale(1.05);
}
h1 {
text-align: center;
color: #1565c0;
font-size: 38px;
}
"""

PYTHON_CODE = '''
import math

def pairwise_distance(points_a, points_b):
    """
    Compute the pairwise Euclidean distance between two sets of 3D points.

    Args:
        points_a: list of (x, y, z)
        points_b: list of (x, y, z)

    Returns:
        A 2D list of shape (len(points_a), len(points_b)) representing distances
    """
    distances = []
    for i in range(len(points_a)):
        row = []
        for j in range(len(points_b)):
            dx = points_a[i][0] - points_b[j][0]
            dy = points_a[i][1] - points_b[j][1]
            dz = points_a[i][2] - points_b[j][2]
            d = math.sqrt(dx * dx + dy * dy + dz * dz)
            row.append(d)
        distances.append(row)
    return distances

# Example usage
if __name__ == "__main__":
    import random
    points_a = [(random.random(), random.random(), random.random()) for _ in range(100)]
    points_b = [(random.random(), random.random(), random.random()) for _ in range(100)]
    dists = pairwise_distance(points_a, points_b)
    print(f"Distance[0][0] = {dists[0][0]:.4f}")
'''


def main():
    dotenv.load_dotenv(override=True)
    os.environ['OPENAI_API_KEY'] = os.getenv(
        'OPENAI_API_KEY', 'your-key-if-not-using-env')
    os.environ['ANTHROPIC_API_KEY'] = os.getenv(
        'ANTHROPIC_API_KEY', 'your-key-if-not-using-env')
    # codeReviser = CodeAccelerator('openai', os.getenv('OPENAI_API_KEY'))
    codeReviser = CodeAccelerator('anthropic', os.getenv('ANTHROPIC_API_KEY'))
    display_ui(codeReviser)


def safe_exec(code_str):
    import io
    import sys
    import time
    import ast

    # Redirect stdout into a buffer so we can return whatever the code prints
    stdout_buffer = io.StringIO()
    old_stdout = sys.stdout
    sys.stdout = stdout_buffer
    try:
        tree = ast.parse(code_str)
        compiled = compile(tree, filename="<ast>", mode="exec")
        # Run in a single namespace with __name__ set, so the executed code's
        # imports are visible to its own functions and its __main__ guard fires.
        exec_ns = {"__name__": "__main__"}
        start = time.time()
        exec(compiled, exec_ns)
        exec_time = time.time() - start
        print(f"This code took {exec_time:.8f} seconds\n")
        return stdout_buffer.getvalue()
    except Exception as e:
        return f"Error: {e}"
    finally:
        # Always restore sys.stdout
        sys.stdout = old_stdout


def display_ui(codeReviser):
    def _optimize(pythonCode):
        # Strip the markdown fences the model wraps around its reply
        for text in codeReviser.respond(pythonCode):
            yield text.replace("```python", "").replace("```", "")

    with gr.Blocks(css=CSS) as ui:
        gr.Markdown("# ✨Convert Python code for acceleration")
        with gr.Row():
            beforeBlock = gr.Textbox(
                label="raw python code", value=PYTHON_CODE, lines=20, elem_classes=["raw"])
            afterBlock = gr.Textbox(
                label="optimized python code", lines=20, elem_classes=["optimize"])
        with gr.Row():
            convert = gr.Button("Convert code")
        with gr.Row():
            rawRunButton = gr.Button("Run raw code")
            optRunButton = gr.Button("Run optimized code")
        with gr.Row():
            rawOut = gr.TextArea(label="Raw result:",
                                 elem_classes=["raw"])
            optimizeOut = gr.TextArea(
                label="Optimized result:", elem_classes=["optimize"])

        convert.click(_optimize,
                      inputs=[beforeBlock], outputs=[afterBlock])
        rawRunButton.click(safe_exec, inputs=[beforeBlock], outputs=[rawOut])
        optRunButton.click(safe_exec, inputs=[
            afterBlock], outputs=[optimizeOut])
    ui.launch(inbrowser=True)


class CodeAccelerator:
    def __init__(self, frontierModel: str, apiKey):
        self.frontierModel = frontierModel
        if frontierModel == "openai":
            self.llm = OpenAI(api_key=apiKey)
        elif frontierModel == "anthropic":
            self.llm = Anthropic(api_key=apiKey)
        else:
            raise ValueError(f'frontierModel {frontierModel} is invalid.')

    def _getChatTemplate(self, pythonCode):
        _code = pythonCode.strip()
        systemPrompt = '''
        You are an assistant that reimplements Python code for high performance, targeting the fastest possible runtime on a Windows laptop.
        Respond only with Python code; use comments sparingly and do not provide any explanation other than occasional comments.
        The new Python code must produce identical output in the fastest possible time.
        '''
        userPrompt = f'''
        Rewrite this Python code with the fastest possible implementation that produces identical output.
        Respond only with Python code; do not explain your work other than a few comments.
        Remember to import all necessary Python packages such as numpy.

        {_code}
        '''
        return [
            {"role": "system", "content": systemPrompt},
            {"role": "user", "content": userPrompt}
        ]

    def respond(self, pythonCode):
        """Generator: yields the reply accumulated so far as chunks stream in."""
        chatTemplate = self._getChatTemplate(pythonCode)
        reply = ""
        if self.frontierModel == 'openai':
            stream = self.llm.chat.completions.create(messages=chatTemplate,
                                                      model=OPENAI_MODEL,
                                                      max_tokens=OUTPUT_MAX_TOKEN,
                                                      stream=True)
            for chunk in stream:
                chunkText = chunk.choices[0].delta.content or ""
                reply += chunkText
                yield reply
        elif self.frontierModel == "anthropic":
            stream = self.llm.messages.create(model=CLAUDE_MODEL,
                                              system=chatTemplate[0]['content'],
                                              messages=chatTemplate[1:],
                                              max_tokens=OUTPUT_MAX_TOKEN,
                                              stream=True)
            for chunk in stream:
                chunkText = chunk.delta.text if chunk.type == "content_block_delta" else ""
                reply += chunkText
                yield reply
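
# Headless usage sketch (assumption: the matching API key is set in .env):
#     reviser = CodeAccelerator('openai', os.getenv('OPENAI_API_KEY'))
#     for partial in reviser.respond(PYTHON_CODE):
#         pass  # `partial` holds the reply streamed so far
#     print(partial)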
if __name__ == "__main__":
main()