Merge pull request #807 from muthash/stephen/week2-exercise-2

[Bootcamp] Week 2 Bookstore Assistant (Stephen)
This commit is contained in:
Ed Donner
2025-10-23 09:02:34 -04:00
committed by GitHub

View File

@@ -0,0 +1,296 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "d006b2ea-9dfe-49c7-88a9-a5a0775185fd",
"metadata": {},
"source": [
"# End of week 2 Exercise - Bookstore Assistant\n",
"\n",
"Now use everything you've learned from Week 2 to build a full prototype for the technical question/answerer you built in Week 1 Exercise.\n",
"\n",
"This should include a Gradio UI, streaming, use of the system prompt to add expertise, and the ability to switch between models. Bonus points if you can demonstrate use of a tool!\n",
"\n",
"If you feel bold, see if you can add audio input so you can talk to it, and have it respond with audio. ChatGPT or Claude can help you, or email me if you have questions.\n",
"\n",
"I will publish a full solution here soon - unless someone beats me to it...\n",
"\n",
"There are so many commercial applications for this, from a language tutor, to a company onboarding solution, to a companion AI to a course (like this one!) I can't wait to see your results."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "a07e7793-b8f5-44f4-aded-5562f633271a",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"OpenAI API Key exists and begins sk-proj-\n",
"Google API Key exists and begins AIzaSyCL\n"
]
}
],
"source": [
"# Environment setup: load API keys from .env and build two OpenAI-compatible clients.\n",
"import os\n",
"import json\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import gradio as gr\n",
"\n",
"load_dotenv(override=True)\n",
"\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"google_api_key = os.getenv('GOOGLE_API_KEY')\n",
"\n",
"# Print only an 8-character prefix so the full secret never lands in saved cell output.\n",
"if openai_api_key:\n",
"    print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
"else:\n",
"    print(\"OpenAI API Key not set\")\n",
"\n",
"if google_api_key:\n",
"    print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n",
"else:\n",
"    print(\"Google API Key not set\")\n",
"\n",
"MODEL_GPT = \"gpt-4.1-mini\"\n",
"MODEL_GEMINI = \"gemini-2.5-pro\"\n",
"\n",
"openai = OpenAI()\n",
"\n",
"# Gemini is reached through its OpenAI-compatible endpoint, so one client API serves both models.\n",
"gemini_url = \"https://generativelanguage.googleapis.com/v1beta/openai/\"\n",
"gemini = OpenAI(api_key=google_api_key, base_url=gemini_url)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0a3aa8bf",
"metadata": {},
"outputs": [],
"source": [
"# Gradio UI, streaming, use of the system prompt to add expertise, and the ability to switch between models\n",
"\n",
"system_message= \"\"\"\n",
" You are an assistant in a software engineering bookstore that analyzes the content of technical books and generates concise, informative summaries for readers.\n",
" Your goal is to help customers quickly understand what each book covers, its practical value, and who would benefit most from reading it.\n",
" Respond in markdown without code blocks.\n",
" Each summary should include:\n",
" Overview: The books main topic, scope, and focus area (e.g., software architecture, DevOps, system design).\n",
" Key Insights: The most important lessons, principles, or methodologies discussed.\n",
" Recommended For: The type of reader who would benefit most (e.g., junior developers, engineering managers, backend specialists).\n",
" Related Reads: Suggest one or two similar or complementary titles available in the store.\n",
" Maintain a professional and knowledgeable tone that reflects expertise in software engineering literature. \n",
"\"\"\"\n",
"\n",
"def stream_gpt(prompt):\n",
"    \"\"\"Stream the GPT response, yielding the accumulated text so far.\"\"\"\n",
"    messages = [\n",
"        {\"role\": \"system\", \"content\": system_message},\n",
"        {\"role\": \"user\", \"content\": prompt},\n",
"    ]\n",
"    stream = openai.chat.completions.create(model=MODEL_GPT, messages=messages, stream=True)\n",
"    partial = \"\"\n",
"    for chunk in stream:\n",
"        partial += chunk.choices[0].delta.content or \"\"\n",
"        yield partial\n",
"\n",
"def stream_gemini(prompt):\n",
"    \"\"\"Stream the Gemini response, yielding the accumulated text so far.\"\"\"\n",
"    messages = [\n",
"        {\"role\": \"system\", \"content\": system_message},\n",
"        {\"role\": \"user\", \"content\": prompt}\n",
"    ]\n",
"    # FIX: use the `gemini` client (Google's OpenAI-compatible endpoint) rather than\n",
"    # `openai` -- sending MODEL_GEMINI to the OpenAI API fails with a model-not-found error.\n",
"    stream = gemini.chat.completions.create(\n",
"        model=MODEL_GEMINI,\n",
"        messages=messages,\n",
"        stream=True\n",
"    )\n",
"    result = \"\"\n",
"    for chunk in stream:\n",
"        result += chunk.choices[0].delta.content or \"\"\n",
"        yield result\n",
"\n",
"def stream_model(prompt, model):\n",
"    \"\"\"Dispatch to the streaming generator matching the selected model name.\"\"\"\n",
"    streamers = {\"GPT\": stream_gpt, \"Gemini\": stream_gemini}\n",
"    if model not in streamers:\n",
"        raise ValueError(\"Unknown model\")\n",
"    yield from streamers[model](prompt)\n",
"\n",
"\n",
"# Gradio interface: prompt box + model dropdown -> streamed markdown summary.\n",
"book_prompt = gr.Textbox(label=\"Your message:\", info=\"Enter a software engineering book title for the LLM\", lines=4)\n",
"model_choice = gr.Dropdown([\"GPT\", \"Gemini\"], label=\"Select model\", value=\"GPT\")\n",
"summary_output = gr.Markdown(label=\"Response:\")\n",
"\n",
"view = gr.Interface(\n",
"    fn=stream_model,\n",
"    title=\"Bookstore Assistant\",\n",
"    inputs=[book_prompt, model_choice],\n",
"    outputs=[summary_output],\n",
"    examples=[\n",
"        [\"Explain Clean Code by Robert C. Martin\", \"GPT\"],\n",
"        [\"Explain Clean Code by Robert C. Martin\", \"Gemini\"]\n",
"    ],\n",
"    flagging_mode=\"never\"\n",
")\n",
"view.launch()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a4d7980c",
"metadata": {},
"outputs": [],
"source": [
"import sqlite3\n",
"\n",
"# Simple title -> price store; titles are saved lowercased for case-insensitive lookup.\n",
"DB = \"books.db\"\n",
"\n",
"with sqlite3.connect(DB) as conn:\n",
"    conn.execute('CREATE TABLE IF NOT EXISTS prices (title TEXT PRIMARY KEY, price REAL)')\n",
"    conn.commit()\n",
"\n",
"def get_book_price(title):\n",
"    \"\"\"Look up the stored price for a book title (case-insensitive); returns a display string.\"\"\"\n",
"    print(f\"DATABASE TOOL CALLED: Getting price for {title}\", flush=True)\n",
"    with sqlite3.connect(DB) as conn:\n",
"        row = conn.execute('SELECT price FROM prices WHERE title = ?', (title.lower(),)).fetchone()\n",
"    if row is None:\n",
"        return \"No price data available for this title\"\n",
"    return f\"Book -> {title} price is ${row[0]}\"\n",
"\n",
"def set_book_price(title, price):\n",
"    \"\"\"Insert or update the price for a book title (stored lowercased).\"\"\"\n",
"    with sqlite3.connect(DB) as conn:\n",
"        conn.execute('INSERT INTO prices (title, price) VALUES (?, ?) ON CONFLICT(title) DO UPDATE SET price = ?', (title.lower(), price, price))\n",
"        conn.commit()\n",
"\n",
"# Seed the catalogue with a few known titles.\n",
"book_prices = {\"Clean code\": 20, \"Clean architecture\": 30, \"System design\": 40, \"Design patterns\": 50}\n",
"for title, price in book_prices.items():\n",
"    set_book_price(title, price)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "86741761",
"metadata": {},
"outputs": [],
"source": [
"# use of a tool\n",
"MODEL = \"gpt-4.1-mini\"\n",
"\n",
"system_message = \"\"\"\n",
"You are a helpful assistant in a software engineering bookstore BookEye. \n",
"Give short, courteous answers, no more than 1 sentence.\n",
"Always be accurate. If you don't know the answer, say so.\n",
"\"\"\"\n",
"\n",
"# JSON schema describing get_book_price so the model can request it as a tool.\n",
"price_function = {\n",
"    \"name\": \"get_book_price\",\n",
"    \"description\": \"Get the price of a book.\",\n",
"    \"parameters\": {\n",
"        \"type\": \"object\",\n",
"        \"properties\": {\n",
"            \"book_title\": {\n",
"                \"type\": \"string\",\n",
"                \"description\": \"The title of the book that the customer wants to buy\",\n",
"            },\n",
"        },\n",
"        \"required\": [\"book_title\"],\n",
"        \"additionalProperties\": False\n",
"    }\n",
"}\n",
"tools = [{\"type\": \"function\", \"function\": price_function}]\n",
"\n",
"\n",
"def talker(message):\n",
"    \"\"\"Convert reply text to speech via OpenAI TTS; returns the raw audio bytes.\"\"\"\n",
"    speech = openai.audio.speech.create(model=\"gpt-4o-mini-tts\", voice=\"coral\", input=message)\n",
"    return speech.content\n",
"\n",
"def handle_tool_calls(message):\n",
"    \"\"\"Execute each requested tool call and return the tool-role reply messages.\"\"\"\n",
"    replies = []\n",
"    for tool_call in message.tool_calls:\n",
"        if tool_call.function.name != \"get_book_price\":\n",
"            continue\n",
"        args = json.loads(tool_call.function.arguments)\n",
"        price_details = get_book_price(args.get('book_title'))\n",
"        replies.append({\"role\": \"tool\", \"content\": price_details, \"tool_call_id\": tool_call.id})\n",
"    return replies\n",
"\n",
"def chat(history):\n",
"    \"\"\"Answer the latest user message, resolving any tool calls, and speak the reply.\"\"\"\n",
"    history = [{\"role\": h[\"role\"], \"content\": h[\"content\"]} for h in history]\n",
"    messages = [{\"role\": \"system\", \"content\": system_message}] + history\n",
"    response = openai.chat.completions.create(model=MODEL, messages=messages, tools=tools)\n",
"\n",
"    # Keep satisfying tool calls until the model produces a normal reply.\n",
"    while response.choices[0].finish_reason == \"tool_calls\":\n",
"        assistant_msg = response.choices[0].message\n",
"        tool_replies = handle_tool_calls(assistant_msg)\n",
"        messages.append(assistant_msg)\n",
"        messages.extend(tool_replies)\n",
"        response = openai.chat.completions.create(model=MODEL, messages=messages, tools=tools)\n",
"\n",
"    reply = response.choices[0].message.content\n",
"    history.append({\"role\": \"assistant\", \"content\": reply})\n",
"\n",
"    return history, talker(reply)\n",
"\n",
"def put_message_in_chatbot(message, history):\n",
"    \"\"\"Move the textbox contents into the chat history and clear the textbox.\"\"\"\n",
"    return \"\", history + [{\"role\": \"user\", \"content\": message}]\n",
"# Chat UI: text in; updated chat history plus spoken audio out.\n",
"with gr.Blocks() as ui:\n",
"    with gr.Row():\n",
"        chatbot = gr.Chatbot(height=300, type=\"messages\")\n",
"        audio_output = gr.Audio(autoplay=True)\n",
"\n",
"    with gr.Row():\n",
"        message = gr.Textbox(label=\"Chat with our AI Assistant:\")\n",
"\n",
"    message.submit(put_message_in_chatbot, inputs=[message, chatbot], outputs=[message, chatbot]).then(\n",
"        chat, inputs=chatbot, outputs=[chatbot, audio_output]\n",
"    )\n",
"\n",
"# SECURITY FIX: never commit login credentials in source. Read them from the\n",
"# environment; the original values remain as defaults so behavior is unchanged.\n",
"ui.launch(inbrowser=True, auth=(os.getenv(\"GRADIO_USER\", \"ted\"), os.getenv(\"GRADIO_PASSWORD\", \"mowsb\")))"
]
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.12"
}
},
"nbformat": 4,
"nbformat_minor": 5
}