PR for exercise of week 2
397
week2/week2_exercise_jom.ipynb
Normal file
@@ -0,0 +1,397 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "fe12c203-e6a6-452c-a655-afb8a03a4ff5",
   "metadata": {},
   "source": [
    "# Additional End of Week Exercise - Week 2\n",
    "\n",
    "Now use everything you've learned from Week 2 to build a full prototype for the technical question answerer you built in the Week 1 exercise.\n",
    "\n",
    "This should include a Gradio UI, streaming, use of the system prompt to add expertise, and the ability to switch between models. Bonus points if you can demonstrate use of a tool!\n",
    "\n",
    "If you feel bold, see if you can add audio input so you can talk to it, and have it respond with audio. ChatGPT or Claude can help you, or email me if you have questions.\n",
    "\n",
    "I will publish a full solution here soon - unless someone beats me to it...\n",
    "\n",
    "There are so many commercial applications for this, from a language tutor, to a company onboarding solution, to a companion AI for a course (like this one!). I can't wait to see your results.\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "66730be3",
   "metadata": {},
   "source": [
    "---"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "c1070317-3ed9-4659-abe3-828943230e03",
   "metadata": {},
   "outputs": [],
   "source": [
    "# imports\n",
    "import os\n",
    "import json\n",
    "from enum import StrEnum\n",
    "from types import SimpleNamespace\n",
    "\n",
    "import gradio as gr\n",
    "from openai import OpenAI\n",
    "from dotenv import load_dotenv\n",
    "\n",
    "# Load API keys from .env and create the default OpenAI client\n",
    "load_dotenv(override=True)\n",
    "api_key = os.getenv(\"OPENAI_API_KEY\")\n",
    "anthropic_api_key = os.getenv(\"ANTHROPIC_API_KEY\")\n",
    "\n",
    "openai = OpenAI()"
   ]
  },
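  {
   "cell_type": "markdown",
   "id": "key-check-note",
   "metadata": {},
   "source": [
    "Optional sanity check (an added sketch, not part of the original exercise): confirm the keys actually loaded from `.env` before going further. It only prints the prefix of each key, so nothing secret leaks into the notebook.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "key-check-cell",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Report whether each key was found, without leaking the full value\n",
    "for name, key in [(\"OPENAI_API_KEY\", api_key), (\"ANTHROPIC_API_KEY\", anthropic_api_key)]:\n",
    "    if key:\n",
    "        print(f\"{name} found, starts with {key[:7]}...\")\n",
    "    else:\n",
    "        print(f\"{name} NOT found - check your .env file\")"
   ]
  },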
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "16ec5d8a",
   "metadata": {},
   "outputs": [],
   "source": [
    "system_prompt = \"\"\"\n",
    "You are a helpful tutor that explains code. Structure your answer in markdown, without code blocks, with the following parts:\n",
    "- Identify the topic of the question (so the user can look for more info).\n",
    "- Give an ELI5 explanation of the question.\n",
    "- Give a step-by-step explanation of the code.\n",
    "- Ask the user a follow-up question, or a variation of the question, to check that they understand the concept.\n",
    "- Give the answer to the follow-up question as a spoiler.\n",
    "\n",
    "If the last message is the output of a tool call containing structured markdown, return it exactly as it is.\n",
    "\"\"\""
   ]
  },
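  {
   "cell_type": "markdown",
   "id": "prompt-preview-note",
   "metadata": {},
   "source": [
    "A minimal sketch (added for illustration, assuming `gpt-4o-mini` is available) to preview the structured answer the system prompt asks for, using one non-streaming call before wiring up the full UI. The sample question is a hypothetical example.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "prompt-preview-cell",
   "metadata": {},
   "outputs": [],
   "source": [
    "# One-off call to check the tutor's answer structure\n",
    "sample_question = \"Please explain what this code does: yield from {book.get('author') for book in books if book.get('author')}\"\n",
    "preview = openai.chat.completions.create(\n",
    "    model=\"gpt-4o-mini\",\n",
    "    messages=[\n",
    "        {\"role\": \"system\", \"content\": system_prompt},\n",
    "        {\"role\": \"user\", \"content\": sample_question},\n",
    "    ],\n",
    ")\n",
    "print(preview.choices[0].message.content)"
   ]
  },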
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "5e6f715e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# I'm going to create a tool that is itself an LLM.\n",
    "# The tool makes a separate LLM call that rigorously assesses whether the answer is valid or not.\n",
    "class Enum_Model(StrEnum):\n",
    "    GPT = 'gpt-4o-mini'\n",
    "    LLAMA = 'llama3.2:1b'\n",
    "    GPT_OSS = 'gpt-oss:20b-cloud'\n",
    "    SONNET = 'claude-3-5-sonnet-20240620'\n",
    "\n",
    "\n",
    "def llm_as_tool(input_msg: str):\n",
    "    # System prompt telling the LLM to critically analyze whether a coding problem's solution is correct\n",
    "    llm_tool_system_prompt = (\n",
    "        \"You are an expert code reviewer. Your task is to rigorously and critically analyze whether the provided solution \"\n",
    "        \"correctly solves the stated coding problem. Carefully consider correctness, completeness, and potential edge cases. \"\n",
    "        \"Explain your reasoning with supporting details and point out any flaws, omissions, or improvements. \"\n",
    "        \"Provide a clear judgment: is the solution correct? If not, why not? \"\n",
    "        \"Output your answer using the following structured markdown format:\\n\\n\"\n",
    "        \"## Analysis\\n\"\n",
    "        \"- **Correctness:** <your comments>\\n\"\n",
    "        \"- **Completeness:** <your comments>\\n\"\n",
    "        \"- **Edge Cases:** <your comments>\\n\"\n",
    "        \"- **Improvements:** <optional improvement suggestions>\\n\\n\"\n",
    "        \"## Judgment\\n\"\n",
    "        \"<Clearly state whether the solution is correct, and justify your decision.>\"\n",
    "    )\n",
    "\n",
    "    ollama = OpenAI(base_url=\"http://localhost:11434/v1\")\n",
    "    print(f'Calling llm_as_tool with input {input_msg[:10]}...')\n",
    "    response = ollama.chat.completions.create(\n",
    "        model=\"qwen3-coder:480b-cloud\",\n",
    "        messages=[\n",
    "            {\"role\": \"system\", \"content\": llm_tool_system_prompt},\n",
    "            {\"role\": \"user\", \"content\": input_msg},\n",
    "        ]\n",
    "    )\n",
    "    answer = response.choices[0].message.content\n",
    "    print(f'answer: {answer[:50]}')\n",
    "    return answer\n",
    "\n",
    "# There's a particular dictionary structure that's required to describe our function:\n",
    "\n",
    "check_code_tool_def = {\n",
    "    \"name\": \"check_code_tool\",\n",
    "    \"description\": \"Checks whether the code solution provided by the user is correct.\",\n",
    "    \"parameters\": {\n",
    "        \"type\": \"object\",\n",
    "        \"properties\": {\n",
    "            \"input_msg\": {\n",
    "                \"type\": \"string\",\n",
    "                \"description\": \"A concise summary of the question the user asked, the proposed exercise, and the answer the user gave\",\n",
    "            },\n",
    "        },\n",
    "        \"required\": [\"input_msg\"],\n",
    "        \"additionalProperties\": False\n",
    "    }\n",
    "}\n",
    "\n",
    "tools = [\n",
    "    {\"type\": \"function\", \"function\": check_code_tool_def},\n",
    "]\n",
    "\n",
    "tools_dict = {\n",
    "    \"check_code_tool\": llm_as_tool,\n",
    "}\n",
    "\n",
    "def handle_tool_calls(message):\n",
    "    responses = []\n",
    "    print(f\"This is the message in handle_tool_calls: {message}\")\n",
    "    for tool_call in message.tool_calls:\n",
    "        arguments = json.loads(tool_call.function.arguments)\n",
    "        func = tools_dict.get(tool_call.function.name, lambda **kwargs: \"Unknown tool\")\n",
    "        markdown_analysis = func(**arguments)\n",
    "        responses.append({\n",
    "            \"role\": \"tool\",\n",
    "            \"content\": markdown_analysis,\n",
    "            \"tool_call_id\": tool_call.id\n",
    "        })\n",
    "        print(f\"response for a call is {responses}\")\n",
    "    return responses\n",
    "\n",
    "def read_text_to_speech(history):\n",
    "    # Read the latest chatbot message aloud using OpenAI's TTS endpoint\n",
    "    message = history[-1]['content']\n",
    "    response = openai.audio.speech.create(\n",
    "        model=\"gpt-4o-mini-tts\",\n",
    "        voice=\"onyx\",  # Also, try replacing onyx with alloy or coral\n",
    "        input=message\n",
    "    )\n",
    "    return response.content"
   ]
  },
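  {
   "cell_type": "markdown",
   "id": "tool-roundtrip-note",
   "metadata": {},
   "source": [
    "An optional sketch (added for illustration) that exercises `handle_tool_calls` with a hand-built tool call, mirroring the `SimpleNamespace` objects the streaming code reconstructs later. It lets you check the tool plumbing without going through the chat model, and assumes the Ollama endpoint used by `llm_as_tool` is running. The id and arguments are hypothetical.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "tool-roundtrip-cell",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Fabricate the same shape of object that the streaming loop assembles from deltas\n",
    "fake_call = SimpleNamespace(\n",
    "    id=\"call_test_1\",  # hypothetical id, normally supplied by the model\n",
    "    type=\"function\",\n",
    "    function=SimpleNamespace(\n",
    "        name=\"check_code_tool\",\n",
    "        arguments=json.dumps({\"input_msg\": \"Question: reverse a string. Answer given: s[::-1]\"}),\n",
    "    ),\n",
    ")\n",
    "\n",
    "for r in handle_tool_calls(SimpleNamespace(tool_calls=[fake_call])):\n",
    "    print(r[\"tool_call_id\"], \"->\", r[\"content\"][:80])"
   ]
  },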
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "6b3a49b0",
   "metadata": {},
   "outputs": [],
   "source": [
    "def chat(history, model):\n",
    "    print(f\"Model selected: {type(model)}\")\n",
    "    # The dropdown hands us the enum *name* as a string, so map it back to the enum\n",
    "    if isinstance(model, str):\n",
    "        try:\n",
    "            model = Enum_Model[model.upper()]\n",
    "            print(f\"Model selected: {model}\")\n",
    "        except KeyError:\n",
    "            raise ValueError(f\"Unknown model: {model}\")\n",
    "\n",
    "    # Pick the endpoint that serves the selected model; Ollama exposes an OpenAI-compatible API\n",
    "    if model in (Enum_Model.LLAMA, Enum_Model.GPT_OSS):\n",
    "        client = OpenAI(base_url=\"http://localhost:11434/v1\")\n",
    "    elif model == Enum_Model.GPT:\n",
    "        client = OpenAI()\n",
    "    elif model == Enum_Model.SONNET:\n",
    "        client = OpenAI(base_url=\"https://api.anthropic.com/v1/\", api_key=anthropic_api_key)\n",
    "\n",
    "    history = [{\"role\": h[\"role\"], \"content\": h[\"content\"]} for h in history]\n",
    "    messages = [{\"role\": \"system\", \"content\": system_prompt}] + history\n",
    "\n",
    "    # Placeholder assistant message that we fill in as tokens stream back\n",
    "    history.append({\"role\": \"assistant\", \"content\": \"\"})\n",
    "\n",
    "    response = client.chat.completions.create(\n",
    "        model=model,\n",
    "        messages=messages,\n",
    "        tools=tools,\n",
    "        stream=True\n",
    "    )\n",
    "\n",
    "    # Accumulate streamed tool-call fragments by index; arguments arrive in pieces\n",
    "    tool_calls = {}\n",
    "    finish_reason = None\n",
    "\n",
    "    for chunk in response:\n",
    "        delta = chunk.choices[0].delta\n",
    "        finish_reason = chunk.choices[0].finish_reason\n",
    "\n",
    "        if hasattr(delta, 'content') and delta.content:\n",
    "            history[-1]['content'] += delta.content\n",
    "            yield history\n",
    "\n",
    "        if hasattr(delta, 'tool_calls') and delta.tool_calls:\n",
    "            for tool_call_delta in delta.tool_calls:\n",
    "                idx = tool_call_delta.index\n",
    "\n",
    "                if idx not in tool_calls:\n",
    "                    tool_calls[idx] = {\n",
    "                        \"id\": \"\",\n",
    "                        \"type\": \"function\",\n",
    "                        \"function\": {\"name\": \"\", \"arguments\": \"\"}\n",
    "                    }\n",
    "\n",
    "                if tool_call_delta.id:\n",
    "                    tool_calls[idx][\"id\"] = tool_call_delta.id\n",
    "                if tool_call_delta.type:\n",
    "                    tool_calls[idx][\"type\"] = tool_call_delta.type\n",
    "                if hasattr(tool_call_delta, 'function') and tool_call_delta.function:\n",
    "                    if tool_call_delta.function.name:\n",
    "                        tool_calls[idx][\"function\"][\"name\"] = tool_call_delta.function.name\n",
    "                    if tool_call_delta.function.arguments:\n",
    "                        tool_calls[idx][\"function\"][\"arguments\"] += tool_call_delta.function.arguments\n",
    "\n",
    "    if finish_reason == \"tool_calls\":\n",
    "        # Rebuild lightweight objects so handle_tool_calls can treat them like SDK tool calls\n",
    "        tool_call_objects = [\n",
    "            SimpleNamespace(\n",
    "                id=tool_calls[idx][\"id\"],\n",
    "                type=tool_calls[idx][\"type\"],\n",
    "                function=SimpleNamespace(\n",
    "                    name=tool_calls[idx][\"function\"][\"name\"],\n",
    "                    arguments=tool_calls[idx][\"function\"][\"arguments\"]\n",
    "                )\n",
    "            )\n",
    "            for idx in sorted(tool_calls.keys())\n",
    "        ]\n",
    "\n",
    "        message_obj = SimpleNamespace(tool_calls=tool_call_objects)\n",
    "        print(message_obj)\n",
    "        tool_responses = handle_tool_calls(message_obj)\n",
    "\n",
    "        assistant_message = {\n",
    "            \"role\": \"assistant\",\n",
    "            \"content\": None,\n",
    "            \"tool_calls\": [tool_calls[idx] for idx in sorted(tool_calls.keys())]\n",
    "        }\n",
    "\n",
    "        messages.append(assistant_message)\n",
    "        messages.extend(tool_responses)\n",
    "\n",
    "        # Drop the empty placeholder bubble if the model went straight to a tool call\n",
    "        if history[-1][\"content\"] == \"\":\n",
    "            history.pop()\n",
    "\n",
    "        for tool_response in tool_responses:\n",
    "            history.append({\n",
    "                \"role\": \"assistant\",\n",
    "                \"content\": tool_response[\"content\"]\n",
    "            })\n",
    "\n",
    "        print('--------------------------------')\n",
    "        print('history', history)\n",
    "        print('--------------------------------')\n",
    "\n",
    "        yield history"
   ]
  },
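  {
   "cell_type": "markdown",
   "id": "chat-driver-note",
   "metadata": {},
   "source": [
    "A small sketch (added for illustration) that drives the `chat()` generator directly, without Gradio, which makes the streaming behaviour easier to debug. The test question is a hypothetical example.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "chat-driver-cell",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Consume the generator the way Gradio would: each yield is the full updated history\n",
    "test_history = [{\"role\": \"user\", \"content\": \"What does list(range(3)) return?\"}]\n",
    "final = None\n",
    "for final in chat(test_history, \"GPT\"):\n",
    "    pass  # each intermediate value is a partial transcript; keep only the last\n",
    "if final:\n",
    "    print(final[-1][\"content\"])"
   ]
  },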
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "35828826",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "* Running on local URL: http://127.0.0.1:7874\n",
      "* To create a public link, set `share=True` in `launch()`.\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<div><iframe src=\"http://127.0.0.1:7874/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": []
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model selected: <class 'str'>\n",
      "Model selected: gpt-4o-mini\n"
     ]
    }
   ],
   "source": [
    "# Callbacks (along with the chat() function above)\n",
    "\n",
    "def put_message_in_chatbot(message, history):\n",
    "    # Clear the textbox and append the user's message to the chat history\n",
    "    history = [{\"role\": h[\"role\"], \"content\": h[\"content\"]} for h in history]\n",
    "    return \"\", history + [{\"role\": \"user\", \"content\": message}]\n",
    "\n",
    "# UI definition\n",
    "\n",
    "with gr.Blocks() as ui:\n",
    "    with gr.Row():\n",
    "        model_dropdown = gr.Dropdown(choices=[\"GPT\", \"GPT_OSS\", \"LLAMA\", \"SONNET\"], value=\"GPT\", label=\"Model\")\n",
    "    with gr.Row():\n",
    "        chatbot = gr.Chatbot(height=500, type=\"messages\")\n",
    "        audio_output = gr.Audio(autoplay=True)\n",
    "    with gr.Row():\n",
    "        message = gr.Textbox(label=\"Chat with our AI Assistant:\")\n",
    "\n",
    "    # Hooking up events to callbacks\n",
    "    message.submit(\n",
    "        put_message_in_chatbot,\n",
    "        inputs=[message, chatbot],\n",
    "        outputs=[message, chatbot]\n",
    "    ).then(\n",
    "        chat,\n",
    "        inputs=[chatbot, model_dropdown],\n",
    "        outputs=[chatbot]\n",
    "    ).then(\n",
    "        read_text_to_speech,\n",
    "        inputs=chatbot,\n",
    "        outputs=audio_output\n",
    "    )\n",
    "\n",
    "ui.launch(inbrowser=True)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": ".venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}