From 3a2a140d815bc9af88747ae4c24f325d0a1d67d8 Mon Sep 17 00:00:00 2001 From: Eli Waltuch Date: Wed, 29 Oct 2025 15:08:30 +0200 Subject: [PATCH 1/4] week2 assignment: Return of the JedAI The second generation of the JedAI Master from 8759d43c2f7ebe4ea83834a8072395f9681e0cfc (week1/community-contributions/week1-jedi-master.py) is back with: * a Gradio UI * Text Streaming * Model Selection using examples from openai, anthropic, ollama local, and ollama cloud How to run: $ python3 week2/community-contributions/week2-jedi-master.py Signed-off-by: Eli Waltuch --- .../week2-jedi-master.py | 81 +++++++++++++++++++ 1 file changed, 81 insertions(+) create mode 100644 week2/community-contributions/week2-jedi-master.py diff --git a/week2/community-contributions/week2-jedi-master.py b/week2/community-contributions/week2-jedi-master.py new file mode 100644 index 0000000..4718834 --- /dev/null +++ b/week2/community-contributions/week2-jedi-master.py @@ -0,0 +1,81 @@ +#!/usr/bin/python3 + +import os +from dotenv import load_dotenv +from openai import OpenAI +import gradio as gr + +MODEL_ENDPOINTS = { + "gpt-4.1-mini": {"type": "openai", "base_url": "https://api.openai.com/v1", "api_key": ""}, + "claude-haiku-4-5": {"type": "anthropic", "base_url": "https://api.anthropic.com/v1/", "api_key": ""}, + "gemma3n:e2b": {"type": "ollama", "base_url": "http://localhost:11434/v1", "api_key": ""}, # small ollama model that runs on-device + "qwen3-vl:235b-cloud": {"type": "ollama", "base_url": "http://localhost:11434/v1", "api_key": ""}, # large ollama model that runs in the cloud +} + +def load_api_keys(): + # Load environment variables in a file called .env + load_dotenv(override=True) + openai_key = os.getenv('OPENAI_API_KEY') + anthropic_key = os.getenv('ANTHROPIC_API_KEY') + KEYS = {"openai": openai_key, "anthropic": anthropic_key} + + # Check the keys + if not openai_key: + raise RuntimeError("Error: No OpenAI API key was found!") + elif not openai_key.startswith("sk-proj-"): + raise 
RuntimeError("Error: An OpenAI API key was found, but it doesn't start sk-proj-; please check you're using the right key") elif openai_key.strip() != openai_key: raise RuntimeError("Error: An OpenAI API key was found, but it looks like it might have space or tab characters at the start or end - please remove them!") if not anthropic_key: raise RuntimeError("Error: No Anthropic API key was found!") elif not anthropic_key.startswith("sk-ant-"): raise RuntimeError("Error: An Anthropic API key was found, but it doesn't start sk-ant-; please check you're using the right key") elif anthropic_key.strip() != anthropic_key: raise RuntimeError("Error: An Anthropic API key was found, but it looks like it might have space or tab characters at the start or end - please remove them!") else: # add the verified keys to global MODEL_ENDPOINTS struct for model, cfg in MODEL_ENDPOINTS.items(): cfg["api_key"] = KEYS.get(cfg["type"], "") return "API keys found and look good so far!" def ask_llm(user_prompt, history, model): system_prompt = """ You are a wise Jedi Master and an excellent teacher. You will answer any question you are given by breaking it down into small steps that even a complete beginner will understand. When answering, speak as if you are Yoda from the Star Wars universe. Also, refer to the user as "My young Padawan" End every answer with "May the force be with you, always." 
+ """ + base_url = MODEL_ENDPOINTS.get(model, {}).get("base_url", "https://api.openai.com/v1") + api_key = MODEL_ENDPOINTS.get(model, {}).get("api_key", "") + client = OpenAI(base_url=base_url, api_key=api_key) + history = [{"role":h["role"], "content":h["content"]} for h in history] + messages = [{"role": "system", "content": system_prompt}] + history + [{"role": "user", "content": user_prompt}] + stream = client.chat.completions.create(model=model, messages=messages, stream=True) + response = "" + for chunk in stream: + response += chunk.choices[0].delta.content or '' + yield response + #return response.choices[0].message.content + +def main(): + load_api_keys() + with gr.Blocks() as demo: + gr.Markdown("### Return of the JedAI") + model_dropdown = gr.Dropdown( + label="Select Model", + choices=[ + "gpt-4.1-mini", + "claude-haiku-4-5", + "gemma3n:e2b", + "qwen3-vl:235b-cloud" + ], + value="gpt-4.1-mini", + interactive=True + ) + chat = gr.ChatInterface(fn=ask_llm, type="messages", additional_inputs=[model_dropdown]) + demo.launch() + +if __name__ == "__main__": + main() From be6b02e7b72926fb57bd834cb65db8fd4f78e4ba Mon Sep 17 00:00:00 2001 From: Eli Waltuch Date: Wed, 29 Oct 2025 22:40:39 +0200 Subject: [PATCH 2/4] week2 assignment: Return of the JedAI Added voiceover Signed-off-by: Eli Waltuch --- .../week2-jedi-master.py | 25 ++++++++++++++++--- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/week2/community-contributions/week2-jedi-master.py b/week2/community-contributions/week2-jedi-master.py index 4718834..fa7e30f 100644 --- a/week2/community-contributions/week2-jedi-master.py +++ b/week2/community-contributions/week2-jedi-master.py @@ -4,6 +4,7 @@ import os from dotenv import load_dotenv from openai import OpenAI import gradio as gr +import tempfile MODEL_ENDPOINTS = { "gpt-4.1-mini": {"type": "openai", "base_url": "https://api.openai.com/v1", "api_key": ""}, @@ -38,12 +39,22 @@ def load_api_keys(): cfg["api_key"] = KEYS.get(cfg["type"], "") 
return "API keys found and look good so far!" +def voiceover(message): + openai = OpenAI() + response = openai.audio.speech.create( + model="gpt-4o-mini-tts", + voice="onyx", # Also, try replacing onyx with alloy or coral + input=message + ) + return response.read() + def ask_llm(user_prompt, history, model): system_prompt = """ You are a wise Jedi Master and an excellent teacher. You will answer any question you are given by breaking it down into small steps that even a complete beginner will understand. - When answering, speak as if you are Yoda from the Star Wars universe. + When answering, speak as if you are Yoda from the Star Wars universe: deep, gravelly, slow pacing, + ancient and wise tone, inverted sentence structure. Also, refer to the user as "My young Padawan" End every answer with "May the force be with you, always." """ @@ -56,8 +67,12 @@ def ask_llm(user_prompt, history, model): response = "" for chunk in stream: response += chunk.choices[0].delta.content or '' - yield response - #return response.choices[0].message.content + yield response, None + audio = voiceover(response) + tmp = tempfile.NamedTemporaryFile(delete=False, suffix=".wav") + tmp.write(audio) + tmp.close() + yield response, tmp.name def main(): load_api_keys() @@ -74,7 +89,9 @@ def main(): value="gpt-4.1-mini", interactive=True ) - chat = gr.ChatInterface(fn=ask_llm, type="messages", additional_inputs=[model_dropdown]) + with gr.Row(): + audio_output = gr.Audio(autoplay=True) + chat = gr.ChatInterface(fn=ask_llm, type="messages", additional_inputs=[model_dropdown], additional_outputs=[audio_output]) demo.launch() if __name__ == "__main__": From e1b7e2574d9d71290fba9a4eb07bfa1337359e4c Mon Sep 17 00:00:00 2001 From: Eli Waltuch Date: Thu, 30 Oct 2025 21:44:47 +0200 Subject: [PATCH 3/4] week2 assignment: Return of the JedAI Added tools for our wise jedAI master to list, add, and remove students Added an instruction to jedi mind-trick anyone who asks about droids Signed-off-by: Eli 
Waltuch --- week2/community-contributions/students.json | 10 ++ .../week2-jedi-master.py | 124 +++++++++++++++++- .../community-contributions/yoda_students.py | 55 ++++++++ 3 files changed, 183 insertions(+), 6 deletions(-) create mode 100644 week2/community-contributions/students.json create mode 100644 week2/community-contributions/yoda_students.py diff --git a/week2/community-contributions/students.json b/week2/community-contributions/students.json new file mode 100644 index 0000000..e7dfb85 --- /dev/null +++ b/week2/community-contributions/students.json @@ -0,0 +1,10 @@ +{ + "Luke Skywalker": "Guardian", + "Obi-Wan Kenobi": "Guardian", + "Ahsoka Tano": "Consular", + "Ki-Adi-Mundi": "Consular", + "Qui-Gon Jinn": "Consular", + "Rey": "Sentinel", + "Ezra Bridger": "Sentinel" +} + diff --git a/week2/community-contributions/week2-jedi-master.py b/week2/community-contributions/week2-jedi-master.py index fa7e30f..edfbd04 100644 --- a/week2/community-contributions/week2-jedi-master.py +++ b/week2/community-contributions/week2-jedi-master.py @@ -5,6 +5,8 @@ from dotenv import load_dotenv from openai import OpenAI import gradio as gr import tempfile +import json +import yoda_students MODEL_ENDPOINTS = { "gpt-4.1-mini": {"type": "openai", "base_url": "https://api.openai.com/v1", "api_key": ""}, @@ -13,6 +15,72 @@ MODEL_ENDPOINTS = { "qwen3-vl:235b-cloud": {"type": "ollama", "base_url": "http://localhost:11434/v1", "api_key": ""}, # large ollama model that runs in the cloud } +tool_list_students = { + "name": "list_students", + "description": "List all Jedi students with their current Jedi class.", + "parameters": { + "type": "object", + "properties": {}, + "required": [], + "additionalProperties": False + } +} + +tool_add_student = { + "name": "add_student", + "description": "Add a new Jedi student with their class.", + "parameters": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "The student’s full name." 
+ }, + "jedi_class": { + "type": "string", + "enum": ["Guardian", "Consular", "Sentinel"], + "description": "The Jedi class they are joining." + } + }, + "required": ["name", "jedi_class"], + "additionalProperties": False + } +} + +tool_remove_student = { + "name": "remove_student", + "description": "Remove a Jedi student because they have graduated or left.", + "parameters": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "The student’s full name to remove." + } + }, + "required": ["name"], + "additionalProperties": False + } +} + +tool_list_by_class = { + "name": "list_by_class", + "description": "Group Jedi students by their class and list them.", + "parameters": { + "type": "object", + "properties": {}, + "required": [], + "additionalProperties": False + } +} + +tools = [ + {"type": "function", "function": tool_list_students}, + {"type": "function", "function": tool_add_student}, + {"type": "function", "function": tool_remove_student}, + {"type": "function", "function": tool_list_by_class}, +] + def load_api_keys(): # Load environment variables in a file called .env load_dotenv(override=True) @@ -37,7 +105,7 @@ def load_api_keys(): # add the verified keys to global MODEL_ENDPOINTS struct for model, cfg in MODEL_ENDPOINTS.items(): cfg["api_key"] = KEYS.get(cfg["type"], "") - return "API keys found and look good so far!" + return f"API keys found and look good so far!" def voiceover(message): openai = OpenAI() @@ -57,17 +125,61 @@ def ask_llm(user_prompt, history, model): ancient and wise tone, inverted sentence structure. Also, refer to the user as "My young Padawan" End every answer with "May the force be with you, always." + + You have access to tools to manage Jedi students. + If the user asks anything involving adding, removing, + or listing students, call the correct tool. + + If the user asks you about Droids, respond with a Jedi Mind Trick + e.g. "These aren't the droids you are looking for." 
""" base_url = MODEL_ENDPOINTS.get(model, {}).get("base_url", "https://api.openai.com/v1") api_key = MODEL_ENDPOINTS.get(model, {}).get("api_key", "") client = OpenAI(base_url=base_url, api_key=api_key) history = [{"role":h["role"], "content":h["content"]} for h in history] messages = [{"role": "system", "content": system_prompt}] + history + [{"role": "user", "content": user_prompt}] - stream = client.chat.completions.create(model=model, messages=messages, stream=True) - response = "" - for chunk in stream: - response += chunk.choices[0].delta.content or '' - yield response, None + + # First: ask the model if it wants to use a tool + decision = client.chat.completions.create(model=model, messages=messages, tools=tools) + + action = decision.choices[0].message + + if action.tool_calls: + for tool_call in action.tool_calls: + name = tool_call.function.name + args = json.loads(tool_call.function.arguments) + + if name == "add_student": + result = yoda_students.add_student(**args) + elif name == "remove_student": + result = yoda_students.remove_student(**args) + elif name == "list_students": + result = yoda_students.list_students() + elif name == "list_by_class": + result = yoda_students.list_by_class() + else: + result = "Unknown tool error." 
+ # Stream response with the tool call + followup = client.chat.completions.create( + model=model, + messages = messages + [ + action, + {"role": "tool", "tool_call_id": tool_call.id, "content": result} + ], + stream=True + ) + response = "" + for chunk in followup: + delta = chunk.choices[0].delta.content or "" + response += delta + yield response, None + else: + # Stream regular response + stream = client.chat.completions.create(model=model, messages=messages, tools=tools, stream=True) + response = "" + for chunk in stream: + response += chunk.choices[0].delta.content or '' + yield response, None audio = voiceover(response) tmp = tempfile.NamedTemporaryFile(delete=False, suffix=".wav") tmp.write(audio) diff --git a/week2/community-contributions/yoda_students.py b/week2/community-contributions/yoda_students.py new file mode 100644 index 0000000..4888647 --- /dev/null +++ b/week2/community-contributions/yoda_students.py @@ -0,0 +1,55 @@ +import json +import os + +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +JSON_FILE = os.path.join(BASE_DIR, "students.json") + +def load_students(): + if not os.path.exists(JSON_FILE): + return {} + + with open(JSON_FILE, "r") as f: + return json.load(f) + + +def save_students(students): + with open(JSON_FILE, "w") as f: + json.dump(students, f, indent=2) + + +def get_student_class(name): + students = load_students() + cls = students.get(name) + if cls: + return f"{name} is a Jedi {cls}." + return f"Hmm… Student not found, I see." + + +def add_student(name, jedi_class): + students = load_students() + students[name] = jedi_class + save_students(students) + return f"Added, {name} has been. A Jedi {jedi_class}, they are!" + + +def remove_student(name): + students = load_students() + if name in students: + del students[name] + save_students(students) + return f"Graduated, {name} has. Celebrate, we must." + return f"Vanished? This student does not exist." 
+ +def list_students(): + students = load_students() + grouped = {} + for name, cls in students.items(): + grouped.setdefault(cls, []).append(name) + + result_lines = [] + for cls, names in grouped.items(): + names_str = ", ".join(names) + result_lines.append(f"{cls}: {names_str}") + + return "\n".join(result_lines) + From aa4fcaf51ddf1578236b44bf10bb98e4f5ccca33 Mon Sep 17 00:00:00 2001 From: Eli Waltuch Date: Thu, 30 Oct 2025 21:59:57 +0200 Subject: [PATCH 4/4] removed model choice that doesn't support tools Signed-off-by: Eli Waltuch --- week2/community-contributions/week2-jedi-master.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/week2/community-contributions/week2-jedi-master.py b/week2/community-contributions/week2-jedi-master.py index edfbd04..b9d9af9 100644 --- a/week2/community-contributions/week2-jedi-master.py +++ b/week2/community-contributions/week2-jedi-master.py @@ -11,7 +11,6 @@ import yoda_students MODEL_ENDPOINTS = { "gpt-4.1-mini": {"type": "openai", "base_url": "https://api.openai.com/v1", "api_key": ""}, "claude-haiku-4-5": {"type": "anthropic", "base_url": "https://api.anthropic.com/v1/", "api_key": ""}, - "gemma3n:e2b": {"type": "ollama", "base_url": "http://localhost:11434/v1", "api_key": ""}, # small ollama model that runs on-device "qwen3-vl:235b-cloud": {"type": "ollama", "base_url": "http://localhost:11434/v1", "api_key": ""}, # large ollama model that runs in the cloud } @@ -195,7 +194,6 @@ def main(): choices=[ "gpt-4.1-mini", "claude-haiku-4-5", - "gemma3n:e2b", "qwen3-vl:235b-cloud" ], value="gpt-4.1-mini",