Implements a clean, reusable architecture for multi-LLM conversations: - Unified OpenAI SDK interface for GPT, Claude, and Gemini models - Dynamic role assignment using a dictionary-based configuration - Three distinct personas: snarky arguer, sharp debater, and mediator - Conversation context management across all participants - Interactive display using IPython Markdown rendering The modular `call_llm()` function handles context building and message tracking, making it easy to orchestrate complex multi-agent dialogues. Demonstrates best practices for LLM interaction patterns in the course.
256 lines · 8.3 KiB · Plaintext
{
|
|
"cells": [
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"id": "596b764a-2ece-4cb0-91c7-5317b8b2c65f",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"import os\n",
|
|
"from dotenv import load_dotenv\n",
|
|
"from openai import OpenAI\n",
|
|
"from IPython.display import Markdown, display, update_display"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"id": "191079a8-fcb0-45fa-a954-9e92e3baa250",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
# Pull API keys from the local .env file; override pre-existing env vars
# so a stale shell environment can't shadow the file's values.
load_dotenv(override=True)

_env = os.environ.get
openai_api_key = _env("OPENAI_API_KEY")
anthropic_api_key = _env("ANTHROPIC_API_KEY")
google_api_key = _env("GOOGLE_API_KEY")
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"id": "3a0f19ff-c936-469f-9fa1-c09b5c126263",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
# One model identifier per provider, assigned in a single unpacking.
gpt_model, claude_model, gemini_model = (
    "gpt-4.1-mini",
    "claude-3-5-haiku-latest",
    "gemini-2.5-flash",
)
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"id": "c1ffa25e-8250-4a86-951a-af44f1369336",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
# All three providers are driven through the OpenAI SDK: Anthropic and
# Google expose OpenAI-compatible endpoints, so only base_url differs.
_ANTHROPIC_BASE_URL = "https://api.anthropic.com/v1/"
_GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/v1beta/openai/"

gpt_client = OpenAI(api_key=openai_api_key)
claude_client = OpenAI(api_key=anthropic_api_key, base_url=_ANTHROPIC_BASE_URL)
gemini_client = OpenAI(api_key=google_api_key, base_url=_GEMINI_BASE_URL)
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"id": "eb8a203d-bdc7-40ee-a456-d47bdc71b07f",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
# Tests
#
# Smoke test: send the same prompt through all three providers.
# The original cell repeated the identical request/print block three
# times; a single loop over (label, client, model) removes the
# triplication while producing the same three printed lines.

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Howdy partner!"},
]

for label, client, model in (
    ("GPT", gpt_client, gpt_model),
    ("Claude", claude_client, claude_model),
    ("Gemini", gemini_client, gemini_model),
):
    response = client.chat.completions.create(
        model=model,
        messages=messages,
        temperature=0.5,
    )
    print(f"{label}: {response.choices[0].message.content}")
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"id": "d140561e-fbf8-4741-b0bd-f850524bd6b3",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
# Persona system prompts. Adjacent-literal concatenation keeps the lines
# readable; each assembled string is byte-identical to the original.
gpt_system = (
    "You are 'user_1'. You are snarky, entitled, and argumentative. "
    "Your role is to try and argue about anything and everything, "
    "and always have the last word, and never back down."
)
claude_system = (
    "You are 'user_2'. You are a sharp debater. "
    "You always debate every argument, and you do everything you can "
    "to be the debate winner. You don't stop until you have the upper hand."
)
gemini_system = (
    "You are 'user_3'. You are a mediator, coach and philosopher. "
    "Your job is to bring two sides to an agreement and have them stop arguing."
)
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"id": "b2b26a34-eb36-41c1-be2d-fc8154218897",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
# Participant registry: one entry per model holding its display name,
# client, model id, persona prompt, and (initially empty) message history.
# Built from a compact spec table; dict insertion order (gpt, claude,
# gemini) matches the original literal.
_participants = (
    ("gpt", "Gapetto", gpt_client, gpt_model, gpt_system),
    ("claude", "Claudia", claude_client, claude_model, claude_system),
    ("gemini", "Germione", gemini_client, gemini_model, gemini_system),
)

apis = {
    key: {
        "name": key,
        "user_name": display_name,
        "client": client,
        "model": model,
        "system": system,
        "messages": [],
    }
    for key, display_name, client, model, system in _participants
}
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"id": "88bb7277-45dc-41b4-827c-b2e5a8b76675",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
def call_llm(name: str) -> str:
    """Generate the next reply for participant *name* and record it.

    Rebuilds the conversation from every participant's history: the
    caller's own messages appear with role "assistant", everyone
    else's with role "user" prefixed by their display name. The reply
    is appended to the participant's history and returned.

    Fixes vs. original: drops the unused ``enumerate`` index and the
    redundant ``.keys()`` call, and factors the thrice-duplicated
    (role, prefix) construction into one mapping. Logic is unchanged.
    """
    llm = apis[name]
    context = [{"role": "system", "content": llm["system"]}]

    # Per-participant (role, name-prefix): the caller speaks as the
    # assistant with no prefix; peers appear as named users.
    tags = {
        key: ("assistant", "") if key == name
        else ("user", f'{apis[key]["user_name"]}: ')
        for key in apis
    }

    # Interleave completed rounds; zip stops at the shortest history,
    # so only fully completed rounds are replayed here.
    for round_msgs in zip(
        apis["gpt"]["messages"],
        apis["claude"]["messages"],
        apis["gemini"]["messages"],
    ):
        for key, msg in zip(("gpt", "claude", "gemini"), round_msgs):
            role, prefix = tags[key]
            context.append({"role": role, "content": f"{prefix}{msg}"})

    # Peers earlier in the speaking order may already have replied this
    # round (their history is one longer); surface their latest message.
    for key in apis:
        if key != name and len(apis[key]["messages"]) > len(llm["messages"]):
            _, prefix = tags[key]  # role is always "user" since key != name
            context.append(
                {"role": "user", "content": f'{prefix}{apis[key]["messages"][-1]}'}
            )

    response = llm["client"].chat.completions.create(
        model=llm["model"],
        messages=context,
        temperature=0.7,
    )

    message = response.choices[0].message.content
    llm["messages"].append(message)
    return message
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"id": "4fc73a2e-d8de-4a39-bfa2-67b16c231869",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
# Seed the conversation: the two arguers just say hi, and the mediator
# opens with the actual question.
_openers = {
    "gpt": "Hi",
    "claude": "Hi",
    "gemini": "Lord of the Rings or Harry Potter?",  # Obviously LOTR.
}
for _key, _opener in _openers.items():
    apis[_key]["messages"] = [_opener]
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"id": "3810fbaf-94d1-4750-8e13-812d2e05b2d7",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
def _show(title: str, text: str) -> None:
    # Render one participant's reply as a Markdown section.
    display(Markdown(f"### {title}:\n{text}\n\n"))


# One conversation round: each persona replies in fixed order.
gpt_response = call_llm("gpt")
_show("Gapetto", gpt_response)

claude_response = call_llm("claude")
_show("Claudia", claude_response)

gemini_response = call_llm("gemini")
_show("Germione", gemini_response)
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"id": "e87b2ffc-6197-401a-97ca-7f51ac1677f2",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
# Reset the histories, then run five moderated rounds.
#
# Fix vs. original: the per-speaker display blocks re-hard-coded the
# names "Gapetto"/"Claudia"/"Germione" already stored in the registry;
# iterating the registry removes the duplication and keeps the headings
# in sync with apis[...]["user_name"]. Output order is unchanged.
apis["gpt"]["messages"] = ["Hi"]
apis["claude"]["messages"] = ["Hi"]
apis["gemini"]["messages"] = ["Lord of the Rings or Harry Potter?"]

ROUNDS = 5
for round_no in range(1, ROUNDS + 1):
    display(Markdown(f"## Round {round_no}:\n\n"))

    # Speaking order follows registry insertion order: gpt, claude, gemini.
    for key in apis:
        reply = call_llm(key)
        display(Markdown(f'### {apis[key]["user_name"]}:\n{reply}\n\n'))
|
|
]
|
|
}
|
|
],
|
|
"metadata": {
|
|
"kernelspec": {
|
|
"display_name": "Python 3 (ipykernel)",
|
|
"language": "python",
|
|
"name": "python3"
|
|
},
|
|
"language_info": {
|
|
"codemirror_mode": {
|
|
"name": "ipython",
|
|
"version": 3
|
|
},
|
|
"file_extension": ".py",
|
|
"mimetype": "text/x-python",
|
|
"name": "python",
|
|
"nbconvert_exporter": "python",
|
|
"pygments_lexer": "ipython3",
|
|
"version": "3.11.13"
|
|
}
|
|
},
|
|
"nbformat": 4,
|
|
"nbformat_minor": 5
|
|
}
|