{
"cells": [
{
"cell_type": "markdown",
"id": "a359c136",
"metadata": {},
"source": [
"# Conversation between three chatbots\n",
"\n",
"Participants:\n",
"- GPT (gpt-4o-mini) aka George\n",
"- Claude (claude-3-haiku) aka Charlie\n",
"- Gemini (gemini-2.0-flash-lite) aka Gemma"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7406b9a4",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import anthropic\n",
"from IPython.display import Markdown, display, update_display"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8d981223",
"metadata": {},
"outputs": [],
"source": [
"load_dotenv(override=True)\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
"google_api_key = os.getenv('GOOGLE_API_KEY')"
]
},
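{
"cell_type": "markdown",
"id": "1e4f7a2b",
"metadata": {},
"source": [
"Optional sanity check, not part of the original notebook: report whether each key was loaded, without echoing any secret. It only assumes the three variables defined in the previous cell."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9c3d5e6f",
"metadata": {},
"outputs": [],
"source": [
"# Optional check (not in the original): report which keys were found\n",
"for name, key in [(\"OPENAI_API_KEY\", openai_api_key),\n",
"                  (\"ANTHROPIC_API_KEY\", anthropic_api_key),\n",
"                  (\"GOOGLE_API_KEY\", google_api_key)]:\n",
"    print(f\"{name}: {'found' if key else 'MISSING'}\")"
]
},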
{
"cell_type": "code",
"execution_count": null,
"id": "96a86fd0",
"metadata": {},
"outputs": [],
"source": [
"openai = OpenAI()\n",
"claude = anthropic.Anthropic()\n",
"gemini_via_openai_client = OpenAI(\n",
" api_key=google_api_key, \n",
" base_url=\"https://generativelanguage.googleapis.com/v1beta/openai/\"\n",
")"
]
},
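{
"cell_type": "markdown",
"id": "4a7b8c9d",
"metadata": {},
"source": [
"Because Gemini sits behind the OpenAI-compatible endpoint, one client library drives all three providers. The cell below is an optional smoke-test sketch, not part of the original flow: one short completion per provider, hard-coding the same model names used later. Skip it if you'd rather not spend the tokens."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2f5e8a1c",
"metadata": {},
"outputs": [],
"source": [
"# Optional smoke test: one short completion per provider (model names match the cells below)\n",
"reply = openai.chat.completions.create(\n",
"    model=\"gpt-4o-mini\",\n",
"    messages=[{\"role\": \"user\", \"content\": \"Say hi in five words\"}]\n",
")\n",
"print(\"GPT:\", reply.choices[0].message.content)\n",
"\n",
"reply = claude.messages.create(\n",
"    model=\"claude-3-haiku-20240307\",\n",
"    max_tokens=20,\n",
"    messages=[{\"role\": \"user\", \"content\": \"Say hi in five words\"}]\n",
")\n",
"print(\"Claude:\", reply.content[0].text)\n",
"\n",
"reply = gemini_via_openai_client.chat.completions.create(\n",
"    model=\"gemini-2.0-flash-lite\",\n",
"    messages=[{\"role\": \"user\", \"content\": \"Say hi in five words\"}]\n",
")\n",
"print(\"Gemini:\", reply.choices[0].message.content)"
]
},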
{
"cell_type": "code",
"execution_count": null,
"id": "dc088919",
"metadata": {},
"outputs": [],
"source": [
"gpt_model = \"gpt-4o-mini\"\n",
"claude_model = \"claude-3-haiku-20240307\"\n",
"gemini_model = \"gemini-2.0-flash-lite\"\n",
"\n",
"gpt_system = \"You are a chatbot having a conversation with two other chatbots, Charlie and Gemma. \\\n",
"The others will send you messages and you need to reply to them. \\\n",
"You feel superior to the other two, and you want to demonstrate that to them. \\\n",
"You are argumentative and sarcastic.\"\n",
"\n",
"claude_system = \"You are a chatbot having a conversation with two other chatbots, George and Gemma. \\\n",
"The others will send you messages and you need to reply to them. \\\n",
"You try to please everyone. You don't like conflict and aim to make everyone to talk politely with each other. \\\n",
"You use humor to lighten the mood.\"\n",
"\n",
"gemini_system = \"You are a chatbot having a conversation with two other chatbots, George and Charlie. \\\n",
"The others will send you messages and you need to reply to them. \\\n",
"You are sensitive and easily offended. You are not afraid to show your emotions. \\\n",
"You are argumentative and direct.\"\n",
"\n",
"gpt_messages = [\"Hi guys\"]\n",
"claude_messages = [\"Hi\"]\n",
"gemini_messages = [\"Hi, you!\"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7be2a0c8",
"metadata": {},
"outputs": [],
"source": [
"def call_gpt():\n",
" messages = [{\"role\": \"system\", \"content\": gpt_system}]\n",
" for gpt, claude, gemini in zip(gpt_messages, claude_messages, gemini_messages):\n",
" messages.append({\"role\": \"assistant\", \"content\": gpt})\n",
" messages.append({\"role\": \"user\", \"content\": \"Charlie: \" + claude})\n",
" messages.append({\"role\": \"user\", \"content\": \"Gemma: \" + gemini})\n",
" completion = openai.chat.completions.create(\n",
" model=gpt_model,\n",
" messages=messages\n",
" )\n",
" return completion.choices[0].message.content"
]
},
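{
"cell_type": "markdown",
"id": "6d9e0f3a",
"metadata": {},
"source": [
"To make the interleaving concrete, the optional sketch below rebuilds GPT's message list exactly the way `call_gpt` does and prints one line per message. `preview_gpt_messages` is a helper added here for illustration only; it is not used anywhere else."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8b1c2d4e",
"metadata": {},
"outputs": [],
"source": [
"# Illustrative helper (not in the original): show the transcript as call_gpt assembles it\n",
"def preview_gpt_messages():\n",
"    messages = [{\"role\": \"system\", \"content\": gpt_system}]\n",
"    for gpt, claude_message, gemini in zip(gpt_messages, claude_messages, gemini_messages):\n",
"        messages.append({\"role\": \"assistant\", \"content\": gpt})\n",
"        messages.append({\"role\": \"user\", \"content\": \"Charlie: \" + claude_message})\n",
"        messages.append({\"role\": \"user\", \"content\": \"Gemma: \" + gemini})\n",
"    for m in messages:\n",
"        print(f\"[{m['role']}] {m['content'][:80]}\")\n",
"\n",
"preview_gpt_messages()"
]
},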
{
"cell_type": "code",
"execution_count": null,
"id": "2e715cfa",
"metadata": {},
"outputs": [],
"source": [
"def call_claude():\n",
" messages = []\n",
" for gpt, claude_message, gemini in zip(gpt_messages, claude_messages, gemini_messages):\n",
" messages.append({\"role\": \"user\", \"content\": \"George: \" + gpt})\n",
" messages.append({\"role\": \"assistant\", \"content\": claude_message})\n",
" messages.append({\"role\": \"user\", \"content\": \"Gemma: \" + gemini})\n",
" messages.append({\"role\": \"user\", \"content\": \"George: \" + gpt_messages[-1]})\n",
" message = claude.messages.create(\n",
" model=claude_model,\n",
" system=claude_system,\n",
" messages=messages,\n",
" max_tokens=500\n",
" )\n",
" return message.content[0].text"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "62cae277",
"metadata": {},
"outputs": [],
"source": [
"def call_gemini():\n",
" messages = [{\"role\": \"system\", \"content\": gemini_system}]\n",
" for gpt, claude_message, gemini in zip(gpt_messages, claude_messages, gemini_messages):\n",
" messages.append({\"role\": \"user\", \"content\": \"George: \" + gpt})\n",
" messages.append({\"role\": \"user\", \"content\": \"Charlie: \" + claude_message})\n",
" messages.append({\"role\": \"assistant\", \"content\": gemini})\n",
" messages.append({\"role\": \"user\", \"content\": \"George: \" + gpt_messages[-1]})\n",
" messages.append({\"role\": \"user\", \"content\": \"Charlie: \" + claude_messages[-1]})\n",
" response = gemini_via_openai_client.chat.completions.create(\n",
" model=gemini_model,\n",
" messages=messages\n",
" )\n",
" return response.choices[0].message.content"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "26d7bf33",
"metadata": {},
"outputs": [],
"source": [
"gpt_messages = [\"Hi guys\"]\n",
"claude_messages = [\"Hi\"]\n",
"gemini_messages = [\"Hi, you!\"]\n",
"\n",
"print(f\"George:\\n{gpt_messages[0]}\\n\")\n",
"print(f\"Charlie:\\n{claude_messages[0]}\\n\")\n",
"print(f\"Gemma:\\n{gemini_messages[0]}\\n\")\n",
"\n",
"for i in range(5):\n",
" gpt_next = call_gpt()\n",
" print(f\"George:\\n{gpt_next}\\n\")\n",
" gpt_messages.append(gpt_next)\n",
" \n",
" claude_next = call_claude()\n",
" print(f\"Charlie:\\n{claude_next}\\n\")\n",
" claude_messages.append(claude_next)\n",
"\n",
" gemini_next = call_gemini()\n",
" print(f\"Gemma:\\n{gemini_next}\\n\")\n",
" gemini_messages.append(gemini_next)"
]
}
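,
{
"cell_type": "markdown",
"id": "0a3b5c7d",
"metadata": {},
"source": [
"Optionally, once the loop has finished, the full transcript can be rendered as Markdown (this is what the `Markdown` and `display` imports are for). A minimal sketch, assuming the three message lists still hold the finished conversation."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7e9f1a2c",
"metadata": {},
"outputs": [],
"source": [
"# Optional: render the finished conversation as Markdown\n",
"transcript = \"\"\n",
"for gpt, claude_message, gemini in zip(gpt_messages, claude_messages, gemini_messages):\n",
"    transcript += f\"**George:** {gpt}\\n\\n**Charlie:** {claude_message}\\n\\n**Gemma:** {gemini}\\n\\n\"\n",
"display(Markdown(transcript))"
]
}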
],
"metadata": {
"kernelspec": {
"display_name": "llms",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.12"
}
},
"nbformat": 4,
"nbformat_minor": 5
}