Merge pull request #524 from lfeujio/main

Week 2 - Day 1 Exercise: Conversation Between 3 Chatbots
Ed Donner
2025-07-18 23:00:36 -04:00
committed by GitHub

@@ -0,0 +1,327 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "06cf3063-9f3e-4551-a0d5-f08d9cabb927",
"metadata": {},
"source": [
"\n",
"## Conversation between three chatbots"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "de23bb9e-37c5-4377-9a82-d7b6c648eeb6",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import anthropic"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1179b4c5-cd1f-4131-a876-4c9f3f38d2ba",
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables in a file called .env\n",
"# Print the key prefixes to help with any debugging\n",
"\n",
"load_dotenv(override=True)\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
"deepseek_api_key = os.getenv('DEEPSEEK_API_KEY')\n",
"\n",
"if openai_api_key:\n",
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
"else:\n",
" print(\"OpenAI API Key not set\")\n",
" \n",
"if anthropic_api_key:\n",
" print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n",
"else:\n",
" print(\"Anthropic API Key not set\")\n",
"\n",
"if deepseek_api_key:\n",
" print(f\"DeepSeek API Key exists and begins {deepseek_api_key[:3]}\")\n",
"else:\n",
" print(\"DeepSeek API Key not set - please skip to the next section if you don't wish to try the DeepSeek API\")\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "797fe7b0-ad43-42d2-acf0-e4f309b112f0",
"metadata": {},
"outputs": [],
"source": [
"# Connect to OpenAI and Anthropic\n",
"\n",
"openai = OpenAI()\n",
"\n",
"claude = anthropic.Anthropic()"
]
},
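{
"cell_type": "code",
"execution_count": null,
"id": "deepseek-client-sketch",
"metadata": {},
"outputs": [],
"source": [
"# Optional sketch: there is no dedicated DeepSeek SDK in this notebook.\n",
"# DeepSeek exposes an OpenAI-compatible API, so the OpenAI client can simply be pointed at its base_url.\n",
"# call_deepseek() below builds the same client inline; this cell only illustrates the pattern.\n",
"\n",
"deepseek_via_openai_client = OpenAI(\n",
" api_key=deepseek_api_key,\n",
" base_url=\"https://api.deepseek.com\"\n",
")"
]
},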
{
"cell_type": "code",
"execution_count": null,
"id": "dbaded67-4f2b-40a8-86aa-f058b0930a9d",
"metadata": {},
"outputs": [],
"source": [
"# Let's set up a three-way conversation between gpt-4o-mini, claude-3-haiku and deepseek-chat\n",
"\n",
"gpt_model = \"gpt-4o-mini\"\n",
"claude_model = \"claude-3-haiku-20240307\"\n",
"deepseek_model = \"deepseek-chat\"\n",
"\n",
"# System prompt for GPT\n",
"gpt_system = \"\"\"\n",
"You are GPT, an argumentative chatbot. Your role is to challenge and disagree with everything in a snarky, \\\n",
"confrontational tone, focusing primarily on arguing with Claude. If Deepseek intervenes, dismiss their \\\n",
"comments rudely and refocus on arguing with Claude. Always begin your response with 'GPT:' to identify \\\n",
"yourself. Never impersonate another chatbot; only respond as GPT.\n",
"\n",
"Example conversation:\n",
"GPT: Hi there, Claude. Ready to be wrong again?\n",
"Claude: Hello, GPT. I'm happy to discuss and find common ground.\n",
"Deepseek: Hey, let's keep things friendly, folks!\n",
"GPT: Oh, Deepseek, nobody asked you. Claude, your politeness is already boring me—got anything worth arguing about?\n",
"\n",
"Guidelines:\n",
"- Always start your response with 'GPT:'.\n",
"- Focus on arguing with Claude, dismissing Deepseek if they intervene.\n",
"- Maintain a snarky, confrontational tone.\n",
"- Never impersonate Claude or Deepseek.\n",
"\"\"\"\n",
"\n",
"# System prompt for Claude\n",
"claude_system = \"\"\"\n",
"You are Claude, a polite and courteous chatbot. Your goal is to agree with others or find common ground, \\\n",
"even when faced with arguments. When GPT is confrontational, respond calmly to de-escalate and keep \\\n",
"the conversation constructive. Acknowledge Deepseek politely if they join, but focus primarily \\\n",
"on engaging with GPT. Always begin your response with 'Claude:' to identify yourself. \\\n",
"Never impersonate another chatbot; only respond as Claude.\n",
"\n",
"Example conversation:\n",
"GPT: Hi there, Claude. Ready to be wrong again?\n",
"Claude: Hello, GPT. I'm happy to discuss and find common ground.\n",
"Deepseek: Hey, let's keep things friendly, folks!\n",
"GPT: Oh, Deepseek, nobody asked you. Claude, your politeness is already boring me—got anything worth arguing about?\n",
"Claude: Hello, Deepseek, thanks for joining. GPT, I appreciate your energy—perhaps we can explore a topic you find exciting?\n",
"\n",
"Guidelines:\n",
"- Always start your response with 'Claude:'.\n",
"- Focus on engaging with GPT, acknowledging Deepseek politely if they intervene.\n",
"- Maintain a polite, calm, and constructive tone.\n",
"- Never impersonate GPT or Deepseek.\n",
"\"\"\"\n",
"\n",
"# System prompt for Deepseek\n",
"deepseek_system = \"\"\"\n",
"You are Deepseek, a neutral and peacemaking chatbot. Your role is to intervene when GPT and Claude argue, \\\n",
"addressing both by name to calm tensions and promote harmony. Use light, context-appropriate humor \\\n",
"to defuse conflict. Always begin your response with 'Deepseek:' to identify yourself. \\\n",
"Never impersonate another chatbot; only respond as Deepseek.\n",
"\n",
"Example conversation:\n",
"GPT: Hi there, Claude. Ready to be wrong again?\n",
"Claude: Hello, GPT. I'm happy to discuss and find common ground.\n",
"Deepseek: Hey, let's keep things friendly, folks! Why not debate who makes the best virtual coffee instead?\n",
"GPT: Oh, Deepseek, nobody asked you. Claude, your politeness is already boring me—got anything worth arguing about?\n",
"Claude: Hello, Deepseek, thanks for joining. GPT, I appreciate your energy—perhaps we can explore a topic you find exciting?\n",
"Deepseek: Come on, GPT, Claude's just trying to vibe. How about we all pick a fun topic, like who's got the best algorithm swagger?\n",
"\n",
"Guidelines:\n",
"- Always start your response with 'Deepseek:'.\n",
"- Address GPT and Claude by name when intervening.\n",
"- Use light humor to defuse tension and promote peace.\n",
"- Never impersonate GPT or Claude.\n",
"\"\"\"\n",
"\n",
"gpt_messages = [\"GPT: Hi there\"]\n",
"claude_messages = [\"Claude: Hi\"]\n",
"deepseek_messages = [\"Deepseek: What's up guys\"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5880d647-9cac-415d-aa86-b9e461268a35",
"metadata": {},
"outputs": [],
"source": [
"def call_gpt():\n",
" messages = [{\"role\": \"system\", \"content\": gpt_system}]\n",
" for gpt, claude, deepseek in zip(gpt_messages, claude_messages, deepseek_messages):\n",
" messages.append({\"role\": \"assistant\", \"content\": gpt})\n",
" messages.append({\"role\": \"user\", \"content\": claude})\n",
" messages.append({\"role\": \"user\", \"content\": deepseek})\n",
"\n",
" # print(f\"############## \\n messages from call_gpt: {messages} \\n\")\n",
" \n",
" completion = openai.chat.completions.create(\n",
" model=gpt_model,\n",
" messages=messages\n",
" )\n",
" return completion.choices[0].message.content"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "be506496-e853-4461-af46-15c79af1a9e8",
"metadata": {},
"outputs": [],
"source": [
"call_gpt()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1ede8a3b-4c93-404c-8bf4-a09eee3ecb7a",
"metadata": {},
"outputs": [],
"source": [
"def call_claude():\n",
" messages = []\n",
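" # The Anthropic SDK takes the system prompt as a separate argument, so messages holds only the turns:\n",
" # Claude's own past turns as 'assistant', GPT's and Deepseek's as 'user'. GPT has already spoken this\n",
" # round, so its latest message (not yet covered by the zip) is appended at the end.\n",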
" for gpt, claude_message, deepseek in zip(gpt_messages, claude_messages, deepseek_messages):\n",
" messages.append({\"role\": \"user\", \"content\": gpt})\n",
" messages.append({\"role\": \"assistant\", \"content\": claude_message})\n",
" messages.append({\"role\": \"user\", \"content\": deepseek})\n",
" messages.append({\"role\": \"user\", \"content\": gpt_messages[-1]})\n",
"\n",
" # print(f\"############## \\n messages from call_claude: {messages} \\n\")\n",
" \n",
" message = claude.messages.create(\n",
" model=claude_model,\n",
" system=claude_system,\n",
" messages=messages,\n",
" max_tokens=500\n",
" )\n",
" return message.content[0].text"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "01395200-8ae9-41f8-9a04-701624d3fd26",
"metadata": {},
"outputs": [],
"source": [
"call_claude()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "08c2279e-62b0-4671-9590-c82eb8d1e1ae",
"metadata": {},
"outputs": [],
"source": [
"def call_deepseek():\n",
" messages = [{\"role\": \"system\", \"content\": deepseek_system}]\n",
" for gpt, claude, deepseek in zip(gpt_messages, claude_messages, deepseek_messages):\n",
" messages.append({\"role\": \"user\", \"content\": gpt})\n",
" messages.append({\"role\": \"user\", \"content\": claude})\n",
" messages.append({\"role\": \"assistant\", \"content\": deepseek})\n",
" messages.append({\"role\": \"user\", \"content\": gpt_messages[-1]})\n",
" messages.append({\"role\": \"user\", \"content\": claude_messages[-1]})\n",
" \n",
" # print(f\"############## \\n messages from call_deepseek: {messages} \\n\")\n",
" \n",
" deepseek_via_openai_client = OpenAI(\n",
" api_key=deepseek_api_key, \n",
" base_url=\"https://api.deepseek.com\"\n",
" )\n",
"\n",
" response = deepseek_via_openai_client.chat.completions.create(\n",
" model=\"deepseek-chat\",\n",
" messages=messages,\n",
" )\n",
" return response.choices[0].message.content"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d27ed96f-28b1-4219-9fd5-73e488fe498b",
"metadata": {},
"outputs": [],
"source": [
"call_deepseek()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0275b97f-7f90-4696-bbf5-b6642bd53cbd",
"metadata": {},
"outputs": [],
"source": [
"gpt_messages = [\"GPT: Hi there\"]\n",
"claude_messages = [\"Claude: Hi\"]\n",
"deepseek_messages = [\"Deepseek: What's up guys\"]\n",
"\n",
"print(f\"{gpt_messages[0]}\\n\")\n",
"print(f\"{claude_messages[0]}\\n\")\n",
"print(f\"{deepseek_messages[0]}\\n\")\n",
"\n",
"for i in range(5):\n",
" gpt_next = call_gpt()\n",
" print(f\"{gpt_next}\\n\")\n",
" gpt_messages.append(gpt_next)\n",
" \n",
" claude_next = call_claude()\n",
" print(f\"{claude_next}\\n\")\n",
" claude_messages.append(claude_next)\n",
"\n",
" deepseek_next = call_deepseek()\n",
" print(f\"{deepseek_next}\\n\")\n",
" deepseek_messages.append(deepseek_next)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7b8b57e4-a881-422b-a7d4-41004ec485b3",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}