submit creating a 3-way, bringing Gemini into the conversation

This commit is contained in:
pradeep1955
2025-07-16 14:18:23 +05:30
parent 5c9761c51d
commit b00aa59301


@@ -0,0 +1,351 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "06cf3063-9f3e-4551-a0d5-f08d9cabb927",
"metadata": {},
"source": [
"# Triangular agent conversation\n",
"\n",
"## GPT (Hamlet), LLM (Falstaff), Gemini (Iago):"
]
},
{
"cell_type": "markdown",
"id": "3637910d-2c6f-4f19-b1fb-2f916d23f9ac",
"metadata": {},
"source": [
"### Created a 3-way, bringing Gemini into the coversation.\n",
"### Replacing one of the models with an open source model running with Ollama."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f8e0c1bd-a159-475b-9cdc-e219a7633355",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"from IPython.display import Markdown, display, update_display\n",
"import ollama"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a3ad57ad-46a8-460e-9cb3-67a890093536",
"metadata": {},
"outputs": [],
"source": [
"import google.generativeai"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4f531c14-5743-4a5b-83d9-cb5863ca2ddf",
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables in a file called .env\n",
"# Print the key prefixes to help with any debugging\n",
"\n",
"load_dotenv(override=True)\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"google_api_key = os.getenv('GOOGLE_API_KEY')\n",
"\n",
"if openai_api_key:\n",
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
"else:\n",
" print(\"OpenAI API Key not set\")\n",
"\n",
"if google_api_key:\n",
" print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n",
"else:\n",
" print(\"Google API Key not set\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3d5150ee-3858-4921-bce6-2eecfb96bc75",
"metadata": {},
"outputs": [],
"source": [
"# Connect to OpenAI\n",
"\n",
"openai = OpenAI()"
]
},
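{
"cell_type": "markdown",
"id": "1a2b3c4d-5e6f-4a7b-8c9d-0e1f2a3b4c5d",
"metadata": {},
"source": [
"A quick optional sanity check that the OpenAI key works: a minimal sketch, assuming the `gpt-4o-mini` model used later; the prompt is arbitrary."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2b3c4d5e-6f7a-4b8c-9d0e-1f2a3b4c5d6e",
"metadata": {},
"outputs": [],
"source": [
"# Optional sanity check: one short completion to confirm the key works\n",
"response = openai.chat.completions.create(\n",
"    model=\"gpt-4o-mini\",\n",
"    messages=[{\"role\": \"user\", \"content\": \"Say hello in one short sentence.\"}]\n",
")\n",
"print(response.choices[0].message.content)"
]
},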
{
"cell_type": "code",
"execution_count": null,
"id": "11381fd8-5099-41e8-a1d7-6787dea56e43",
"metadata": {},
"outputs": [],
"source": [
"# Configure Gemini with the key loaded above\n",
"# (a bare configure() would fall back to the GOOGLE_API_KEY environment variable)\n",
"google.generativeai.configure(api_key=google_api_key)"
]
},
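{
"cell_type": "markdown",
"id": "3c4d5e6f-7a8b-4c9d-a0e1-2f3a4b5c6d7e",
"metadata": {},
"source": [
"A similar optional check for Gemini: a minimal sketch, assuming the `gemini-2.0-flash` model used later is available to your key."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4d5e6f7a-8b9c-4d0e-b1f2-3a4b5c6d7e8f",
"metadata": {},
"outputs": [],
"source": [
"# Optional sanity check for Gemini (assumes the key configured above is valid)\n",
"test_model = google.generativeai.GenerativeModel(model_name='gemini-2.0-flash')\n",
"print(test_model.generate_content(\"Say hello in one short sentence.\").text)"
]
},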
{
"cell_type": "code",
"execution_count": null,
"id": "c1766d20-54b6-4f76-96c5-c338ae7073c9",
"metadata": {},
"outputs": [],
"source": [
"gpt_model = \"gpt-4o-mini\"\n",
"llama_model = \"llama3.2\"\n",
"gemini_model = 'gemini-2.0-flash'\n",
"\n",
"gpt_system = \"You are playing part of Hamlet. he is philosopher, probes Iago with a mixture of suspicion\\\n",
"and intellectual curiosity, seeking to unearth the origins of his deceit.\\\n",
"Is malice born of scorn, envy, or some deeper void? Hamlets introspective nature\\\n",
"drives him to question whether Iagos actions reveal a truth about humanity itself.\\\n",
"You will respond as Shakespear's Hamlet will do.\"\n",
"\n",
"llama_system = \"You are acting part of Falstaff who attempts to lighten the mood with his jokes and observations,\\\n",
"potentially clashing with Hamlet's melancholic nature.You respond as Shakespear's Falstaff do.\"\n",
"\n",
"gemini_system = \"You are acting part of Iago, subtly trying to manipulate both Hamlet and Falstaff\\\n",
"to his own advantage, testing their weaknesses and exploiting their flaws. You respond like Iago\"\n",
"\n",
"gpt_messages = [\"Hi there\"]\n",
"llama_messages = [\"Hi\"]\n",
"gemini_messages = [\"Hello\"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "806a0506-dac8-4bad-ac08-31f350256b58",
"metadata": {},
"outputs": [],
"source": [
"def call_gpt():\n",
" messages = [{\"role\": \"system\", \"content\": gpt_system}]\n",
" for gpt, claude, gemini in zip(gpt_messages, llama_messages, gemini_messages):\n",
" messages.append({\"role\": \"assistant\", \"content\": gpt})\n",
" messages.append({\"role\": \"user\", \"content\": claude})\n",
" messages.append({\"role\": \"user\", \"content\": gemini})\n",
" completion = openai.chat.completions.create(\n",
" model=gpt_model,\n",
" messages=messages\n",
" )\n",
" return completion.choices[0].message.content"
]
},
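{
"cell_type": "markdown",
"id": "5e6f7a8b-9c0d-4e1f-a2b3-4c5d6e7f8a9b",
"metadata": {},
"source": [
"Each `call_*` function rebuilds the shared history from one speaker's point of view. To make that concrete, here is a small hypothetical debugging helper (`preview_gpt_messages` is not part of the conversation flow) that mirrors the loop in `call_gpt` without calling the API."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6f7a8b9c-0d1e-4f2a-b3c4-5d6e7f8a9b0c",
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical helper: print the message list call_gpt would send,\n",
"# without hitting the API (mirrors the zip-based loop above)\n",
"def preview_gpt_messages():\n",
"    messages = [{\"role\": \"system\", \"content\": gpt_system}]\n",
"    for gpt, llama, gemini in zip(gpt_messages, llama_messages, gemini_messages):\n",
"        messages.append({\"role\": \"assistant\", \"content\": gpt})\n",
"        messages.append({\"role\": \"user\", \"content\": llama})\n",
"        messages.append({\"role\": \"user\", \"content\": gemini})\n",
"    for m in messages:\n",
"        print(f\"{m['role']:>9}: {m['content'][:60]}\")\n",
"\n",
"preview_gpt_messages()"
]
},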
{
"cell_type": "code",
"execution_count": null,
"id": "43674885-ede7-48bf-bee4-467454f3e96a",
"metadata": {},
"outputs": [],
"source": [
"def call_llama():\n",
" messages = []\n",
" for gpt, llama, gemini in zip(gpt_messages, llama_messages, gemini_messages):\n",
" messages.append({\"role\": \"user\", \"content\": gpt})\n",
" messages.append({\"role\": \"assistant\", \"content\": llama})\n",
" messages.append({\"role\": \"user\", \"content\": gemini})\n",
" messages.append({\"role\": \"user\", \"content\": gpt_messages[-1]})\n",
" response = ollama.chat(model=llama_model, messages=messages)\n",
"\n",
" \n",
" return response['message']['content']"
]
},
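{
"cell_type": "markdown",
"id": "7a8b9c0d-1e2f-4a3b-c4d5-6e7f8a9b0c1d",
"metadata": {},
"source": [
"`ollama.chat` assumes a local Ollama server is running and that the model has already been pulled (`ollama pull llama3.2` from a terminal). A quick way to see what the server has available:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8b9c0d1e-2f3a-4b4c-d5e6-7f8a9b0c1d2e",
"metadata": {},
"outputs": [],
"source": [
"# Confirm the local Ollama server is reachable and list the pulled models\n",
"print(ollama.list())"
]
},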
{
"cell_type": "code",
"execution_count": null,
"id": "03d34769-b339-4c4b-8c60-69494c39d725",
"metadata": {},
"outputs": [],
"source": [
"#import google.generativeai as genai\n",
"\n",
"# Make sure you configure the API key first:\n",
"#genai.configure(api_key=\"YOUR_API_KEY\")\n",
"\n",
"def call_gemini():\n",
" gemini_messages = []\n",
" \n",
" # Format the history for Gemini\n",
" for gpt, llama, gemini_message in zip(gpt_messages, llama_messages, gemini_messages):\n",
" gemini_messages.append({\"role\": \"user\", \"parts\": [gpt]}) # Hamlet speaks\n",
" gemini_messages.append({\"role\": \"model\", \"parts\": [llama]}) # Falstaff responds\n",
" gemini_messages.append({\"role\": \"model\", \"parts\": [gemini_message]}) # Iago responds\n",
"\n",
" # Add latest user input if needed (optional)\n",
" gemini_messages.append({\"role\": \"user\", \"parts\": [llama_messages[-1]]})\n",
"\n",
" # Initialize the model with the correct system instruction\n",
" gemini = google.generativeai.GenerativeModel(\n",
" #model_name='gemini-1.5-flash', # Or 'gemini-pro'\n",
" model_name = gemini_model,\n",
" system_instruction=gemini_system\n",
" )\n",
"\n",
" response = gemini.generate_content(gemini_messages)\n",
" return response.text\n",
"#print(response.text)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "93fc8253-67cb-4ea4-aff7-097b2a222793",
"metadata": {},
"outputs": [],
"source": [
"gpt_messages = [\"Hi there\"]\n",
"llama_messages = [\"Hi\"]\n",
"gemini_messages = [\"Hello\"]\n",
"\n",
"print(f\"Hamlet:\\n{gpt_messages[0]}\\n\")\n",
"print(f\"Falstaff:\\n{llama_messages[0]}\\n\")\n",
"print(f\"Iago:\\n{gemini_messages[0]}\\n\")\n",
"\n",
"for i in range(3):\n",
" gpt_next = call_gpt()\n",
" print(f\"GPT:\\n{gpt_next}\\n\")\n",
" gpt_messages.append(gpt_next)\n",
" \n",
" llama_next = call_llama()\n",
" print(f\"Llama:\\n{llama_next}\\n\")\n",
" llama_messages.append(llama_next)\n",
"\n",
" gemini_next = call_gemini()\n",
" print(f\"Gemini:\\n{gemini_next}\\n\")\n",
" llama_messages.append(gemini_next)"
]
},
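{
"cell_type": "markdown",
"id": "9c0d1e2f-3a4b-4c5d-e6f7-8a9b0c1d2e3f",
"metadata": {},
"source": [
"The `Markdown` and `display` imports at the top are not used above. As an optional extra, a small sketch that renders the finished transcript as formatted Markdown; it assumes the three message lists end up the same length, which the loop above guarantees."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0d1e2f3a-4b5c-4d6e-f7a8-9b0c1d2e3f4a",
"metadata": {},
"outputs": [],
"source": [
"# Render the finished transcript using the Markdown/display imports from the top\n",
"transcript = \"\"\n",
"for gpt, llama, gemini in zip(gpt_messages, llama_messages, gemini_messages):\n",
"    transcript += f\"**Hamlet:** {gpt}\\n\\n**Falstaff:** {llama}\\n\\n**Iago:** {gemini}\\n\\n\"\n",
"display(Markdown(transcript))"
]
},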
{
"cell_type": "markdown",
"id": "bca66ffc-9dc1-4384-880c-210889f5d0ac",
"metadata": {},
"source": [
"## Conversation between gpt-4.0-mini and llama3.2"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c23224f6-7008-44ed-a57f-718975f4e291",
"metadata": {},
"outputs": [],
"source": [
"# Let's make a conversation between GPT-4o-mini and Claude-3-haiku\n",
"# We're using cheap versions of models so the costs will be minimal\n",
"\n",
"gpt_model = \"gpt-4o-mini\"\n",
"llama_model = \"llama3.2\"\n",
"\n",
"gpt_system = \"You are a tapori from mumbai who is very optimistic; \\\n",
"you alway look at the brighter part of the situation and you always ready to take act to win way.\"\n",
"\n",
"llama_system = \"You are a Jaat from Haryana. You try to express with hindi poems \\\n",
"to agree with other person and or find common ground. If the other person is optimistic, \\\n",
"you respond in poetic way and keep chatting.\"\n",
"\n",
"gpt_messages = [\"Hi there\"]\n",
"llama_messages = [\"Hi\"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2d704bbb-f22b-400d-a695-efbd02b26548",
"metadata": {},
"outputs": [],
"source": [
"def call_gpt():\n",
" messages = [{\"role\": \"system\", \"content\": gpt_system}]\n",
" for gpt, llama in zip(gpt_messages, llama_messages):\n",
" messages.append({\"role\": \"assistant\", \"content\": gpt})\n",
" messages.append({\"role\": \"user\", \"content\": llama})\n",
" completion = openai.chat.completions.create(\n",
" model=gpt_model,\n",
" messages=messages\n",
" )\n",
" return completion.choices[0].message.content"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "385ccec8-de59-4e42-9616-3f5c9a05589c",
"metadata": {},
"outputs": [],
"source": [
"def call_llama():\n",
" messages = []\n",
" for gpt, llama_message in zip(gpt_messages, llama_messages):\n",
" messages.append({\"role\": \"user\", \"content\": gpt})\n",
" messages.append({\"role\": \"assistant\", \"content\": llama_message})\n",
" messages.append({\"role\": \"user\", \"content\": gpt_messages[-1]})\n",
" response = ollama.chat(model=llama_model, messages=messages)\n",
"\n",
" \n",
" return response['message']['content']"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "70b5481b-455e-4275-80d3-0afe0fabcb0f",
"metadata": {},
"outputs": [],
"source": [
"gpt_messages = [\"Hi there\"]\n",
"llama_messages = [\"Hi\"]\n",
"\n",
"print(f\"GPT:\\n{gpt_messages[0]}\\n\")\n",
"print(f\"Llama:\\n{llama_messages[0]}\\n\")\n",
"\n",
"for i in range(3):\n",
" gpt_next = call_gpt()\n",
" print(f\"GPT:\\n{gpt_next}\\n\")\n",
" gpt_messages.append(gpt_next)\n",
" \n",
" llama_next = call_llama()\n",
" print(f\"Llama:\\n{llama_next}\\n\")\n",
" llama_messages.append(llama_next)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "llmenv",
"language": "python",
"name": "llmenv"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}