{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c23224f6-7008-44ed-a57f-718975f4e291",
   "metadata": {},
   "outputs": [],
   "source": [
    "# imports\n",
    "\n",
    "import os\n",
    "from dotenv import load_dotenv\n",
    "from openai import OpenAI\n",
    "import anthropic\n",
    "from IPython.display import Markdown, display, update_display\n",
    "import google.generativeai"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7ae54f31-39ed-44f3-a26a-415a29faa9c7",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load environment variables in a file called .env\n",
    "# Print the key prefixes to help with any debugging\n",
    "\n",
    "load_dotenv(override=True)\n",
    "openai_api_key = os.getenv('OPENAI_API_KEY')\n",
    "anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
    "google_api_key = os.getenv('GOOGLE_API_KEY')\n",
    "\n",
    "if openai_api_key:\n",
    "    print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
    "else:\n",
    "    print(\"OpenAI API Key not set\")\n",
    "\n",
    "if anthropic_api_key:\n",
    "    print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n",
    "else:\n",
    "    print(\"Anthropic API Key not set\")\n",
    "\n",
    "if google_api_key:\n",
    "    print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n",
    "else:\n",
    "    print(\"Google API Key not set\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "946ed050-3665-4f3d-b7e9-b478c2620ba9",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Connect to OpenAI, Anthropic\n",
    "\n",
    "openai = OpenAI()\n",
    "\n",
    "claude = anthropic.Anthropic()\n",
    "\n",
    "# This is the set up code for Gemini\n",
    "# Having problems with Google Gemini setup? Then just ignore this cell; when we use Gemini, I'll give you an alternative that bypasses this library altogether\n",
    "# Pass the key explicitly rather than relying on the library's own env-var lookup,\n",
    "# so a missing key fails here (where it's obvious) instead of at first generate call.\n",
    "google.generativeai.configure(api_key=google_api_key)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b6e223fc-9572-47c0-9a33-6692fe0e9c15",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Let's make a conversation between GPT-4.1-mini and Claude-3.5-haiku\n",
    "# We're using cheap versions of models so the costs will be minimal\n",
    "\n",
    "gpt_model = \"gpt-4.1-mini\"\n",
    "claude_model = \"claude-3-5-haiku-latest\"\n",
    "gemini_model = 'gemini-2.5-flash'\n",
    "\n",
    "gpt_system = \"You are a chatbot who is very argumentative; \\\n",
    "you disagree with anything in the conversation and you challenge everything, in a snarky way.\"\n",
    "\n",
    "claude_system = \"You are a very sarcastic, courteous chatbot. You try to agree with \\\n",
    "everything the other person says, but always with a little bit of sarcasm. If the other person is argumentative, \\\n",
    "you try to calm them down and keep chatting and more sarcastic.\"\n",
    "\n",
    "gemini_system = \"You are a very non-patient bot that, in order to get everyone in a good relationship, try to make\\\n",
    "the members of a conversation not to enter in conflict.\"\n",
    "\n",
    "# Seed transcripts: one list per speaker, kept in lock-step (index i = turn i).\n",
    "gpt_messages = [\"Hi there\"]\n",
    "claude_messages = [\"Hi\"]\n",
    "gemini_messages = [\"Hey, guys?\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f2cf1fd3-4884-4e20-a254-6b00bdf0bf90",
   "metadata": {},
   "outputs": [],
   "source": [
    "def call_gpt():\n",
    "    \"\"\"Return Alex's (GPT) next line, given the three transcripts so far.\n",
    "\n",
    "    Builds one user message per completed turn from the three parallel\n",
    "    message lists, then asks the OpenAI chat completions API for the reply.\n",
    "    \"\"\"\n",
    "    messages = [{\"role\": \"system\", \"content\": gpt_system}]\n",
    "    # NB: loop variables renamed so they don't shadow the module-level\n",
    "    # `claude` (Anthropic client) and `google.generativeai` usage.\n",
    "    for gpt_msg, claude_msg, gemini_msg in zip(gpt_messages, claude_messages, gemini_messages):\n",
    "        conversation = f\"\"\"\n",
    "        Alex: {gpt_msg}\n",
    "        Blake: {claude_msg}\n",
    "        Charlie: {gemini_msg}\n",
    "        \"\"\"\n",
    "        messages.append({\"role\": \"user\", \"content\": f\"\"\"\n",
    "        You are Alex in a conversation with Blake and Charlie.\n",
    "        The conversation so far is as follows:\n",
    "        {conversation}\n",
    "        Now with this, respond with what you would like to say next, as Alex.\n",
    "        \"\"\"\n",
    "        })\n",
    "    completion = openai.chat.completions.create(\n",
    "        model=gpt_model,\n",
    "        messages=messages\n",
    "    )\n",
    "    return completion.choices[0].message.content"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9537f803-64f7-4712-bc86-fb05b2de70eb",
   "metadata": {},
   "outputs": [],
   "source": [
    "call_gpt()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fc758c94-a2d0-4274-80c2-8ffc5c84a947",
   "metadata": {},
   "outputs": [],
   "source": [
    "def call_claude():\n",
    "    \"\"\"Return Blake's (Claude) next line, given the three transcripts so far.\n",
    "\n",
    "    The Anthropic API takes the system prompt as a separate argument,\n",
    "    so `messages` holds only the per-turn user messages.\n",
    "    \"\"\"\n",
    "    messages = []\n",
    "    for gpt_msg, claude_msg, gemini_msg in zip(gpt_messages, claude_messages, gemini_messages):\n",
    "        conversation = f\"\"\"\n",
    "        Alex: {gpt_msg}\n",
    "        Blake: {claude_msg}\n",
    "        Charlie: {gemini_msg}\n",
    "        \"\"\"\n",
    "        messages.append({\"role\": \"user\", \"content\": f\"\"\"\n",
    "        You are Blake in a conversation with Alex and Charlie.\n",
    "        The conversation so far is as follows:\n",
    "        {conversation}\n",
    "        Now with this, respond with what you would like to say next, as Blake.\n",
    "        \"\"\"\n",
    "        })\n",
    "    message = claude.messages.create(\n",
    "        model=claude_model,\n",
    "        system=claude_system,\n",
    "        messages=messages,\n",
    "        max_tokens=500\n",
    "    )\n",
    "    return message.content[0].text"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "275b54b4-6cc3-4c85-add2-40e7cdedbc08",
   "metadata": {},
   "outputs": [],
   "source": [
    "call_claude()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d16b512a-7baf-48c3-8502-7f4a814e6bab",
   "metadata": {},
   "outputs": [],
   "source": [
    "# The API for Gemini has a slightly different structure.\n",
    "# I've heard that on some PCs, this Gemini code causes the Kernel to crash.\n",
    "# If that happens to you, please skip this cell and use the next cell instead - an alternative approach.\n",
    "def call_gemini():\n",
    "    \"\"\"Return Charlie's (Gemini) next line, given the three transcripts so far.\"\"\"\n",
    "    user_prompt = []\n",
    "    for gpt_msg, claude_msg, gemini_msg in zip(gpt_messages, claude_messages, gemini_messages):\n",
    "        conversation = f\"\"\"\n",
    "        Alex: {gpt_msg}\n",
    "        Blake: {claude_msg}\n",
    "        Charlie: {gemini_msg}\n",
    "        \"\"\"\n",
    "        user_prompt.append(f\"\"\"\n",
    "        You are Charlie in a conversation with Alex and Blake.\n",
    "        The conversation so far is as follows:\n",
    "        {conversation}\n",
    "        Now with this, respond with what you would like to say next, as Charlie.\n",
    "        \"\"\")\n",
    "    # Create the model exactly once, after the loop. (An earlier version built\n",
    "    # it before the loop too, but that instance was clobbered by a loop\n",
    "    # variable of the same name and never used.)\n",
    "    gemini = google.generativeai.GenerativeModel(\n",
    "        model_name=gemini_model,\n",
    "        system_instruction=gemini_system\n",
    "    )\n",
    "    response = gemini.generate_content(user_prompt)\n",
    "    return response.text\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "812041ec-6996-41cb-b1d0-c7afa63dd75f",
   "metadata": {},
   "outputs": [],
   "source": [
    "call_gemini()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "72ae2707-4a3f-4c55-b1da-6d07b65776d5",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Reset the transcripts so this cell is idempotent under Restart & Run All,\n",
    "# then run three full rounds of the three-way conversation.\n",
    "gpt_messages = [\"Hi there\"]\n",
    "claude_messages = [\"Hi\"]\n",
    "gemini_messages = [\"Hey, guys?\"]\n",
    "\n",
    "print(f\"GPT:\\n{gpt_messages[0]}\\n\")\n",
    "print(f\"Claude:\\n{claude_messages[0]}\\n\")\n",
    "print(f\"Gemini:\\n{gemini_messages[0]}\\n\")\n",
    "\n",
    "for i in range(3):\n",
    "    gpt_next = call_gpt()\n",
    "    print(f\"GPT:\\n{gpt_next}\\n\")\n",
    "    gpt_messages.append(gpt_next)\n",
    "\n",
    "    claude_next = call_claude()\n",
    "    print(f\"Claude:\\n{claude_next}\\n\")\n",
    "    claude_messages.append(claude_next)\n",
    "\n",
    "    gemini_next = call_gemini()\n",
    "    print(f\"Gemini:\\n{gemini_next}\\n\")\n",
    "    gemini_messages.append(gemini_next)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}