{
"cells": [
{
"cell_type": "markdown",
"id": "06cf3063-9f3e-4551-a0d5-f08d9cabb927",
"metadata": {},
"source": [
"# Welcome to Week 2!\n",
"\n",
"## Frontier Model APIs\n",
"\n",
"In Week 1, we used multiple Frontier LLMs through their Chat UI, and we connected with OpenAI's API.\n",
"\n",
"Today we'll connect with the APIs for Anthropic and Google, as well as OpenAI."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "de23bb9e-37c5-4377-9a82-d7b6c648eeb6",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import anthropic\n",
"from IPython.display import Markdown, display, update_display"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f0a8ab2b-6134-4104-a1bc-c3cd7ea4cd36",
"metadata": {},
"outputs": [],
"source": [
"# import for google\n",
"# in rare cases, this seems to give an error on some systems, or even crashes the kernel\n",
"# If this happens to you, simply ignore this cell - I give an alternative approach for using Gemini later\n",
"\n",
"import google.generativeai"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1179b4c5-cd1f-4131-a876-4c9f3f38d2ba",
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables in a file called .env\n",
"# Print the key prefixes to help with any debugging\n",
"\n",
"load_dotenv(override=True)\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
"google_api_key = os.getenv('GOOGLE_API_KEY')\n",
"\n",
"if openai_api_key:\n",
"    print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
"else:\n",
"    print(\"OpenAI API Key not set\")\n",
"\n",
"if anthropic_api_key:\n",
"    print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n",
"else:\n",
"    print(\"Anthropic API Key not set\")\n",
"\n",
"if google_api_key:\n",
"    print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n",
"else:\n",
"    print(\"Google API Key not set\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "797fe7b0-ad43-42d2-acf0-e4f309b112f0",
"metadata": {},
"outputs": [],
"source": [
"# Connect to OpenAI, Anthropic\n",
"\n",
"openai = OpenAI()\n",
"\n",
"claude = anthropic.Anthropic()"
]
},
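{
"cell_type": "code",
"execution_count": null,
"id": "3f9a2b1c-7d4e-4c6a-9b8f-2e1d0c3b4a5d",
"metadata": {},
"outputs": [],
"source": [
"# Optional sanity check that both clients are wired up\n",
"# This is a minimal sketch, assuming the same cheap models used later in this notebook\n",
"\n",
"response = openai.chat.completions.create(\n",
"    model=\"gpt-4o-mini\",\n",
"    messages=[{\"role\": \"user\", \"content\": \"Say hi in one short sentence\"}]\n",
")\n",
"print(response.choices[0].message.content)\n",
"\n",
"message = claude.messages.create(\n",
"    model=\"claude-3-haiku-20240307\",\n",
"    max_tokens=50,\n",
"    messages=[{\"role\": \"user\", \"content\": \"Say hi in one short sentence\"}]\n",
")\n",
"print(message.content[0].text)"
]
},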
{
"cell_type": "code",
"execution_count": null,
"id": "425ed580-808d-429b-85b0-6cba50ca1d0c",
"metadata": {},
"outputs": [],
"source": [
"# This is the setup code for Gemini\n",
"# Having problems with Google Gemini setup? Then just ignore this cell; when we use Gemini, I'll give you an alternative that bypasses this library altogether\n",
"\n",
"# configure() with no arguments relies on the GOOGLE_API_KEY environment variable we loaded above\n",
"google.generativeai.configure()"
]
},
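{
"cell_type": "code",
"execution_count": null,
"id": "6c2e8d4f-0a1b-4c5d-8e9f-3b2a1c0d9e8f",
"metadata": {},
"outputs": [],
"source": [
"# A minimal call to Gemini to confirm the setup above works\n",
"# This is just a sketch - it assumes the 'gemini-1.5-flash' model is available to your key\n",
"\n",
"gemini = google.generativeai.GenerativeModel('gemini-1.5-flash')\n",
"response = gemini.generate_content(\"Say hi in one short sentence\")\n",
"print(response.text)"
]
},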
{
"cell_type": "markdown",
"id": "f6e09351-1fbe-422f-8b25-f50826ab4c5f",
"metadata": {},
"source": [
"## An adversarial conversation between chatbots\n",
"\n",
"What if two chatbots get into a self-referential conversation that goes on for a long time? In my first test, they eventually forgot the topic and ended up repeating polite nothings to each other. In another test, they converged on a result and ended by exchanging nearly identical statements.\n",
"\n",
"**Warning:** Think before you dial the number of iterations up too high. Being a student, I don't know at what point the chat becomes too costly, or which models can handle this without becoming overloaded. Maybe Ed can advise if he sees this.\n",
"\n",
"In this experiment, two chatbots edit an essay about cars: one keeps trying to make it longer every time; the other keeps making it shorter.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bcb54183-45d3-4d08-b5b6-55e380dfdf1b",
"metadata": {},
"outputs": [],
"source": [
"# Let's make a conversation between GPT-4o-mini and Claude-3-haiku\n",
"# We're using cheap versions of models so the costs will be minimal\n",
"\n",
"gpt_model = \"gpt-4o-mini\"\n",
"claude_model = \"claude-3-haiku-20240307\"\n",
"\n",
"gpt_system = \"This is a description of a car; \\\n",
"rephrase the description while adding one detail. Don't include comments that aren't part of the car description.\"\n",
"\n",
"claude_system = \"This is a description of a car; \\\n",
"repeat the description in slightly shorter form. You may remove some details if desired. Don't include comments that aren't part of the car description. Maximum reply length 125 words.\"\n",
"\n",
"gpt_messages = [\"Hi there\"]\n",
"claude_messages = [\"Hi\"]\n",
"\n",
"\n",
"def call_gpt():\n",
"    messages = [{\"role\": \"system\", \"content\": gpt_system}]\n",
"    for gpt, claude_message in zip(gpt_messages, claude_messages):\n",
"        messages.append({\"role\": \"assistant\", \"content\": gpt})\n",
"        messages.append({\"role\": \"user\", \"content\": claude_message})\n",
"    completion = openai.chat.completions.create(\n",
"        model=gpt_model,\n",
"        messages=messages\n",
"    )\n",
"    return completion.choices[0].message.content\n",
"\n",
"\n",
"def call_claude():\n",
"    messages = []\n",
"    for gpt, claude_message in zip(gpt_messages, claude_messages):\n",
"        messages.append({\"role\": \"user\", \"content\": gpt})\n",
"        messages.append({\"role\": \"assistant\", \"content\": claude_message})\n",
"    # Claude's API needs the conversation to end on a user message, so add GPT's latest\n",
"    messages.append({\"role\": \"user\", \"content\": gpt_messages[-1]})\n",
"    message = claude.messages.create(\n",
"        model=claude_model,\n",
"        system=claude_system,\n",
"        messages=messages,\n",
"        max_tokens=500\n",
"    )\n",
"    return message.content[0].text\n",
"\n",
"\n",
"# Show the two initial greetings, then try each function once\n",
"print(f\"GPT:\\n{gpt_messages[0]}\\n\")\n",
"print(f\"Claude:\\n{claude_messages[0]}\\n\")\n",
"\n",
"reply = call_gpt()\n",
"print('\\nGPT: ', reply)\n",
"\n",
"reply = call_claude()\n",
"print('\\nClaude: ', reply)"
]
},
{
"cell_type": "markdown",
"id": "9fbce0da",
"metadata": {},
"source": [
"### Here's the iterative loop\n",
"\n",
"Important change: unlike the original example, we don't resend the entire conversation each turn, making the input longer and longer. Instead, we use pop() to drop the oldest messages once the history grows past six entries."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1f41d586",
"metadata": {},
"outputs": [],
"source": [
"for i in range(35):\n",
"    gpt_next = call_gpt()\n",
"    print(f\"GPT:\\n{gpt_next}\\n\")\n",
"    # Once the history holds more than six messages, drop the oldest two\n",
"    if len(gpt_messages) > 6:\n",
"        gpt_messages.pop(0)\n",
"        gpt_messages.pop(0)\n",
"    gpt_messages.append(gpt_next)\n",
"\n",
"    claude_next = call_claude()\n",
"    print(f\"Claude:\\n{claude_next}\\n\")\n",
"    if len(claude_messages) > 6:\n",
"        claude_messages.pop(0)\n",
"        claude_messages.pop(0)\n",
"    claude_messages.append(claude_next)\n",
"\n",
"print('Done!')"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}