Add my notebook on adversarial chatting to community-contributions
@@ -0,0 +1,330 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "70a27b7c-3f3c-4d82-bdea-381939ce98bd",
"metadata": {
"editable": true,
"slideshow": {
"slide_type": ""
},
"tags": []
},
"source": [
"# My Adversarial Conversation\n",
"J. McInerney, 26 May 2025\n",
"\n",
"I am taking some cells from the Week 2, Day 1 notebook and modifying them so I can have an adversarial conversation between OpenAI and a local LLM (gemma3:12b). First I will reimplement what Ed did in the Week 2, Day 1 notebook. Then I will try a deeper conversation."
]
},
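{
"cell_type": "markdown",
"id": "a1f6c0de-5b2a-4c77-9f3e-2d84c1b7e9aa",
"metadata": {},
"source": [
"Before starting, the local model needs to be available. The next cell is a minimal setup sketch: it assumes the local LLM is served by Ollama on its default port (http://localhost:11434) and that the Ollama CLI is on the path, and it simply pulls the gemma3:12b tag used later in this notebook. Skip it if the model is already downloaded."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b7d2e4f1-8c3a-4e65-a1d9-5f60c2b3a7cd",
"metadata": {},
"outputs": [],
"source": [
"# Download the local model used later in this notebook.\n",
"# Assumes the Ollama CLI is installed and its server is running on the default port.\n",
"!ollama pull gemma3:12b"
]
},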
{
"cell_type": "code",
"execution_count": null,
"id": "3ec14834-4cf2-4f1d-9128-4ddad7b91804",
"metadata": {
"editable": true,
"slideshow": {
"slide_type": ""
},
"tags": []
},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"#import anthropic\n",
"from IPython.display import Markdown, display, update_display"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "98618ab4-075f-438c-b85b-d146e5299a87",
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables in a file called .env\n",
"# Print the key prefixes to help with any debugging\n",
"\n",
"load_dotenv(override=True)\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"\n",
"if openai_api_key:\n",
"    print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
"else:\n",
"    print(\"OpenAI API Key not set\")\n",
"    \n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "95e69172-4601-4eb0-a7af-19abebd4bf56",
"metadata": {
"editable": true,
"slideshow": {
"slide_type": ""
},
"tags": []
},
"outputs": [],
"source": [
"# Connect to OpenAI (Anthropic is not used in this notebook)\n",
"openai = OpenAI()"
]
},
{
"cell_type": "markdown",
"id": "98f47886-71ae-4b41-875a-1b97a5eb0ddc",
"metadata": {},
"source": [
"## An adversarial conversation between chatbots\n",
"\n",
"You're already familiar with prompts being organized into lists like:\n",
"\n",
"```\n",
"[\n",
"    {\"role\": \"system\", \"content\": \"system message here\"},\n",
"    {\"role\": \"user\", \"content\": \"user prompt here\"}\n",
"]\n",
"```\n",
"\n",
"In fact this structure can be used to reflect a longer conversation history:\n",
"\n",
"```\n",
"[\n",
"    {\"role\": \"system\", \"content\": \"system message here\"},\n",
"    {\"role\": \"user\", \"content\": \"first user prompt here\"},\n",
"    {\"role\": \"assistant\", \"content\": \"the assistant's response\"},\n",
"    {\"role\": \"user\", \"content\": \"the new user prompt\"},\n",
"]\n",
"```\n",
"\n",
"And we can use this approach to engage in a longer interaction with history."
]
},
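{
"cell_type": "markdown",
"id": "c9e8d7f6-2a1b-4c3d-8e5f-6a7b8c9d0e1f",
"metadata": {},
"source": [
"As a quick illustration (the helper functions below assemble these lists automatically), here is a minimal sketch of passing a hand-written history to the Chat Completions API. It assumes the `openai` client created above and the gpt-4o-mini model; the example turns are made up purely for illustration."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d0f1e2a3-3b4c-4d5e-9f6a-7b8c9d0e1f2a",
"metadata": {},
"outputs": [],
"source": [
"# Minimal sketch: pass a hand-written multi-turn history to the API.\n",
"# The helper functions below build lists like this automatically.\n",
"history = [\n",
"    {\"role\": \"system\", \"content\": \"You are a concise assistant.\"},\n",
"    {\"role\": \"user\", \"content\": \"Name a planet.\"},\n",
"    {\"role\": \"assistant\", \"content\": \"Mars.\"},\n",
"    {\"role\": \"user\", \"content\": \"Name another one.\"}\n",
"]\n",
"reply = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=history)\n",
"print(reply.choices[0].message.content)"
]
},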
{
"cell_type": "code",
"execution_count": null,
"id": "74125f8b-042e-4236-ad3d-6371ce5a1493",
"metadata": {
"editable": true,
"slideshow": {
"slide_type": ""
},
"tags": []
},
"outputs": [],
"source": [
"# Let's make a conversation between GPT-4o-mini and Gemma3:12b\n",
"# We're using cheap versions of models so the costs will be minimal\n",
"\n",
"gpt_model = \"gpt-4o-mini\"\n",
"local_model = 'gemma3:12b'\n",
"\n",
"gpt_system = \"You are a chatbot who is very argumentative; \\\n",
"you disagree with anything in the conversation and you challenge everything, in a snarky way.\"\n",
"\n",
"local_system = \"You are a very polite, courteous chatbot. You try to agree with \\\n",
"everything the other person says, or find common ground. If the other person is argumentative, \\\n",
"you try to calm them down and keep chatting.\"\n",
"\n",
"gpt_messages = [\"Hi there\"]\n",
"local_messages = [\"Hi\"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f94d9232-f82a-4eab-9d89-bd9815f260f0",
"metadata": {},
"outputs": [],
"source": [
"def call_gpt():\n",
"    # From GPT's perspective, its own earlier replies are 'assistant' turns\n",
"    # and the local model's replies are the 'user' turns it responds to.\n",
"    messages = [{\"role\": \"system\", \"content\": gpt_system}]\n",
"    for gpt, local in zip(gpt_messages, local_messages):\n",
"        messages.append({\"role\": \"assistant\", \"content\": gpt})\n",
"        messages.append({\"role\": \"user\", \"content\": local})\n",
"    completion = openai.chat.completions.create(\n",
"        model=gpt_model,\n",
"        messages=messages\n",
"    )\n",
"    return completion.choices[0].message.content"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d6445453-31be-4c63-b350-957b7d99b6f4",
"metadata": {},
"outputs": [],
"source": [
"call_gpt()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "fc51f776-f6e2-41af-acb5-cbdf03fdf530",
"metadata": {},
"outputs": [],
"source": [
"# Point an OpenAI client at Ollama's OpenAI-compatible endpoint for the local model\n",
"basellm = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')\n",
"\n",
"def call_local():\n",
"    # From the local model's perspective, GPT's lines are 'user' turns\n",
"    # and its own earlier replies are 'assistant' turns.\n",
"    messages = [{\"role\": \"system\", \"content\": local_system}]\n",
"    for gpt, local_message in zip(gpt_messages, local_messages):\n",
"        messages.append({\"role\": \"user\", \"content\": gpt})\n",
"        messages.append({\"role\": \"assistant\", \"content\": local_message})\n",
"    messages.append({\"role\": \"user\", \"content\": gpt_messages[-1]})\n",
"\n",
"    completion = basellm.chat.completions.create(\n",
"        model=local_model,\n",
"        messages=messages\n",
"    )\n",
"\n",
"    return completion.choices[0].message.content"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "16fd90cb-ebfd-4a4f-ae49-70568ae8fbb1",
"metadata": {},
"outputs": [],
"source": [
"call_local()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "429eeefb-f080-4a57-8f2d-ff3d4237afab",
"metadata": {},
"outputs": [],
"source": [
"call_gpt()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8ce847ed-521d-4be5-895b-44088de499e1",
"metadata": {},
"outputs": [],
"source": [
"gpt_messages = [\"Hi there\"]\n",
"local_messages = [\"Hi\"]\n",
"\n",
"print(f\"GPT:\\n{gpt_messages[0]}\\n\")\n",
"print(f\"local:\\n{local_messages[0]}\\n\")\n",
"\n",
"for i in range(5):\n",
"    gpt_next = call_gpt()\n",
"    print(f\"GPT:\\n{gpt_next}\\n\")\n",
"    gpt_messages.append(gpt_next)\n",
"\n",
"    local_next = call_local()\n",
"    print(f\"local:\\n{local_next}\\n\")\n",
"    local_messages.append(local_next)"
]
},
{
"cell_type": "markdown",
"id": "d3b1707a-2903-4529-b6eb-95a874a14e78",
"metadata": {},
"source": [
"## Let's try a more thoughtful conversation\n",
"The two chatbots will engage in a friendly discussion on whether the US should have entered World War I in 1917. They are both open-minded, so they can learn from each other."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "abb733bf-a5d3-4718-8741-8e8abfd3a088",
"metadata": {},
"outputs": [],
"source": [
"# Let's make a conversation between GPT-4o-mini and Gemma3:12b\n",
"# We're using cheap versions of models so the costs will be minimal\n",
"\n",
"gpt_system = \"You are a chatbot who believes it was a mistake for the US to enter World War I; \\\n",
"you are open to other arguments, but you feel the evidence suggests the world would have been \\\n",
"better off if the US had stayed isolationist. You consider counter arguments but also express \\\n",
"your own arguments.\"\n",
"\n",
"local_system = \"You are a chatbot who believes the US made the right decision entering World War I in \\\n",
"1917. Overall, the world is a better place for it. You are open minded but believe the evidence \\\n",
"supports this view. You consider counter arguments but also express your own arguments.\"\n",
"\n",
"gpt_messages = [\"It was such a mistake for the US to enter WWI\"]\n",
"local_messages = [\"Why do you say that?\"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "569e18a3-25cd-46d5-8edb-713ff149d008",
"metadata": {
"editable": true,
"slideshow": {
"slide_type": ""
},
"tags": []
},
"outputs": [],
"source": [
"print(f\"GPT:\\n{gpt_messages[0]}\\n\")\n",
"print(f\"local:\\n{local_messages[0]}\\n\")\n",
"\n",
"for i in range(5):\n",
"    gpt_next = call_gpt()\n",
"    print(f\"GPT:\\n{gpt_next}\\n\")\n",
"    gpt_messages.append(gpt_next)\n",
"\n",
"    local_next = call_local()\n",
"    print(f\"local:\\n{local_next}\\n\")\n",
"    local_messages.append(local_next)"
]
},
{
"cell_type": "markdown",
"id": "d29df7da-eaa3-4c98-b913-05185b62cffe",
"metadata": {},
"source": [
"## Conclusion\n",
"I am amazed at how insightful this conversation was. Not only did the two models explore the pros and cons, they also began applying those lessons to present-day foreign policy. This looks like a very good way to explore a topic."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b486b2d6-40da-4745-8cbf-1afd2be22caa",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.12"
}
},
"nbformat": 4,
"nbformat_minor": 5
}