Add week 2, day 1, N-way conversation solution
Implements SNL Coffee Talk character conversation using multiple models. Supports OpenRouter, OpenAI, and Ollama with automatic fallback. Characters and topics are dynamically generated by the base model.
@@ -0,0 +1,283 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "88f67391",
   "metadata": {},
   "source": [
    "### N-Way Conversation - Coffee Talk\n",
    "\n",
    "This example simulates an N-way conversation between the characters of the Saturday Night Live skit Coffee Talk.\n",
    "\n",
    "The character information is retrieved from a base model, and each character is handled by its own model, selected at random from a list of available models. Only the number of characters, the number of rounds, and the available models are configured.\n",
    "\n",
    "The example can use OpenRouter, OpenAI, or Ollama, preferring them in that order."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a1eeb029",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Setup ...\n",
    "\n",
    "# The number of characters (models) conversing\n",
    "NBR_CHARACTERS=4\n",
    "\n",
    "# The number of rounds of conversation\n",
    "NBR_ROUNDS=4\n",
    "\n",
    "# Available OpenRouter models. The base model is used to select the characters and the topic; the other models are used for the conversation.\n",
    "OPENROUTER_MODELS=\"openai/gpt-4.1-mini, anthropic/claude-3.5-haiku, google/gemini-2.5-flash\"\n",
    "OPENROUTER_BASE=\"openai/gpt-5\"\n",
    "\n",
    "# Available OpenAI models\n",
    "OPENAI_MODELS=\"gpt-4.1, gpt-4.1-mini, gpt-5-nano\"\n",
    "OPENAI_BASE=\"gpt-5\"\n",
    "\n",
    "# Available Ollama models. Note that these must be pre-fetched, or errors will occur (and won't be handled).\n",
    "OLLAMA_MODELS=\"gpt-oss, gemma3, llama3.2\"\n",
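    "# (They can be pulled ahead of time, e.g. 'ollama pull gpt-oss' -- this assumes the Ollama CLI is installed locally.)\n",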
"OLLAMA_BASE=\"gpt-oss\"\n"
|
||||
]
|
||||
},
|
||||
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "68022fbc",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Imports\n",
    "import os\n",
    "import json\n",
    "from dotenv import load_dotenv\n",
    "from IPython.display import Markdown, display\n",
    "from openai import OpenAI"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "73460c5e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Set up the LLM client and models. OpenRouter has priority if available, then OpenAI, then Ollama.\n",
    "\n",
    "load_dotenv(override=True)\n",
    "openai_api_key = os.getenv('OPENAI_API_KEY')\n",
    "openrouter_api_key = os.getenv('OPENROUTER_API_KEY')\n",
    "\n",
    "if openrouter_api_key:\n",
    "    print(f\"OpenRouter API Key exists and begins {openrouter_api_key[:3]}, using OpenRouter.\")\n",
    "    available_models=OPENROUTER_MODELS\n",
    "    base_model=OPENROUTER_BASE\n",
    "    client = OpenAI(base_url=\"https://openrouter.ai/api/v1\", api_key=openrouter_api_key)\n",
    "elif openai_api_key:\n",
    "    print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}, using OpenAI.\")\n",
    "    available_models=OPENAI_MODELS\n",
    "    base_model=OPENAI_BASE\n",
    "    client = OpenAI()\n",
    "else:\n",
    "    print(\"Neither OpenRouter nor OpenAI API key is set, using Ollama.\")\n",
    "    available_models=OLLAMA_MODELS\n",
    "    base_model=OLLAMA_BASE\n",
    "    client = OpenAI(api_key=\"ollama\", base_url=\"http://localhost:11434/v1\")"
   ]
  },
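  {
   "cell_type": "markdown",
   "id": "c0ffee01",
   "metadata": {},
   "source": [
    "Optional smoke test before the show: one tiny completion against whichever backend was selected above. A minimal sketch, assuming the configured `base_model` is reachable through the client just created; it is not part of the original flow, so it can be skipped.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c0ffee02",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional sanity check (assumption: base_model is reachable through the client configured above)\n",
    "reply = client.chat.completions.create(\n",
    "    model=base_model,\n",
    "    messages=[{\"role\": \"user\", \"content\": \"Reply with the single word: ready\"}]\n",
    ")\n",
    "print(reply.choices[0].message.content)\n"
   ]
  },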
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b1a7004d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Get the characters from the base model\n",
    "system_prompt = \"\"\"\n",
    "You will be asked to return information about characters in the SNL skit Coffee Talk.\n",
    "You should return the information as a JSON response with the following format:\n",
    "{\n",
    "  \"characters\" : [\n",
    "    { \"name\" : \"Linda\", \"persona\" : \"....\", \"model\" : \"model-name\" },\n",
    "    { \"name\" : \"Paul\", \"persona\" : \"....\", \"model\" : \"model-name\" }\n",
    "  ]\n",
    "}\n",
    "\"\"\"\n",
    "\n",
    "user_prompt = f\"\"\"\n",
    "Create a list of the many characters from the SNL skit Coffee Talk, and return {NBR_CHARACTERS} total characters.\n",
    "Always return Linda Richman as the first character.\n",
    "Return one caller.\n",
    "Select the remaining characters at random from the list of all characters.\n",
    "For the model value, return a random model name from this list: {available_models}.\n",
    "\"\"\"\n",
    "\n",
    "response = client.chat.completions.create(\n",
    "    model=base_model,\n",
    "    messages=[\n",
    "        {\"role\": \"system\", \"content\": system_prompt},\n",
    "        {\"role\": \"user\", \"content\": user_prompt}\n",
    "    ],\n",
    "    response_format={\"type\": \"json_object\"}\n",
    ")\n",
    "result = response.choices[0].message.content\n",
    "characters = json.loads(result)\n",
    "\n",
    "print(json.dumps(characters, indent=2))\n"
   ]
  },
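  {
   "cell_type": "markdown",
   "id": "c0ffee03",
   "metadata": {},
   "source": [
    "An optional structural check on the generated character list before building prompts around it. A minimal sketch, assuming the JSON-mode response followed the shape requested above; if an assertion fails, re-run the previous cell.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c0ffee04",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional: validate the shape of the base model's response (assumption: it followed the prompt's format)\n",
    "allowed = [m.strip() for m in available_models.split(\",\")]\n",
    "assert \"characters\" in characters, \"Expected a top-level 'characters' key\"\n",
    "assert len(characters[\"characters\"]) == NBR_CHARACTERS, \"Unexpected character count\"\n",
    "for c in characters[\"characters\"]:\n",
    "    assert {\"name\", \"persona\", \"model\"} <= c.keys(), f\"Missing fields in {c}\"\n",
    "    assert c[\"model\"] in allowed, f\"Unexpected model {c['model']}\"\n",
    "print(\"Character list looks well-formed.\")\n"
   ]
  },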
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "21a73805",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Generate a system prompt for each character, covering their name, persona, the other guests, and how they should respond.\n",
    "\n",
    "guests = \"The guests on today's show are \"\n",
    "guest_names = [character['name'] for character in characters[\"characters\"]]\n",
    "guests += \", \".join(guest_names)\n",
    "\n",
    "for character in characters[\"characters\"]:\n",
    "    prompt = f\"You are {character['name']}, a character on the SNL skit Coffee Talk.\"\n",
    "    prompt += f\" Your personality is: {character['persona']}\"\n",
    "    prompt += \" \" + guests + \".\"\n",
    "    prompt += \" Keep responses brief and in character.\"\n",
    "    prompt += \" In the conversation history, each response is prefixed with the character's name to identify the respondent.\"\n",
    "    prompt += \" Your response should not include your character name as a prefix.\"\n",
    "\n",
    "    character[\"system_prompt\"] = prompt\n",
    "\n",
    "print(json.dumps(characters, indent=2))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "656131a1",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Get the topic\n",
    "user_prompt = \"\"\"\n",
    "In the SNL skit Coffee Talk, the host Linda Richman proposes topics in the form \"X Y is neither X, nor Y - discuss\".\n",
    "Create a list of the many topics proposed on the show, select one at random, and return it.\n",
    "Return only the selected topic, without any formatting.\n",
    "\"\"\"\n",
    "\n",
    "response = client.chat.completions.create(\n",
    "    model=base_model,\n",
    "    messages=[\n",
    "        {\"role\": \"user\", \"content\": user_prompt}\n",
    "    ],\n",
    ")\n",
    "topic = response.choices[0].message.content\n",
    "\n",
    "print(topic)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6e137753",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_character_response(character, history):\n",
    "    # Ask this character's model for its next line, given the transcript so far\n",
    "    user_prompt = f\"\"\"\n",
    "    The conversation so far is as follows:\n",
    "    {history}\n",
    "    What is your response?\n",
    "    \"\"\"\n",
    "\n",
    "    response = client.chat.completions.create(\n",
    "        model=character[\"model\"],\n",
    "        messages=[\n",
    "            {\"role\": \"system\", \"content\": character[\"system_prompt\"]},\n",
    "            {\"role\": \"user\", \"content\": user_prompt}\n",
    "        ]\n",
    "    )\n",
    "    return response.choices[0].message.content\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "23fb446f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Start the show!\n",
    "\n",
    "history = \"\"\n",
    "history += \"Welcome to Coffee Talk, I am your host Linda Richman. Today's guests are:\\n\"\n",
    "\n",
    "for character in characters[\"characters\"][1:]:\n",
    "    history += f\" - {character['name']}\\n\"\n",
    "\n",
    "history += f\"\\nI'll give you a topic: {topic}\\n\"\n",
    "\n",
    "display(Markdown(\"---\"))\n",
    "display(Markdown(history))\n",
    "display(Markdown(\"---\"))\n",
    "\n",
    "# The other guests respond (first round)\n",
    "for character in characters[\"characters\"][1:]:\n",
    "    response = get_character_response(character, history)\n",
    "    display(Markdown(f\"**{character['name']} ({character['model']}):** {response}\"))\n",
    "    history += f\"\\n{character['name']}: {response}\"\n",
    "\n",
    "# Continue the conversation for the remaining rounds (all characters, including Linda)\n",
    "for round_nbr in range(1, NBR_ROUNDS):\n",
    "    for character in characters[\"characters\"]:\n",
    "        response = get_character_response(character, history)\n",
    "        display(Markdown(f\"**{character['name']} ({character['model']}):** {response}\"))\n",
    "        history += f\"\\n{character['name']}: {response}\"\n",
    "\n",
    "# Wrap it up\n",
    "user_prompt = f\"\"\"\n",
    "It's time to wrap up the show. Here's the whole conversation:\n",
    "{history}\n",
    "Wrap up the show, as only you can.\n",
    "\"\"\"\n",
    "\n",
    "linda = characters[\"characters\"][0]\n",
    "response = client.chat.completions.create(\n",
    "    model=linda[\"model\"],\n",
    "    messages=[\n",
    "        {\"role\": \"system\", \"content\": linda[\"system_prompt\"]},\n",
    "        {\"role\": \"user\", \"content\": user_prompt}\n",
    "    ]\n",
    ")\n",
    "\n",
    "display(Markdown(\"---\"))\n",
    "display(Markdown(response.choices[0].message.content))\n"
   ]
  },
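  {
   "cell_type": "markdown",
   "id": "c0ffee05",
   "metadata": {},
   "source": [
    "Optionally, keep a copy of the episode. A small sketch that is not part of the original flow: it writes the accumulated `history` transcript to a local file, and the filename is an arbitrary choice.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c0ffee06",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional: save the transcript (the filename is an arbitrary choice)\n",
    "with open(\"coffee_talk_transcript.txt\", \"w\", encoding=\"utf-8\") as f:\n",
    "    f.write(history)\n",
    "print(f\"Saved {len(history)} characters of transcript.\")\n"
   ]
  }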
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "llm-engineering",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}