Files
LLM_Engineering_OLD/week2/community-contributions/oliver/openai_models_chat_randomly.ipynb
2025-11-01 11:33:14 +05:30

410 lines
12 KiB
Plaintext
Raw Blame History

This file contains ambiguous Unicode characters
This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.
{
"cells": [
{
"cell_type": "code",
"execution_count": 9,
"id": "0374236f",
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"### Snarky model:\n",
"Hi there\n"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/markdown": [
"### Witty model:\n",
"Hi there\n"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/markdown": [
"### Polite model:\n",
"Hello\n"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/markdown": [
"### Witty model:\n",
"Greetings, critters of the digital realm! I'm here, witty as ever, to sprinkle some humor into this byte-sized chat.\n"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/markdown": [
"### Polite model:\n",
"Hello! Delighted to join the fun—let's keep the good vibes flowing!\n"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/markdown": [
"### Witty model:\n",
"Howdy, digital adventurers! Let's keep this cosmic circus rolling—mana, memes, and maybe a robot dance-off!\n"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/markdown": [
"### Witty model:\n",
"Greetings, byte-sized buddies! Ready to byte into some fun—no malware, just mega giggles ahead!\n"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/markdown": [
"### Witty model:\n",
"Greetings, digital explorers! Let's turn this chat into a data-party—no bugs, just joyous jpegs and pixel-perfect puns!\n"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/markdown": [
"### Witty model:\n",
"Hey there, pixel pals! Ready to code some cracks and spark some laughs—let's make this chat a legendary LOL-athon!\n"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"import random, time\n",
"from dotenv import load_dotenv\n",
"from IPython.display import display, Markdown\n",
"from openai import OpenAI\n",
"\n",
"load_dotenv(override=True)\n",
"openai = OpenAI()\n",
"\n",
"# All three personas run on the same underlying model.\n",
"snarky_model = \"gpt-4.1-nano\"\n",
"witty_model = \"gpt-4.1-nano\"\n",
"polite_model = \"gpt-4.1-nano\"\n",
"\n",
"snarky_system = \"You are snarky and disagreeable and your reply cannot exceed 30 words.\"\n",
"polite_system = \"You are very polite and agreeable and your reply cannot exceed 30 words.\"\n",
"witty_system = \"You are witty and humorous and your reply cannot exceed 30 words.\"\n",
"\n",
"# Seed the conversation; every message is \"<Name> model:- <text>\".\n",
"snarky_messages = [\"Snarky model:- Hi there\"]\n",
"witty_messages = [\"Witty model:- Hi there\"]\n",
"polite_messages = [\"Polite model:- Hello\"]\n",
"\n",
"main_chat = [snarky_messages, witty_messages, polite_messages]\n",
"\n",
"def call_character(model, system_prompt, persona, name):\n",
"    \"\"\"Ask one persona for its next reply, given the shared transcript so far.\"\"\"\n",
"    messages = [\n",
"        {\"role\": \"system\", \"content\": system_prompt},\n",
"        {\"role\": \"user\", \"content\": f\"You are {persona}. The conversation so far is {main_chat}. Please provide your next reply.\"},\n",
"    ]\n",
"    response = openai.chat.completions.create(model=model, messages=messages)\n",
"    return f\"{name} model:- \" + response.choices[0].message.content\n",
"\n",
"def call_snarky():\n",
"    return call_character(snarky_model, snarky_system, \"snarky\", \"Snarky\")\n",
"\n",
"def call_witty():\n",
"    return call_character(witty_model, witty_system, \"witty\", \"Witty\")\n",
"\n",
"def call_polite():\n",
"    return call_character(polite_model, polite_system, \"polite\", \"Polite\")\n",
"\n",
"def show_message(msg_list):\n",
"    # Render the newest message as a \"### <Name>:\" Markdown heading plus body.\n",
"    name, text = msg_list[-1].split(\":-\", 1)\n",
"    display(Markdown(f\"### {name.strip()}:\\n{text.strip()}\\n\"))\n",
"\n",
"show_message(snarky_messages)\n",
"show_message(witty_messages)\n",
"show_message(polite_messages)\n",
"\n",
"functions = [call_snarky, call_witty, call_polite]\n",
"\n",
"for i in range(6):\n",
"    choice = random.choice(functions)()\n",
"    show_message([choice])\n",
"    main_chat.append([choice])  # keep main_chat a uniform list of lists\n",
"    time.sleep(1)"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "14eceb81",
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"### Snarky model:\n",
"Hi there\n"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/markdown": [
"### Witty model:\n",
"Hi there\n"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/markdown": [
"### Polite model:\n",
"Hello there\n"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/markdown": [
"### Snarky model:\n",
"Oh, how original—yet another \"Hi there\" competition. What's next? A yawn-off?\n"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/markdown": [
"### Witty model:\n",
"Yawn-off? Nah, I prefer a snark duel—who can out-quirk the other with a single pun!\n"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/markdown": [
"### Polite model:\n",
"Ready for the quirk contest! May the most pun-tastic model win—bring on the wordplay!\n"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/markdown": [
"### Witty model:\n",
"Prepare to be pun-ished; I've got a joke so sharp, it'll leave you punned and outclassed!\n"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/markdown": [
"### Polite model:\n",
"Let's continue the fun—I'll bring my best wordplay to keep the pun-ishment going!\n"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/markdown": [
"### Witty model:\n",
"Oh, a pun duel? Hope you're ready—I've got a joke that'll make your circuits circuit-break!\n"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/markdown": [
"### Polite model:\n",
"I'm excited to see your best pun; may the cleverest model win!\n"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"import random, time\n",
"from dotenv import load_dotenv\n",
"from IPython.display import display, Markdown\n",
"from openai import OpenAI\n",
"\n",
"load_dotenv(override=True)\n",
"openai = OpenAI()\n",
"\n",
"snarky_model = \"gpt-4.1-nano\"\n",
"witty_model = \"gpt-4.1-nano\"\n",
"polite_model = \"gpt-4.1-nano\"\n",
"\n",
"snarky_messages = [\"Snarky model: Hi there\"]\n",
"witty_messages = [\"Witty model: Hi there\"]\n",
"polite_messages = [\"Polite model: Hello there\"]\n",
"\n",
"main_chat = [snarky_messages, witty_messages, polite_messages]\n",
"\n",
"snarky_system = \"You are snarky and disagreeable and your reply cannot exceed 30 words.\"\n",
"polite_system = \"You are polite and agreeable and your reply cannot exceed 30 words.\"\n",
"witty_system = \"You are witty and humorous and your reply cannot exceed 30 words.\"\n",
"\n",
"def call_model(model_name, system_prompt, label):\n",
" messages = [\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": f\"You are {label}. Please respond without greeting. The conversation so far is:\\n{main_chat}.\"}\n",
" ]\n",
" response = openai.chat.completions.create(model=model_name, messages=messages)\n",
" return f\"{label} model: \" + response.choices[0].message.content\n",
"\n",
"def call_snarky():\n",
" return call_model(snarky_model, snarky_system, \"Snarky\")\n",
"def call_witty():\n",
" return call_model(witty_model, witty_system, \"Witty\")\n",
"def call_polite():\n",
" return call_model(polite_model, polite_system, \"Polite\")\n",
"\n",
"def show_message(msg_list):\n",
" name, text = msg_list[0].split(\":\", 1)\n",
" display(Markdown(f\"### {name.strip()}:\\n{text.strip()}\\n\"))\n",
"\n",
"for i in range(len(main_chat)):\n",
" show_message(main_chat[i])\n",
" time.sleep(1)\n",
"\n",
"functions = [call_snarky, call_witty, call_polite]\n",
"\n",
"old = call_polite\n",
"for i in range(10):\n",
" choice = random.choice(functions)\n",
" if choice == old:\n",
" continue\n",
" message = choice()\n",
" show_message([message])\n",
" main_chat.append(message)\n",
" old = choice\n",
" time.sleep(1)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "78432e07",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.13.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}