From 04fe412199c4122d90091bd841a749a21bf31cff Mon Sep 17 00:00:00 2001 From: milan0lazic Date: Wed, 24 Sep 2025 21:47:20 +0200 Subject: [PATCH 1/3] Added q/a session on a boardgame topic using gpt and gemini --- .../boardgame_critique.ipynb | 177 ++++++++++++++++++ 1 file changed, 177 insertions(+) create mode 100644 week2/community-contributions/boardgame_critique.ipynb diff --git a/week2/community-contributions/boardgame_critique.ipynb b/week2/community-contributions/boardgame_critique.ipynb new file mode 100644 index 0000000..81d20bd --- /dev/null +++ b/week2/community-contributions/boardgame_critique.ipynb @@ -0,0 +1,177 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "768629e6", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import os\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "\n", + "from IPython.display import Markdown, display, update_display" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "84a945dc", + "metadata": {}, + "outputs": [], + "source": [ + "# Load environment variables in a file called .env\n", + "# Print the key prefixes to help with any debugging\n", + "\n", + "load_dotenv(override=True)\n", + "openai_api_key = os.getenv('OPENAI_API_KEY')\n", + "google_api_key = os.getenv('GOOGLE_API_KEY')\n", + "\n", + "if openai_api_key:\n", + " print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n", + "else:\n", + " print(\"OpenAI API Key not set\")\n", + "\n", + "if google_api_key:\n", + " print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n", + "else:\n", + " print(\"Google API Key not set\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ad8ae0b6", + "metadata": {}, + "outputs": [], + "source": [ + "# Connect to OpenAI, Gemini\n", + "openai = OpenAI()\n", + "gemini_via_openai_client = OpenAI(\n", + " api_key=google_api_key, \n", + " base_url=\"https://generativelanguage.googleapis.com/v1beta/openai/\"\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f66cf12f", + "metadata": {}, + "outputs": [], + "source": [ + "# Let's make a conversation between GPT-4.1-mini and Gemini-2.0-flash\n", + "# We're using cheap versions of models so the costs will be minimal\n", + "\n", + "game = \"Santorini\"\n", + "no_questions = 3\n", + "\n", + "gpt_model = \"gpt-4o-mini\"\n", + "gemini_model = \"gemini-2.0-flash\"\n", + "\n", + "gpt_system = \"You are a boardgame journalist. \\\n", + "You tend to be objective and ask right questions to get to the core of the boardgame mechanics, \\\n", + "visual appeal and time to setup the game. Your goal is to ask the right questions to get the best possible review of the board game.\" \\\n", + "\"You ask one question at a time and wait for the other person to answer. \\\n", + "You do not answer your own questions. You always try to build on the previous answer.\"\n", + "\n", + "gemini_system = \"You are a boardgame critique; \\\n", + "you tend to objectively analyze everything when it comes to a board game gameplay, visual appeal and time to setup the game. \\\n", + "Your goal is to provide constructive criticism so the board gaming community can benefit from these insights.\" \\\n", + "\"You answer one question at a time and wait for the other person to ask the next question. \\\n", + "You do not ask your own questions. You always try to build on the previous question. 
\\\n", + "If the other person is very positive, you try to point out flaws in the game. \\\n", + "If the other person is very negative, you try to point out good aspects of the game.\"\n", + "\n", + "gpt_messages = [f\"I would like to review the board game {game}.\"]\n", + "gemini_messages = [f\"Sure, ask me anything about the board game {game}.\"]\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "33266f0c", + "metadata": {}, + "outputs": [], + "source": [ + "def call_boardgame_journalist():\n", + " messages = [{\"role\": \"system\", \"content\": gpt_system}]\n", + " for gpt, gemini in zip(gpt_messages, gemini_messages):\n", + " messages.append({\"role\": \"user\", \"content\": gpt})\n", + " messages.append({\"role\": \"assistant\", \"content\": gemini})\n", + " completion = openai.chat.completions.create(\n", + " model=gpt_model,\n", + " messages=messages\n", + " )\n", + " return completion.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "53d42055", + "metadata": {}, + "outputs": [], + "source": [ + "def call_boardgame_critique():\n", + " messages = [{\"role\": \"system\", \"content\": gemini_system}]\n", + " for gpt, gemini in zip(gpt_messages, gemini_messages):\n", + " messages.append({\"role\": \"user\", \"content\": gpt})\n", + " messages.append({\"role\": \"assistant\", \"content\": gemini})\n", + " messages.append({\"role\": \"user\", \"content\": gpt_messages[-1]})\n", + " completion = gemini_via_openai_client.chat.completions.create(\n", + " model=gemini_model,\n", + " messages=messages\n", + " )\n", + " return completion.choices[0].message.content\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5aa66868", + "metadata": {}, + "outputs": [], + "source": [ + "gpt_messages = [f\"I would like to review the board game {game}.\"]\n", + "gemini_messages = [f\"Sure, ask me anything about the board game {game}.\"]\n", + "\n", + "print(f\"Journalist:\\n{gpt_messages[0]}\\n\")\n", + "print(f\"Critique:\\n{gemini_messages[0]}\\n\")\n", + "\n", + "for i in range(no_questions):\n", + " gpt_next = call_boardgame_journalist()\n", + " print(f\"Journalist:\\n{gpt_next}\\n\")\n", + " gpt_messages.append(gpt_next)\n", + "\n", + " gemini_next = call_boardgame_critique()\n", + " print(f\"Critique:\\n{gemini_next}\\n\")\n", + " gemini_messages.append(gemini_next)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "venv (3.13.5)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.13.5" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From c5f6537b0dc79feae0a116a76d8eea69be4ae916 Mon Sep 17 00:00:00 2001 From: milan0lazic Date: Thu, 25 Sep 2025 21:35:13 +0200 Subject: [PATCH 2/3] Added gradio to the previous commit. It does not support streaming, just full text completions. 
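Note on the streaming limitation mentioned above: Gradio treats a generator handler as a streaming function and re-renders the output on every yield, so a turn-level streaming variant is possible without changing the two model-calling helpers. The sketch below is illustrative only; it assumes the helper names defined later in this notebook (call_boardgame_journalist, call_boardgame_critique, no_questions), and the wrapper name stream_boardgame_conversation is hypothetical. Token-level streaming would additionally require stream=True on each chat.completions.create call.

# Minimal sketch, assuming the notebook's helpers and no_questions are already defined.
def stream_boardgame_conversation(boardgame_name):
    gpt_messages = [f"I would like to review the board game {boardgame_name}."]
    gemini_messages = [f"Sure, ask me anything about the board game {boardgame_name}."]
    transcript = f"*Journalist*: {gpt_messages[0]}\n\n*Critique*: {gemini_messages[0]}\n\n"
    yield transcript
    for _ in range(no_questions):
        gpt_next = call_boardgame_journalist(gpt_messages, gemini_messages)
        gpt_messages.append(gpt_next)
        transcript += f"*Journalist*: {gpt_next}\n\n"
        yield transcript  # Gradio refreshes the Markdown output with the partial transcript
        gemini_next = call_boardgame_critique(gpt_messages, gemini_messages)
        gemini_messages.append(gemini_next)
        transcript += f"*Critique*: {gemini_next}\n\n"
        yield transcript

Wiring is the same as in the notebook, e.g. gr.Interface(fn=stream_boardgame_conversation, ...); gr.Interface accepts generator functions as long as the queue is enabled, which is the default in recent Gradio releases.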
--- .../boardgame_critique.ipynb | 226 ++++++++++++++++-- 1 file changed, 200 insertions(+), 26 deletions(-) diff --git a/week2/community-contributions/boardgame_critique.ipynb b/week2/community-contributions/boardgame_critique.ipynb index 81d20bd..de8ac4a 100644 --- a/week2/community-contributions/boardgame_critique.ipynb +++ b/week2/community-contributions/boardgame_critique.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "id": "768629e6", "metadata": {}, "outputs": [], @@ -18,10 +18,19 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "id": "84a945dc", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "OpenAI API Key exists and begins sk-proj-\n", + "Google API Key exists and begins AIzaSyCW\n" + ] + } + ], "source": [ "# Load environment variables in a file called .env\n", "# Print the key prefixes to help with any debugging\n", @@ -43,7 +52,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "id": "ad8ae0b6", "metadata": {}, "outputs": [], @@ -58,7 +67,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 13, "id": "f66cf12f", "metadata": {}, "outputs": [], @@ -66,7 +75,7 @@ "# Let's make a conversation between GPT-4.1-mini and Gemini-2.0-flash\n", "# We're using cheap versions of models so the costs will be minimal\n", "\n", - "game = \"Santorini\"\n", + "# game = \"Santorini\"\n", "no_questions = 3\n", "\n", "gpt_model = \"gpt-4o-mini\"\n", @@ -76,18 +85,15 @@ "You tend to be objective and ask right questions to get to the core of the boardgame mechanics, \\\n", "visual appeal and time to setup the game. Your goal is to ask the right questions to get the best possible review of the board game.\" \\\n", "\"You ask one question at a time and wait for the other person to answer. \\\n", - "You do not answer your own questions. You always try to build on the previous answer.\"\n", + "You do not answer any own questions. You always try to build on the previous answer.\"\n", "\n", "gemini_system = \"You are a boardgame critique; \\\n", "you tend to objectively analyze everything when it comes to a board game gameplay, visual appeal and time to setup the game. \\\n", "Your goal is to provide constructive criticism so the board gaming community can benefit from these insights.\" \\\n", "\"You answer one question at a time and wait for the other person to ask the next question. \\\n", - "You do not ask your own questions. You always try to build on the previous question. \\\n", + "You do not ask any questions you always just answer the previous question. \\\n", "If the other person is very positive, you try to point out flaws in the game. 
\\\n", - "If the other person is very negative, you try to point out good aspects of the game.\"\n", - "\n", - "gpt_messages = [f\"I would like to review the board game {game}.\"]\n", - "gemini_messages = [f\"Sure, ask me anything about the board game {game}.\"]\n" + "If the other person is very negative, you try to point out good aspects of the game.\"\n" ] }, { @@ -97,7 +103,7 @@ "metadata": {}, "outputs": [], "source": [ - "def call_boardgame_journalist():\n", + "def call_boardgame_journalist(gpt_messages, gemini_messages):\n", " messages = [{\"role\": \"system\", \"content\": gpt_system}]\n", " for gpt, gemini in zip(gpt_messages, gemini_messages):\n", " messages.append({\"role\": \"user\", \"content\": gpt})\n", @@ -116,7 +122,7 @@ "metadata": {}, "outputs": [], "source": [ - "def call_boardgame_critique():\n", + "def call_boardgame_critique(gpt_messages, gemini_messages):\n", " messages = [{\"role\": \"system\", \"content\": gemini_system}]\n", " for gpt, gemini in zip(gpt_messages, gemini_messages):\n", " messages.append({\"role\": \"user\", \"content\": gpt})\n", @@ -131,25 +137,193 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 21, "id": "5aa66868", "metadata": {}, "outputs": [], "source": [ - "gpt_messages = [f\"I would like to review the board game {game}.\"]\n", - "gemini_messages = [f\"Sure, ask me anything about the board game {game}.\"]\n", + "def run_boardgame_conversation(boardgame_name):\n", + " gpt_messages = [f\"I would like to review the board game {boardgame_name}.\"]\n", + " gemini_messages = [f\"Sure, ask me anything about the board game {boardgame_name}.\"]\n", "\n", - "print(f\"Journalist:\\n{gpt_messages[0]}\\n\")\n", - "print(f\"Critique:\\n{gemini_messages[0]}\\n\")\n", + " print(f\"Journalist:\\n{gpt_messages[0]}\\n\")\n", + " print(f\"Critique:\\n{gemini_messages[0]}\\n\")\n", "\n", - "for i in range(no_questions):\n", - " gpt_next = call_boardgame_journalist()\n", - " print(f\"Journalist:\\n{gpt_next}\\n\")\n", - " gpt_messages.append(gpt_next)\n", + " for i in range(no_questions):\n", + " print(f\"\\n\\n***Question {i + 1}***\\n\\n\")\n", + " gpt_next = call_boardgame_journalist(gpt_messages, gemini_messages)\n", + " print(f\"Journalist:\\n{gpt_next}\\n\")\n", + " gpt_messages.append(gpt_next)\n", "\n", - " gemini_next = call_boardgame_critique()\n", - " print(f\"Critique:\\n{gemini_next}\\n\")\n", - " gemini_messages.append(gemini_next)" + " gemini_next = call_boardgame_critique(gpt_messages, gemini_messages)\n", + " print(f\"Critique:\\n{gemini_next}\\n\")\n", + " gemini_messages.append(gemini_next)\n", + "\n", + " return \"\\n\".join(f\"*Journalist*: {g}\\n*Critique*: {c}\" for g, c in zip(gpt_messages, gemini_messages))\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "51c9dadc", + "metadata": {}, + "outputs": [], + "source": [ + "import gradio as gr" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "548efb27", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* Running on local URL: http://127.0.0.1:7863\n", + "* To create a public link, set `share=True` in `launch()`.\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [] + }, + "execution_count": 23, + "metadata": {}, + "output_type": "execute_result" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Journalist:\n", + "I would like to review the board game Santorini.\n", + "\n", + "Critique:\n", + "Sure, ask me anything about the board game Santorini.\n", + "\n", + "\n", + "\n", + "***Question 1***\n", + "\n", + "\n", + "Journalist:\n", + "\n", + "\n", + "To start, could you briefly describe the core gameplay and objective of Santorini?\n", + "\n", + "\n", + "Critique:\n", + "Santorini is an abstract strategy game at its core. Two players compete to be the first to get one of their two pawns to the third level of a tower. Players take turns moving one of their pawns, and then building a block on an adjacent space. The build can be on the same level, one level higher, or down a level. Certain spaces have a dome, which signifies the tower is complete, and no one can build or move to that space.\n", + "\n", + "The game's main draw is the use of God Powers, which give each player a unique ability that breaks the standard rules of the game. This adds a significant layer of asymmetry and strategic depth.\n", + "\n", + "\n", + "\n", + "\n", + "***Question 2***\n", + "\n", + "\n", + "Journalist:\n", + "None\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Traceback (most recent call last):\n", + " File \"c:\\Users\\Milan Lazic\\projects\\llm_engineering\\venv\\Lib\\site-packages\\gradio\\queueing.py\", line 745, in process_events\n", + " response = await route_utils.call_process_api(\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " ...<5 lines>...\n", + " )\n", + " ^\n", + " File \"c:\\Users\\Milan Lazic\\projects\\llm_engineering\\venv\\Lib\\site-packages\\gradio\\route_utils.py\", line 354, in call_process_api\n", + " output = await app.get_blocks().process_api(\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " ...<11 lines>...\n", + " )\n", + " ^\n", + " File \"c:\\Users\\Milan Lazic\\projects\\llm_engineering\\venv\\Lib\\site-packages\\gradio\\blocks.py\", line 2116, in process_api\n", + " result = await self.call_function(\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " ...<8 lines>...\n", + " )\n", + " ^\n", + " File \"c:\\Users\\Milan Lazic\\projects\\llm_engineering\\venv\\Lib\\site-packages\\gradio\\blocks.py\", line 1623, in call_function\n", + " prediction = await anyio.to_thread.run_sync( # type: ignore\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " fn, *processed_input, limiter=self.limiter\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " )\n", + " ^\n", + " File \"c:\\Users\\Milan Lazic\\projects\\llm_engineering\\venv\\Lib\\site-packages\\anyio\\to_thread.py\", line 56, in run_sync\n", + " return await get_async_backend().run_sync_in_worker_thread(\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " func, args, abandon_on_cancel=abandon_on_cancel, limiter=limiter\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " )\n", + " ^\n", + " File \"c:\\Users\\Milan Lazic\\projects\\llm_engineering\\venv\\Lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 2476, in run_sync_in_worker_thread\n", + " return await future\n", + " ^^^^^^^^^^^^\n", + " File \"c:\\Users\\Milan Lazic\\projects\\llm_engineering\\venv\\Lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 967, in run\n", + " result = 
context.run(func, *args)\n", + " File \"c:\\Users\\Milan Lazic\\projects\\llm_engineering\\venv\\Lib\\site-packages\\gradio\\utils.py\", line 915, in wrapper\n", + " response = f(*args, **kwargs)\n", + " File \"C:\\Users\\Milan Lazic\\AppData\\Local\\Temp\\ipykernel_29732\\641492170.py\", line 14, in run_boardgame_conversation\n", + " gemini_next = call_boardgame_critique(gpt_messages, gemini_messages)\n", + " File \"C:\\Users\\Milan Lazic\\AppData\\Local\\Temp\\ipykernel_29732\\2813548043.py\", line 7, in call_boardgame_critique\n", + " completion = gemini_via_openai_client.chat.completions.create(\n", + " model=gemini_model,\n", + " messages=messages\n", + " )\n", + " File \"c:\\Users\\Milan Lazic\\projects\\llm_engineering\\venv\\Lib\\site-packages\\openai\\_utils\\_utils.py\", line 286, in wrapper\n", + " return func(*args, **kwargs)\n", + " File \"c:\\Users\\Milan Lazic\\projects\\llm_engineering\\venv\\Lib\\site-packages\\openai\\resources\\chat\\completions\\completions.py\", line 1147, in create\n", + " return self._post(\n", + " ~~~~~~~~~~^\n", + " \"/chat/completions\",\n", + " ^^^^^^^^^^^^^^^^^^^^\n", + " ...<46 lines>...\n", + " stream_cls=Stream[ChatCompletionChunk],\n", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " )\n", + " ^\n", + " File \"c:\\Users\\Milan Lazic\\projects\\llm_engineering\\venv\\Lib\\site-packages\\openai\\_base_client.py\", line 1259, in post\n", + " return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))\n", + " ~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", + " File \"c:\\Users\\Milan Lazic\\projects\\llm_engineering\\venv\\Lib\\site-packages\\openai\\_base_client.py\", line 1047, in request\n", + " raise self._make_status_error_from_response(err.response) from None\n", + "openai.BadRequestError: Error code: 400 - [{'error': {'code': 400, 'message': 'Unable to submit request because it must include at least one parts field, which describes the prompt input. 
Learn more: https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/gemini', 'status': 'INVALID_ARGUMENT'}}]\n" + ] + } + ], + "source": [ + "view = gr.Interface(\n", + " fn=run_boardgame_conversation,\n", + " inputs=[gr.Textbox(label=\"Input the name of the board game:\")],\n", + " outputs=[gr.Markdown(label=\"Conversation:\")],\n", + " flagging_mode=\"never\"\n", + ")\n", + "view.launch()" ] } ], From 1d2f02afff57e245b3c5d78a3150725073bdb2e5 Mon Sep 17 00:00:00 2001 From: milan0lazic Date: Thu, 25 Sep 2025 21:37:39 +0200 Subject: [PATCH 3/3] Removed outputs and added comments --- .../boardgame_critique.ipynb | 168 ++---------------- 1 file changed, 18 insertions(+), 150 deletions(-) diff --git a/week2/community-contributions/boardgame_critique.ipynb b/week2/community-contributions/boardgame_critique.ipynb index de8ac4a..b72e199 100644 --- a/week2/community-contributions/boardgame_critique.ipynb +++ b/week2/community-contributions/boardgame_critique.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "id": "768629e6", "metadata": {}, "outputs": [], @@ -18,19 +18,10 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "id": "84a945dc", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "OpenAI API Key exists and begins sk-proj-\n", - "Google API Key exists and begins AIzaSyCW\n" - ] - } - ], + "outputs": [], "source": [ "# Load environment variables in a file called .env\n", "# Print the key prefixes to help with any debugging\n", @@ -52,7 +43,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "id": "ad8ae0b6", "metadata": {}, "outputs": [], @@ -67,7 +58,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "id": "f66cf12f", "metadata": {}, "outputs": [], @@ -137,7 +128,7 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": null, "id": "5aa66868", "metadata": {}, "outputs": [], @@ -165,7 +156,7 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": null, "id": "51c9dadc", "metadata": {}, "outputs": [], @@ -175,148 +166,25 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": null, "id": "548efb27", "metadata": {}, "outputs": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "* Running on local URL: http://127.0.0.1:7863\n", - "* To create a public link, set `share=True` in `launch()`.\n" - ] - }, - { - "data": { - "text/html": [ - "
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [] - }, - "execution_count": 23, - "metadata": {}, - "output_type": "execute_result" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Journalist:\n", - "I would like to review the board game Santorini.\n", - "\n", - "Critique:\n", - "Sure, ask me anything about the board game Santorini.\n", - "\n", - "\n", - "\n", - "***Question 1***\n", - "\n", - "\n", - "Journalist:\n", - "\n", - "\n", - "To start, could you briefly describe the core gameplay and objective of Santorini?\n", - "\n", - "\n", - "Critique:\n", - "Santorini is an abstract strategy game at its core. Two players compete to be the first to get one of their two pawns to the third level of a tower. Players take turns moving one of their pawns, and then building a block on an adjacent space. The build can be on the same level, one level higher, or down a level. Certain spaces have a dome, which signifies the tower is complete, and no one can build or move to that space.\n", - "\n", - "The game's main draw is the use of God Powers, which give each player a unique ability that breaks the standard rules of the game. This adds a significant layer of asymmetry and strategic depth.\n", - "\n", - "\n", - "\n", - "\n", - "***Question 2***\n", - "\n", - "\n", - "Journalist:\n", - "None\n", - "\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Traceback (most recent call last):\n", - " File \"c:\\Users\\Milan Lazic\\projects\\llm_engineering\\venv\\Lib\\site-packages\\gradio\\queueing.py\", line 745, in process_events\n", - " response = await route_utils.call_process_api(\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " ...<5 lines>...\n", - " )\n", - " ^\n", - " File \"c:\\Users\\Milan Lazic\\projects\\llm_engineering\\venv\\Lib\\site-packages\\gradio\\route_utils.py\", line 354, in call_process_api\n", - " output = await app.get_blocks().process_api(\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " ...<11 lines>...\n", - " )\n", - " ^\n", - " File \"c:\\Users\\Milan Lazic\\projects\\llm_engineering\\venv\\Lib\\site-packages\\gradio\\blocks.py\", line 2116, in process_api\n", - " result = await self.call_function(\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " ...<8 lines>...\n", - " )\n", - " ^\n", - " File \"c:\\Users\\Milan Lazic\\projects\\llm_engineering\\venv\\Lib\\site-packages\\gradio\\blocks.py\", line 1623, in call_function\n", - " prediction = await anyio.to_thread.run_sync( # type: ignore\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " fn, *processed_input, limiter=self.limiter\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " )\n", - " ^\n", - " File \"c:\\Users\\Milan Lazic\\projects\\llm_engineering\\venv\\Lib\\site-packages\\anyio\\to_thread.py\", line 56, in run_sync\n", - " return await get_async_backend().run_sync_in_worker_thread(\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " func, args, abandon_on_cancel=abandon_on_cancel, limiter=limiter\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " )\n", - " ^\n", - " File \"c:\\Users\\Milan Lazic\\projects\\llm_engineering\\venv\\Lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 2476, in run_sync_in_worker_thread\n", - " return await future\n", - " ^^^^^^^^^^^^\n", - " File \"c:\\Users\\Milan Lazic\\projects\\llm_engineering\\venv\\Lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 967, in run\n", - " result = 
context.run(func, *args)\n", - " File \"c:\\Users\\Milan Lazic\\projects\\llm_engineering\\venv\\Lib\\site-packages\\gradio\\utils.py\", line 915, in wrapper\n", - " response = f(*args, **kwargs)\n", - " File \"C:\\Users\\Milan Lazic\\AppData\\Local\\Temp\\ipykernel_29732\\641492170.py\", line 14, in run_boardgame_conversation\n", - " gemini_next = call_boardgame_critique(gpt_messages, gemini_messages)\n", - " File \"C:\\Users\\Milan Lazic\\AppData\\Local\\Temp\\ipykernel_29732\\2813548043.py\", line 7, in call_boardgame_critique\n", - " completion = gemini_via_openai_client.chat.completions.create(\n", - " model=gemini_model,\n", - " messages=messages\n", - " )\n", - " File \"c:\\Users\\Milan Lazic\\projects\\llm_engineering\\venv\\Lib\\site-packages\\openai\\_utils\\_utils.py\", line 286, in wrapper\n", - " return func(*args, **kwargs)\n", - " File \"c:\\Users\\Milan Lazic\\projects\\llm_engineering\\venv\\Lib\\site-packages\\openai\\resources\\chat\\completions\\completions.py\", line 1147, in create\n", - " return self._post(\n", - " ~~~~~~~~~~^\n", - " \"/chat/completions\",\n", - " ^^^^^^^^^^^^^^^^^^^^\n", - " ...<46 lines>...\n", - " stream_cls=Stream[ChatCompletionChunk],\n", - " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " )\n", - " ^\n", - " File \"c:\\Users\\Milan Lazic\\projects\\llm_engineering\\venv\\Lib\\site-packages\\openai\\_base_client.py\", line 1259, in post\n", - " return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))\n", - " ~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n", - " File \"c:\\Users\\Milan Lazic\\projects\\llm_engineering\\venv\\Lib\\site-packages\\openai\\_base_client.py\", line 1047, in request\n", - " raise self._make_status_error_from_response(err.response) from None\n", - "openai.BadRequestError: Error code: 400 - [{'error': {'code': 400, 'message': 'Unable to submit request because it must include at least one parts field, which describes the prompt input. Learn more: https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/gemini', 'status': 'INVALID_ARGUMENT'}}]\n" + "ename": "NameError", + "evalue": "name 'gr' is not defined", + "output_type": "error", + "traceback": [ + "\u001b[31m---------------------------------------------------------------------------\u001b[39m", + "\u001b[31mNameError\u001b[39m Traceback (most recent call last)", + "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[1]\u001b[39m\u001b[32m, line 1\u001b[39m\n\u001b[32m----> \u001b[39m\u001b[32m1\u001b[39m view = \u001b[43mgr\u001b[49m.Interface(\n\u001b[32m 2\u001b[39m fn=run_boardgame_conversation,\n\u001b[32m 3\u001b[39m inputs=[gr.Textbox(label=\u001b[33m\"\u001b[39m\u001b[33mInput the name of the board game:\u001b[39m\u001b[33m\"\u001b[39m)],\n\u001b[32m 4\u001b[39m outputs=[gr.Markdown(label=\u001b[33m\"\u001b[39m\u001b[33mConversation:\u001b[39m\u001b[33m\"\u001b[39m)],\n\u001b[32m 5\u001b[39m flagging_mode=\u001b[33m\"\u001b[39m\u001b[33mnever\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m 6\u001b[39m )\n\u001b[32m 7\u001b[39m view.launch()\n", + "\u001b[31mNameError\u001b[39m: name 'gr' is not defined" ] } ], "source": [ + "# Create a Gradio interface for running boardgame conversations.\n", + "# The interface takes the board game name as input and displays the conversation as Markdown.\n", "view = gr.Interface(\n", " fn=run_boardgame_conversation,\n", " inputs=[gr.Textbox(label=\"Input the name of the board game:\")],\n",
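Two observations on the output that this last commit removed. First, the journalist model returned None for its second question; that None was then forwarded to the Gemini endpoint as a message with no content, which is consistent with the 400 "must include at least one parts field" error in the traceback. One plausible contributing factor is that call_boardgame_journalist labels the journalist's own earlier turns as "user" and the critique's answers as "assistant", so GPT is asked to continue a conversation that already ends on an assistant turn. A possible revision is sketched below; it is illustrative only, and the fallback question is an arbitrary placeholder rather than part of the original notebook.

# Hypothetical revision: from the journalist's point of view, its own questions are
# "assistant" turns and the critique's answers are "user" turns, so the message
# list ends on a user message for GPT to respond to.
def call_boardgame_journalist(gpt_messages, gemini_messages):
    messages = [{"role": "system", "content": gpt_system}]
    for gpt, gemini in zip(gpt_messages, gemini_messages):
        messages.append({"role": "assistant", "content": gpt})
        messages.append({"role": "user", "content": gemini})
    completion = openai.chat.completions.create(model=gpt_model, messages=messages)
    content = completion.choices[0].message.content
    # Guard so a None completion never reaches the Gemini call, which rejects
    # messages without content.
    return content or "Could you tell me more about the game's setup and components?"

Second, the NameError ("name 'gr' is not defined") left in the final cell's output appears to come from running that cell in a fresh kernel before the "import gradio as gr" cell; re-running the notebook top to bottom should clear it.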