Added a Gradio UI on top of the previous commit.

It does not support streaming, only full text completions.
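
If streaming is added later, a minimal sketch could look like the following (untested; it assumes the notebook's call_boardgame_journalist / call_boardgame_critique helpers and no_questions are in scope, and run_boardgame_conversation_stream is a made-up name). Gradio treats a generator handler as streaming and re-renders the output on every yield.

```python
import gradio as gr

def run_boardgame_conversation_stream(boardgame_name):
    # Hypothetical streaming variant: yield the transcript after every
    # journalist/critique exchange so the UI updates incrementally.
    gpt_messages = [f"I would like to review the board game {boardgame_name}."]
    gemini_messages = [f"Sure, ask me anything about the board game {boardgame_name}."]
    transcript = ""
    for _ in range(no_questions):
        gpt_next = call_boardgame_journalist(gpt_messages, gemini_messages)
        gpt_messages.append(gpt_next)
        gemini_next = call_boardgame_critique(gpt_messages, gemini_messages)
        gemini_messages.append(gemini_next)
        transcript += f"*Journalist*: {gpt_next}\n\n*Critique*: {gemini_next}\n\n"
        yield transcript  # Gradio re-renders the Markdown output on each yield

gr.Interface(
    fn=run_boardgame_conversation_stream,
    inputs=[gr.Textbox(label="Input the name of the board game:")],
    outputs=[gr.Markdown(label="Conversation:")],
    flagging_mode="never",
).launch()
```

Switching fn to the generator should be the only change needed in the existing gr.Interface call.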
This commit is contained in:
milan0lazic
2025-09-25 21:35:13 +02:00
parent c7631c3927
commit c5f6537b0d


@@ -2,7 +2,7 @@
"cells": [ "cells": [
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 1,
"id": "768629e6", "id": "768629e6",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@@ -18,10 +18,19 @@
 },
 {
 "cell_type": "code",
-"execution_count": null,
+"execution_count": 2,
 "id": "84a945dc",
 "metadata": {},
-"outputs": [],
+"outputs": [
+{
+"name": "stdout",
+"output_type": "stream",
+"text": [
+"OpenAI API Key exists and begins sk-proj-\n",
+"Google API Key exists and begins AIzaSyCW\n"
+]
+}
+],
 "source": [
 "# Load environment variables in a file called .env\n",
 "# Print the key prefixes to help with any debugging\n",
@@ -43,7 +52,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": null,
+"execution_count": 3,
 "id": "ad8ae0b6",
 "metadata": {},
 "outputs": [],
@@ -58,7 +67,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": null,
+"execution_count": 13,
 "id": "f66cf12f",
 "metadata": {},
 "outputs": [],
@@ -66,7 +75,7 @@
"# Let's make a conversation between GPT-4.1-mini and Gemini-2.0-flash\n", "# Let's make a conversation between GPT-4.1-mini and Gemini-2.0-flash\n",
"# We're using cheap versions of models so the costs will be minimal\n", "# We're using cheap versions of models so the costs will be minimal\n",
"\n", "\n",
"game = \"Santorini\"\n", "# game = \"Santorini\"\n",
"no_questions = 3\n", "no_questions = 3\n",
"\n", "\n",
"gpt_model = \"gpt-4o-mini\"\n", "gpt_model = \"gpt-4o-mini\"\n",
@@ -76,18 +85,15 @@
"You tend to be objective and ask right questions to get to the core of the boardgame mechanics, \\\n", "You tend to be objective and ask right questions to get to the core of the boardgame mechanics, \\\n",
"visual appeal and time to setup the game. Your goal is to ask the right questions to get the best possible review of the board game.\" \\\n", "visual appeal and time to setup the game. Your goal is to ask the right questions to get the best possible review of the board game.\" \\\n",
"\"You ask one question at a time and wait for the other person to answer. \\\n", "\"You ask one question at a time and wait for the other person to answer. \\\n",
"You do not answer your own questions. You always try to build on the previous answer.\"\n", "You do not answer any own questions. You always try to build on the previous answer.\"\n",
"\n", "\n",
"gemini_system = \"You are a boardgame critique; \\\n", "gemini_system = \"You are a boardgame critique; \\\n",
"you tend to objectively analyze everything when it comes to a board game gameplay, visual appeal and time to setup the game. \\\n", "you tend to objectively analyze everything when it comes to a board game gameplay, visual appeal and time to setup the game. \\\n",
"Your goal is to provide constructive criticism so the board gaming community can benefit from these insights.\" \\\n", "Your goal is to provide constructive criticism so the board gaming community can benefit from these insights.\" \\\n",
"\"You answer one question at a time and wait for the other person to ask the next question. \\\n", "\"You answer one question at a time and wait for the other person to ask the next question. \\\n",
"You do not ask your own questions. You always try to build on the previous question. \\\n", "You do not ask any questions you always just answer the previous question. \\\n",
"If the other person is very positive, you try to point out flaws in the game. \\\n", "If the other person is very positive, you try to point out flaws in the game. \\\n",
"If the other person is very negative, you try to point out good aspects of the game.\"\n", "If the other person is very negative, you try to point out good aspects of the game.\"\n"
"\n",
"gpt_messages = [f\"I would like to review the board game {game}.\"]\n",
"gemini_messages = [f\"Sure, ask me anything about the board game {game}.\"]\n"
] ]
}, },
{ {
@@ -97,7 +103,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"def call_boardgame_journalist():\n", "def call_boardgame_journalist(gpt_messages, gemini_messages):\n",
" messages = [{\"role\": \"system\", \"content\": gpt_system}]\n", " messages = [{\"role\": \"system\", \"content\": gpt_system}]\n",
" for gpt, gemini in zip(gpt_messages, gemini_messages):\n", " for gpt, gemini in zip(gpt_messages, gemini_messages):\n",
" messages.append({\"role\": \"user\", \"content\": gpt})\n", " messages.append({\"role\": \"user\", \"content\": gpt})\n",
@@ -116,7 +122,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"def call_boardgame_critique():\n", "def call_boardgame_critique(gpt_messages, gemini_messages):\n",
" messages = [{\"role\": \"system\", \"content\": gemini_system}]\n", " messages = [{\"role\": \"system\", \"content\": gemini_system}]\n",
" for gpt, gemini in zip(gpt_messages, gemini_messages):\n", " for gpt, gemini in zip(gpt_messages, gemini_messages):\n",
" messages.append({\"role\": \"user\", \"content\": gpt})\n", " messages.append({\"role\": \"user\", \"content\": gpt})\n",
@@ -131,25 +137,193 @@
 },
 {
 "cell_type": "code",
-"execution_count": null,
+"execution_count": 21,
 "id": "5aa66868",
 "metadata": {},
 "outputs": [],
 "source": [
-"gpt_messages = [f\"I would like to review the board game {game}.\"]\n",
-"gemini_messages = [f\"Sure, ask me anything about the board game {game}.\"]\n",
+"def run_boardgame_conversation(boardgame_name):\n",
+" gpt_messages = [f\"I would like to review the board game {boardgame_name}.\"]\n",
+" gemini_messages = [f\"Sure, ask me anything about the board game {boardgame_name}.\"]\n",
 "\n",
 " print(f\"Journalist:\\n{gpt_messages[0]}\\n\")\n",
 " print(f\"Critique:\\n{gemini_messages[0]}\\n\")\n",
 "\n",
 " for i in range(no_questions):\n",
-" gpt_next = call_boardgame_journalist()\n",
+" print(f\"\\n\\n***Question {i + 1}***\\n\\n\")\n",
+" gpt_next = call_boardgame_journalist(gpt_messages, gemini_messages)\n",
 " print(f\"Journalist:\\n{gpt_next}\\n\")\n",
 " gpt_messages.append(gpt_next)\n",
 "\n",
-" gemini_next = call_boardgame_critique()\n",
+" gemini_next = call_boardgame_critique(gpt_messages, gemini_messages)\n",
 " print(f\"Critique:\\n{gemini_next}\\n\")\n",
-" gemini_messages.append(gemini_next)"
+" gemini_messages.append(gemini_next)\n",
+"\n",
+" return \"\\n\".join(f\"*Journalist*: {g}\\n*Critique*: {c}\" for g, c in zip(gpt_messages, gemini_messages))\n",
+"\n"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 22,
+"id": "51c9dadc",
+"metadata": {},
+"outputs": [],
+"source": [
+"import gradio as gr"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 23,
+"id": "548efb27",
+"metadata": {},
+"outputs": [
+{
+"name": "stdout",
+"output_type": "stream",
+"text": [
+"* Running on local URL: http://127.0.0.1:7863\n",
+"* To create a public link, set `share=True` in `launch()`.\n"
+]
+},
+{
+"data": {
+"text/html": [
+"<div><iframe src=\"http://127.0.0.1:7863/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
+],
+"text/plain": [
+"<IPython.core.display.HTML object>"
+]
+},
+"metadata": {},
+"output_type": "display_data"
+},
+{
+"data": {
+"text/plain": []
+},
+"execution_count": 23,
+"metadata": {},
+"output_type": "execute_result"
+},
+{
+"name": "stdout",
+"output_type": "stream",
+"text": [
+"Journalist:\n",
+"I would like to review the board game Santorini.\n",
+"\n",
+"Critique:\n",
+"Sure, ask me anything about the board game Santorini.\n",
+"\n",
+"\n",
+"\n",
+"***Question 1***\n",
+"\n",
+"\n",
+"Journalist:\n",
+"\n",
+"\n",
+"To start, could you briefly describe the core gameplay and objective of Santorini?\n",
+"\n",
+"\n",
+"Critique:\n",
+"Santorini is an abstract strategy game at its core. Two players compete to be the first to get one of their two pawns to the third level of a tower. Players take turns moving one of their pawns, and then building a block on an adjacent space. The build can be on the same level, one level higher, or down a level. Certain spaces have a dome, which signifies the tower is complete, and no one can build or move to that space.\n",
+"\n",
+"The game's main draw is the use of God Powers, which give each player a unique ability that breaks the standard rules of the game. This adds a significant layer of asymmetry and strategic depth.\n",
+"\n",
+"\n",
+"\n",
+"\n",
+"***Question 2***\n",
+"\n",
+"\n",
+"Journalist:\n",
+"None\n",
+"\n"
+]
+},
+{
+"name": "stderr",
+"output_type": "stream",
+"text": [
+"Traceback (most recent call last):\n",
+" File \"c:\\Users\\Milan Lazic\\projects\\llm_engineering\\venv\\Lib\\site-packages\\gradio\\queueing.py\", line 745, in process_events\n",
+" response = await route_utils.call_process_api(\n",
+" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
+" ...<5 lines>...\n",
+" )\n",
+" ^\n",
+" File \"c:\\Users\\Milan Lazic\\projects\\llm_engineering\\venv\\Lib\\site-packages\\gradio\\route_utils.py\", line 354, in call_process_api\n",
+" output = await app.get_blocks().process_api(\n",
+" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
+" ...<11 lines>...\n",
+" )\n",
+" ^\n",
+" File \"c:\\Users\\Milan Lazic\\projects\\llm_engineering\\venv\\Lib\\site-packages\\gradio\\blocks.py\", line 2116, in process_api\n",
+" result = await self.call_function(\n",
+" ^^^^^^^^^^^^^^^^^^^^^^^^^\n",
+" ...<8 lines>...\n",
+" )\n",
+" ^\n",
+" File \"c:\\Users\\Milan Lazic\\projects\\llm_engineering\\venv\\Lib\\site-packages\\gradio\\blocks.py\", line 1623, in call_function\n",
+" prediction = await anyio.to_thread.run_sync( # type: ignore\n",
+" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
+" fn, *processed_input, limiter=self.limiter\n",
+" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
+" )\n",
+" ^\n",
+" File \"c:\\Users\\Milan Lazic\\projects\\llm_engineering\\venv\\Lib\\site-packages\\anyio\\to_thread.py\", line 56, in run_sync\n",
+" return await get_async_backend().run_sync_in_worker_thread(\n",
+" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
+" func, args, abandon_on_cancel=abandon_on_cancel, limiter=limiter\n",
+" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
+" )\n",
+" ^\n",
+" File \"c:\\Users\\Milan Lazic\\projects\\llm_engineering\\venv\\Lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 2476, in run_sync_in_worker_thread\n",
+" return await future\n",
+" ^^^^^^^^^^^^\n",
+" File \"c:\\Users\\Milan Lazic\\projects\\llm_engineering\\venv\\Lib\\site-packages\\anyio\\_backends\\_asyncio.py\", line 967, in run\n",
+" result = context.run(func, *args)\n",
+" File \"c:\\Users\\Milan Lazic\\projects\\llm_engineering\\venv\\Lib\\site-packages\\gradio\\utils.py\", line 915, in wrapper\n",
+" response = f(*args, **kwargs)\n",
+" File \"C:\\Users\\Milan Lazic\\AppData\\Local\\Temp\\ipykernel_29732\\641492170.py\", line 14, in run_boardgame_conversation\n",
+" gemini_next = call_boardgame_critique(gpt_messages, gemini_messages)\n",
+" File \"C:\\Users\\Milan Lazic\\AppData\\Local\\Temp\\ipykernel_29732\\2813548043.py\", line 7, in call_boardgame_critique\n",
+" completion = gemini_via_openai_client.chat.completions.create(\n",
+" model=gemini_model,\n",
+" messages=messages\n",
+" )\n",
+" File \"c:\\Users\\Milan Lazic\\projects\\llm_engineering\\venv\\Lib\\site-packages\\openai\\_utils\\_utils.py\", line 286, in wrapper\n",
+" return func(*args, **kwargs)\n",
+" File \"c:\\Users\\Milan Lazic\\projects\\llm_engineering\\venv\\Lib\\site-packages\\openai\\resources\\chat\\completions\\completions.py\", line 1147, in create\n",
+" return self._post(\n",
+" ~~~~~~~~~~^\n",
+" \"/chat/completions\",\n",
+" ^^^^^^^^^^^^^^^^^^^^\n",
+" ...<46 lines>...\n",
+" stream_cls=Stream[ChatCompletionChunk],\n",
+" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
+" )\n",
+" ^\n",
+" File \"c:\\Users\\Milan Lazic\\projects\\llm_engineering\\venv\\Lib\\site-packages\\openai\\_base_client.py\", line 1259, in post\n",
+" return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))\n",
+" ~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
+" File \"c:\\Users\\Milan Lazic\\projects\\llm_engineering\\venv\\Lib\\site-packages\\openai\\_base_client.py\", line 1047, in request\n",
+" raise self._make_status_error_from_response(err.response) from None\n",
+"openai.BadRequestError: Error code: 400 - [{'error': {'code': 400, 'message': 'Unable to submit request because it must include at least one parts field, which describes the prompt input. Learn more: https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/gemini', 'status': 'INVALID_ARGUMENT'}}]\n"
+]
+}
+],
+"source": [
+"view = gr.Interface(\n",
+" fn=run_boardgame_conversation,\n",
+" inputs=[gr.Textbox(label=\"Input the name of the board game:\")],\n",
+" outputs=[gr.Markdown(label=\"Conversation:\")],\n",
+" flagging_mode=\"never\"\n",
+")\n",
+"view.launch()"
 ]
 }
 ],