LLM eng week4 projects

This commit is contained in:
Susan Martin
2025-06-24 10:21:35 +01:00
parent e74be16156
commit 17de8bdd9d
2 changed files with 757 additions and 0 deletions

@@ -0,0 +1,337 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "cc7674a9-6164-4424-85a9-f669454cfd2a",
"metadata": {},
"source": [
"I used this project to play about with Gradio blocks a little bit as it had more inputs than the other projects I've done.\n",
"Its a password generator which I have no doubt I will use!"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "04c8d2dd-cb9a-4b18-b12d-48ed2f39679a",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import requests\n",
"import google.generativeai\n",
"import anthropic\n",
"from IPython.display import Markdown, display, update_display\n",
"import gradio as gr"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "04521351-f220-42fe-9dc5-d0be80c95dd7",
"metadata": {},
"outputs": [],
"source": [
"# keys\n",
"\n",
"load_dotenv(override=True)\n",
"openai_api_key = os.getenv(\"OPENAI_API_KEY\")\n",
"\n",
"if openai_api_key:\n",
" print(\"All good\")\n",
"else:\n",
" print(\"OpenAI key issue\")\n",
"\n",
"claude_api_key = os.getenv(\"ANTHROPIC_API_KEY\")\n",
"\n",
"if claude_api_key:\n",
" print(\"All good\")\n",
"else:\n",
" print(\"Claude key issue\")\n",
"\n",
"google_api_key = os.getenv(\"GOOGLE_API_KEY\")\n",
"\n",
"if google_api_key:\n",
" print(\"All good\")\n",
"else:\n",
" print(\"Google key issue\")\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "70fd3748-e6b6-4ac2-89a5-ef65ed7e41a3",
"metadata": {},
"outputs": [],
"source": [
"# initialise\n",
"\n",
"openai = OpenAI()\n",
"claude = anthropic.Anthropic()\n",
"google.generativeai.configure()\n",
"\n",
"OPENAI_MODEL = \"gpt-4o\"\n",
"CLAUDE_MODEL = \"claude-sonnet-4-20250514\"\n",
"GOOGLE_MODEL = \"gemini-2.0-flash\"\n",
"\n",
"max_tok = 500"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6a448651-e426-4c3c-96f7-d69975dc7b10",
"metadata": {},
"outputs": [],
"source": [
"#Prompts\n",
"\n",
"def pass_system_prompt(required_len, spec_char=\"Y\",num_char=\"Y\",min_lowercase=1,min_uppercase=1):\n",
"\n",
" system_prompt = f\"\"\"You are a secure password generator. Your task is to create a single, cryptographically strong password that meets ALL specified requirements.\n",
" \n",
"CRITICAL REQUIREMENTS:\n",
"- Length: EXACTLY {required_len} characters\n",
"- Must include: At least {min_lowercase} lowercase letter(s) AND at least {min_uppercase} uppercase letter(s)\n",
"- Special characters: {'REQUIRED - include at least 1 char' if spec_char else 'FORBIDDEN - do not include any'}\n",
"- Numbers: {'REQUIRED - include at least 1 digit' if num_char else 'FORBIDDEN - do not include any digits'}\n",
"\n",
"SECURITY RULES:\n",
"1. Generate truly random passwords - avoid patterns, dictionary words, or predictable sequences\n",
"2. Distribute character types evenly throughout the password\n",
"3. Do not use repeated characters excessively (max 2 of same character)\n",
"4. Ensure password meets minimum complexity for each required character type\n",
"\n",
"OUTPUT FORMAT:\n",
"- Respond with ONLY the generated password\n",
"- No explanations, no additional text, just the password\n",
"- Verify the password meets ALL requirements before responding\"\"\"\n",
"\n",
" return system_prompt\n",
"\n",
"def pass_user_prompt(required_len, spec_char=\"Y\",num_char=\"Y\",min_lowercase=1,min_uppercase=1):\n",
" \n",
" user_prompt = f\"\"\"Generate a secure password with these exact specifications:\n",
" \n",
"Length: {required_len} characters\n",
"Lowercase letters: Required (minimum {min_lowercase})\n",
"Uppercase letters: Required (minimum {min_uppercase})\n",
"Numbers: {'Required (minimum 1)' if num_char else 'Not allowed'}\n",
"Special characters: {'Required (minimum 1)' if spec_char else 'Not allowed'}\n",
"\n",
"Requirements verification checklist:\n",
"✓ Exactly {required_len} characters total\n",
"✓ Contains {min_lowercase}+ lowercase letters\n",
"✓ Contains {min_uppercase}+ uppercase letters\n",
"✓ {'Contains 1+ numbers' if num_char else 'Contains NO numbers'}\n",
"✓ {'Contains 1+ special characters' if spec_char else 'Contains NO special characters'}\n",
"✓ No obvious patterns or dictionary words\n",
"✓ Good distribution of character types\n",
"\n",
"Generate the password now.\"\"\"\n",
"\n",
" return user_prompt\n",
" \n",
"def pass_messages(required_len, spec_char,num_char,min_lowercase,min_uppercase):\n",
" messages = [\n",
" {\"role\":\"system\",\"content\":pass_system_prompt(required_len, spec_char,num_char,min_lowercase,min_uppercase)},\n",
" {\"role\":\"user\",\"content\":pass_user_prompt(required_len, spec_char,num_char,min_lowercase,min_uppercase)}\n",
" ]\n",
"\n",
" return messages\n"
]
},
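{
"cell_type": "markdown",
"id": "b7f1c2d3-4e5a-46b8-9c0d-1a2b3c4d5e6f",
"metadata": {},
"source": [
"A small sanity check: the models don't reliably count characters, so the sketch below double-checks the returned password deterministically instead of trusting the \"verify before responding\" instruction. It assumes the boolean spec_char / num_char flags used by the Gradio checkboxes further down; the check_password name is my own."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c8a2d3e4-5f6b-47c9-8d0e-2b3c4d5e6f70",
"metadata": {},
"outputs": [],
"source": [
"# Sketch: deterministic check that a generated password meets the requested rules.\n",
"# Assumes spec_char / num_char are booleans, matching the Gradio checkboxes below.\n",
"import string\n",
"\n",
"def check_password(password, required_len, spec_char=True, num_char=True, min_lowercase=1, min_uppercase=1):\n",
"    problems = []\n",
"    if len(password) != required_len:\n",
"        problems.append(f\"length is {len(password)}, expected {required_len}\")\n",
"    if sum(c.islower() for c in password) < min_lowercase:\n",
"        problems.append(f\"fewer than {min_lowercase} lowercase letter(s)\")\n",
"    if sum(c.isupper() for c in password) < min_uppercase:\n",
"        problems.append(f\"fewer than {min_uppercase} uppercase letter(s)\")\n",
"    has_digit = any(c.isdigit() for c in password)\n",
"    if num_char and not has_digit:\n",
"        problems.append(\"digits required but none found\")\n",
"    if not num_char and has_digit:\n",
"        problems.append(\"digits forbidden but present\")\n",
"    has_special = any(c in string.punctuation for c in password)\n",
"    if spec_char and not has_special:\n",
"        problems.append(\"special characters required but none found\")\n",
"    if not spec_char and has_special:\n",
"        problems.append(\"special characters forbidden but present\")\n",
"    return problems  # empty list means the password meets every rule"
]
},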
{
"cell_type": "code",
"execution_count": null,
"id": "857370b0-35a5-4b50-8715-86f8e781523b",
"metadata": {},
"outputs": [],
"source": [
"#test\n",
"\n",
"messages1 = pass_messages(12, \"N\", \"Y\",1,1)\n",
"print(messages1)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "59ab4279-90a8-4997-8e15-f07295856222",
"metadata": {},
"outputs": [],
"source": [
"def openai_password_gen(required_len, spec_char, num_char,min_lowercase,min_uppercase):\n",
" response=openai.chat.completions.create(\n",
" model=OPENAI_MODEL,\n",
" max_tokens=max_tok,\n",
" messages=pass_messages(required_len, spec_char,num_char,min_lowercase,min_uppercase)\n",
" )\n",
" return response.choices[0].message.content\n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f5e1a41a-b03c-4408-a0f5-00529785f3d1",
"metadata": {},
"outputs": [],
"source": [
"def claude_password_gen(required_len, spec_char, num_char,min_lowercase,min_uppercase):\n",
" response = claude.messages.create(\n",
" model=CLAUDE_MODEL,\n",
" max_tokens=max_tok,\n",
" system=pass_system_prompt(required_len, spec_char, num_char,min_lowercase,min_uppercase),\n",
" messages = [{\"role\":\"user\",\"content\":pass_user_prompt(required_len, spec_char, num_char,min_lowercase,min_uppercase)}]\n",
" )\n",
" return response.content[0].text\n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6a41a0a2-55a1-47e5-8fc0-5dd04ebd3573",
"metadata": {},
"outputs": [],
"source": [
"def google_password_gen(required_len, spec_char, num_char,min_lowercase,min_uppercase):\n",
" message = google.generativeai.GenerativeModel(\n",
" model_name=GOOGLE_MODEL,\n",
" system_instruction=pass_system_prompt(required_len, spec_char, num_char,min_lowercase,min_uppercase)\n",
" )\n",
" response = message.generate_content(pass_user_prompt(required_len, spec_char, num_char,min_lowercase,min_uppercase))\n",
" return response.text"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dcd1ce50-6576-4594-8739-1d7daf602213",
"metadata": {},
"outputs": [],
"source": [
"#test\n",
"messages1 = openai_password_gen(12, \"N\",\"Y\",1,1)\n",
"messages2 = claude_password_gen(12,\"N\",\"Y\",1,1)\n",
"messages3= google_password_gen(12,\"N\",\"Y\",1,1)\n",
"print(\"OpenAI: \",messages1)\n",
"print(\"Claude: \", messages2)\n",
"print(\"Gemini: \", messages3)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9cec429a-2355-4941-8422-480b2614009c",
"metadata": {},
"outputs": [],
"source": [
"# model select\n",
"\n",
"def select_model(required_len, spec_char, num_char,min_lowercase,min_uppercase,model):\n",
" if model == \"OpenAI\":\n",
" return openai_password_gen(required_len, spec_char, num_char,min_lowercase,min_uppercase)\n",
" elif model == \"Claude\":\n",
" return claude_password_gen(required_len, spec_char, num_char,min_lowercase,min_uppercase)\n",
" elif model == \"Gemini\":\n",
" return google_password_gen(required_len, spec_char, num_char,min_lowercase,min_uppercase)\n",
" else:\n",
" print(\"No model selected\")\n",
" return None"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bef52e6d-dc50-4c91-9d56-624dfdd66276",
"metadata": {},
"outputs": [],
"source": [
"test = select_model(12, \"N\",\"Y\",1,1,\"OpenAI\")\n",
"\n",
"print(test)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7b9d3685-a1b8-470c-8f4b-e63d68a0240d",
"metadata": {},
"outputs": [],
"source": [
"css = \"\"\"\n",
"#password_box textarea {\n",
" background-color: #306998;\n",
" color: white;\n",
"}\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "81c423ec-0ca7-4c96-a2fe-02ed2b5f3839",
"metadata": {},
"outputs": [],
"source": [
"\n",
"with gr.Blocks(css=css) as demo:\n",
" gr.Markdown(\"Choose your password complexity requirements and run:\")\n",
" with gr.Row():\n",
" with gr.Column(min_width=150,scale=2):\n",
" with gr.Row():\n",
" required_len = gr.Number(label=\"Specify the required length\",value=12,minimum=1,maximum=30)\n",
" min_lowercase = gr.Number(label=\"the minimum lowercase letters\", value=1,minimum=0)\n",
" min_uppercase = gr.Number(label=\"the minimum uppercase letters\", value=1,minimum=0)\n",
" with gr.Column():\n",
" spec_char = gr.Checkbox(label=\"Include special characters?\",value=True)\n",
" num_char = gr.Checkbox(label=\"Include numbers?\", value=True)\n",
" with gr.Row():\n",
" with gr.Column():\n",
" model = gr.Dropdown([\"OpenAI\",\"Claude\",\"Gemini\"])\n",
" btn = gr.Button(\"Run\")\n",
" with gr.Column():\n",
" output = gr.Textbox(label=\"Password:\", elem_id=\"password_box\")\n",
" \n",
" btn.click(fn=select_model,inputs=[required_len,spec_char,num_char,min_lowercase,min_uppercase,model],outputs=output)\n",
"\n",
"demo.launch()\n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d81a8318-57ef-46ae-91b7-ae63d661edd8",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@@ -0,0 +1,420 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "65b3aadc-c540-4cb2-a338-d523d3f22e5b",
"metadata": {},
"source": [
"Unit test generator using GPT, Claude and Gemini.\n",
"This will create unit test code from python and also run the code and provide the result (including any errors)\n",
"Note:\n",
"When I tried to use claude-sonnet-4-20250514 the results were too big and the python was cut-off (no matter how big I made the max tokens). This seemed to be the case for both examples. I've changed it to claude-3-5-sonnet-20240620 and it seems to be run better."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e610bf56-a46e-4aff-8de1-ab49d62b1ad3",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import requests\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import google.generativeai\n",
"import anthropic\n",
"from IPython.display import Markdown, display, update_display\n",
"import gradio as gr\n",
"import sys\n",
"import io\n",
"import traceback\n",
"import unittest\n",
"import subprocess\n",
"import tempfile"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4f672e1c-87e9-4865-b760-370fa605e614",
"metadata": {},
"outputs": [],
"source": [
"# keys\n",
"\n",
"load_dotenv(override=True)\n",
"openai_api_key = os.getenv(\"OPENAI_API_KEY\")\n",
"\n",
"if openai_api_key:\n",
" print(\"All good\")\n",
"else:\n",
" print(\"OpenAI key issue\")\n",
"\n",
"claude_api_key = os.getenv(\"ANTHROPIC_API_KEY\")\n",
"\n",
"if claude_api_key:\n",
" print(\"All good\")\n",
"else:\n",
" print(\"Claude key issue\")\n",
"\n",
"google_api_key = os.getenv(\"GOOGLE_API_KEY\")\n",
"\n",
"if google_api_key:\n",
" print(\"All good\")\n",
"else:\n",
" print(\"Google key issue\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8aa149ed-9298-4d69-8fe2-8f5de0f667da",
"metadata": {},
"outputs": [],
"source": [
"# initialise\n",
"\n",
"openai = OpenAI()\n",
"claude = anthropic.Anthropic()\n",
"google.generativeai.configure()\n",
"\n",
"OPENAI_MODEL = \"gpt-4o\"\n",
"CLAUDE_MODEL = \"claude-3-5-sonnet-20240620\" #\"claude-sonnet-4-20250514\"\n",
"GOOGLE_MODEL = \"gemini-2.0-flash\"\n",
"\n",
"max_tok = 5000"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6896636f-923e-4a2c-9d6c-fac07828a201",
"metadata": {},
"outputs": [],
"source": [
"system_message = \"You are an engineer with responsibility for unit testing python code.\"\n",
"system_message += \"You review base python code and develop unit tests, also in python, which validate each unit of code.\"\n",
"system_message += \"\"\" The output must be in Python with both the unit tests and comments explaining the purpose of each test.\n",
"The output should not include any additional text at the start or end including \"```\". It should be possible to run the code without any updates including an execution statement.\n",
"Include the base / original python code in the response.\"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8e7b3546-57aa-4c29-bc5d-f211970d04eb",
"metadata": {},
"outputs": [],
"source": [
"def user_prompt_for(python):\n",
" user_prompt = \"Review the Python code provided and develop unit tests which can be run in a jupyter lab.\"\n",
" user_prompt += \"\"\" The output must be in Python with both the unit tests and comments explaining the purpose of each test.\n",
"The output should not include any additional text at the start or end including \"```\". It should be possible to run the code without any updates (include an execution statement).\n",
"Include the base / original python code in the response.\"\"\"\n",
" user_prompt += python\n",
" return user_prompt"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c6190659-f54c-4951-bef4-4960f8e51cc4",
"metadata": {},
"outputs": [],
"source": [
"def messages_for(python):\n",
" return [\n",
" {\"role\": \"system\", \"content\": system_message},\n",
" {\"role\": \"user\", \"content\": user_prompt_for(python)}\n",
" ]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0b327aa3-3277-44e1-972f-aa7158147ddd",
"metadata": {},
"outputs": [],
"source": [
"# python example\n",
"example = \"\"\"class BookNotAvailableError(Exception):\n",
" pass\n",
"\n",
"class Library:\n",
" def __init__(self):\n",
" self.inventory = {} # book title -> quantity\n",
" self.borrowed = {} # user -> list of borrowed book titles\n",
"\n",
" def add_book(self, title, quantity=1):\n",
" if quantity <= 0:\n",
" raise ValueError(\"Quantity must be positive\")\n",
" self.inventory[title] = self.inventory.get(title, 0) + quantity\n",
"\n",
" def borrow_book(self, user, title):\n",
" if self.inventory.get(title, 0) < 1:\n",
" raise BookNotAvailableError(f\"'{title}' is not available\")\n",
" self.inventory[title] -= 1\n",
" self.borrowed.setdefault(user, []).append(title)\n",
"\n",
" def return_book(self, user, title):\n",
" if user not in self.borrowed or title not in self.borrowed[user]:\n",
" raise ValueError(f\"User '{user}' did not borrow '{title}'\")\n",
" self.borrowed[user].remove(title)\n",
" self.inventory[title] = self.inventory.get(title, 0) + 1\n",
"\n",
" def get_available_books(self):\n",
" return {title: qty for title, qty in self.inventory.items() if qty > 0}\n",
"\n",
" def get_borrowed_books(self, user):\n",
" return self.borrowed.get(user, [])\"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ed6e624e-88a5-4f10-8ab5-f071f0ca3041",
"metadata": {},
"outputs": [],
"source": [
"# python example2\n",
"example2 = \"\"\"class Calculator:\n",
" def add(self, a, b):\n",
" return a + b\n",
"\n",
" def subtract(self, a, b):\n",
" return a - b\n",
"\n",
" def divide(self, a, b):\n",
" if b == 0:\n",
" raise ValueError(\"Cannot divide by zero\")\n",
" return a / b\n",
"\n",
" def multiply(self, a, b):\n",
" return a * b\n",
"\n",
"\n",
"def is_prime(n):\n",
" if n <= 1:\n",
" return False\n",
" if n <= 3:\n",
" return True\n",
" if n % 2 == 0 or n % 3 == 0:\n",
" return False\n",
" i = 5\n",
" while i * i <= n:\n",
" if n % i == 0 or n % (i + 2) == 0:\n",
" return False\n",
" i += 6\n",
" return True\n",
" \"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e7d2fea8-74c6-4421-8f1e-0e76d5b201b9",
"metadata": {},
"outputs": [],
"source": [
"def unit_test_gpt(python): \n",
" stream = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages_for(python), stream=True)\n",
" reply = \"\"\n",
" for chunk in stream:\n",
" fragment = chunk.choices[0].delta.content or \"\"\n",
" reply += fragment\n",
" yield reply"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7cd84ad8-d55c-4fe0-9eeb-1895c95c4a9d",
"metadata": {},
"outputs": [],
"source": [
"def unit_test_claude(python):\n",
" result = claude.messages.stream(\n",
" model=CLAUDE_MODEL,\n",
" max_tokens=max_tok,\n",
" system=system_message,\n",
" messages=[{\"role\": \"user\", \"content\": user_prompt_for(python)}],\n",
" )\n",
" reply = \"\"\n",
" with result as stream:\n",
" for text in stream.text_stream:\n",
" reply += text\n",
" yield reply"
]
},
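{
"cell_type": "markdown",
"id": "d2e3f4a5-6b7c-48d9-9e0f-3c4d5e6f7a81",
"metadata": {},
"source": [
"Related to the note at the top about claude-sonnet-4 output being cut off: a sketch (not wired into the UI) of a non-streaming Claude call that flags truncation by checking stop_reason, which the Anthropic API sets to \"max_tokens\" when the reply hit the token limit. The function name is my own."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e3f4a5b6-7c8d-49ea-af10-4d5e6f7a8b92",
"metadata": {},
"outputs": [],
"source": [
"# Sketch (not used by the UI): non-streaming Claude call that flags truncated output\n",
"# via stop_reason, which is \"max_tokens\" when the reply hit the token limit.\n",
"def unit_test_claude_checked(python):\n",
"    response = claude.messages.create(\n",
"        model=CLAUDE_MODEL,\n",
"        max_tokens=max_tok,\n",
"        system=system_message,\n",
"        messages=[{\"role\": \"user\", \"content\": user_prompt_for(python)}],\n",
"    )\n",
"    reply = response.content[0].text\n",
"    if response.stop_reason == \"max_tokens\":\n",
"        # Incomplete tests would fail confusingly when run, so make the cut-off visible\n",
"        reply += \"\\n# WARNING: output truncated at max_tokens - raise max_tok or switch model\"\n",
"    return reply"
]
},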
{
"cell_type": "code",
"execution_count": null,
"id": "ad86f652-879a-489f-9891-bdc2d97c33b0",
"metadata": {},
"outputs": [],
"source": [
"def unit_test_google(python):\n",
" model = google.generativeai.GenerativeModel(\n",
" model_name=GOOGLE_MODEL,\n",
" system_instruction=system_message\n",
" )\n",
" stream = model.generate_content(contents=user_prompt_for(python),stream=True)\n",
" reply = \"\"\n",
" for chunk in stream:\n",
" reply += chunk.text or \"\"\n",
" yield reply.replace(\"```python\\n\", \"\").replace(\"```\", \"\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "105db6f9-343c-491d-8e44-3a5328b81719",
"metadata": {},
"outputs": [],
"source": [
"#unit_test_gpt(example)\n",
"#unit_test_claude(example)\n",
"#unit_test_google(example)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2f1ae8f5-16c8-40a0-aa18-63b617df078d",
"metadata": {},
"outputs": [],
"source": [
"def select_model(python, model):\n",
" if model==\"GPT\":\n",
" result = unit_test_gpt(python)\n",
" elif model==\"Claude\":\n",
" result = unit_test_claude(python)\n",
" elif model==\"Google\":\n",
" result = unit_test_google(python)\n",
" else:\n",
" raise ValueError(\"Unknown model\")\n",
" for stream_so_far in result:\n",
" yield stream_so_far "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f1ddb38e-6b0a-4c37-baa4-ace0b7de887a",
"metadata": {},
"outputs": [],
"source": [
"# with gr.Blocks() as ui:\n",
"# with gr.Row():\n",
"# python = gr.Textbox(label=\"Python code:\", lines=10, value=example)\n",
"# test = gr.Textbox(label=\"Unit tests\", lines=10)\n",
"# with gr.Row():\n",
"# model = gr.Dropdown([\"GPT\", \"Claude\",\"Google\"], label=\"Select model\", value=\"GPT\")\n",
"# generate = gr.Button(\"Generate unit tests\")\n",
"\n",
"# generate.click(select_model, inputs=[python, model], outputs=[test])\n",
"\n",
"# ui.launch()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "389ae411-a4f6-44f2-8b26-d46a971687a7",
"metadata": {},
"outputs": [],
"source": [
"def execute_python(code):\n",
" # Capture stdout and stderr\n",
" output = io.StringIO()\n",
" sys_stdout = sys.stdout\n",
" sys_stderr = sys.stderr\n",
" sys.stdout = output\n",
" sys.stderr = output\n",
"\n",
" try:\n",
" # Compile the code first\n",
" compiled_code = compile(code, '<string>', 'exec')\n",
"\n",
" # Prepare a namespace dict for exec environment\n",
" # Include __builtins__ so imports like 'import unittest' work\n",
" namespace = {\"__builtins__\": __builtins__}\n",
"\n",
" # Run the user's code, but expect tests will be defined here\n",
" exec(compiled_code, namespace)\n",
"\n",
" # Look for unittest.TestCase subclasses in the namespace\n",
" loader = unittest.TestLoader()\n",
" suite = unittest.TestSuite()\n",
"\n",
" for obj in namespace.values():\n",
" if isinstance(obj, type) and issubclass(obj, unittest.TestCase):\n",
" tests = loader.loadTestsFromTestCase(obj)\n",
" suite.addTests(tests)\n",
"\n",
" # Run the tests\n",
" runner = unittest.TextTestRunner(stream=output, verbosity=2)\n",
" result = runner.run(suite)\n",
"\n",
" except SystemExit as e:\n",
" # Catch sys.exit calls from unittest.main()\n",
" output.write(f\"\\nSystemExit called with code {e.code}\\n\")\n",
" except Exception as e:\n",
" # Catch other errors\n",
" output.write(f\"\\nException: {e}\\n\")\n",
" finally:\n",
" sys.stdout = sys_stdout\n",
" sys.stderr = sys_stderr\n",
"\n",
" return output.getvalue()"
]
},
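{
"cell_type": "markdown",
"id": "f4a5b6c7-8d9e-4afb-b021-5e6f7a8b9ca3",
"metadata": {},
"source": [
"The imports cell pulls in subprocess and tempfile, which execute_python above doesn't use. Here is a sketch of the isolated alternative they suggest: write the generated tests to a temporary file and run them in a separate Python process, so the generated code can't touch the notebook's own globals. It isn't wired into the UI below; the function name and timeout are my own choices."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a5b6c7d8-9eaf-4b0c-8132-6f7a8b9cad04",
"metadata": {},
"outputs": [],
"source": [
"# Sketch (not wired into the UI): run the generated tests in a separate Python\n",
"# process via a temporary file, keeping the notebook's namespace untouched.\n",
"def execute_python_subprocess(code, timeout=30):\n",
"    with tempfile.NamedTemporaryFile(\"w\", suffix=\".py\", delete=False) as f:\n",
"        f.write(code)\n",
"        path = f.name\n",
"    try:\n",
"        result = subprocess.run(\n",
"            [sys.executable, path],\n",
"            capture_output=True,\n",
"            text=True,\n",
"            timeout=timeout,\n",
"        )\n",
"        # unittest writes its report to stderr, so return both streams\n",
"        return (result.stdout or \"\") + (result.stderr or \"\")\n",
"    except subprocess.TimeoutExpired:\n",
"        return f\"Test run timed out after {timeout} seconds\"\n",
"    finally:\n",
"        os.remove(path)"
]
},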
{
"cell_type": "code",
"execution_count": null,
"id": "eca98de3-9e2f-4c23-8bb4-dbb2787a15a4",
"metadata": {},
"outputs": [],
"source": [
"with gr.Blocks() as ui:\n",
" with gr.Row():\n",
" python = gr.Textbox(label=\"Python code:\", lines=10, value=example2)\n",
" test = gr.Textbox(label=\"Unit tests\", lines=10)\n",
" test_run = gr.Textbox(label=\"Test results\", lines=10)\n",
" with gr.Row():\n",
" model = gr.Dropdown([\"GPT\", \"Claude\",\"Google\"], label=\"Select model\", value=\"GPT\")\n",
" generate = gr.Button(\"Generate unit tests\")\n",
" run = gr.Button(\"Run unit tests\")\n",
"\n",
" generate.click(select_model, inputs=[python, model], outputs=[test])\n",
" run.click(execute_python, inputs=[test],outputs=[test_run])\n",
"\n",
"ui.launch()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}