{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "3e473bbd-a0c2-43bd-bf99-c749784d00c3",
   "metadata": {},
   "outputs": [],
   "source": [
    "import gradio as gr\n",
    "import openai\n",
    "import anthropic\n",
    "import google.generativeai as genai\n",
    "import requests\n",
    "import os\n",
    "from dotenv import load_dotenv"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "16210512-41f1-4de3-8348-2cd7129e023f",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "True"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Load API keys from a local .env file\n",
    "load_dotenv(override=True)"
   ]
  },
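  {
   "cell_type": "markdown",
   "id": "env-keys-note",
   "metadata": {},
   "source": [
    "`load_dotenv` reads a `.env` file from the working directory. Below is an optional sanity check, added here for convenience rather than part of the original app: it reports which of the three provider keys the `CodeCommenter` class (next cell) will find, without printing their values. Ollama needs no key; it only has to be serving on `localhost:11434`."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "env-keys-check",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional sanity check (added for convenience): report which provider\n",
    "# keys were found; the names match what setup_clients() looks up.\n",
    "for var in (\"OPENAI_API_KEY\", \"ANTHROPIC_API_KEY\", \"GOOGLE_API_KEY\"):\n",
    "    print(f\"{var}: {'set' if os.getenv(var) else 'missing'}\")"
   ]
  },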
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "6747e275-91eb-4d2b-90b6-805f2bd9b6b7",
   "metadata": {},
   "outputs": [],
   "source": [
    "class CodeCommenter:\n",
    "    def __init__(self):\n",
    "        # Initialize API clients\n",
    "        self.openai_client = None\n",
    "        self.anthropic_client = None\n",
    "        self.gemini_client = None\n",
    "\n",
    "        # Load API keys from environment variables\n",
    "        self.setup_clients()\n",
    "\n",
    "    def setup_clients(self):\n",
    "        \"\"\"Initialize API clients with keys from environment variables\"\"\"\n",
    "        try:\n",
    "            # OpenAI\n",
    "            openai_key = os.getenv('OPENAI_API_KEY')\n",
    "            if openai_key:\n",
    "                self.openai_client = openai.OpenAI(api_key=openai_key)\n",
    "\n",
    "            # Anthropic\n",
    "            anthropic_key = os.getenv('ANTHROPIC_API_KEY')\n",
    "            if anthropic_key:\n",
    "                self.anthropic_client = anthropic.Anthropic(api_key=anthropic_key)\n",
    "\n",
    "            # Google Gemini\n",
    "            gemini_key = os.getenv('GOOGLE_API_KEY')\n",
    "            if gemini_key:\n",
    "                genai.configure(api_key=gemini_key)\n",
    "                self.gemini_client = genai.GenerativeModel('gemini-2.0-flash')\n",
    "\n",
    "        except Exception as e:\n",
    "            print(f\"Warning: Error setting up API clients: {e}\")\n",
    "\n",
    "    def create_comments_prompt(self, code: str, language: str) -> str:\n",
    "        \"\"\"Create a prompt for the LLM to add comments and docstrings\"\"\"\n",
    "        return f\"\"\"Please add detailed and helpful comments and docstrings to the following {language} code.\n",
    "\n",
    "Guidelines:\n",
    "1. Add comprehensive docstrings for functions, classes, and modules\n",
    "2. Add inline comments explaining complex logic\n",
    "3. Follow the commenting conventions for {language}\n",
    "4. Maintain the original code structure and functionality\n",
    "5. Make comments clear and professional\n",
    "6. Don't change the actual code logic, only add comments\n",
    "7. Do not add code markdown delimiters like ```python\n",
    "\n",
    "Here's the code to comment:\n",
    "\n",
    "{code}\n",
    "\n",
    "Please return only the commented code without any additional explanation or markdown formatting.\"\"\"\n",
    "\n",
    "    def create_tests_prompt(self, code: str, language: str) -> str:\n",
    "        \"\"\"Create a prompt for the LLM to generate unit tests\"\"\"\n",
    "        return f\"\"\"Please generate comprehensive unit tests for the following {language} code.\n",
    "\n",
    "Guidelines:\n",
    "1. Use the appropriate testing framework for {language} (pytest for Python, JUnit for Java, etc.)\n",
    "2. Create tests for all functions and methods\n",
    "3. Include both positive and negative test cases\n",
    "4. Test edge cases and error conditions\n",
    "5. Use meaningful test names that describe what is being tested\n",
    "6. Include setup and teardown methods if needed\n",
    "7. Add mock objects for external dependencies (like database connections)\n",
    "8. Do not add code markdown delimiters like ```python\n",
    "9. Follow testing best practices for {language}\n",
    "\n",
    "Here's the code to test:\n",
    "\n",
    "{code}\n",
    "\n",
    "Please return only the unit test code without any additional explanation or markdown formatting.\"\"\"\n",
    "\n",
    "    def create_combined_prompt(self, code: str, language: str) -> str:\n",
    "        \"\"\"Create a prompt for the LLM to add both comments and unit tests\"\"\"\n",
    "        return f\"\"\"Please add detailed comments and docstrings to the following {language} code AND generate comprehensive unit tests for it.\n",
    "\n",
    "For Comments:\n",
    "1. Add comprehensive docstrings for functions, classes, and modules\n",
    "2. Add inline comments explaining complex logic\n",
    "3. Follow the commenting conventions for {language}\n",
    "4. Don't change the actual code logic, only add comments\n",
    "\n",
    "For Unit Tests:\n",
    "1. Use the appropriate testing framework for {language} (pytest for Python, JUnit for Java, etc.)\n",
    "2. Create tests for all functions and methods\n",
    "3. Include both positive and negative test cases\n",
    "4. Test edge cases and error conditions\n",
    "5. Add mock objects for external dependencies (like database connections)\n",
    "6. Follow testing best practices for {language}\n",
    "\n",
    "Structure your response as:\n",
    "1. First, provide the original code with added comments and docstrings\n",
    "2. Then, provide the unit tests as a separate section\n",
    "3. Do not add code markdown delimiters like ```python\n",
    "4. Clearly demarcate the two sections (commented code and unit tests) with comments stating each section's purpose\n",
    "\n",
    "Here's the code:\n",
    "\n",
    "{code}\n",
    "\n",
    "Please return the commented code followed by the unit tests, clearly separated.\"\"\"\n",
    "\n",
    "    def call_openai(self, prompt: str, model: str = \"gpt-4o-mini\") -> str:\n",
    "        \"\"\"Make API call to OpenAI\"\"\"\n",
    "        if not self.openai_client:\n",
    "            return \"Error: OpenAI API key not configured. Please set OPENAI_API_KEY environment variable.\"\n",
    "\n",
    "        try:\n",
    "            response = self.openai_client.chat.completions.create(\n",
    "                model=model,\n",
    "                messages=[\n",
    "                    {\"role\": \"system\", \"content\": \"You are a helpful coding assistant that adds detailed comments, docstrings, and generates unit tests for code.\"},\n",
    "                    {\"role\": \"user\", \"content\": prompt}\n",
    "                ],\n",
    "                max_tokens=4000,\n",
    "                temperature=0.1\n",
    "            )\n",
    "            return response.choices[0].message.content.strip()\n",
    "        except Exception as e:\n",
    "            return f\"Error calling OpenAI API: {str(e)}\"\n",
    "\n",
    "    def call_anthropic(self, prompt: str, model: str = \"claude-3-5-haiku-20241022\") -> str:\n",
    "        \"\"\"Make API call to Anthropic Claude\"\"\"\n",
    "        if not self.anthropic_client:\n",
    "            return \"Error: Anthropic API key not configured. Please set ANTHROPIC_API_KEY environment variable.\"\n",
    "\n",
    "        try:\n",
    "            response = self.anthropic_client.messages.create(\n",
    "                model=model,\n",
    "                max_tokens=4000,\n",
    "                temperature=0.1,\n",
    "                messages=[\n",
    "                    {\"role\": \"user\", \"content\": prompt}\n",
    "                ]\n",
    "            )\n",
    "            return response.content[0].text.strip()\n",
    "        except Exception as e:\n",
    "            return f\"Error calling Anthropic API: {str(e)}\"\n",
    "\n",
    "    def call_gemini(self, prompt: str) -> str:\n",
    "        \"\"\"Make API call to Google Gemini\"\"\"\n",
    "        if not self.gemini_client:\n",
    "            return \"Error: Google API key not configured. Please set GOOGLE_API_KEY environment variable.\"\n",
    "\n",
    "        try:\n",
    "            response = self.gemini_client.generate_content(\n",
    "                prompt,\n",
    "                generation_config=genai.types.GenerationConfig(\n",
    "                    max_output_tokens=4000,\n",
    "                    temperature=0.1,\n",
    "                )\n",
    "            )\n",
    "            return response.text.strip()\n",
    "        except Exception as e:\n",
    "            return f\"Error calling Gemini API: {str(e)}\"\n",
    "\n",
    "    def call_ollama(self, prompt: str, model: str = \"llama3.2:latest\") -> str:\n",
    "        \"\"\"Make API call to Ollama (local)\"\"\"\n",
    "        try:\n",
    "            url = \"http://localhost:11434/api/generate\"\n",
    "            data = {\n",
    "                \"model\": model,\n",
    "                \"prompt\": prompt,\n",
    "                \"stream\": False,\n",
    "                \"options\": {\n",
    "                    \"temperature\": 0.1,\n",
    "                    \"num_predict\": 4000\n",
    "                }\n",
    "            }\n",
    "\n",
    "            response = requests.post(url, json=data, timeout=60)\n",
    "            if response.status_code == 200:\n",
    "                result = response.json()\n",
    "                return result.get('response', '').strip()\n",
    "            else:\n",
    "                return f\"Error calling Ollama API: HTTP {response.status_code}\"\n",
    "        except requests.exceptions.ConnectionError:\n",
    "            return \"Error: Could not connect to Ollama. Make sure Ollama is running locally on port 11434.\"\n",
    "        except Exception as e:\n",
    "            return f\"Error calling Ollama API: {str(e)}\"\n",
    "\n",
    "    def process_code(self, language: str, code: str, llm: str, generate_comments: bool, generate_tests: bool) -> str:\n",
    "        \"\"\"Process the given code based on the selected options\"\"\"\n",
    "        if not code.strip():\n",
    "            return \"Error: Please provide code to process.\"\n",
    "\n",
    "        if not generate_comments and not generate_tests:\n",
    "            return \"Error: Please select at least one option (Generate comments or Generate test units).\"\n",
    "\n",
    "        # Determine which prompt to use\n",
    "        if generate_comments and generate_tests:\n",
    "            prompt = self.create_combined_prompt(code, language)\n",
    "        elif generate_comments:\n",
    "            prompt = self.create_comments_prompt(code, language)\n",
    "        else:  # generate_tests only\n",
    "            prompt = self.create_tests_prompt(code, language)\n",
    "\n",
    "        # Route to the appropriate LLM\n",
    "        if llm == \"gpt-4o-mini\":\n",
    "            return self.call_openai(prompt, \"gpt-4o-mini\")\n",
    "        elif llm == \"claude-3-5-haiku-20241022\":\n",
    "            return self.call_anthropic(prompt, \"claude-3-5-haiku-20241022\")\n",
    "        elif llm == \"gemini-2.0-flash\":\n",
    "            return self.call_gemini(prompt)\n",
    "        elif llm == \"ollama:llama3.2:latest\":\n",
    "            return self.call_ollama(prompt, \"llama3.2:latest\")\n",
    "        else:\n",
    "            return f\"Error: Unsupported LLM: {llm}\""
   ]
  },
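  {
   "cell_type": "markdown",
   "id": "smoke-test-note",
   "metadata": {},
   "source": [
    "Before wiring up the UI, `CodeCommenter` can be exercised directly. Below is a minimal sketch, assuming at least `OPENAI_API_KEY` is configured; the sample snippet and the choice of `gpt-4o-mini` are arbitrary, and running it costs one small API request."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "smoke-test-cell",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional smoke test (added for illustration): ask the commenter to\n",
    "# document a tiny function. Requires OPENAI_API_KEY to be set.\n",
    "commenter = CodeCommenter()\n",
    "sample = \"def add(a, b):\\n    return a + b\"\n",
    "result = commenter.process_code(\n",
    "    language=\"Python\",\n",
    "    code=sample,\n",
    "    llm=\"gpt-4o-mini\",\n",
    "    generate_comments=True,\n",
    "    generate_tests=False,\n",
    ")\n",
    "print(result)"
   ]
  },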
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "813f0911-d53f-4887-9341-656712e32d8f",
   "metadata": {},
   "outputs": [],
   "source": [
    "def create_gradio_interface():\n",
    "    \"\"\"Create and configure the Gradio interface\"\"\"\n",
    "    commenter = CodeCommenter()\n",
    "\n",
    "    # Define the main function for the interface\n",
    "    def process_code_interface(language, code, llm, generate_comments, generate_tests):\n",
    "        \"\"\"Process the code and return the processed version based on the selected options\"\"\"\n",
    "        if not code.strip():\n",
    "            return \"Please enter some code to process.\"\n",
    "\n",
    "        if not generate_comments and not generate_tests:\n",
    "            return \"Please select at least one option: Generate comments or Generate test units.\"\n",
    "\n",
    "        # Show a processing message\n",
    "        options = []\n",
    "        if generate_comments:\n",
    "            options.append(\"comments\")\n",
    "        if generate_tests:\n",
    "            options.append(\"unit tests\")\n",
    "\n",
    "        processing_msg = f\"Processing {language} code with {llm} to generate {' and '.join(options)}...\"\n",
    "        print(processing_msg)\n",
    "\n",
    "        # Process the code\n",
    "        result = commenter.process_code(language, code, llm, generate_comments, generate_tests)\n",
    "        return result\n",
    "\n",
    "    # Define the default code sample\n",
    "    default_code = \"\"\"import pyodbc\n",
    "from tabulate import tabulate\n",
    "def connect_to_sql_server(server_name, database, username=None, password=None):\n",
    "    try:\n",
    "        if username and password:\n",
    "            connection_string = f\"DRIVER={{ODBC Driver 17 for SQL Server}};SERVER={server_name};DATABASE={database};UID={username};PWD={password}\"\n",
    "        else:\n",
    "            connection_string = f\"DRIVER={{ODBC Driver 17 for SQL Server}};SERVER={server_name};DATABASE={database};Trusted_Connection=yes\"\n",
    "        connection = pyodbc.connect(connection_string)\n",
    "        print(f\"Successfully connected to {server_name}/{database}\")\n",
    "        return connection\n",
    "    except Exception as e:\n",
    "        print(f\"Failed to connect to {server_name}/{database}: {str(e)}\")\n",
    "        return None\n",
    "def get_record_count(connection, table_name):\n",
    "    try:\n",
    "        cursor = connection.cursor()\n",
    "        query = f\"SELECT COUNT(*) FROM {table_name}\"\n",
    "        cursor.execute(query)\n",
    "        count = cursor.fetchone()[0]\n",
    "        cursor.close()\n",
    "        print(f\"Record count for {table_name}: {count}\")\n",
    "        return count\n",
    "    except Exception as e:\n",
    "        print(f\"Failed to get record count for {table_name}: {str(e)}\")\n",
    "        return None\n",
    "def select_top_records(connection, table_name, n):\n",
    "    try:\n",
    "        cursor = connection.cursor()\n",
    "        query = f\"SELECT TOP {n} * FROM {table_name}\"\n",
    "        cursor.execute(query)\n",
    "        records = cursor.fetchall()\n",
    "        columns = [column[0] for column in cursor.description]\n",
    "        cursor.close()\n",
    "        print(f\"Top {n} records from {table_name}\")\n",
    "        if records:\n",
    "            print(tabulate(records, headers=columns, tablefmt=\"grid\"))\n",
    "        return records\n",
    "    except Exception as e:\n",
    "        print(f\"Failed to retrieve top {n} records from {table_name}: {str(e)}\")\n",
    "        return None\n",
    "conn = connect_to_sql_server(\"localhost\", \"AdventureWorks_lite\")\n",
    "if conn:\n",
    "    total_records = get_record_count(conn, \"Sales.SalesOrderDetail\")\n",
    "    top_records = select_top_records(conn, \"Production.Product\", 10)\n",
    "    conn.close()\n",
    "    print(\"Connection closed successfully\")\"\"\"\n",
    "\n",
    "    css = \"\"\"\n",
    "textarea[rows]:not([rows=\"1\"]) {\n",
    "    overflow-y: auto !important;\n",
    "    scrollbar-width: thin !important;\n",
    "}\n",
    "textarea[rows]:not([rows=\"1\"])::-webkit-scrollbar {\n",
    "    all: initial !important;\n",
    "    background: #f1f1f1 !important;\n",
    "}\n",
    "textarea[rows]:not([rows=\"1\"])::-webkit-scrollbar-thumb {\n",
    "    all: initial !important;\n",
    "    background: #a8a8a8 !important;\n",
    "}\n",
    "\"\"\"\n",
    "\n",
    "    # Create the interface\n",
    "    with gr.Blocks(title=\"Code Commenter & Test Generator\", theme=gr.themes.Base(), css=css) as interface:\n",
    "        gr.Markdown(\"# 🔧 Code Commenter & Test Generator\")\n",
    "        gr.Markdown(\"Add detailed comments, docstrings, and/or generate unit tests for your code using various LLM models.\")\n",
    "\n",
    "        with gr.Row():\n",
    "            with gr.Column():\n",
    "                code_input = gr.Textbox(\n",
    "                    label=\"Input Code\",\n",
    "                    value=default_code,\n",
    "                    lines=15,\n",
    "                    max_lines=20,\n",
    "                    info=\"Enter the code you want to process\"\n",
    "                )\n",
    "\n",
    "            with gr.Column():\n",
    "                code_output = gr.Textbox(\n",
    "                    label=\"Processed Code\",\n",
    "                    lines=20,\n",
    "                    max_lines=20,\n",
    "                    info=\"Your code with added comments, docstrings, and/or unit tests\"\n",
    "                )\n",
    "\n",
    "        # Add checkboxes below the textboxes\n",
    "        with gr.Row():\n",
    "            with gr.Column():\n",
    "                generate_comments_checkbox = gr.Checkbox(\n",
    "                    label=\"Generate comments\",\n",
    "                    value=True,\n",
    "                    info=\"Add detailed comments and docstrings to the code\"\n",
    "                )\n",
    "                generate_tests_checkbox = gr.Checkbox(\n",
    "                    label=\"Generate test units\",\n",
    "                    value=False,\n",
    "                    info=\"Generate comprehensive unit tests for the code\"\n",
    "                )\n",
    "\n",
    "        with gr.Row():\n",
    "            with gr.Column(scale=1):\n",
    "                language_dropdown = gr.Dropdown(\n",
    "                    choices=[\"Python\", \"Ruby\", \"Rust\", \"C++\", \"Java\"],\n",
    "                    value=\"Python\",\n",
    "                    label=\"Programming Language\",\n",
    "                    info=\"Select the programming language of your code\"\n",
    "                )\n",
    "\n",
    "                llm_dropdown = gr.Dropdown(\n",
    "                    choices=[\n",
    "                        \"gpt-4o-mini\",\n",
    "                        \"claude-3-5-haiku-20241022\",\n",
    "                        \"gemini-2.0-flash\",\n",
    "                        \"ollama:llama3.2:latest\"\n",
    "                    ],\n",
    "                    value=\"gpt-4o-mini\",\n",
    "                    label=\"LLM Model\",\n",
    "                    info=\"Choose the language model to use\"\n",
    "                )\n",
    "\n",
    "                generate_btn = gr.Button(\n",
    "                    \"🚀 Process Code\",\n",
    "                    variant=\"primary\",\n",
    "                    size=\"lg\"\n",
    "                )\n",
    "\n",
    "        # API setup information\n",
    "        gr.Markdown(\"## 📝 API Setup Instructions\")\n",
    "        gr.Markdown(\"\"\"\n",
    "To use this tool, you need to set up API keys as environment variables:\n",
    "\n",
    "- **OpenAI**: Set `OPENAI_API_KEY`\n",
    "- **Anthropic**: Set `ANTHROPIC_API_KEY`\n",
    "- **Google Gemini**: Set `GOOGLE_API_KEY`\n",
    "- **Ollama**: Make sure Ollama is running locally on port 11434\n",
    "\"\"\")\n",
    "\n",
    "        gr.Markdown(\"## ✨ Features\")\n",
    "        gr.Markdown(\"\"\"\n",
    "- **Generate Comments**: Add detailed docstrings and inline comments\n",
    "- **Generate Unit Tests**: Create comprehensive test suites with mocking for external dependencies\n",
    "- **Combined Mode**: Generate both comments and unit tests in one go\n",
    "- **Multiple LLMs**: Choose from OpenAI, Anthropic, Google Gemini, or local Ollama models\n",
    "- **Multiple Languages**: Support for Python, Ruby, Rust, C++, and Java\n",
    "\"\"\")\n",
    "\n",
    "        # Connect the button to the processing function\n",
    "        generate_btn.click(\n",
    "            fn=process_code_interface,\n",
    "            inputs=[language_dropdown, code_input, llm_dropdown, generate_comments_checkbox, generate_tests_checkbox],\n",
    "            outputs=code_output,\n",
    "            show_progress=True\n",
    "        )\n",
    "\n",
    "    return interface"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "ef461e08-c1d5-406d-b7d2-a4329f16486e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "🚀 Starting Code Commenter & Test Generator...\n",
      "📋 Setting up Gradio interface...\n",
      "🌐 Launching interface...\n",
      "💡 The interface will open in your default browser\n",
      "🔧 Make sure to set up your API keys as environment variables\n",
      "* Running on local URL: http://127.0.0.1:7860\n",
      "\n",
      "To create a public link, set `share=True` in `launch()`.\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<div><iframe src=\"http://127.0.0.1:7860/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": []
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "print(\"🚀 Starting Code Commenter & Test Generator...\")\n",
    "print(\"📋 Setting up Gradio interface...\")\n",
    "\n",
    "# Create and launch the interface\n",
    "interface = create_gradio_interface()\n",
    "\n",
    "print(\"🌐 Launching interface...\")\n",
    "print(\"💡 The interface will open in your default browser\")\n",
    "print(\"🔧 Make sure to set up your API keys as environment variables\")\n",
    "\n",
    "# Launch with auto-opening in browser\n",
    "interface.launch(\n",
    "    server_name=\"127.0.0.1\",\n",
    "    server_port=7860,\n",
    "    share=False,\n",
    "    inbrowser=True,\n",
    "    show_error=True\n",
    ")"
   ]
  },
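  {
   "cell_type": "markdown",
   "id": "shutdown-note",
   "metadata": {},
   "source": [
    "When you are done experimenting, the server can be shut down so port 7860 is free for the next run. A small convenience cell added here, not part of the original flow; `close()` is Gradio's standard `Blocks` method for stopping a launched app."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "shutdown-cell",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Stop the Gradio server and release port 7860.\n",
    "interface.close()"
   ]
  }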
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}