diff --git a/week4/community-contributions/week4_exercise_solution-Stephen.ipynb b/week4/community-contributions/week4_exercise_solution-Stephen.ipynb
new file mode 100644
index 0000000..07d5155
--- /dev/null
+++ b/week4/community-contributions/week4_exercise_solution-Stephen.ipynb
@@ -0,0 +1,180 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "ed8c52b6",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "from dotenv import load_dotenv\n",
+    "from openai import OpenAI\n",
+    "import gradio as gr\n",
+    "\n",
+    "load_dotenv(override=True)\n",
+    "\n",
+    "openai_api_key = os.getenv('OPENAI_API_KEY')\n",
+    "ollama_api_key = os.getenv('OLLAMA_API_KEY')\n",
+    "\n",
+    "if openai_api_key:\n",
+    "    print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
+    "else:\n",
+    "    print(\"OpenAI API Key not set\")\n",
+    "\n",
+    "if ollama_api_key:\n",
+    "    print(f\"OLLAMA API Key exists and begins {ollama_api_key[:2]}\")\n",
+    "else:\n",
+    "    print(\"OLLAMA API Key not set (and this is optional)\")\n",
+    "\n",
+    "ollama_url = \"http://localhost:11434/v1\"\n",
+    "\n",
+    "openai = OpenAI()\n",
+    "ollama = OpenAI(api_key=ollama_api_key, base_url=ollama_url)\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 38,
+   "id": "c628f95e",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "system_prompt_doc = \"\"\"You are an expert Python developer and code reviewer.\n",
+    "Your job is to read the user's provided function, and return:\n",
+    "1. A concise, PEP-257-compliant docstring summarizing what the function does, clarifying types, parameters, return values, and side effects.\n",
+    "2. Helpful inline comments that improve both readability and maintainability, without restating what the code obviously does.\n",
+    "\n",
+    "Only output the function, not explanations or additional text.\n",
+    "Do not modify variable names or refactor the function logic.\n",
+    "Your response should improve the code's clarity and documentation, making it easier for others to understand and maintain.\n",
+    "Don't be extremely verbose.\n",
+    "Your answer should be at a senior level of expertise.\n",
+    "\"\"\"\n",
+    "\n",
+    "system_prompt_tests = \"\"\"You are a seasoned Python developer and testing expert.\n",
+    "Your task is to read the user's provided function, and generate:\n",
+    "1. A concise set of meaningful unit tests that thoroughly validate the function's correctness, including typical, edge, and error cases.\n",
+    "2. The tests should be written for pytest (or unittest if pytest is not appropriate), use clear, descriptive names, and avoid unnecessary complexity.\n",
+    "3. If dependencies or mocking are needed, include minimal necessary setup code (but avoid over-mocking).\n",
+    "\n",
+    "Only output the relevant test code, not explanations or extra text.\n",
+    "Do not change the original function; focus solely on comprehensive, maintainable test coverage that other developers can easily understand and extend.\n",
+    "\"\"\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 39,
+   "id": "4bb84e6c",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "models = [\"gpt-4.1-mini\", \"llama3.1\"]\n",
+    "clients = {\"gpt-4.1-mini\": openai, \"llama3.1\": ollama}\n",
+    "\n",
+    "def generate_documentation(code, model):\n",
+    "    response = clients[model].chat.completions.create(\n",
+    "        model=model,\n",
+    "        messages=[\n",
+    "            {\"role\": \"system\", \"content\": system_prompt_doc},\n",
+    "            {\"role\": \"user\", \"content\": code}\n",
+    "        ],\n",
+    "        stream=True\n",
+    "    )\n",
+    "    output = \"\"\n",
+    "    for chunk in response:\n",
+    "        output += chunk.choices[0].delta.content or \"\"\n",
+    "        yield output.replace(\"```python\", \"\").replace(\"```\", \"\")\n",
+    "\n",
+    "def generate_tests(code, model):\n",
+    "    response = clients[model].chat.completions.create(\n",
+    "        model=model,\n",
+    "        messages=[\n",
+    "            {\"role\": \"system\", \"content\": system_prompt_tests},\n",
+    "            {\"role\": \"user\", \"content\": code}\n",
+    "        ],\n",
+    "        stream=True\n",
+    "    )\n",
+    "    output = \"\"\n",
+    "    for chunk in response:\n",
+    "        output += chunk.choices[0].delta.content or \"\"\n",
+    "        yield output.replace(\"```python\", \"\").replace(\"```\", \"\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "a4e65b26",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "with gr.Blocks(theme=gr.themes.Soft(spacing_size=gr.themes.sizes.spacing_sm, radius_size=gr.themes.sizes.radius_none)) as ui:\n",
+    "    gr.Markdown(\"# Python Toolbox\", elem_id=\"app-title\")\n",
+    "\n",
+    "    with gr.Tab(\"Docstring Generator\") as tab1:\n",
+    "        gr.Markdown(\"## Docstring & Comment Generator\")\n",
+    "        gr.Markdown(\"Paste your function below to generate helpful docstrings and inline comments!\")\n",
+    "\n",
+    "        with gr.Row():\n",
+    "            with gr.Column():\n",
+    "                code_input = gr.Code(label=\"Your Python function here\", lines=20, language=\"python\")\n",
+    "                model_dropdown = gr.Dropdown(choices=models, value=models[0], label=\"Select model\")\n",
+    "                submit_doc_btn = gr.Button(\"Generate docstring & comments\")\n",
+    "            with gr.Column():\n",
+    "                code_output = gr.Code(label=\"New function with docstring and comments\", language=\"python\")\n",
+    "\n",
+    "        submit_doc_btn.click(\n",
+    "            generate_documentation,\n",
+    "            inputs=[code_input, model_dropdown],\n",
+    "            outputs=code_output\n",
+    "        )\n",
+    "\n",
+    "    with gr.Tab(\"Unit Tests Generator\") as tab2:\n",
+    "        gr.Markdown(\"## Unit Test Generator\")\n",
+    "        gr.Markdown(\"Paste your function below to generate helpful unit tests!\")\n",
+    "\n",
+    "        with gr.Row():\n",
+    "            with gr.Column():\n",
+    "                code_input_2 = gr.Code(label=\"Your Python function here\", lines=20, language=\"python\")\n",
+    "                model_dropdown_2 = gr.Dropdown(choices=models, value=models[0], label=\"Select model\")\n",
+    "                submit_test_btn = gr.Button(\"Generate unit tests\")\n",
+    "            with gr.Column():\n",
+    "                code_output_2 = gr.Code(label=\"Generated unit tests\", language=\"python\")\n",
+    "\n",
+    "        submit_test_btn.click(\n",
+    "            generate_tests,\n",
+    "            inputs=[code_input_2, model_dropdown_2],\n",
+    "            outputs=code_output_2\n",
+    "        )\n",
+    "\n",
+    "\n",
+    "    tab1.select(lambda x: x, inputs=code_input_2, outputs=code_input)\n",
+    "    tab2.select(lambda x: x, inputs=code_input, outputs=code_input_2)\n",
+    "\n",
+    "ui.launch(share=False, inbrowser=True)"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": ".venv",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.12"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
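
For reviewers who want to exercise the streaming handlers without launching the Gradio UI, here is a minimal sketch. It assumes the notebook cells above have already been run (so `models`, `clients`, and `generate_documentation` are in scope) and that a valid API key is configured; `sample_function` is a hypothetical input chosen only for illustration.

```python
# Minimal driver for the streaming generator defined in the notebook above.
# Assumes the notebook cells have been executed so that `models`, `clients`,
# and `generate_documentation` exist, and that OPENAI_API_KEY is set.

sample_function = '''
def add(a, b):
    return a + b
'''  # hypothetical example input

final_output = ""
# The generator yields the cumulative text after each streamed chunk,
# so the last yielded value is the complete documented function.
for partial in generate_documentation(sample_function, models[0]):
    final_output = partial

print(final_output)
```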