diff --git a/week4/community-contributions/ai_docstring_generator/README.md b/week4/community-contributions/ai_docstring_generator/README.md new file mode 100644 index 0000000..f206685 --- /dev/null +++ b/week4/community-contributions/ai_docstring_generator/README.md @@ -0,0 +1,220 @@ +# 🚀 AI Docstring Generator + +An intelligent tool that automatically generates comprehensive docstrings and comments for your code using state-of-the-art AI models (OpenAI GPT, Anthropic Claude, and Google Gemini). + +![Python](https://img.shields.io/badge/Python-3.8+-blue.svg) +![License](https://img.shields.io/badge/License-MIT-green.svg) +![Gradio](https://img.shields.io/badge/Gradio-UI-orange.svg) + +## ✨ Features + +- 🤖 **Multi-Model Support**: Choose between GPT-4o mini, Claude 3.5 Sonnet, or Gemini 2.0 Flash +- 🌍 **Multi-Language Support**: Python, JavaScript, Java, C++, Go, and Rust +- ⚡ **Real-time Streaming**: Watch documentation being generated live +- 📝 **Comprehensive Documentation**: Generates parameter descriptions, return values, exceptions, and inline comments +- 🎨 **Beautiful UI**: Clean and intuitive Gradio interface +- 📚 **Built-in Examples**: Quick start with pre-loaded code examples + +## 🎯 Supported Languages + +- **Python** (PEP 257, Google style) +- **JavaScript/TypeScript** (JSDoc) +- **Java** (Javadoc) +- **C++** (Doxygen) +- **Go** (Go conventions) +- **Rust** (Rust doc comments) + +## 📋 Prerequisites + +- Python 3.8 or higher +- API keys for at least one of the following: - OpenAI API key - Anthropic API key - Google API key + +## 🛠️ Installation + +1. **Clone the repository** +```bash +git clone {paste-this-repo-link} +cd ai-docstring-generator # navigate to this folder +``` + +2. **Create a virtual environment** (recommended) +```bash +python -m venv venv +source venv/bin/activate # On Windows: venv\Scripts\activate +``` + +3. **Install dependencies** +```bash +pip install -r requirements.txt +``` + +4. **Set up environment variables** + +Create a `.env` file in the project root: +```env +OPENAI_API_KEY=sk-your-openai-api-key-here +ANTHROPIC_API_KEY=sk-ant-your-anthropic-api-key-here +GOOGLE_API_KEY=your-google-api-key-here +``` + +**Note**: You only need the API key(s) for the model(s) you plan to use. + +## 🚀 Usage + +1. **Open the notebook** +```bash +jupyter notebook docstring_generator.ipynb +``` + +2. **Access the interface** - Run all cells; the Gradio app will automatically open in your default browser + +3. **Generate documentation** - Select your programming language - Choose an AI model (GPT, Claude, or Gemini) - Paste your code or load an example - Click "✨ Generate Docstrings" - Copy the documented code! + +## 📖 Example + +**Input (Python):** +```python +def calculate_pi(iterations, param1, param2): + result = 1.0 + for i in range(1, iterations+1): + j = i * param1 - param2 + result -= (1/j) + j = i * param1 + param2 + result += (1/j) + return result +``` + +**Output:** +```python +def calculate_pi(iterations, param1, param2): + """ + Calculate an approximation of pi using the Leibniz formula. + + Args: + iterations (int): Number of iterations to perform in the calculation. + Higher values increase accuracy but take longer. + param1 (int): First parameter for the calculation formula (typically 4). + param2 (int): Second parameter for the calculation formula (typically 1). + + Returns: + float: Approximation of pi divided by 4. Multiply by 4 to get pi. + + Note: + This uses the Leibniz formula: π/4 = 1 - 1/3 + 1/5 - 1/7 + ... + Convergence is slow; many iterations needed for good accuracy.
+ """ + result = 1.0 + for i in range(1, iterations+1): + # Calculate denominator for negative term + j = i * param1 - param2 + result -= (1/j) + # Calculate denominator for positive term + j = i * param1 + param2 + result += (1/j) + return result +``` + +## 🔑 Getting API Keys + +### OpenAI API Key +1. Visit [platform.openai.com](https://platform.openai.com) +2. Sign up or log in +3. Go to API Keys section +4. Create a new API key + +### Anthropic API Key +1. Visit [console.anthropic.com](https://console.anthropic.com) +2. Sign up or log in +3. Go to API Keys +4. Generate a new key + +### Google API Key +1. Visit [Google AI Studio](https://makersuite.google.com/app/apikey) +2. Sign in with Google account +3. Create an API key + +## 📁 Project Structure + +``` +ai-docstring-generator/ +│ +├── docstring_generator.ipynb # Main application notebook +├── requirements.txt # Python dependencies +├── README.md # Project documentation +``` + +## 🎨 Customization + +You can customize the documentation style by modifying the `system_prompt_for_docstring()` function in `docstring_generator.ipynb`; a minimal example sketch is included at the end of this README. + +## 🤝 Contributing + +Contributions are welcome! Please feel free to submit a Pull Request. + +1. Fork the repository +2. Create your feature branch (`git checkout -b feature/AmazingFeature`) +3. Commit your changes (`git commit -m 'Add some AmazingFeature'`) +4. Push to the branch (`git push origin feature/AmazingFeature`) +5. Open a Pull Request + +## 📝 License + +This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details. + +## 🐛 Troubleshooting + +### Common Issues + +**Issue: `TypeError: Client.__init__() got an unexpected keyword argument 'proxies'`** +- **Solution**: Update packages: `pip install --upgrade anthropic httpx` + +**Issue: API key not found** +- **Solution**: Ensure your `.env` file is in the project root and contains valid API keys + +**Issue: Model not responding** +- **Solution**: Check that your API key is valid and that you have available credits/quota + +**Issue: Port 7860 already in use** +- **Solution**: Change the port in the `ui.launch()` call: `server_port=7861` + +## 🔮 Future Enhancements + +- [ ] Support for more AI models (Llama, Mistral, etc.) +- [ ] Batch processing for multiple files +- [ ] Support for more programming languages +- [ ] Custom documentation style templates +- [ ] Integration with IDEs (VS Code, PyCharm) +- [ ] API endpoint for programmatic access + +## 📧 Contact + +For questions or suggestions, please open an issue on GitHub. +Or email me at udayslathia16@gmail.com + +## 🙏 Acknowledgments + +- OpenAI for GPT models +- Anthropic for Claude models +- Google for Gemini models +- Gradio for the amazing UI framework + +--- + +**Made with ❤️ for developers who value good documentation** + +--- + +## ⭐ Star History + +If you find this project useful, please consider giving it a star!
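+
+As a follow-up to the 🎨 Customization section above, here is a minimal, hypothetical sketch of how the default Google-style Python prompt could be swapped for a NumPy-style one. It assumes you re-run the cell that defines `system_prompt_for_docstring()` in `docstring_generator.ipynb`; the `NUMPY_STYLE_PROMPT` name is illustrative, and the other language prompts from the notebook are left out for brevity:
+
+```python
+# Hypothetical override: redefine the prompt function in a notebook cell,
+# keeping the same signature the rest of the notebook expects.
+NUMPY_STYLE_PROMPT = """
+You are a Python documentation expert. When writing documentation:
+- Follow PEP 257 and NumPy docstring style guidelines
+- Document Parameters, Returns, Raises, and Examples sections
+- Add inline comments only for complex logic
+"""
+
+def system_prompt_for_docstring(language):
+    # Only the "python" entry changes here; re-add the other language
+    # prompts from the original cell if you need them.
+    prompts = {
+        "python": NUMPY_STYLE_PROMPT,
+    }
+    return prompts.get(language.lower(), prompts["python"])
+```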
+ +[![Star History Chart](https://api.star-history.com/svg?repos=udayslathia16/ai-docstring-generator&type=Date)](https://star-history.com/#udayslathia16/ai-docstring-generator&Date) \ No newline at end of file diff --git a/week4/community-contributions/ai_docstring_generator/docstring_generator.ipynb b/week4/community-contributions/ai_docstring_generator/docstring_generator.ipynb new file mode 100644 index 0000000..2f195bf --- /dev/null +++ b/week4/community-contributions/ai_docstring_generator/docstring_generator.ipynb @@ -0,0 +1,558 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "7d11beae-8892-4777-924d-6a3a4ea85f7b", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "import anthropic\n", + "import google.generativeai as genai\n", + "import gradio as gr" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ea3021fa-8281-44da-ae5c-c737c92b6700", + "metadata": {}, + "outputs": [], + "source": [ + "load_dotenv()\n", + "os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY', 'your-key-if-not-using-env')\n", + "os.environ['ANTHROPIC_API_KEY'] = os.getenv('ANTHROPIC_API_KEY', 'your-key-if-not-using-env')\n", + "os.environ['GOOGLE_API_KEY'] = os.getenv('GOOGLE_API_KEY', 'your-key-if-not-using-env')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "36a120da-8481-47fc-9f1c-a32664ed61fa", + "metadata": {}, + "outputs": [], + "source": [ + "# Initialize clients\n", + "openai_client = OpenAI()\n", + "try:\n", + " claude_client = anthropic.Anthropic(api_key=os.environ['ANTHROPIC_API_KEY'])\n", + "except TypeError:\n", + " # Fallback for older anthropic versions\n", + " claude_client = anthropic.Client(api_key=os.environ['ANTHROPIC_API_KEY'])\n", + "genai.configure(api_key=os.environ['GOOGLE_API_KEY'])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c9100f0b-5ae3-48db-858f-2e4010caad08", + "metadata": {}, + "outputs": [], + "source": [ + "# Model configurations\n", + "OPENAI_MODEL = \"gpt-4o-mini\"\n", + "CLAUDE_MODEL = \"claude-3-5-sonnet-20240620\"\n", + "GEMINI_MODEL = \"gemini-2.0-flash-exp\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7141fd87-c17c-4e48-bcb0-8a7cabf1947f", + "metadata": {}, + "outputs": [], + "source": [ + "def system_prompt_for_docstring(language):\n", + " \"\"\"\n", + " Generate system prompt for docstring generation based on programming language.\n", + " \n", + " Args:\n", + " language (str): Programming language (python, javascript, java, etc.)\n", + " \n", + " Returns:\n", + " str: System prompt tailored for the specified language\n", + " \"\"\"\n", + " prompts = {\n", + " \"python\": \"\"\"\n", + " You are a Python documentation expert. When writing documentation:\n", + " - Follow PEP 257 and Google docstring style guidelines\n", + " - Write clear, concise explanations\n", + " - Include practical examples when helpful\n", + " - Highlight edge cases and limitations\n", + " - Use type hints in docstrings\n", + " - Add inline comments only for complex logic\n", + " - Never skip documenting parameters or return values\n", + " - Validate that all documentation is accurate and complete\n", + " \"\"\",\n", + " \"javascript\": \"\"\"\n", + " You are a JavaScript/TypeScript documentation expert. 
When writing documentation:\n", + " - Follow JSDoc standards\n", + " - Write clear, concise explanations\n", + " - Include type annotations\n", + " - Document parameters, return values, and exceptions\n", + " - Add inline comments for complex logic\n", + " - Use modern ES6+ syntax examples\n", + " \"\"\",\n", + " \"java\": \"\"\"\n", + " You are a Java documentation expert. When writing documentation:\n", + " - Follow Javadoc standards\n", + " - Write clear, concise explanations\n", + " - Document all public methods and classes\n", + " - Include @param, @return, and @throws tags\n", + " - Add inline comments for complex logic\n", + " \"\"\",\n", + " \"cpp\": \"\"\"\n", + " You are a C++ documentation expert. When writing documentation:\n", + " - Follow Doxygen standards\n", + " - Write clear, concise explanations\n", + " - Document parameters, return values, and exceptions\n", + " - Add inline comments for complex logic and memory management\n", + " \"\"\",\n", + " \"go\": \"\"\"\n", + " You are a Go documentation expert. When writing documentation:\n", + " - Follow Go documentation conventions\n", + " - Write clear, concise explanations\n", + " - Document exported functions and types\n", + " - Add inline comments for complex logic\n", + " \"\"\",\n", + " \"rust\": \"\"\"\n", + " You are a Rust documentation expert. When writing documentation:\n", + " - Follow Rust documentation conventions\n", + " - Write clear, concise explanations\n", + " - Document safety considerations\n", + " - Include examples in doc comments\n", + " \"\"\",\n", + " }\n", + " return prompts.get(language.lower(), prompts[\"python\"])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c535bfe3-30ec-4f07-ae2f-28d4db350c6b", + "metadata": {}, + "outputs": [], + "source": [ + "def user_prompt_for_docstring(code, language):\n", + " \"\"\"\n", + " Generate user prompt for docstring generation request.\n", + " \n", + " Args:\n", + " code (str): Source code to document\n", + " language (str): Programming language of the code\n", + " \n", + " Returns:\n", + " str: Formatted user prompt\n", + " \"\"\"\n", + " return f\"\"\"\n", + " Please document this {language} code with comprehensive docstrings and comments:\n", + " \n", + " 1. Add docstrings containing:\n", + " - Clear description of purpose and functionality\n", + " - All parameters with types and descriptions\n", + " - Return values with types\n", + " - Exceptions that may be raised\n", + " - Any important notes or limitations\n", + " \n", + " 2. 
Add strategic inline comments for:\n", + " - Complex algorithms or business logic\n", + " - Non-obvious implementation choices\n", + " - Performance considerations\n", + " - Edge cases\n", + " \n", + " Return ONLY the documented code, no explanations before or after.\n", + " \n", + " Here's the code to document:\n", + " \n", + "{code}\n", + " \"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "eb4b2fe0-81fa-48f2-bfb8-f4503f7b1b14", + "metadata": {}, + "outputs": [], + "source": [ + "def stream_docstring_gpt(code, language):\n", + " \"\"\"\n", + " Generate docstrings using OpenAI GPT model with streaming.\n", + " \n", + " Args:\n", + " code (str): Source code to document\n", + " language (str): Programming language\n", + " \n", + " Yields:\n", + " str: Progressively generated documented code\n", + " \"\"\"\n", + " messages = [\n", + " {\"role\": \"system\", \"content\": system_prompt_for_docstring(language)},\n", + " {\"role\": \"user\", \"content\": user_prompt_for_docstring(code, language)}\n", + " ]\n", + " \n", + " stream = openai_client.chat.completions.create(\n", + " model=OPENAI_MODEL,\n", + " messages=messages,\n", + " stream=True,\n", + " temperature=0.3\n", + " )\n", + " \n", + " reply = \"\"\n", + " for chunk in stream:\n", + " fragment = chunk.choices[0].delta.content or \"\"\n", + " reply += fragment\n", + " yield reply.replace('```python', '').replace('```javascript', '').replace('```java', '').replace('```', '')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f20d8bb6-b2e6-407b-823f-03eb09b6558a", + "metadata": {}, + "outputs": [], + "source": [ + "def stream_docstring_claude(code, language):\n", + " \"\"\"\n", + " Generate docstrings using Anthropic Claude model with streaming.\n", + " \n", + " Args:\n", + " code (str): Source code to document\n", + " language (str): Programming language\n", + " \n", + " Yields:\n", + " str: Progressively generated documented code\n", + " \"\"\"\n", + " result = claude_client.messages.stream(\n", + " model=CLAUDE_MODEL,\n", + " max_tokens=4096,\n", + " system=system_prompt_for_docstring(language),\n", + " messages=[{\"role\": \"user\", \"content\": user_prompt_for_docstring(code, language)}],\n", + " temperature=0.3\n", + " )\n", + " \n", + " reply = \"\"\n", + " with result as stream:\n", + " for text in stream.text_stream:\n", + " reply += text\n", + " yield reply.replace('```python', '').replace('```javascript', '').replace('```java', '').replace('```', '')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fe6f7795-04ee-4c79-b5e6-da4a338547fa", + "metadata": {}, + "outputs": [], + "source": [ + "def stream_docstring_gemini(code, language):\n", + " \"\"\"\n", + " Generate docstrings using Google Gemini model with streaming.\n", + " \n", + " Args:\n", + " code (str): Source code to document\n", + " language (str): Programming language\n", + " \n", + " Yields:\n", + " str: Progressively generated documented code\n", + " \"\"\"\n", + " model = genai.GenerativeModel(GEMINI_MODEL)\n", + " \n", + " prompt = f\"{system_prompt_for_docstring(language)}\\n\\n{user_prompt_for_docstring(code, language)}\"\n", + " \n", + " response = model.generate_content(\n", + " prompt,\n", + " stream=True,\n", + " generation_config=genai.types.GenerationConfig(\n", + " temperature=0.3,\n", + " max_output_tokens=4096\n", + " )\n", + " )\n", + " \n", + " reply = \"\"\n", + " for chunk in response:\n", + " if chunk.text:\n", + " reply += chunk.text\n", + " yield reply.replace('```python', 
'').replace('```javascript', '').replace('```java', '').replace('```', '')\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e99d0539-a92b-4ccd-a011-0d5f211aac4a", + "metadata": {}, + "outputs": [], + "source": [ + "def generate_docstring(code, language, model):\n", + " \"\"\"\n", + " Main function to generate docstrings using selected AI model.\n", + " \n", + " Args:\n", + " code (str): Source code to document\n", + " language (str): Programming language\n", + " model (str): AI model to use (GPT, Claude, or Gemini)\n", + " \n", + " Yields:\n", + " str: Progressively generated documented code\n", + " \n", + " Raises:\n", + " ValueError: If unknown model is specified\n", + " \"\"\"\n", + " if not code.strip():\n", + " yield \"Please enter some code to document.\"\n", + " return\n", + " \n", + " try:\n", + " if model == \"GPT\":\n", + " result = stream_docstring_gpt(code, language)\n", + " elif model == \"Claude\":\n", + " result = stream_docstring_claude(code, language)\n", + " elif model == \"Gemini\":\n", + " result = stream_docstring_gemini(code, language)\n", + " else:\n", + " raise ValueError(f\"Unknown model: {model}\")\n", + " \n", + " for stream_so_far in result:\n", + " yield stream_so_far\n", + " except Exception as e:\n", + " yield f\"Error: {str(e)}\\n\\nPlease check your API keys in .env file.\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6e691c2c-9b6b-4ee3-9183-234079ca5c0a", + "metadata": {}, + "outputs": [], + "source": [ + "# Example code for testing\n", + "EXAMPLE_PYTHON_CODE = \"\"\"\n", + "def calculate_pi(iterations, param1, param2):\n", + " result = 1.0\n", + " for i in range(1, iterations+1):\n", + " j = i * param1 - param2\n", + " result -= (1/j)\n", + " j = i * param1 + param2\n", + " result += (1/j)\n", + " return result\n", + "\n", + "class DataProcessor:\n", + " def __init__(self, data):\n", + " self.data = data\n", + " self.processed = False\n", + " \n", + " def process(self, threshold=0.5):\n", + " if not self.data:\n", + " raise ValueError(\"No data to process\")\n", + " result = [x for x in self.data if x > threshold]\n", + " self.processed = True\n", + " return result\n", + "\"\"\"\n", + "\n", + "EXAMPLE_JAVASCRIPT_CODE = \"\"\"\n", + "function calculateSum(numbers) {\n", + " return numbers.reduce((acc, num) => acc + num, 0);\n", + "}\n", + "\n", + "class UserManager {\n", + " constructor(users) {\n", + " this.users = users;\n", + " }\n", + " \n", + " findByAge(minAge, maxAge) {\n", + " return this.users.filter(user => \n", + " user.age >= minAge && user.age <= maxAge\n", + " );\n", + " }\n", + "}\n", + "\"\"\"\n", + "\n", + "EXAMPLE_JAVA_CODE = \"\"\"\n", + "public class Calculator {\n", + " private double result;\n", + " \n", + " public Calculator() {\n", + " this.result = 0.0;\n", + " }\n", + " \n", + " public double add(double a, double b) {\n", + " result = a + b;\n", + " return result;\n", + " }\n", + " \n", + " public double divide(double a, double b) {\n", + " if (b == 0) {\n", + " throw new ArithmeticException(\"Division by zero\");\n", + " }\n", + " result = a / b;\n", + " return result;\n", + " }\n", + "}\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "80f0891b-ce44-45c5-916c-f108b09ee912", + "metadata": {}, + "outputs": [], + "source": [ + "# Custom CSS for better UI\n", + "css = \"\"\"\n", + ".code-input textarea, .code-output textarea {\n", + " font-family: 'Courier New', monospace;\n", + " font-size: 14px;\n", + "}\n", + ".header {\n", + " text-align: 
center;\n", + " padding: 20px;\n", + " background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);\n", + " color: white;\n", + " border-radius: 10px;\n", + " margin-bottom: 20px;\n", + "}\n", + "\"\"\"\n", + "\n", + "# Create Gradio interface\n", + "with gr.Blocks(css=css, theme=gr.themes.Soft()) as ui:\n", + " gr.Markdown(\"\"\"\n", + "
<div class=\"header\">\n", "        <h1>🚀 AI Docstring Generator</h1>\n", "        <p>Automatically generate comprehensive docstrings and comments for your code</p>\n", "    </div>
\n", + " \"\"\")\n", + " \n", + " with gr.Row():\n", + " with gr.Column(scale=1):\n", + " gr.Markdown(\"### ⚙️ Configuration\")\n", + " language_dropdown = gr.Dropdown(\n", + " choices=[\"Python\", \"JavaScript\", \"Java\", \"C++\", \"Go\", \"Rust\"],\n", + " label=\"Programming Language\",\n", + " value=\"Python\"\n", + " )\n", + " model_dropdown = gr.Dropdown(\n", + " choices=[\"GPT\", \"Claude\", \"Gemini\"],\n", + " label=\"AI Model\",\n", + " value=\"GPT\",\n", + " info=\"Select which AI model to use\"\n", + " )\n", + " \n", + " gr.Markdown(\"### 📝 Examples\")\n", + " example_dropdown = gr.Dropdown(\n", + " choices=[\"Python Example\", \"JavaScript Example\", \"Java Example\", \"Custom\"],\n", + " label=\"Load Example\",\n", + " value=\"Python Example\"\n", + " )\n", + " \n", + " with gr.Row():\n", + " with gr.Column(scale=1):\n", + " gr.Markdown(\"### 📥 Input Code\")\n", + " code_input = gr.Textbox(\n", + " label=\"Paste your code here\",\n", + " value=EXAMPLE_PYTHON_CODE,\n", + " lines=20,\n", + " placeholder=\"Enter your code...\",\n", + " elem_classes=\"code-input\"\n", + " )\n", + " generate_btn = gr.Button(\"✨ Generate Docstrings\", variant=\"primary\", size=\"lg\")\n", + " \n", + " with gr.Column(scale=1):\n", + " gr.Markdown(\"### 📤 Documented Code\")\n", + " code_output = gr.Textbox(\n", + " label=\"Generated code with docstrings\",\n", + " lines=20,\n", + " elem_classes=\"code-output\"\n", + " )\n", + " \n", + " gr.Markdown(\"\"\"\n", + " ### 📚 Instructions:\n", + " 1. Select your programming language\n", + " 2. Choose an AI model (GPT, Claude, or Gemini)\n", + " 3. Paste your code or select an example\n", + " 4. Click \"Generate Docstrings\"\n", + " 5. Copy the documented code\n", + " \n", + " **Note:** Make sure to set up your API keys in a `.env` file:\n", + " ```\n", + " OPENAI_API_KEY=your_openai_key\n", + " ANTHROPIC_API_KEY=your_anthropic_key\n", + " GOOGLE_API_KEY=your_google_key\n", + " ```\n", + " \"\"\")\n", + " \n", + " # Event handlers\n", + " def load_example(example_name):\n", + " examples = {\n", + " \"Python Example\": EXAMPLE_PYTHON_CODE,\n", + " \"JavaScript Example\": EXAMPLE_JAVASCRIPT_CODE,\n", + " \"Java Example\": EXAMPLE_JAVA_CODE,\n", + " \"Custom\": \"\"\n", + " }\n", + " return examples.get(example_name, \"\")\n", + " \n", + " example_dropdown.change(\n", + " fn=load_example,\n", + " inputs=[example_dropdown],\n", + " outputs=[code_input]\n", + " )\n", + " \n", + " generate_btn.click(\n", + " fn=generate_docstring,\n", + " inputs=[code_input, language_dropdown, model_dropdown],\n", + " outputs=[code_output]\n", + " )\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f2e8041f-c330-4a66-9ba8-45a4edabb599", + "metadata": {}, + "outputs": [], + "source": [ + "# Launch the interface\n", + "\n", + "ui.launch(\n", + " inbrowser=True,\n", + " share=False,\n", + " # server_name=\"0.0.0.0\",\n", + " # server_port=7860\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "564dba13-f807-4eb5-aa7c-636f9a7cb286", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.10" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git 
a/week4/community-contributions/ai_docstring_generator/requirements.txt b/week4/community-contributions/ai_docstring_generator/requirements.txt new file mode 100644 index 0000000..f5852f4 --- /dev/null +++ b/week4/community-contributions/ai_docstring_generator/requirements.txt @@ -0,0 +1,12 @@ +# Core dependencies +openai>=1.12.0 +anthropic>=0.18.0 +google-generativeai>=0.3.0 +gradio>=4.0.0 +python-dotenv>=1.0.0 + +# HTTP client (required by anthropic) +httpx>=0.24.0 + +# Optional: For better performance +aiohttp>=3.9.0 \ No newline at end of file