Add Week 4 submission for muhammad_qasim_sheikh

aashahid
2025-10-29 00:04:03 +05:00
parent 998d04f8a3
commit a16303ca77


@@ -0,0 +1,397 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "36640116",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import io\n",
"import zipfile\n",
"import textwrap\n",
"from typing import Dict, Generator, List, Optional, Tuple\n",
"import gradio as gr\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bf7c9195",
"metadata": {},
"outputs": [],
"source": [
"load_dotenv()\n",
"client = OpenAI(api_key=os.getenv(\"OPENAI_API_KEY\"))\n",
"ollama_via_openai = OpenAI(base_url='http://localhost:11434/v1', api_key = 'ollama')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2f108866",
"metadata": {},
"outputs": [],
"source": [
"MODEL_CHOICES = [\n",
" \"openai/gpt-4o-mini\",\n",
" \"openai/gpt-4o\",\n",
" \"openai/gpt-3.5-turbo\",\n",
" \"ollama/llama3.2\",\n",
" \"ollama/phi3:mini\",\n",
" \"ollama/qwen2.5:3b\",\n",
"]\n",
"DEFAULT_MODEL = \"openai/gpt-4o-mini\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "09e3566c",
"metadata": {},
"outputs": [],
"source": [
"def normalize_model(model: str) -> str:\n",
" return model.split(\"/\", 1)[1]\n",
"\n",
"def get_client(model_choice: str) -> OpenAI:\n",
" if model_choice.startswith(\"ollama/\"):\n",
" return ollama_via_openai\n",
" return client\n",
"\n",
"def stream_response(model_choice: str, messages: List[Dict]) -> Generator[str, None, None]:\n",
" model_name = normalize_model(model_choice)\n",
" api_client = get_client(model_choice)\n",
"\n",
" try:\n",
" stream = api_client.chat.completions.create(\n",
" model=model_name,\n",
" messages=messages,\n",
" stream=True,\n",
" max_tokens=1500,\n",
" temperature=0.3,\n",
" )\n",
" text = \"\"\n",
" for chunk in stream:\n",
" delta = chunk.choices[0].delta.content or \"\"\n",
" text += delta\n",
" yield text\n",
" except Exception as e:\n",
" yield f\"Error while streaming from {model_choice}: {str(e)}\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e52f2d16",
"metadata": {},
"outputs": [],
"source": [
"SYSTEM_PROMPT = \"\"\"You are a senior Python engineer. Be conservative and helpful.\n",
"When asked to annotate code, add clean Google-style docstrings and minimal comments.\n",
"When asked to generate tests, write readable pytest test modules.\n",
"When asked to create a trading scaffold, make a working 3-file setup with backtesting.\n",
"Avoid clever tricks, prioritize clarity and correctness.\"\"\"\n",
"\n",
"DOC_PROMPT = \"\"\"Task: Add docstrings and helpful inline comments to this code.\n",
"Do not change any logic or flow.\n",
"Use {style}-style docstrings. Add type hints: {add_types}.\n",
"Return only the updated code.\"\"\"\n",
"\n",
"TEST_PROMPT = \"\"\"Task: Generate a pytest test file for this code.\n",
"Include tests for normal, edge, and error conditions.\n",
"Use plain pytest (no unittest). Add minimal mocks if needed.\"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b7c12db5",
"metadata": {},
"outputs": [],
"source": [
"def make_trading_files(strategy_brief: str, symbol: str) -> Dict[str, str]:\n",
" \"\"\"Return dictionary of files for a simple strategy, broker, and backtest.\"\"\"\n",
" strategy_py = f'''\"\"\"\n",
"strategy.py\n",
"Auto-generated strategy module for {symbol}.\n",
"Brief: {strategy_brief}\n",
"\"\"\"\n",
"def decide(state, bar):\n",
" \"\"\"Example SMA crossover.\"\"\"\n",
" prices = state.setdefault(\"prices\", [])\n",
" prices.append(bar[\"close\"])\n",
" if len(prices) < 20:\n",
" return \"HOLD\", state\n",
" short = sum(prices[-5:]) / 5\n",
" long = sum(prices[-20:]) / 20\n",
" action = \"BUY\" if short > long and not state.get(\"pos\") else \"SELL\" if short < long and state.get(\"pos\") else \"HOLD\"\n",
" state[\"pos\"] = action == \"BUY\" or (state.get(\"pos\") and action != \"SELL\")\n",
" return action, state\n",
"'''\n",
"\n",
" broker_py = \"\"\"\\\"\\\"\\\"sim_broker.py\n",
"Simple in-memory broker simulator.\\\"\\\"\\\"\n",
"def init(cash=10000.0):\n",
" return {\"cash\": cash, \"pos\": 0, \"equity\": [], \"trades\": []}\n",
"\n",
"def execute(state, action, price, size=1):\n",
" if action == \"BUY\" and state[\"cash\"] >= price * size:\n",
" state[\"cash\"] -= price * size\n",
" state[\"pos\"] += size\n",
" state[\"trades\"].append((\"BUY\", price))\n",
" elif action == \"SELL\" and state[\"pos\"] >= size:\n",
" state[\"cash\"] += price * size\n",
" state[\"pos\"] -= size\n",
" state[\"trades\"].append((\"SELL\", price))\n",
" state[\"equity\"].append(state[\"cash\"] + state[\"pos\"] * price)\n",
" return state\n",
"\"\"\"\n",
"\n",
" backtest_py = f'''\"\"\"\n",
"backtest.py\n",
"Run a synthetic backtest for {symbol}.\n",
"\"\"\"\n",
"import random, strategy, sim_broker\n",
"\n",
"def synthetic_data(n=250, start=100.0):\n",
" price = start\n",
" data = []\n",
" for _ in range(n):\n",
" price *= 1 + random.uniform(-0.01, 0.01)\n",
" data.append({{\"close\": price}})\n",
" return data\n",
"\n",
"def run():\n",
" bars = synthetic_data()\n",
" state = {{\"prices\": []}}\n",
" broker = sim_broker.init()\n",
" for bar in bars:\n",
" action, state = strategy.decide(state, bar)\n",
" broker = sim_broker.execute(broker, action, bar[\"close\"])\n",
" eq = broker[\"equity\"][-1] if broker[\"equity\"] else broker[\"cash\"]\n",
" print(f\"Final equity: {{eq:.2f}} | Trades: {{len(broker['trades'])}}\")\n",
"\n",
"if __name__ == \"__main__\":\n",
" run()\n",
"'''\n",
"\n",
" readme = f\"\"\"# Trading Scaffold for {symbol}\n",
"Generated from your brief: {strategy_brief}\n",
"Files:\n",
"- strategy.py — core logic\n",
"- sim_broker.py — in-memory execution\n",
"- backtest.py — synthetic backtest\n",
"Run with:\n",
"```bash\n",
"python backtest.py\n",
"```\"\"\"\n",
" return {\n",
" \"strategy.py\": strategy_py,\n",
" \"sim_broker.py\": broker_py,\n",
" \"backtest.py\": backtest_py,\n",
" \"README.md\": readme,\n",
" }"
]
},
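{
"cell_type": "markdown",
"id": "9f4d1a20",
"metadata": {},
"source": [
"Optional sanity check: the next cell is a small illustrative sketch that calls `make_trading_files` with an example brief and symbol and prints the size of each generated file. It does not write anything to disk or call any model."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9f4d1a21",
"metadata": {},
"outputs": [],
"source": [
"# Illustrative sketch only: the brief and symbol below are arbitrary example values.\n",
"_preview = make_trading_files(\"SMA crossover, fast=5, slow=20, long-only\", \"AAPL\")\n",
"for _name, _content in _preview.items():\n",
" print(f\"{_name}: {len(_content)} characters\")"
]
},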
{
"cell_type": "code",
"execution_count": null,
"id": "3bb60fc8",
"metadata": {},
"outputs": [],
"source": [
"def zip_trading_files(files: Dict[str, str]) -> Tuple[str, bytes]:\n",
" buf = io.BytesIO()\n",
" with zipfile.ZipFile(buf, \"w\", zipfile.ZIP_DEFLATED) as z:\n",
" for name, content in files.items():\n",
" z.writestr(name, content)\n",
" buf.seek(0)\n",
" return \"trading_scaffold.zip\", buf.read()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "32d5e1eb",
"metadata": {},
"outputs": [],
"source": [
"def docstrings_stream(model_choice: str, code: str, style: str, add_types: bool):\n",
" if not code.strip():\n",
" yield \"Please paste Python code first.\"\n",
" return\n",
" sys = {\"role\": \"system\", \"content\": SYSTEM_PROMPT}\n",
" usr = {\n",
" \"role\": \"user\",\n",
" \"content\": DOC_PROMPT.format(style=style, add_types=add_types) + \"\\n\\n\" + code,\n",
" }\n",
" yield from stream_response(model_choice, [sys, usr])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8b23500b",
"metadata": {},
"outputs": [],
"source": [
"def tests_stream(model_choice: str, code: str):\n",
" if not code.strip():\n",
" yield \"Please paste Python code first.\"\n",
" return\n",
" sys = {\"role\": \"system\", \"content\": SYSTEM_PROMPT}\n",
" usr = {\"role\": \"user\", \"content\": TEST_PROMPT + \"\\n\\n\" + code}\n",
" yield from stream_response(model_choice, [sys, usr])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7d2bb480",
"metadata": {},
"outputs": [],
"source": [
"def trading_scaffold(strategy_brief: str, symbol: str):\n",
" if not symbol.strip():\n",
" symbol = \"AAPL\"\n",
" files = make_trading_files(strategy_brief or \"Simple SMA crossover\", symbol)\n",
" name, data = zip_trading_files(files)\n",
" zip_path = \"trading_scaffold.zip\"\n",
" with open(zip_path, \"wb\") as f:\n",
" f.write(data)\n",
" return (\n",
" files[\"strategy.py\"],\n",
" files[\"sim_broker.py\"],\n",
" files[\"backtest.py\"],\n",
" files[\"README.md\"],\n",
" zip_path,\n",
" )"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "623db7de",
"metadata": {},
"outputs": [],
"source": [
"with gr.Blocks(title=\"DevLab Assistant\") as demo:\n",
" gr.Markdown(\"# DevLab Assistant\")\n",
" gr.Markdown(\n",
" \"This mini-lab helps with everyday coding tasks:\\n\"\n",
" \"* Add docstrings and helpful comments to existing code\\n\"\n",
" \"* Generate pytest unit tests automatically\\n\"\n",
" \"* Scaffold a small trading simulator to experiment with strategy ideas\\n\\n\"\n",
" \"Select a model (OpenAI or a local Ollama one) and try each tab.\"\n",
" )\n",
"\n",
" with gr.Tab(\"Docstrings / Comments\"):\n",
" model1 = gr.Dropdown(MODEL_CHOICES, value=DEFAULT_MODEL, label=\"Model\")\n",
" style = gr.Radio([\"google\", \"numpy\"], value=\"google\", label=\"Docstring style\")\n",
" add_types = gr.Checkbox(value=False, label=\"Add basic type hints\")\n",
" code_input = gr.Textbox(\n",
" lines=14,\n",
" label=\"Paste your Python code here\",\n",
" placeholder=\"def multiply(a, b):\\n return a * b\",\n",
" )\n",
" output_md = gr.Markdown(label=\"Result\")\n",
" gen_btn = gr.Button(\"Generate Docstrings\")\n",
"\n",
" gen_btn.click(\n",
" fn=docstrings_stream,\n",
" inputs=[model1, code_input, style, add_types],\n",
" outputs=output_md,\n",
" )\n",
"\n",
" with gr.Tab(\"Unit Tests\"):\n",
" model2 = gr.Dropdown(MODEL_CHOICES, value=DEFAULT_MODEL, label=\"Model\")\n",
" code_input2 = gr.Textbox(\n",
" lines=14,\n",
" label=\"Paste the code you want tests for\",\n",
" placeholder=\"class Calculator:\\n def add(self, a, b):\\n return a + b\",\n",
" )\n",
" output_md2 = gr.Markdown(label=\"Generated test file\")\n",
" gen_btn2 = gr.Button(\"Generate Tests\")\n",
"\n",
" gen_btn2.click(\n",
" fn=tests_stream,\n",
" inputs=[model2, code_input2],\n",
" outputs=output_md2,\n",
" )\n",
"\n",
" with gr.Tab(\"Trading Scaffold\"):\n",
" gr.Markdown(\n",
" \"Generate a minimal, self-contained trading simulator that includes:\\n\"\n",
" \"* `strategy.py`: basic SMA crossover strategy\\n\"\n",
" \"* `sim_broker.py`: in-memory broker\\n\"\n",
" \"* `backtest.py`: synthetic data backtest\\n\"\n",
" \"You can run it locally with `python backtest.py`.\"\n",
" )\n",
"\n",
" brief = gr.Textbox(\n",
" lines=6,\n",
" label=\"Strategy Brief\",\n",
" placeholder=\"e.g., SMA crossover with fast=5, slow=20, long-only\",\n",
" )\n",
" symbol = gr.Textbox(value=\"AAPL\", label=\"Symbol\")\n",
" gen_btn3 = gr.Button(\"Generate Scaffold\")\n",
"\n",
" s_md = gr.Code(language=\"python\", label=\"strategy.py\")\n",
" b_md = gr.Code(language=\"python\", label=\"sim_broker.py\")\n",
" bt_md = gr.Code(language=\"python\", label=\"backtest.py\")\n",
" r_md = gr.Markdown(label=\"README.md\")\n",
" zip_out = gr.File(label=\"Download ZIP\")\n",
"\n",
" gen_btn3.click(\n",
" fn=trading_scaffold,\n",
" inputs=[brief, symbol],\n",
" outputs=[s_md, b_md, bt_md, r_md, zip_out],\n",
" )\n",
"\n",
" gr.Markdown(\"---\")\n",
" gr.Markdown(\n",
" \"Tips:\\n\"\n",
" \"* Ollama models must be pulled locally (for example `ollama pull llama3.2`).\\n\"\n",
" \"* OpenAI models require the `OPENAI_API_KEY` environment variable.\\n\"\n",
" \"* Everything runs safely and offline for the local models.\"\n",
" )"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a1c6ba48",
"metadata": {},
"outputs": [],
"source": [
"if __name__ == \"__main__\":\n",
" demo.launch()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "llm-engineering",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.12"
}
},
"nbformat": 4,
"nbformat_minor": 5
}