diff --git a/README.md b/README.md index f3bfb1b..b2f5864 100644 --- a/README.md +++ b/README.md @@ -20,6 +20,27 @@ https://edwarddonner.com/2024/11/13/llm-engineering-resources/ The most significant change is that the new version uses the fabulous uv, instead of Anaconda. But there's also tons of new content, including new models, tools and techniques. Prompt caching, LiteLLM, inference techniques and so much more. +### How this is organized in Udemy + +We are rolling out the new weeks, but keeping the original content in an appendix: + +In Udemy: + +Section 1 = NEW WEEK 1 +Section 2 = NEW WEEK 2 +Section 3 = NEW WEEK 3 +Section 4 = Original Week 4 +Section 5 = Original Week 5 +Section 6 = Original Week 6 +Section 7 = Original Week 7 +Section 8 = Original Week 8 + +Then as an appendix / archive: + +Section 9 = Original Week 1 +Section 10 = Original Week 2 +Section 11 = Original Week 3 + ### To revert to the original version of code, consistent with the original videos (Anaconda + virtualenv) If you'd prefer to stick with the code for the original videos, simply do this from your Anaconda Prompt or Terminal: diff --git a/community-contributions/WebScraperApp/week1_day2_ak.ipynb b/community-contributions/WebScraperApp/week1_day2_ak.ipynb new file mode 100644 index 0000000..cfe29e7 --- /dev/null +++ b/community-contributions/WebScraperApp/week1_day2_ak.ipynb @@ -0,0 +1,1035 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "d15d8294-3328-4e07-ad16-8a03e9bbfdb9", + "metadata": {}, + "source": [ + "# Welcome to the Day 2 Lab!\n" + ] + }, + { + "cell_type": "markdown", + "id": "ada885d9-4d42-4d9b-97f0-74fbbbfe93a9", + "metadata": {}, + "source": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " \n", + " \n", + "

Just before we get started --

\n", + " I thought I'd take a second to point you at this page of useful resources for the course. This includes links to all the slides.
\n", + " https://edwarddonner.com/2024/11/13/llm-engineering-resources/
\n", + " Please keep this bookmarked, and I'll continue to add more useful links there over time.\n", + "
\n", + "
" + ] + }, + { + "cell_type": "markdown", + "id": "79ffe36f", + "metadata": {}, + "source": [ + "## First - let's talk about the Chat Completions API\n", + "\n", + "1. The simplest way to call an LLM\n", + "2. It's called Chat Completions because it's saying: \"here is a conversation, please predict what should come next\"\n", + "3. The Chat Completions API was invented by OpenAI, but it's so popular that everybody uses it!\n", + "\n", + "### We will start by calling OpenAI again - but don't worry non-OpenAI people, your time is coming!\n" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "e38f17a0", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "API key found and looks good so far!\n" + ] + } + ], + "source": [ + "import os\n", + "from dotenv import load_dotenv\n", + "\n", + "load_dotenv(override=True)\n", + "api_key = os.getenv('OPENAI_API_KEY')\n", + "\n", + "if not api_key:\n", + " print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n", + "elif not api_key.startswith(\"sk-proj-\"):\n", + " print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n", + "else:\n", + " print(\"API key found and looks good so far!\")\n" + ] + }, + { + "cell_type": "markdown", + "id": "97846274", + "metadata": {}, + "source": [ + "## Do you know what an Endpoint is?\n", + "\n", + "If not, please review the Technical Foundations guide in the guides folder\n", + "\n", + "And, here is an endpoint that might interest you..." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "5af5c188", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'model': 'gpt-5-nano',\n", + " 'messages': [{'role': 'user', 'content': 'Tell me a fun fact'}]}" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import requests\n", + "\n", + "headers = {\"Authorization\": f\"Bearer {api_key}\", \"Content-Type\": \"application/json\"}\n", + "\n", + "payload = {\n", + " \"model\": \"gpt-5-nano\",\n", + " \"messages\": [\n", + " {\"role\": \"user\", \"content\": \"Tell me a fun fact\"}]\n", + "}\n", + "\n", + "payload" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "2d0ab242", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'id': 'chatcmpl-CRaBOfilI65j2z7nHeFjyq6bHAsjw',\n", + " 'object': 'chat.completion',\n", + " 'created': 1760689978,\n", + " 'model': 'gpt-5-nano-2025-08-07',\n", + " 'choices': [{'index': 0,\n", + " 'message': {'role': 'assistant',\n", + " 'content': 'Bananas are berries, but strawberries aren’t. 
Botanically, a berry comes from a single ovary and has seeds inside; bananas fit that definition, while strawberries do not.',\n", + " 'refusal': None,\n", + " 'annotations': []},\n", + " 'finish_reason': 'stop'}],\n", + " 'usage': {'prompt_tokens': 11,\n", + " 'completion_tokens': 749,\n", + " 'total_tokens': 760,\n", + " 'prompt_tokens_details': {'cached_tokens': 0, 'audio_tokens': 0},\n", + " 'completion_tokens_details': {'reasoning_tokens': 704,\n", + " 'audio_tokens': 0,\n", + " 'accepted_prediction_tokens': 0,\n", + " 'rejected_prediction_tokens': 0}},\n", + " 'service_tier': 'default',\n", + " 'system_fingerprint': None}" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "response = requests.post(\n", + " \"https://api.openai.com/v1/chat/completions\",\n", + " headers=headers,\n", + " json=payload\n", + ")\n", + "\n", + "response.json()" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "cb11a9f6", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'Bananas are berries, but strawberries aren’t. Botanically, a berry comes from a single ovary and has seeds inside; bananas fit that definition, while strawberries do not.'" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "response.json()[\"choices\"][0][\"message\"][\"content\"]" + ] + }, + { + "cell_type": "markdown", + "id": "cea3026a", + "metadata": {}, + "source": [ + "# What is the openai package?\n", + "\n", + "It's known as a Python Client Library.\n", + "\n", + "It's nothing more than a wrapper around making this exact call to the http endpoint.\n", + "\n", + "It just allows you to work with nice Python code instead of messing around with janky json objects.\n", + "\n", + "But that's it. It's open-source and lightweight. 
Some people think it contains OpenAI model code - it doesn't!\n" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "490fdf09", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'Fun fact: Wombat poop is cube-shaped, not round. The cube form comes from the way their intestines remove water and shape the feces, helping it stack neatly and not roll away when wombats mark their territory.'" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Create OpenAI client\n", + "\n", + "from openai import OpenAI\n", + "openai = OpenAI()\n", + "\n", + "response = openai.chat.completions.create(model=\"gpt-5-nano\", messages=[{\"role\": \"user\", \"content\": \"Tell me a fun fact\"}])\n", + "\n", + "response.choices[0].message.content\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "id": "c7739cda", + "metadata": {}, + "source": [ + "## And then this great thing happened:\n", + "\n", + "OpenAI's Chat Completions API was so popular, that the other model providers created endpoints that are identical.\n", + "\n", + "They are known as the \"OpenAI Compatible Endpoints\".\n", + "\n", + "For example, google made one here: https://generativelanguage.googleapis.com/v1beta/openai/\n", + "\n", + "And OpenAI decided to be kind: they said, hey, you can just use the same client library that we made for GPT. 
We'll allow you to specify a different endpoint URL and a different key, to use another provider.\n", + "\n", + "So you can use:\n", + "\n", + "```python\n", + "gemini = OpenAI(base_url=\"https://generativelanguage.googleapis.com/v1beta/openai/\", api_key=\"AIz....\")\n", + "gemini.chat.completions.create(...)\n", + "```\n", + "\n", + "And to be clear - even though OpenAI is in the code, we're only using this lightweight python client library to call the endpoint - there's no OpenAI model involved here.\n", + "\n", + "If you're confused, please review Guide 9 in the Guides folder!\n", + "\n", + "And now let's try it!" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "f74293bc", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "API key found and looks good so far!\n" + ] + } + ], + "source": [ + "GEMINI_BASE_URL = \"https://generativelanguage.googleapis.com/v1beta/openai/\"\n", + "\n", + "google_api_key = os.getenv(\"GOOGLE_API_KEY\")\n", + "\n", + "if not google_api_key:\n", + " print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n", + "elif not google_api_key.startswith(\"AIz\"):\n", + " print(\"An API key was found, but it doesn't start AIz\")\n", + "else:\n", + " print(\"API key found and looks good so far!\")\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "d060f484", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'The dot over a lowercase \"i\" or \"j\" isn\\'t just a dot—it has a name.\\n\\nIt\\'s called a **tittle**.'" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "gemini = OpenAI(base_url=GEMINI_BASE_URL, api_key=google_api_key)\n", + "\n", + "response = gemini.chat.completions.create(model=\"gemini-2.5-pro\", messages=[{\"role\": \"user\", \"content\": \"Tell me a fun fact\"}])\n", + "\n", + 
"response.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a5b069be", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "markdown", + "id": "65272432", + "metadata": {}, + "source": [ + "## And Ollama also gives an OpenAI compatible endpoint\n", + "\n", + "...and it's on your local machine!\n", + "\n", + "If the next cell doesn't print \"Ollama is running\" then please open a terminal and run `ollama serve`" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "f06280ad", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "b'Ollama is running'" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "requests.get(\"http://localhost:11434\").content" + ] + }, + { + "cell_type": "markdown", + "id": "c6ef3807", + "metadata": {}, + "source": [ + "### Download llama3.2 from meta\n", + "\n", + "Change this to llama3.2:1b if your computer is smaller.\n", + "\n", + "Don't use llama3.3 or llama4! They are too big for your computer.." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "e633481d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest ⠋ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest ⠙ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest ⠹ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest ⠸ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest ⠼ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest ⠴ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest ⠦ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest ⠧ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest ⠇ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest ⠏ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest \u001b[K\n", + "pulling dde5aa3fc5ff: 100% ▕██████████████████▏ 2.0 GB \u001b[K\n", + "pulling 966de95ca8a6: 100% ▕██████████████████▏ 1.4 KB \u001b[K\n", + "pulling fcc5a6bec9da: 100% ▕██████████████████▏ 7.7 KB \u001b[K\n", + "pulling a70ff7e570d9: 100% ▕██████████████████▏ 6.0 KB \u001b[K\n", + "pulling 56bb8bd477a5: 100% ▕██████████████████▏ 96 B \u001b[K\n", + "pulling 34bb5ab01051: 100% ▕██████████████████▏ 561 B \u001b[K\n", + "verifying sha256 digest \u001b[K\n", + "writing manifest \u001b[K\n", + "success \u001b[K\u001b[?25h\u001b[?2026l\n" + ] + } + ], + "source": [ + "!ollama pull llama3.2" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "d9419762", + "metadata": {}, + "outputs": [], + "source": [ + "OLLAMA_BASE_URL = \"http://localhost:11434/v1\"\n", + "\n", + "ollama = OpenAI(base_url=OLLAMA_BASE_URL, api_key='ollama')\n" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + 
"id": "e2456cdf", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'Did you know that there is a species of jellyfish that is immortal? The Turritopsis dohrnii, also known as the \"immortal jellyfish,\" can transform its body into a younger state through a process called transdifferentiation. This means it can essentially revert back to its polyp stage and grow back into an adult jellyfish, making it theoretically immortal!'" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Get a fun fact\n", + "\n", + "response = ollama.chat.completions.create(model=\"llama3.2\", messages=[{\"role\": \"user\", \"content\": \"Tell me a fun fact\"}])\n", + "\n", + "response.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "1e6cae7f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest ⠋ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest ⠙ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest ⠹ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest ⠸ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest ⠼ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest ⠴ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest ⠦ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest ⠧ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest ⠇ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest ⠏ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest ⠋ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest ⠙ 
\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 0% ▕ ▏ 137 KB/1.1 GB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 1% ▕ ▏ 8.3 MB/1.1 GB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 1% ▕ ▏ 12 MB/1.1 GB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 2% ▕ ▏ 22 MB/1.1 GB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 3% ▕ ▏ 31 MB/1.1 GB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 3% ▕ ▏ 33 MB/1.1 GB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 3% ▕ ▏ 37 MB/1.1 GB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 4% ▕ ▏ 47 MB/1.1 GB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 4% ▕ ▏ 49 MB/1.1 GB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 6% ▕ ▏ 61 MB/1.1 GB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 6% ▕█ ▏ 70 MB/1.1 GB 69 MB/s 14s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 7% ▕█ ▏ 74 MB/1.1 GB 69 MB/s 14s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 7% ▕█ ▏ 82 MB/1.1 GB 69 MB/s 
14s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 8% ▕█ ▏ 90 MB/1.1 GB 69 MB/s 14s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 8% ▕█ ▏ 90 MB/1.1 GB 69 MB/s 14s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 9% ▕█ ▏ 100 MB/1.1 GB 69 MB/s 14s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 10% ▕█ ▏ 111 MB/1.1 GB 69 MB/s 14s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 10% ▕█ ▏ 115 MB/1.1 GB 69 MB/s 14s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 11% ▕█ ▏ 123 MB/1.1 GB 69 MB/s 14s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 12% ▕██ ▏ 130 MB/1.1 GB 69 MB/s 14s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 12% ▕██ ▏ 135 MB/1.1 GB 69 MB/s 14s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 13% ▕██ ▏ 145 MB/1.1 GB 70 MB/s 13s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 14% ▕██ ▏ 151 MB/1.1 GB 70 MB/s 13s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 14% ▕██ ▏ 158 MB/1.1 GB 70 MB/s 13s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 15% ▕██ ▏ 162 MB/1.1 GB 70 MB/s 
13s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 16% ▕██ ▏ 175 MB/1.1 GB 70 MB/s 13s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 16% ▕██ ▏ 179 MB/1.1 GB 70 MB/s 13s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 17% ▕███ ▏ 188 MB/1.1 GB 70 MB/s 13s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 17% ▕███ ▏ 192 MB/1.1 GB 70 MB/s 13s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 18% ▕███ ▏ 199 MB/1.1 GB 70 MB/s 13s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 19% ▕███ ▏ 209 MB/1.1 GB 70 MB/s 12s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 20% ▕███ ▏ 218 MB/1.1 GB 71 MB/s 12s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 20% ▕███ ▏ 223 MB/1.1 GB 71 MB/s 12s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 21% ▕███ ▏ 232 MB/1.1 GB 71 MB/s 12s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 22% ▕███ ▏ 240 MB/1.1 GB 71 MB/s 12s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 22% ▕███ ▏ 245 MB/1.1 GB 71 MB/s 12s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 23% ▕████ ▏ 254 MB/1.1 GB 71 MB/s 
12s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 24% ▕████ ▏ 263 MB/1.1 GB 71 MB/s 11s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 24% ▕████ ▏ 266 MB/1.1 GB 71 MB/s 11s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 25% ▕████ ▏ 275 MB/1.1 GB 71 MB/s 11s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 26% ▕████ ▏ 286 MB/1.1 GB 71 MB/s 11s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 26% ▕████ ▏ 290 MB/1.1 GB 72 MB/s 11s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 27% ▕████ ▏ 298 MB/1.1 GB 72 MB/s 11s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 28% ▕████ ▏ 308 MB/1.1 GB 72 MB/s 11s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 28% ▕█████ ▏ 313 MB/1.1 GB 72 MB/s 11s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 29% ▕█████ ▏ 322 MB/1.1 GB 72 MB/s 10s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 30% ▕█████ ▏ 330 MB/1.1 GB 72 MB/s 10s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 30% ▕█████ ▏ 335 MB/1.1 GB 72 MB/s 10s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 31% ▕█████ ▏ 344 MB/1.1 GB 72 MB/s 
10s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 32% ▕█████ ▏ 353 MB/1.1 GB 72 MB/s 10s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 32% ▕█████ ▏ 357 MB/1.1 GB 72 MB/s 10s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 33% ▕█████ ▏ 365 MB/1.1 GB 73 MB/s 10s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 33% ▕██████ ▏ 373 MB/1.1 GB 73 MB/s 10s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 34% ▕██████ ▏ 378 MB/1.1 GB 73 MB/s 10s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 35% ▕██████ ▏ 387 MB/1.1 GB 73 MB/s 10s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 36% ▕██████ ▏ 396 MB/1.1 GB 73 MB/s 9s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 36% ▕██████ ▏ 400 MB/1.1 GB 73 MB/s 9s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 37% ▕██████ ▏ 409 MB/1.1 GB 73 MB/s 9s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 37% ▕██████ ▏ 418 MB/1.1 GB 73 MB/s 9s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 38% ▕██████ ▏ 422 MB/1.1 GB 73 MB/s 9s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 38% ▕██████ ▏ 427 MB/1.1 GB 73 MB/s 
9s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 39% ▕███████ ▏ 438 MB/1.1 GB 73 MB/s 9s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 40% ▕███████ ▏ 443 MB/1.1 GB 73 MB/s 9s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 40% ▕███████ ▏ 452 MB/1.1 GB 73 MB/s 9s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 41% ▕███████ ▏ 462 MB/1.1 GB 73 MB/s 8s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 42% ▕███████ ▏ 463 MB/1.1 GB 73 MB/s 8s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 42% ▕███████ ▏ 474 MB/1.1 GB 73 MB/s 8s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 43% ▕███████ ▏ 485 MB/1.1 GB 73 MB/s 8s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 44% ▕███████ ▏ 489 MB/1.1 GB 73 MB/s 8s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 45% ▕████████ ▏ 500 MB/1.1 GB 73 MB/s 8s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 46% ▕████████ ▏ 509 MB/1.1 GB 73 MB/s 8s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 46% ▕████████ ▏ 512 MB/1.1 GB 73 MB/s 8s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 47% ▕████████ ▏ 523 MB/1.1 GB 
73 MB/s 8s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 48% ▕████████ ▏ 533 MB/1.1 GB 73 MB/s 7s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 48% ▕████████ ▏ 537 MB/1.1 GB 73 MB/s 7s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 48% ▕████████ ▏ 540 MB/1.1 GB 73 MB/s 7s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 50% ▕████████ ▏ 553 MB/1.1 GB 73 MB/s 7s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 50% ▕████████ ▏ 558 MB/1.1 GB 73 MB/s 7s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 51% ▕█████████ ▏ 565 MB/1.1 GB 73 MB/s 7s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 51% ▕█████████ ▏ 567 MB/1.1 GB 73 MB/s 7s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 52% ▕█████████ ▏ 580 MB/1.1 GB 73 MB/s 7s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 53% ▕█████████ ▏ 587 MB/1.1 GB 73 MB/s 7s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 54% ▕█████████ ▏ 598 MB/1.1 GB 74 MB/s 7s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 54% ▕█████████ ▏ 599 MB/1.1 GB 74 MB/s 6s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 54% 
▕█████████ ▏ 607 MB/1.1 GB 74 MB/s 6s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 55% ▕█████████ ▏ 619 MB/1.1 GB 74 MB/s 6s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 56% ▕██████████ ▏ 624 MB/1.1 GB 74 MB/s 6s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 57% ▕██████████ ▏ 632 MB/1.1 GB 74 MB/s 6s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 57% ▕██████████ ▏ 641 MB/1.1 GB 74 MB/s 6s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 58% ▕██████████ ▏ 646 MB/1.1 GB 74 MB/s 6s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 59% ▕██████████ ▏ 655 MB/1.1 GB 74 MB/s 6s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 59% ▕██████████ ▏ 664 MB/1.1 GB 74 MB/s 6s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 60% ▕██████████ ▏ 668 MB/1.1 GB 74 MB/s 6s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 61% ▕██████████ ▏ 677 MB/1.1 GB 74 MB/s 5s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 62% ▕███████████ ▏ 687 MB/1.1 GB 74 MB/s 5s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 62% ▕███████████ ▏ 692 MB/1.1 GB 74 MB/s 5s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest 
\u001b[K\n", + "pulling aabd4debf0c8: 63% ▕███████████ ▏ 701 MB/1.1 GB 74 MB/s 5s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 64% ▕███████████ ▏ 709 MB/1.1 GB 74 MB/s 5s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 64% ▕███████████ ▏ 714 MB/1.1 GB 74 MB/s 5s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 65% ▕███████████ ▏ 721 MB/1.1 GB 74 MB/s 5s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 65% ▕███████████ ▏ 725 MB/1.1 GB 74 MB/s 5s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 65% ▕███████████ ▏ 728 MB/1.1 GB 74 MB/s 5s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 66% ▕███████████ ▏ 737 MB/1.1 GB 74 MB/s 5s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 67% ▕████████████ ▏ 746 MB/1.1 GB 74 MB/s 5s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 67% ▕████████████ ▏ 750 MB/1.1 GB 74 MB/s 4s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 68% ▕████████████ ▏ 758 MB/1.1 GB 74 MB/s 4s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 69% ▕████████████ ▏ 767 MB/1.1 GB 74 MB/s 4s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 69% ▕████████████ ▏ 771 MB/1.1 GB 74 MB/s 
4s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 70% ▕████████████ ▏ 778 MB/1.1 GB 74 MB/s 4s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 71% ▕████████████ ▏ 788 MB/1.1 GB 74 MB/s 4s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 71% ▕████████████ ▏ 792 MB/1.1 GB 74 MB/s 4s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 72% ▕████████████ ▏ 801 MB/1.1 GB 74 MB/s 4s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 73% ▕█████████████ ▏ 810 MB/1.1 GB 74 MB/s 4s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 73% ▕█████████████ ▏ 815 MB/1.1 GB 74 MB/s 4s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 74% ▕█████████████ ▏ 825 MB/1.1 GB 74 MB/s 3s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 75% ▕█████████████ ▏ 834 MB/1.1 GB 74 MB/s 3s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 75% ▕█████████████ ▏ 839 MB/1.1 GB 74 MB/s 3s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 76% ▕█████████████ ▏ 847 MB/1.1 GB 74 MB/s 3s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 77% ▕█████████████ ▏ 856 MB/1.1 GB 74 MB/s 3s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest 
\u001b[K\n", + "pulling aabd4debf0c8: 77% ▕█████████████ ▏ 860 MB/1.1 GB 74 MB/s 3s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 78% ▕█████████████ ▏ 867 MB/1.1 GB 74 MB/s 3s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 78% ▕██████████████ ▏ 875 MB/1.1 GB 74 MB/s 3s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 79% ▕██████████████ ▏ 878 MB/1.1 GB 74 MB/s 3s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 79% ▕██████████████ ▏ 886 MB/1.1 GB 74 MB/s 3s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 80% ▕██████████████ ▏ 892 MB/1.1 GB 74 MB/s 3s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 80% ▕██████████████ ▏ 898 MB/1.1 GB 74 MB/s 2s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 81% ▕██████████████ ▏ 902 MB/1.1 GB 74 MB/s 2s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 82% ▕██████████████ ▏ 914 MB/1.1 GB 74 MB/s 2s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 82% ▕██████████████ ▏ 918 MB/1.1 GB 74 MB/s 2s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 83% ▕██████████████ ▏ 927 MB/1.1 GB 74 MB/s 2s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 84% ▕███████████████ ▏ 935 MB/1.1 GB 74 MB/s 
2s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 84% ▕███████████████ ▏ 937 MB/1.1 GB 74 MB/s 2s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 85% ▕███████████████ ▏ 946 MB/1.1 GB 74 MB/s 2s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 85% ▕███████████████ ▏ 948 MB/1.1 GB 73 MB/s 2s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 86% ▕███████████████ ▏ 956 MB/1.1 GB 73 MB/s 2s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 86% ▕███████████████ ▏ 964 MB/1.1 GB 73 MB/s 2s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 87% ▕███████████████ ▏ 972 MB/1.1 GB 73 MB/s 1s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 87% ▕███████████████ ▏ 977 MB/1.1 GB 73 MB/s 1s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 88% ▕███████████████ ▏ 984 MB/1.1 GB 73 MB/s 1s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 89% ▕███████████████ ▏ 991 MB/1.1 GB 73 MB/s 1s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 89% ▕████████████████ ▏ 995 MB/1.1 GB 73 MB/s 1s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 90% ▕████████████████ ▏ 1.0 GB/1.1 GB 73 MB/s 
1s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 90% ▕████████████████ ▏ 1.0 GB/1.1 GB 73 MB/s 1s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 91% ▕████████████████ ▏ 1.0 GB/1.1 GB 73 MB/s 1s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 91% ▕████████████████ ▏ 1.0 GB/1.1 GB 72 MB/s 1s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 92% ▕████████████████ ▏ 1.0 GB/1.1 GB 72 MB/s 1s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 92% ▕████████████████ ▏ 1.0 GB/1.1 GB 72 MB/s 1s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 93% ▕████████████████ ▏ 1.0 GB/1.1 GB 72 MB/s 1s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 94% ▕████████████████ ▏ 1.0 GB/1.1 GB 72 MB/s 0s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 94% ▕████████████████ ▏ 1.0 GB/1.1 GB 72 MB/s 0s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 94% ▕████████████████ ▏ 1.1 GB/1.1 GB 72 MB/s 0s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 95% ▕█████████████████ ▏ 1.1 GB/1.1 GB 72 MB/s 0s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 95% ▕█████████████████ ▏ 1.1 GB/1.1 GB 72 MB/s 
0s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 95% ▕█████████████████ ▏ 1.1 GB/1.1 GB 72 MB/s 0s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 96% ▕█████████████████ ▏ 1.1 GB/1.1 GB 69 MB/s 0s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 96% ▕█████████████████ ▏ 1.1 GB/1.1 GB 69 MB/s 0s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 96% ▕█████████████████ ▏ 1.1 GB/1.1 GB 69 MB/s 0s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 96% ▕█████████████████ ▏ 1.1 GB/1.1 GB 69 MB/s 0s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 97% ▕█████████████████ ▏ 1.1 GB/1.1 GB 69 MB/s 0s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 97% ▕█████████████████ ▏ 1.1 GB/1.1 GB 69 MB/s 0s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 97% ▕█████████████████ ▏ 1.1 GB/1.1 GB 69 MB/s 0s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 98% ▕█████████████████ ▏ 1.1 GB/1.1 GB 69 MB/s 0s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 98% ▕█████████████████ ▏ 1.1 GB/1.1 GB 69 MB/s 0s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 98% ▕█████████████████ ▏ 1.1 GB/1.1 GB 69 MB/s 
0s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 99% ▕█████████████████ ▏ 1.1 GB/1.1 GB 65 MB/s 0s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 99% ▕█████████████████ ▏ 1.1 GB/1.1 GB 65 MB/s 0s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 99% ▕█████████████████ ▏ 1.1 GB/1.1 GB 65 MB/s 0s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 99% ▕█████████████████ ▏ 1.1 GB/1.1 GB 65 MB/s 0s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕█████████████████ ▏ 1.1 GB/1.1 GB 65 MB/s 0s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕█████████████████ ▏ 1.1 GB/1.1 GB 65 MB/s 0s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕█████████████████ ▏ 1.1 GB/1.1 GB 65 MB/s 0s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕█████████████████ ▏ 1.1 GB/1.1 GB 65 MB/s 0s\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest 
\u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B 
\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B 
\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB 
\u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + 
"pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB 
\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\n", + "pulling f4d24e9138dd: 100% ▕██████████████████▏ 148 B \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\n", + "pulling f4d24e9138dd: 100% ▕██████████████████▏ 148 B \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\n", + "pulling f4d24e9138dd: 100% ▕██████████████████▏ 148 B \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\n", + "pulling f4d24e9138dd: 100% ▕██████████████████▏ 148 B \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\n", + "pulling f4d24e9138dd: 100% ▕██████████████████▏ 148 B 
\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\n", + "pulling f4d24e9138dd: 100% ▕██████████████████▏ 148 B \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\n", + "pulling f4d24e9138dd: 100% ▕██████████████████▏ 148 B \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\n", + "pulling f4d24e9138dd: 100% ▕██████████████████▏ 148 B \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\n", + "pulling f4d24e9138dd: 100% ▕██████████████████▏ 148 B \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\n", + "pulling f4d24e9138dd: 100% ▕██████████████████▏ 148 B 
\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\n", + "pulling f4d24e9138dd: 100% ▕██████████████████▏ 148 B \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\n", + "pulling f4d24e9138dd: 100% ▕██████████████████▏ 148 B \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\n", + "pulling f4d24e9138dd: 100% ▕██████████████████▏ 148 B \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\n", + "pulling f4d24e9138dd: 100% ▕██████████████████▏ 148 B \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\n", + "pulling f4d24e9138dd: 100% ▕██████████████████▏ 148 B \u001b[K\n", + 
"pulling a85fe2a2e58e: 100% ▕██████████████████▏ 487 B \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\n", + "pulling f4d24e9138dd: 100% ▕██████████████████▏ 148 B \u001b[K\n", + "pulling a85fe2a2e58e: 100% ▕██████████████████▏ 487 B \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\n", + "pulling f4d24e9138dd: 100% ▕██████████████████▏ 148 B \u001b[K\n", + "pulling a85fe2a2e58e: 100% ▕██████████████████▏ 487 B \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\n", + "pulling f4d24e9138dd: 100% ▕██████████████████▏ 148 B \u001b[K\n", + "pulling a85fe2a2e58e: 100% ▕██████████████████▏ 487 B \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\n", + "pulling f4d24e9138dd: 100% ▕██████████████████▏ 148 B \u001b[K\n", + "pulling a85fe2a2e58e: 100% ▕██████████████████▏ 487 B 
\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\n", + "pulling f4d24e9138dd: 100% ▕██████████████████▏ 148 B \u001b[K\n", + "pulling a85fe2a2e58e: 100% ▕██████████████████▏ 487 B \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\n", + "pulling f4d24e9138dd: 100% ▕██████████████████▏ 148 B \u001b[K\n", + "pulling a85fe2a2e58e: 100% ▕██████████████████▏ 487 B \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\n", + "pulling f4d24e9138dd: 100% ▕██████████████████▏ 148 B \u001b[K\n", + "pulling a85fe2a2e58e: 100% ▕██████████████████▏ 487 B \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\n", + "pulling f4d24e9138dd: 100% ▕██████████████████▏ 148 B \u001b[K\n", + "pulling a85fe2a2e58e: 100% ▕██████████████████▏ 487 B \u001b[K\n", + "verifying sha256 digest ⠋ 
\u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\n", + "pulling f4d24e9138dd: 100% ▕██████████████████▏ 148 B \u001b[K\n", + "pulling a85fe2a2e58e: 100% ▕██████████████████▏ 487 B \u001b[K\n", + "verifying sha256 digest ⠙ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\n", + "pulling f4d24e9138dd: 100% ▕██████████████████▏ 148 B \u001b[K\n", + "pulling a85fe2a2e58e: 100% ▕██████████████████▏ 487 B \u001b[K\n", + "verifying sha256 digest ⠹ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\n", + "pulling f4d24e9138dd: 100% ▕██████████████████▏ 148 B \u001b[K\n", + "pulling a85fe2a2e58e: 100% ▕██████████████████▏ 487 B \u001b[K\n", + "verifying sha256 digest ⠸ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\n", + "pulling f4d24e9138dd: 100% ▕██████████████████▏ 148 B \u001b[K\n", + 
"pulling a85fe2a2e58e: 100% ▕██████████████████▏ 487 B \u001b[K\n", + "verifying sha256 digest ⠼ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\n", + "pulling f4d24e9138dd: 100% ▕██████████████████▏ 148 B \u001b[K\n", + "pulling a85fe2a2e58e: 100% ▕██████████████████▏ 487 B \u001b[K\n", + "verifying sha256 digest ⠴ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\n", + "pulling f4d24e9138dd: 100% ▕██████████████████▏ 148 B \u001b[K\n", + "pulling a85fe2a2e58e: 100% ▕██████████████████▏ 487 B \u001b[K\n", + "verifying sha256 digest ⠦ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% ▕██████████████████▏ 1.1 KB \u001b[K\n", + "pulling f4d24e9138dd: 100% ▕██████████████████▏ 148 B \u001b[K\n", + "pulling a85fe2a2e58e: 100% ▕██████████████████▏ 487 B \u001b[K\n", + "verifying sha256 digest ⠧ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[A\u001b[1Gpulling manifest \u001b[K\n", + "pulling aabd4debf0c8: 100% ▕██████████████████▏ 1.1 GB \u001b[K\n", + "pulling c5ad996bda6e: 100% ▕██████████████████▏ 556 B \u001b[K\n", + "pulling 6e4c38e1172f: 100% 
▕██████████████████▏ 1.1 KB \u001b[K\n", + "pulling f4d24e9138dd: 100% ▕██████████████████▏ 148 B \u001b[K\n", + "pulling a85fe2a2e58e: 100% ▕██████████████████▏ 487 B \u001b[K\n", + "verifying sha256 digest \u001b[K\n", + "writing manifest \u001b[K\n", + "success \u001b[K\u001b[?25h\u001b[?2026l\n" + ] + } + ], + "source": [ + "# Now let's try deepseek-r1:1.5b - this is DeepSeek \"distilled\" into Qwen from Alibaba Cloud\n", + "\n", + "!ollama pull deepseek-r1:1.5b" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "25002f25", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'Sure! Here\\'s a fun fact for you:\\n\\n### The Missing Universe - A New Era in Astronomy\\nIn 2018, astronomers released a groundbreaking video titled *The Missing Universe*, which provided the first direct evidence of dark matter and dark energy, marking the beginning of the \"new era\" in astronomy that we\\'re all waiting for.\\n\\nDark matter is a mysterious substance that makes up approximately 55% of the matter in the universe. Scientists have long puzzled over it despite its name (it doesn\\'t emit light or heat), and many theories remain unproven. 
However, *The Missing Universe* showed us there\\'s evidence of these \"missing\" forces at play by capturing a massive galaxy within an hour-long video.\\n\\nThis discovery has opened up entirely new possibilities for understanding the universe\\'s structure and evolution.'" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "response = ollama.chat.completions.create(model=\"deepseek-r1:1.5b\", messages=[{\"role\": \"user\", \"content\": \"Tell me a fun fact\"}])\n", + "\n", + "response.choices[0].message.content" + ] + }, + { + "cell_type": "markdown", + "id": "6e9fa1fc-eac5-4d1d-9be4-541b3f2b3458", + "metadata": {}, + "source": [ + "# HOMEWORK EXERCISE ASSIGNMENT\n", + "\n", + "Upgrade the day 1 project to summarize a webpage to use an Open Source model running locally via Ollama rather than OpenAI\n", + "\n", + "You'll be able to use this technique for all subsequent projects if you'd prefer not to use paid APIs.\n", + "\n", + "**Benefits:**\n", + "1. No API charges - open-source\n", + "2. Data doesn't leave your box\n", + "\n", + "**Disadvantages:**\n", + "1. Significantly less power than Frontier Model\n", + "\n", + "## Recap on installation of Ollama\n", + "\n", + "Simply visit [ollama.com](https://ollama.com) and install!\n", + "\n", + "Once complete, the ollama server should already be running locally. \n", + "If you visit: \n", + "[http://localhost:11434/](http://localhost:11434/)\n", + "\n", + "You should see the message `Ollama is running`. \n", + "\n", + "If not, bring up a new Terminal (Mac) or Powershell (Windows) and enter `ollama serve` \n", + "And in another Terminal (Mac) or Powershell (Windows), enter `ollama pull llama3.2` \n", + "Then try [http://localhost:11434/](http://localhost:11434/) again.\n", + "\n", + "If Ollama is slow on your machine, try using `llama3.2:1b` as an alternative. 
Run `ollama pull llama3.2:1b` from a Terminal or Powershell, and change the code from `MODEL = \"llama3.2\"` to `MODEL = \"llama3.2:1b\"`" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "6de38216-6d1c-48c4-877b-86d403f4e0f8", + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "Here are the summarized business developments from Lime, Dotta-Tier, Voi, and Bird, sorted by company and industry development, along with a rate of importance:\n", + "\n", + "**Company Developments**\n", + "\n", + "1. **Dott**: Raises €40 million through bond issue (Importance: 8/10) - Voi Technology uses this funding for its expansion in the Paris region.\n", + "2. **Lime UK**: Revenues up 75% in 2024 (Importance: 7.5/10) - Lime expands its fleet and introduces foot patrols in Milwaukee.\n", + "3. **Bird**: No recent news available.\n", + "\n", + "**Industry Developments**\n", + "\n", + "1. **Paris Region Shared E-Bikes**: Voi joins Lime and Dott to operate shared e-bikes (Importance: 9/10) - This partnership strengthens the region's micromobility landscape.\n", + "2. **Germany Micromobility Expansion**: Lime launches bike-sharing system in Stuttgart region (Importance: 8.5/10)\n", + "3. **Paris E-Bike Rental Market**: Competition increases with Voi, Lime, and Dott operating together (Importance: 8/10)\n", + "4. **UK Electric Vehicle Study**: Toyota-led consortium receives funding for light EV study (Importance: 6.5/10) - Relevant to the micromobility industry, but not directly related to micromobility companies.\n", + "5. **Seattle Shared Micromobility Growth**: City hits 7 million shared micromobility trips (Importance: 7/10)\n", + "\n", + "Note that the importance ratings are subjective and based on various factors such as the significance of the announcement, its potential impact on the industry, and the company's overall performance." 
+ ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "### Imports and basic setup\n", + "\n", + "from scraper import fetch_website_contents\n", + "from IPython.display import Markdown, display\n", + "from openai import OpenAI\n", + "\n", + "\n", + "OLLAMA_BASE_URL = \"http://localhost:11434/v1\"\n", + "ollama = OpenAI(base_url=OLLAMA_BASE_URL, api_key='ollama')\n", + "\n", + "### Prompts\n", + "\n", + "system_prompt = \"\"\"\n", + "You are a proffesional assistant that summerizes news about micromobility. \n", + "Be consice, data driven and nice to read. \n", + "\"\"\"\n", + "user_prompt = \"\"\"\n", + "Take the content of the website and provide a short summery of the business developvement.\n", + "Sort it by company and by general industry developments. Only show new about Lime, Dott-Tier, Voi and Bird.\n", + "For each output, provide a rate of its importance.\n", + "Order the news by importance.\n", + "\"\"\"\n", + "\n", + "### Getting website content\n", + "\n", + "micromobility = fetch_website_contents(\"https://micromobility.io/news\")\n", + "\n", + "def messages_for(website):\n", + " return [\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt + website}\n", + " ]\n", + "\n", + "### Summarization\n", + "\n", + "def summarize(url):\n", + " website = fetch_website_contents(url)\n", + " response = ollama.chat.completions.create(\n", + " model = \"llama3.2\",\n", + " messages = messages_for(website)\n", + " )\n", + " return response.choices[0].message.content\n", + "\n", + "def display_summary(url):\n", + " summary = summarize(url)\n", + " display(Markdown(summary))\n", + "\n", + "display_summary (\"https://micromobility.io/news\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "73674d1b", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + 
"language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/community-contributions/biomedical-article-summariser/README.md b/community-contributions/biomedical-article-summariser/README.md new file mode 100644 index 0000000..9fd1f53 --- /dev/null +++ b/community-contributions/biomedical-article-summariser/README.md @@ -0,0 +1,38 @@ +## Biomedical Article Abstract Summariser using Europe PMC + Ollama + +This is a simple app that demonstrates an article abstract summariser leveraging Europe PMC’s API and Ollama LLMs to generate concise summaries of biomedical literature. + +## 🔍 About Europe PMC (EPMC) +Europe PMC is a free, open-access database that provides access to millions of life sciences and biomedical articles, research papers, and preprints. It is part of the PubMed Central International (PMCI) network. + +## Features + +This solution presents 2 methods: +1. A simple demo via a jupyter notebook +2. An interactive demo via gradio, running on your local computer. + +**Core Features:** +- Fetch an article’s metadata and abstract via Europe PMC’s API (using a provided PMCID). +- Preprocess and clean the abstract text unnecessary tags e.g referenc tag or math formula. +- Summarise abstracts into bullet points + a short paragraph using Ollama models. + + + +## 📌 How to Use + +- Go to [Europe PMC' website](https://europepmc.org/). +- Use the search bar to find an open-access article by keywords, entity names, journal, or author. E.g Genes, Diseases, nutrition etc +- Since the app currently only runs on open-access only articles, you'll need to restrict results to `open-access` only articles: add filters like `HAS_FT:Y` or `IN_EPMC:Y` to your search syntax. 
E.g .`"Genes: HAS_FT:Y"` +- Select your article of interest and copy its PMCID (e.g., PMC1234567). + +- Run the summariser: + - via notebook: Paste the `PMCID` as a string in the display_response func, after running all other cells. + - via gradio: + - run the python script via CLI: + ```python + python article_summariser-gradio.py + ``` + - Paste the `PMCID` as you've copied it in the `Enter a **EuropePMC Article ID` textbox. + - click on the `Fetch Article Abstract and generate Summary` button. + **N.B**: I've observed that using `llama3.2` runs faster on my pc. You may experience some delays with all other models. Also make sure to already have ollama running via `ollama serve` on your terminal before running the script. + diff --git a/community-contributions/biomedical-article-summariser/article_summariser-gradio.py b/community-contributions/biomedical-article-summariser/article_summariser-gradio.py new file mode 100644 index 0000000..9bf67fa --- /dev/null +++ b/community-contributions/biomedical-article-summariser/article_summariser-gradio.py @@ -0,0 +1,202 @@ +import re + +import requests +import functools +from typing import List, Tuple, Dict, Any + +from loguru import logger + +from bs4 import BeautifulSoup as bs + + +import ollama +import gradio as gr + + + +SYS_PROMPT = """ +You are an expert in biomedical text mining and information extraction. +You excel at breaking down complex articles into digestible contents for your audience. +Your audience can comprise of students, early researchers and professionals in the field. +Summarize the key findings in the following article [ARTICLE] . +Your summary should provide crucial points covered in the paper that helps your diverse audience quickly understand the most vital information. 
+Crucial points to include in your summary: +- Main objectives of the study +- Key findings and results +- Methodologies used +- Implications of the findings(if any) +- Any limitations or future directions mentioned + +Format: Provide your summary in bullet points highlighting key areas followed with a concise paragraph that encapsulates the results of the paper. + +The tone should be professional and clear. + +""" + + + + +def catch_request_error(func): + """ + Wrapper func to catch request errors and return None if an error occurs. + Used as a decorator. + """ + @functools.wraps(func) + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + except requests.RequestException as e: + print(f"Request error in {func.__name__}: {e}") + return None + return wrapper + + + +@catch_request_error +@logger.catch +def get_xml_from_url(url: str) -> bs: + """ + Fetches the XML content from Europe PMC website. + + Args: + url (str): Europe PMC's production url to fetch the XML from. + + Returns: + soup (bs4.BeautifulSoup): Parsed XML content. 
+ """ + response = requests.get(url) + response.raise_for_status() #check for request errors + return bs(response.content, "lxml-xml") + + + + +def clean_text(text:str) -> str: + """ + This function cleans a text by filtering reference patterns in text, + extra whitespaces, escaped latex-style formatting appearing in text body instead of predefined latex tags + + Args: + text(str): The text to be cleaned + + Returns: + tex(str): The cleaned text + + """ + + # Remove LaTeX-style math and formatting tags #already filtered from soup content but some still appear + text = re.sub(r"\{.*?\}", "", text) # Matches and removes anything inside curly braces {} + text = re.sub(r"\\[a-zA-Z]+", "", text) # Matches and removes characters that appears with numbers + + # Remove reference tags like [34] or [1,2,3] + text = re.sub(r"\[\s*(\d+\s*(,\s*\d+\s*)*)\]", "", text) + + # Remove extra whitespace + text = re.sub(r"\s+", " ", text).strip() + + return text + + +def fetch_article_abstract(soup: bs) -> Tuple[str, str]: + """ + Extracts the abstract text from the XML soup. + + Args: + soup (bs4.BeautifulSoup): Parsed XML content. + Returns: + Tuple(article_title (str), abstract_text (str)): A tuple of the article's title and its extracted abstract text. + """ + if soup is None: + return "No XML found", "" + article_title = soup.find("article-title").get_text(strip=True) if soup.find("article-title") else "No Title Found for this article" + + abstract_tag = soup.find("abstract") + if abstract_tag: + abstract_text = ' '.join([clean_text(p.get_text(strip=True)) for p in abstract_tag.find_all("p") if p.get_text(strip=True)]) + else: + abstract_text = "" + return article_title, abstract_text + + + +def build_message(article_title: str, abstract_text: str, sys_prompt:str=SYS_PROMPT) -> List[Dict[str, str]]: + """ + Constructs the message payload for the LLM. + + Args: + article_title (str): The title of the article. + abstract_text (str): The abstract text of the article. 
+ + Returns: + List[Dict[str, str]]: A list of message dictionaries for the LLM. + """ + user_prompt = f"""You are looking at an article with title: {article_title}. + The article's abstract is as follows: \n{abstract_text}. + Summarise the article. Start your summary by providing a short sentence on what the article is about + and then a bulleted list of the key points covered in the article. +""" + messages = [ + {"role": "system", "content": sys_prompt}, + {"role": "user", "content": user_prompt} + ] + return messages + + + +def generate_response(messages:List[Dict[str, str]], model:str) -> str: + """ + Generates a response from the LLM based on the provided messages. + Args: + messages (List[Dict[str, str]]): The message payload for the LLM. + model (str): The model to use for generating the response. + Returns: + str: The content of the LLM's response. + """ + + response = ollama.chat(model=model, messages=messages) + return response["message"]["content"] + + +def summariser(article_id: str, model:str) -> str: + if article_id and not re.match(r"^PMC\d{5,8}$", article_id): + raise gr.Error("Please check the length/Format of the provided Article ID. It should start with 'PMC' followed by 5 to 8 digits, e.g., 'PMC1234567'.") + url = f"https://www.ebi.ac.uk/europepmc/webservices/rest/{article_id}/fullTextXML" + soup = get_xml_from_url(url) + article_title, abstract_text = fetch_article_abstract(soup) + if not abstract_text: + raise gr.Error(f"No abstract found for {article_title}") + messages = build_message(article_title, abstract_text) + + #pull model from ollama + ollama.pull(model) + summary = generate_response(messages, model) + + return f"## 📝 Article Title: {article_title}\n\n### 📌 Summary:\n{summary}" + +INTRO_TXT = "This is a simple Biomedical Article Summariser. It uses PMCID to fetch articles from the Europe PMC(EPMC) Website. It currently only runs on article's abstract. 
Future improvements would integrate full-text articles" +INST_TXT = "Enter a **EuropePMC Article ID** (e.g., `PMC1234567`) and select a model from the dropdown menu to generate a structured summary" +def gradio_ui(): + with gr.Blocks(theme=gr.themes.Soft()) as demo: + gr.Markdown(INTRO_TXT) + gr.Markdown(INST_TXT) + + with gr.Row(): + with gr.Column(scale=1): + article_id = gr.Textbox(label="Enter Article's PMCID here", placeholder="e.g., PMC1234567") + model_choice = gr.Dropdown(choices=["llama3.2", "deepseek-r1", "gemma3", "mistral", "gpt-oss"], value="llama3.2", label="Select a model") + run_btn = gr.Button("Fetch Article Abstract and generate Summary", variant='primary') + with gr.Column(scale=1): + output_box = gr.Markdown() + + + run_btn.click(fn=summariser, inputs=[article_id, model_choice], outputs=output_box) + + return demo + + +if __name__ == "__main__": + app = gradio_ui() + app.launch(share=True, debug=True) + + + diff --git a/community-contributions/biomedical-article-summariser/article_summariser.ipynb b/community-contributions/biomedical-article-summariser/article_summariser.ipynb new file mode 100644 index 0000000..2f8f59b --- /dev/null +++ b/community-contributions/biomedical-article-summariser/article_summariser.ipynb @@ -0,0 +1,298 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "eb1f9b94", + "metadata": {}, + "source": [ + "This Notebook is my exercise1 version. I have adapted the week 1 exercise solution to an article's abstract summariser using from Europe PMC's article API. [Europe PMC (EPMC)](https://europepmc.org/) is a free, open-access database that provides access to a wealth of life sciences and biomedical literature. It is an integral part of the PubMed Central International (PMCI) network, aggregating content from multiple sources and offering access to millions of scientific articles, research papers, and preprints, all in one place. 
\n", + "\n", + "My solution uses a provided article's PMCID(obtainable by selecting an article you wish to summarise from EPMC's website). PMCID are unique only to open-access articles and you can only use the function below for such articles. To get an article's PMICD: \n", + "To use:\n", + "1. Go to [EPMC's Website](https://europepmc.org/)\n", + "1. Use the search tab and search for articles by keywords, entities journal or Author's name. E.g Genes, Diseases, nutrition etc\n", + "2. Search for open-access articles by including the keyword `HAS_FT:Y` or `IN_EPMC:Y`. Example: `\"Genes: HAS_FT:Y\"`\n", + "3. Then your article of interest and copy the PMCID. \n", + "4. feed the PMCID into the `display_reponse` func to generate the summary from the article's abstract. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "53120ced", + "metadata": {}, + "outputs": [], + "source": [ + "import re\n", + "import pprint\n", + "from pprint import pformat\n", + "import requests\n", + "import functools\n", + "from typing import List, Tuple, Dict, Any\n", + "\n", + "from tqdm import tqdm\n", + "from loguru import logger\n", + "from bs4 import BeautifulSoup as bs\n", + "\n", + "from IPython.display import display, HTML, Markdown\n", + "\n", + "import ollama" + ] + }, + { + "cell_type": "markdown", + "id": "fbabbd46", + "metadata": {}, + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e6026505", + "metadata": {}, + "outputs": [], + "source": [ + "def catch_request_error(func):\n", + " \"\"\"\n", + " Wrapper func to catch request errors and return None if an error occurs.\n", + " Used as a decorator.\n", + " \"\"\"\n", + " @functools.wraps(func)\n", + " def wrapper(*args, **kwargs):\n", + " try:\n", + " return func(*args, **kwargs)\n", + " except requests.RequestException as e:\n", + " print(f\"Request error in {func.__name__}: {e}\")\n", + " return None\n", + " return wrapper" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "id": "2e9cfff6", + "metadata": {}, + "outputs": [], + "source": [ + "@catch_request_error\n", + "@logger.catch\n", + "def get_xml_from_url(url: str) -> bs:\n", + " \"\"\"\n", + " Fetches the XML content from Europe PMC website.\n", + "\n", + " Args:\n", + " url (str): Europe PMC's production url to fetch the XML from.\n", + "\n", + " Returns:\n", + " soup (bs4.BeautifulSoup): Parsed XML content.\n", + " \"\"\"\n", + " response = requests.get(url)\n", + " response.raise_for_status() #check for request errors\n", + " return bs(response.content, \"xml\") " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ade46c84", + "metadata": {}, + "outputs": [], + "source": [ + "def clean_text(text:str) -> str:\n", + " \"\"\"\n", + " This function cleans a text by filtering reference patterns in text, \n", + " extra whitespaces, escaped latex-style formatting appearing in text body instead of predefined latex tags\n", + "\n", + " Args: \n", + " text(str): The text to be cleaned\n", + " \n", + " Returns: \n", + " tex(str): The cleaned text \n", + " \n", + " \"\"\"\n", + " \n", + " # Remove LaTeX-style math and formatting tags #already filtered from soup content but some still appear\n", + " text = re.sub(r\"\\{.*?\\}\", \"\", text) # Matches and removes anything inside curly braces {}\n", + " text = re.sub(r\"\\\\[a-zA-Z]+\", \"\", text) # Matches and removes characters that appears with numbers\n", + " \n", + " # Remove reference tags like [34] or [1,2,3]\n", + " text = re.sub(r\"\\[\\s*(\\d+\\s*(,\\s*\\d+\\s*)*)\\]\", \"\", text)\n", + " \n", + " # Remove extra whitespace\n", + " text = re.sub(r\"\\s+\", \" \", text).strip()\n", + " \n", + " return text\n", + "\n", + "\n", + "def fetch_article_abstract(soup: bs) -> Tuple[str, str]:\n", + " \"\"\"\n", + " Extracts the abstract text from the XML soup.\n", + "\n", + " Args:\n", + " soup (bs4.BeautifulSoup): Parsed XML content.\n", + " Returns:\n", + " Tuple(article_title (str), 
abstract_text (str)): A tuple of the article's title and its extracted abstract text.\n", + " \"\"\"\n", + " if soup is None:\n", + " return \"No XML found\", \"\"\n", + " article_title = soup.find(\"article-title\").get_text(strip=True) if soup.find(\"article-title\") else \"No Title Found for this article\"\n", + "\n", + " abstract_tag = soup.find(\"abstract\")\n", + " if abstract_tag:\n", + " abstract_text = ' '.join([clean_text(p.get_text(strip=True)) for p in abstract_tag.find_all(\"p\") if p.get_text(strip=True)])\n", + " else:\n", + " abstract_text = \"\"\n", + " return article_title, abstract_text" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0686408b", + "metadata": {}, + "outputs": [], + "source": [ + "sys_prompt = \"\"\"You are an expert in biomedical text mining and information extraction. \n", + "You excel at breaking down complex articles into digestible contents for your audience. \n", + "Your audience can comprise of students, early researchers and professionals in the field.\n", + "Summarize the key findings in the following article [ARTICLE] .\n", + "Your summary should provide crucial points covered in the paper that helps your diverse audience quickly understand the most vital information. 
\n", + "Crucial points to consider:\n", + "- Main objectives of the study\n", + "- Key findings and results\n", + "- Methodologies used\n", + "- Implications of the findings(if any)\n", + "- Any limitations or future directions mentioned\n", + "\n", + "Format: Provide your summary in bullet points highlighting key areas followed with a concise paragraph that encapsulates the results of the paper.\n", + "\n", + "The tone should be professional and clear.\n", + "\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7cd5ca3a", + "metadata": {}, + "outputs": [], + "source": [ + "MODEL = \"llama3.2\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fb1c2ccd", + "metadata": {}, + "outputs": [], + "source": [ + "def build_message(article_title: str, abstract_text: str, sys_prompt:str=sys_prompt) -> List[Dict[str, str]]:\n", + " \"\"\"\n", + " Constructs the message payload for the LLM.\n", + "\n", + " Args:\n", + " article_title (str): The title of the article.\n", + " abstract_text (str): The abstract text of the article.\n", + "\n", + " Returns:\n", + " List[Dict[str, str]]: A list of message dictionaries for the LLM.\n", + " \"\"\"\n", + " user_prompt = f\"\"\"You are looking at an article with title: {article_title}. \n", + " The article's abstract is as follows: \\n{abstract_text}.\n", + " Summarise the article. 
Start your summary by providing a short sentence on what the article is about \n", + " and then a bulleted list of the key points covered in the article.\n", + "\"\"\"\n", + " messages = [\n", + " {\"role\": \"system\", \"content\": sys_prompt},\n", + " {\"role\": \"user\", \"content\": user_prompt}\n", + " ]\n", + " return messages" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "80facfc7", + "metadata": {}, + "outputs": [], + "source": [ + "def generate_response(messages, model=MODEL):\n", + " response = ollama.chat(model=model, messages=messages)\n", + " return response[\"message\"][\"content\"]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "87fb0621", + "metadata": {}, + "outputs": [], + "source": [ + "#combine everything to main function\n", + "def display_reponse(article_id:str):\n", + " if article_id and not re.match(r\"^PMC\\d{5,8}$\", article_id):\n", + " raise ValueError(\"Please check the length/Format of the provided Article ID. 
It should start with 'PMC' followed by 5 to 8 digits, e.g., 'PMC1234567'.\")\n", + " url = f\"https://www.ebi.ac.uk/europepmc/webservices/rest/{article_id}/fullTextXML\"\n", + " soup = get_xml_from_url(url)\n", + " article_title, abstract_text = fetch_article_abstract(soup)\n", + " messages = build_message(article_title, abstract_text)\n", + " response = generate_response(messages)\n", + "\n", + " display(Markdown(f\"### Article Title: {article_title}\"))\n", + " display(Markdown(f\"### LLM Response: \\n{response}\"))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e3177b43", + "metadata": {}, + "outputs": [], + "source": [ + "#add your article's PMCID here to test the function\n", + "display_reponse(\"PMC7394925\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ffa39234", + "metadata": {}, + "outputs": [], + "source": [ + "display_reponse(\"PMC12375411\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0532123e", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "llm-course", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/week1/community-contributions/accommodation_assistant.ipynb b/week1/community-contributions/accommodation_assistant.ipynb new file mode 100644 index 0000000..5276ac4 --- /dev/null +++ b/week1/community-contributions/accommodation_assistant.ipynb @@ -0,0 +1,260 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "44b58c16-8319-4095-b194-85b58928e6fd", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import requests\n", + "import json\n", + "import 
re\n", + "from typing import List, Dict\n", + "from bs4 import BeautifulSoup\n", + "from openai import OpenAI\n", + "from selenium import webdriver\n", + "from selenium.webdriver.chrome.service import Service\n", + "from selenium.webdriver.chrome.options import Options" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5bcb4ab0-30f6-4f29-a97e-02ff6e287c37", + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, + "tags": [] + }, + "outputs": [], + "source": [ + "MODEL = \"llama3.2\"\n", + "openai = OpenAI(base_url = \"http://localhost:11434/v1\", api_key = \"ollama\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c6d30cf9-0b57-44b3-a81a-ccbd622140c3", + "metadata": {}, + "outputs": [], + "source": [ + "class HotelListing:\n", + " def __init__(self, name, price, url, features = None):\n", + " self.name = name\n", + " self.price = price\n", + " self.url = url\n", + " self.features = features or []\n", + " def to_dict(self):\n", + " return {\n", + " \"name\": self.name,\n", + " \"price\": self.price,\n", + " \"url\": self.url,\n", + " \"features\": self.features\n", + " }" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3c547397-3e14-44dc-b08e-c192028d9ded", + "metadata": {}, + "outputs": [], + "source": [ + "class BookingParser:\n", + " def __init__(self, url, headers = None):\n", + " self.url = url\n", + " self.headers = headers or {\"User-Agent\": \"Mozilla/5.0\"}\n", + " self.listings = []\n", + " self.fetch_and_parse()\n", + "\n", + " def fetch_and_parse(self):\n", + " try:\n", + " request = requests.get(self.url, headers = self.headers, timeout = 10)\n", + " request.raise_for_status()\n", + " except Exception as e:\n", + " print(f\"Page download error: {e}\")\n", + " return\n", + "\n", + " soup = BeautifulSoup(request.content, \"html.parser\")\n", + "\n", + " hotel_cards = soup.find_all(\"div\", {\"data-stid\": \"property-listing-results\"})\n", + "\n", + " if not 
hotel_cards:\n", + " hotel_cards = soup.find_all(\"div\", class_ = re.compile(\"property-listing|property-card-card-results\"))\n", + "\n", + " for card in hotel_cards[:10]:\n", + " listing = self._parse_hotel_card(card)\n", + " if listing:\n", + " self.listings.append(listing)\n", + "\n", + " def _parse_hotel_card(self, card):\n", + " try:\n", + " name_element = card.find(\"a\", {\"data-stid\": \"open-hotel-information\"})\n", + " if not name_element:\n", + " name_element = card.find(\"h3\") or car.find(\"span\", class_ = re.compile(\"is-visually-hidden\"))\n", + " name = name_element.get_text(strip = True) if name_element else \"name unknown\"\n", + "\n", + " price_element = card.find(\"span\", {\"class\": \"uitk-badge-base-text\"})\n", + "\n", + " price_text = price_element.get_text(strip = True) if price_element else \"0\"\n", + " price_match = request.search(r'(\\d+)', price_text.replace('$', ''))\n", + " price = int(price_match.group(1)) if price_match else 0\n", + "\n", + " link_element = card.find(\"a\", href = True)\n", + " url = \"https://www.hotels.com\" + link_element[\"href\"] if link_element else \"\"\n", + "\n", + " features = []\n", + " feature_spans = card.select('[data-stid=\"sp-content-list\"]')\n", + " if feature_spans:\n", + " items = feature_spans[0].select('li[data-stid^=\"sp-content-item\"]')\n", + " \n", + " for item in items:\n", + " text = item.get_text(strip=True)\n", + " if text:\n", + " features.append(text.lower())\n", + "\n", + " card_text = card.get_text().lower()\n", + " if \"wi-fi\" in card_text or \"wifi\" in card_text:\n", + " features.append(\"wifi\")\n", + " if \"breakfest\" in card_text:\n", + " features.append(\"breakfest\")\n", + "\n", + " return HotelListing(name, price, url, features)\n", + " except Exception as e:\n", + " print(f\"Parsing hotel card error: {e}\")\n", + " return None\n", + "\n", + " def get_listings(self):\n", + " return [listing.to_dict() for listing in self.listings]\n", + " " + ] + }, + { + 
"cell_type": "code", + "execution_count": null, + "id": "9e700023-1f0c-4e8b-a823-c5e3ce9bfb28", + "metadata": {}, + "outputs": [], + "source": [ + "def make_prompt(listings: List[Dict], user_preferences: Dict):\n", + " prompt = (\n", + " \"You are an assistant and help a user in accommodation choosing.\\n\"\n", + " \"Below is a list of hotel offers and user preferences.\\n\"\n", + " \"HOTELS OFERTS:\\n\"\n", + " f\"{json.dumps(listings, ensure_ascii = False, indent = 1)}\\n\\n\"\n", + " \"USER PREFERENCES:\\n\"\n", + " f\"{json.dumps(user_preferences, ensure_ascii = False, indent = 1)}\\n\\n\"\n", + " \"For every ofert:\\n\"\n", + " \"1) Assess suitability in 0-10 rate (where 10 = ideal suitability)\\n\"\n", + " \"2) Give 2-3 short reasons for your assessment\\n\"\n", + " \"3) Please indicate if the price is within your budget\\n\"\n", + " \"Finally, list the TOP 3 best offers with justification.\\n\"\n", + " )\n", + " return prompt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "58fa69bd-162b-4088-91ab-fe1fc39b4a50", + "metadata": {}, + "outputs": [], + "source": [ + "def analyze_listings(listings: List[Dict], preferences: Dict):\n", + " if not listings:\n", + " print(\"No offers to analyze.\")\n", + " return None\n", + "\n", + " prompt = make_prompt(listings, preferences)\n", + "\n", + " try:\n", + " response = openai.chat.completions.create(\n", + " model = MODEL,\n", + " messages = [\n", + " {\n", + " \"role\": \"system\",\n", + " \"content\": \"You are an expert in choosing the best accommodation.\\n\" \n", + " \"You analyze offers and advise users.\"\n", + " },\n", + " {\"role\": \"user\", \"content\": prompt}\n", + " ]\n", + " )\n", + "\n", + " result = response.choices[0].message.content\n", + " return result\n", + "\n", + " except Exception as e:\n", + " print(f\"Communication error with LLM: {e}\")\n", + " return None" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b4ade5a4-3a3c-422d-9740-d3b647996222", + 
"metadata": {}, + "outputs": [], + "source": [ + "def main():\n", + " url = (\"https://www.hotels.com/Hotel-Search?destination=Warsaw%20-%20Eastern%20Poland%2C%20Poland&d1=2025-10-18&startDate=2025-10-18&d2=2025-10-20&endDate=2025-10-20&adults=1&rooms=1®ionId=6057142&sort=RECOMMENDED&theme=&userIntent=&semdtl=&categorySearch=&useRewards=false&children=&latLong&pwaDialog=&daysInFuture&stayLength\")\n", + "\n", + " preferences = {\n", + " \"max_price\": 200,\n", + " \"must_have\": [\"wifi\", \"breakfest\"],\n", + " \"number_of_rooms\": 1,\n", + " \"localization\": \"Warsaw\"\n", + " }\n", + "\n", + " print(\"🔍 Oferts downloading from Hotels.com..\")\n", + " parser = BookingParser(url)\n", + " listings = parser.get_listings()\n", + "\n", + " print(f\"✅ Found {len(listings)} offerts\\n\")\n", + " print(\"=\"*60)\n", + "\n", + " print(\"FOUND OFFERTS:\\n\")\n", + " for i, listing in enumerate(listings, 1):\n", + " print(f\"\\n{i}. {listing['name']}\")\n", + " print(f\"Amount: {listing['price']} pln\")\n", + " print(f\"Features: {', '.join(listing['features']) if listing['features'] else 'Informations lack.'}\")\n", + "\n", + " analysis = analyze_listings(listings, preferences)\n", + "\n", + " if analysis:\n", + " print(analysis)\n", + " else:\n", + " print(\"❌ Analysis failed\")\n", + "\n", + "if __name__ == \"__main__\":\n", + " main()\n", + " " + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.13.7" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/week1/community-contributions/company_brochure_relevent_links.ipynb b/week1/community-contributions/company_brochure_relevent_links.ipynb new file mode 100644 index 0000000..bcdb4b1 
--- /dev/null +++ b/week1/community-contributions/company_brochure_relevent_links.ipynb @@ -0,0 +1,279 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "4e5da3f5-ebd0-4e20-ab89-95847187287b", + "metadata": {}, + "outputs": [], + "source": [ + "import requests\n", + "import json\n", + "from typing import List\n", + "from bs4 import BeautifulSoup\n", + "from IPython.display import Markdown, display, update_display, clear_output\n", + "from openai import OpenAI\n", + "from dotenv import load_dotenv\n", + "import os\n", + "from scraper import fetch_website_links, fetch_website_contents" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "86adec56-3b27-46da-9b1a-1e5946a76a09", + "metadata": {}, + "outputs": [], + "source": [ + "load_dotenv(override=True)\n", + "api_key = os.getenv('OPENROUTER_API_KEY')\n", + "\n", + "# Check the key\n", + "\n", + "if not api_key:\n", + " print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n", + "elif api_key.strip() != api_key:\n", + " print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n", + "else:\n", + " print(\"API key found and looks good so far!\")\n", + "headers = {\n", + " \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n", + "}\n", + "\n", + "openrouter_url = \"https://openrouter.ai/api/v1\"\n", + "openai = OpenAI(api_key=api_key, base_url=openrouter_url)\n", + "MODEL = \"gpt-5-nano\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "abf2f706-2709-404a-9fb7-774a9f57dd11", + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, + "tags": [] + }, + "outputs": [], + "source": [ + "company_name = input(\"Enter the company name: \")\n", + "url = input(\"Enter the company url: \")" + ] 
+ }, + { + "cell_type": "code", + "execution_count": null, + "id": "153fa3d1-3ce5-46d0-838d-3e95a4b8628b", + "metadata": {}, + "outputs": [], + "source": [ + "link_system_prompt = \"You are provided with a list of links found on a webpage. You are able to decide which of the links would be most relevant to include in a brochure about the company, such as links to an About page, or a Company page, or Careers/Jobs pages.\\n\"\n", + "link_system_prompt += \"You should respond in JSON as in this example:\"\n", + "link_system_prompt += \"\"\"\n", + " EXAMPLE 1:\n", + " {\n", + " \"links\": [\n", + " {\"type\": \"about page\", \"url\": \"https://full.url/goes/here/about\"},\n", + " {\"type\": \"careers page\", \"url\": \"https://another.full.url/careers\"}\n", + " ]\n", + " }\n", + " EXAMPLE 2:\n", + " {\n", + " \"links\": [\n", + " {\"type\": \"company blog\", \"url\": \"https://blog.example.com\"},\n", + " {\"type\": \"our story\", \"url\": \"https://example.com/our-story\"}\n", + " ]\n", + " }\n", + " \"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6fcacc2e-7445-4d8a-aa80-489d3a2247ec", + "metadata": {}, + "outputs": [], + "source": [ + "def get_links_user_prompt(url):\n", + " user_prompt = f\"Here is the list of links on the website of {url} - \"\n", + " user_prompt += \"please decide which of these are relevant web links for a brochure about the company, respond with the full https URL in JSON format. 
Do not include Terms of Service, Privacy, email links.\\n\"\n", + " user_prompt += \"Links (some might be relative links):\\n\"\n", + " links = fetch_website_links(url)\n", + " user_prompt += \"\\n\".join(links[:20])\n", + " return user_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "dfe222c5-0d3e-4be2-85e1-596ab9d407dc", + "metadata": {}, + "outputs": [], + "source": [ + "def get_links(url):\n", + " response = openai.chat.completions.create(\n", + " model = MODEL,\n", + " messages = [\n", + " {\"role\": \"system\", \"content\": link_system_prompt},\n", + " {\"role\": \"user\", \"content\": get_links_user_prompt(url)}\n", + " ],\n", + " response_format = {\"type\": \"json_object\"}\n", + " )\n", + " result = response.choices[0].message.content\n", + " return json.loads(result)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c964bdce-be5d-41c7-a8d7-8e25e58463c5", + "metadata": {}, + "outputs": [], + "source": [ + "def get_all_details(url):\n", + " result = \"Landing page:\\n\"\n", + " result += fetch_website_contents(url)\n", + " links = get_links(url)\n", + "\n", + " for link in links[\"links\"]:\n", + " result += f\"{link['type']}\\n\"\n", + " try:\n", + " result += f\"\\n\\n### Link: Link: {link['type']}\\n\"\n", + " result += fetch_website_contents(link[\"url\"])\n", + " except Exception as e:\n", + " print(f\"Omitted link: {link['url']}: {e}\")\n", + " continue\n", + " return result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5337019a-b789-49d7-bf10-0f15148c0276", + "metadata": {}, + "outputs": [], + "source": [ + "system_prompt = (\n", + " \"You are an assistant that analyzes the contents of several relevant pages from a company website \"\n", + " \"and creates a great type of brochure about the company for prospective customers, investors, and recruits. \"\n", + " \"Respond in markdown. 
Include details of company culture, customers, and careers/jobs if you have the information. Add emoticons where ever possible.\\n\\n\"\n", + "\n", + " \"Please structure the brochure using the following sections:\\n\"\n", + " \"1. **Introduction**: A brief overview of the company.\\n\"\n", + " \"2. **Company Culture**: Emphasize fun, atmosphere, and any unique cultural elements.\\n\"\n", + " \"3. **Customers**: Mention notable customers or industries.\\n\"\n", + " \"4. **Careers/Jobs**: Highlight career opportunities.\\n\"\n", + " \"5. **Conclusion**: Wrap up with a final lighthearted message.\\n\"\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1dd4f2d4-8189-452a-b15a-c09ae5894ac8", + "metadata": {}, + "outputs": [], + "source": [ + "def get_brochure_user_prompt(company_name, url):\n", + " user_prompt = f\"You are looking at a company called: {company_name}\\n\"\n", + " user_prompt += f\"Here are the contents of its landing page and other relevant pages; use this information to build a short brochure of the company in markdown.\\n\"\n", + " user_prompt += get_all_details(url)\n", + " user_prompt = user_prompt[:20000]\n", + " return user_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8ab4bfef-eb22-43fb-8a46-f1f6a225793b", + "metadata": {}, + "outputs": [], + "source": [ + "def stream_brochure():\n", + " global brochure_text\n", + " brochure_text = \"\"\n", + "\n", + " stream = openai.chat.completions.create(\n", + " model = MODEL,\n", + " messages = [\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n", + " ],\n", + " stream = True\n", + " )\n", + "\n", + " response = \"\"\n", + " display_handle = display(Markdown(\"\"), display_id = True)\n", + " for chunk in stream:\n", + " content = chunk.choices[0].delta.content or ''\n", + " response += content\n", + " brochure_text += content\n", + " response = 
response.replace(\"```\", \"\"). replace(\"markdown\", \"\")\n", + " update_display(Markdown(response), display_id = display_handle.display_id)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7828c747-7872-48e2-b3e6-faab95ba76cb", + "metadata": {}, + "outputs": [], + "source": [ + "def user_translate_brochure(language):\n", + " clear_output(wait = True)\n", + "\n", + " translation_stream = openai.chat.completions.create(\n", + " model = MODEL,\n", + " messages = [\n", + " {\"role\": \"user\", \"content\": f\"Translate the following to {language}:\\n {brochure_text}\"}\n", + " ],\n", + " stream = True\n", + " )\n", + "\n", + " display_handle = display(Markdown(\"\"), display_id = True)\n", + " translated_text = \"\"\n", + "\n", + " for chunk in translation_stream:\n", + " content = chunk.choices[0].delta.content or \"\"\n", + " if content:\n", + " translated_text += content\n", + " update_display(Markdown(translated_text), display_id = display_handle.display_id)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e6cfa92a-8a86-485d-a7e1-1651705ee6dc", + "metadata": {}, + "outputs": [], + "source": [ + "stream_brochure()\n", + "language_choice = input(\"Enter the language to translate the brochure into (e.g., 'French'): \")\n", + "user_translate_brochure(language_choice)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.13.7" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/week1/community-contributions/dkisselev-zz/week1 EXERCISE.ipynb b/week1/community-contributions/dkisselev-zz/week1 EXERCISE.ipynb new file mode 100644 index 0000000..5b4df5d --- /dev/null +++ 
b/week1/community-contributions/dkisselev-zz/week1 EXERCISE.ipynb @@ -0,0 +1,161 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "fe12c203-e6a6-452c-a655-afb8a03a4ff5", + "metadata": {}, + "source": [ + "# End of week 1 exercise\n", + "\n", + "To demonstrate your familiarity with OpenAI API, and also Ollama, build a tool that takes a technical question, \n", + "and responds with an explanation. This is a tool that you will be able to use yourself during the course!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c1070317-3ed9-4659-abe3-828943230e03", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "import os\n", + "import json\n", + "from dotenv import load_dotenv\n", + "from IPython.display import Markdown\n", + "from openai import OpenAI" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4a456906-915a-4bfd-bb9d-57e505c5093f", + "metadata": {}, + "outputs": [], + "source": [ + "# constants\n", + "\n", + "MODEL_GPT = 'gpt-4o-mini'\n", + "MODEL_LLAMA = 'llama3.2'\n", + "OLLAMA_BASE_URL = \"http://localhost:11434/v1\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a8d7923c-5f28-4c30-8556-342d7c8497c1", + "metadata": {}, + "outputs": [], + "source": [ + "# set up environment\n", + "\n", + "load_dotenv(override=True)\n", + "api_key = os.getenv('OPENAI_API_KEY')\n", + "\n", + "if api_key and api_key.startswith('sk-proj-') and len(api_key)>10:\n", + " print(\"API key looks good so far\")\n", + "else:\n", + " print(\"There might be a problem with your API key? 
Please visit the troubleshooting notebook!\")\n", + " \n", + "openai = OpenAI()\n", + "ollama = OpenAI(base_url=OLLAMA_BASE_URL, api_key='ollama')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3f0d0137-52b0-47a8-81a8-11a90a010798", + "metadata": {}, + "outputs": [], + "source": [ + "# here is the question; type over this to ask something new\n", + "\n", + "question = \"\"\"\n", + "Please explain what this code does and why:\n", + "yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "df0d958f", + "metadata": {}, + "outputs": [], + "source": [ + "system_prompt = \"\"\"You are individual that possesses a unique\n", + "and highly valuable combination of deep technical\n", + "expertise and excellent communication skills.\n", + "You grasp complex, specialized concepts and then distill\n", + "them into simple, understandable terms for people without\n", + "the same technical background.\n", + "\n", + "Present your answer as markdown\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13506dd4", + "metadata": {}, + "outputs": [], + "source": [ + "messages = [\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": question}\n", + " ]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "60ce7000-a4a5-4cce-a261-e75ef45063b4", + "metadata": {}, + "outputs": [], + "source": [ + "# Get gpt-4o-mini to answer, with streaming\n", + "\n", + "response = openai.chat.completions.create(model=MODEL_GPT, messages=messages)\n", + "result = response.choices[0].message.content\n", + "display(Markdown(result))\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8f7c8ea8-4082-4ad0-8751-3301adcf6538", + "metadata": {}, + "outputs": [], + "source": [ + "# Get Llama 3.2 to answer\n", + "\n", + "response = 
ollama.chat.completions.create(model=MODEL_LLAMA, messages=messages)\n", + "result = response.choices[0].message.content\n", + "display(Markdown(result))" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.10" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/week1/community-contributions/ranskills-week1-mathxpert.ipynb b/week1/community-contributions/ranskills-week1-mathxpert.ipynb new file mode 100644 index 0000000..fb5d4a6 --- /dev/null +++ b/week1/community-contributions/ranskills-week1-mathxpert.ipynb @@ -0,0 +1,220 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "fe12c203-e6a6-452c-a655-afb8a03a4ff5", + "metadata": {}, + "source": [ + "# End of week 1 exercise\n", + "\n", + "## Dynamically pick an LLM provider to let MathXpert answer your math questions." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c1070317-3ed9-4659-abe3-828943230e03", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "from enum import StrEnum\n", + "from getpass import getpass\n", + "\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "import ipywidgets as widgets\n", + "from IPython.display import display, clear_output, Markdown, Latex\n", + "\n", + "load_dotenv()" + ] + }, + { + "cell_type": "markdown", + "id": "f169118a-645e-44e1-9a98-4f561adfbb08", + "metadata": {}, + "source": [ + "## Free Cloud Providers\n", + "\n", + "Grab your free API Keys from these generous sites:\n", + "\n", + "- https://openrouter.ai/\n", + "- https://ollama.com/" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4a456906-915a-4bfd-bb9d-57e505c5093f", + "metadata": {}, + "outputs": [], + "source": [ + "class Provider(StrEnum):\n", + " OLLAMA = 'Ollama'\n", + " OPENROUTER = 'OpenRouter'\n", + "\n", + "models: dict[Provider, str] = {\n", + " Provider.OLLAMA: 'gpt-oss:120b-cloud',\n", + " Provider.OPENROUTER: 'qwen/qwen3-4b:free'\n", + "}\n", + "\n", + "def get_api_key(env_name: str) -> str:\n", + " '''Gets the value from the environment, otherwise ask the user for it if not set'''\n", + " key = os.environ.get(env_name)\n", + " if not key:\n", + " key = getpass(f'Enter {env_name}:').strip()\n", + "\n", + " if key:\n", + " print(f'✅ {env_name} provided')\n", + " else:\n", + " print(f'❌ {env_name} provided')\n", + " return key\n", + "\n", + "\n", + "providers: dict[Provider, OpenAI] = {}\n", + "\n", + "if api_key := get_api_key('OLLAMA_API_KEY'):\n", + " providers[Provider.OLLAMA] = OpenAI(base_url='https://ollama.com/v1', api_key=api_key)\n", + "\n", + "if api_key := get_api_key('OPENROUTER_API_KEY'):\n", + " providers[Provider.OPENROUTER] = OpenAI(base_url='https://openrouter.ai/api/v1', api_key=api_key)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": 
"a8d7923c-5f28-4c30-8556-342d7c8497c1", + "metadata": {}, + "outputs": [], + "source": [ + "def get_messages(question: str) -> list[dict[str, str]]:\n", + " \"\"\"Generate messages for the chat models.\"\"\"\n", + "\n", + " system_prompt = '''\n", + " You are MathXpert, an expert Mathematician who makes math fun to learn by relating concepts to real \n", + " practical usage to whip up the interest in learners.\n", + " \n", + " Explain step-by-step thoroughly how to solve a math problem. Respond in **LaTex**'\n", + " '''\n", + "\n", + " return [\n", + " {'role': 'system', 'content': system_prompt },\n", + " {'role': 'user', 'content': question},\n", + " ]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ef72c85e", + "metadata": {}, + "outputs": [], + "source": [ + "get_messages('Explain how to solve a differentiation problem')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "aae1579b-7a02-459d-81c6-0f775d2a1410", + "metadata": {}, + "outputs": [], + "source": [ + "selected_provider, client = next(iter(providers.items()))\n", + "\n", + "def on_provider_change(change):\n", + " global selected_provider, client\n", + "\n", + " selected_provider = change['new']\n", + " client = providers.get(selected_provider)\n", + "\n", + "\n", + "provider_selector = widgets.Dropdown(\n", + " options=list(providers.keys()),\n", + " description='Select an LLM provider:',\n", + " style={'description_width': 'initial'},\n", + ")\n", + "\n", + "provider_selector.observe(on_provider_change, names='value')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8f7c8ea8-4082-4ad0-8751-3301adcf6538", + "metadata": {}, + "outputs": [], + "source": [ + "handle = display(None, display_id=True)\n", + "\n", + "def ask(client: OpenAI, model: str, question: str):\n", + " try:\n", + " prompt = get_messages(question=question)\n", + " response = client.chat.completions.create(\n", + " model=model,\n", + " messages=prompt,\n", + " 
stream=True,\n", + " )\n", + " \n", + " output = ''\n", + " for chunk in response:\n", + " output += chunk.choices[0].delta.content or ''\n", + " \n", + " handle.update(Latex(output))\n", + " except Exception as e:\n", + " clear_output(wait=True) \n", + " print(f'🔥 An error occurred: {e}')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "09bc9a11-adb4-4a9c-9c77-73b2b5a665cf", + "metadata": {}, + "outputs": [], + "source": [ + "display(provider_selector)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e01069b2-fd2c-446f-b385-09c1d9624225", + "metadata": {}, + "outputs": [], + "source": [ + "input_label = \"Ask your question (Type 'q' to quit): \"\n", + "question = input(input_label)\n", + "\n", + "while question.strip().lower() not in ['quit', 'q']:\n", + " clear_output(wait=True)\n", + " print(selected_provider, models[selected_provider])\n", + " model = models[selected_provider]\n", + " ask(client, model, question)\n", + "\n", + " question = input(input_label)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/week1/community-contributions/week1_exercise_jom.ipynb b/week1/community-contributions/week1_exercise_jom.ipynb new file mode 100644 index 0000000..f3ddf62 --- /dev/null +++ b/week1/community-contributions/week1_exercise_jom.ipynb @@ -0,0 +1,225 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "fe12c203-e6a6-452c-a655-afb8a03a4ff5", + "metadata": {}, + "source": [ + "# End of week 1 exercise\n", + "\n", + "To demonstrate your familiarity with OpenAI API, and also Ollama, build a tool that takes a 
technical question, \n", + "and responds with an explanation. This is a tool that you will be able to use yourself during the course!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c1070317-3ed9-4659-abe3-828943230e03", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "import os\n", + "from openai import OpenAI\n", + "from dotenv import load_dotenv\n", + "from IPython.display import display, Markdown, update_display\n", + "\n", + "load_dotenv(override=True)\n", + "api_key = os.getenv('OPENAI_API_KEY')\n" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "3f0d0137-52b0-47a8-81a8-11a90a010798", + "metadata": {}, + "outputs": [], + "source": [ + "# here is the question; type over this to ask something new\n", + "\n", + "question = \"\"\"\n", + "Please explain what this code does and why:\n", + "yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "16ec5d8a", + "metadata": {}, + "outputs": [], + "source": [ + "system_prompt = \"\"\"\n", + "You are a helpful tutor that explains code. 
You need to provide an answer structured in markdown without code blocksinto the following parts:\n", + "- Identify the topic of the question (so the user can look for more info)\n", + "- Give an ELI5 explanation of the question.\n", + "- Give a step by step explanation of the code.\n", + "- Ask the user a follow up question or variation of the question to see if they understand the concept.\n", + "- Give the answer to the followup question as a spoiler\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "60ce7000-a4a5-4cce-a261-e75ef45063b4", + "metadata": {}, + "outputs": [], + "source": [ + "# Get gpt-4o-mini to answer, with streaming\n", + "\n", + "from enum import StrEnum\n", + "\n", + "class Enum_Model(StrEnum):\n", + " GPT = 'gpt-4o-mini'\n", + " LLAMA = 'llama3.2:1b'\n", + "\n", + "def ask_tutor(question, model: str | Enum_Model):\n", + " if isinstance(model, str):\n", + " try:\n", + " model = Enum_Model[model.upper()]\n", + " except KeyError:\n", + " raise ValueError(f\"Unknown model: {model}\")\n", + " if model == Enum_Model.LLAMA:\n", + " LLM_ENDPOINT=\"http://localhost:11434/v1\"\n", + " client = OpenAI(base_url=LLM_ENDPOINT)\n", + " elif model == Enum_Model.GPT:\n", + " client = OpenAI()\n", + " else:\n", + " raise ValueError(f\"Unknown model: {model}\")\n", + " stream = client.chat.completions.create(\n", + " model=model,\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_prompt},\n", + " {\"role\": \"user\", \"content\": question}],\n", + " stream=True\n", + " )\n", + " response = \"\"\n", + " display_handle = display(Markdown(\"\"), display_id=True)\n", + " for chunk in stream:\n", + " response += chunk.choices[0].delta.content or ''\n", + " update_display(Markdown(response), display_id=display_handle.display_id)" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "id": "b0c30dbf", + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "## Explanation of the Code\n", + 
"\n", + "This piece of code is using a concept called \"generators\" in Python. The function `yield from` is used to generate a sequence of values on-the-fly, without needing to store them all in memory at once.\n", + "\n", + "### ELI5 Explanation:\n", + "\n", + "Think of it like this: Imagine you're trying to create a long list of authors for books. If you were to write the entire list out and then return it, you might run into a problem because Python doesn't know where to put it until you actually read it. That process can be slow.\n", + "\n", + "This type of code is like writing the list once and giving it to your friends who can then use it wherever they need. Instead of having all that data inside one giant list in its entirety, this generator creates a small amount of data each time it's needed (like when someone calls `get(\"author\")` on an author object). It's called a \"generator\" because it generates values rather than computing them and storing them all at once.\n", + "\n", + "### Code Explanation:\n", + "\n", + "This code is defining a function that takes in another function as an argument (`books`) but doesn't use its return value directly. Instead, it uses Python's `yield from` feature to call the inner function (`book.get(\"author\") for book in books`) repeatedly until it encounters a stop signal.\n", + "\n", + "Here's what the code does step by step:\n", + "1. It starts with defining an empty sequence variable `result` that will hold our answer (because this is going to be another generator).\n", + "2. It opens the `books` collection, which presumably contains a list of book objects.\n", + "3. It then uses an expression to iterate over each book in the `books` collection and call the anonymous function (`book.get(\"author\")`) on it.\n", + "4. The result of this iteration is what's assigned to our sequence variable `result`.\n", + "5. 
`result` is an iterator, and when we do `yield from`, Python switches control to the generator inside `books`.\n", + "6. This happens until someone calls the `stop()` function on the outer generator (perhaps as a way to explicitly exit the generation process) or if there's no more data left in the collection.\n", + "7. At that point, Python returns control back to our inner function (`book.get(\"author\")`), which now has its own sequence variable and can keep generating values until you call `stop()` on it.\n", + "\n", + "### Follow-up Question:\n", + "\n", + "How would this code be used if there were already some data stored elsewhere on the computer?\n", + "\n", + "SPOILER ALERT:\n", + "\n", + "The answer is that this type of code doesn't actually store anything in memory. Instead, it's just using a trick to tell Python (and JavaScript) which values we'd like it to generate based on an iterable (`books`), without having to store those ones in its actual database or cache beforehand." + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "ask_tutor(question, 'LLAMA')\n" + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "id": "8f7c8ea8-4082-4ad0-8751-3301adcf6538", + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "### Topic: Python Generators and Set Comprehensions\n", + "\n", + "### ELI5 Explanation\n", + "Imagine you have a big box of books, and each book has different information like the title and the author. You want to make a special list of all the authors of the books you have, but you only want to write down the names once, even if the same author wrote multiple books. This code snippet is doing just that! It's like saying, \"Hey! For all the books in this box, find out who the authors are and tell me their names, but don’t tell me any duplicates.\"\n", + "\n", + "### Step by Step Explanation of the Code\n", + "1. 
**Book Collection**: The variable `books` is expected to be a collection (like a list) of dictionaries. Each dictionary represents a book with various attributes like \"author,\" \"title,\" etc.\n", + " \n", + "2. **List Comprehension**: The code uses a set comprehension `{...}` to create a set of authors.\n", + " - Inside the braces `{}`, it iterates over each `book` in the collection `books`.\n", + " - For each `book`, it retrieves the author's name using `book.get(\"author\")`. The `get` method is safe—it won’t cause an error if the key \"author\" doesn’t exist; it just returns `None`.\n", + "\n", + "3. **Filtering**: The `if book.get(\"author\")` part ensures that only books with a valid author are considered. It filters out any books that don’t have an author specified.\n", + "\n", + "4. **Yielding Results**: The `yield from` statement is used to yield each element from the set of authors (which automatically handles duplicates by using the properties of a set) back to wherever this generator function is called. This means it will return each author's name one by one when called upon.\n", + "\n", + "### Follow-up Question\n", + "What would happen if you used a list comprehension instead of a set comprehension in this code? Would you still get unique authors?\n", + "\n", + "### Answer to the Follow-up Question: \n", + "
\n", + "Click to see the answer\n", + "No, using a list comprehension would collect all authors, including duplicates. You would get a list of authors that may have repeated names if multiple books were written by the same author.\n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "ask_tutor(question, 'GPT')\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/week2/community-contributions/dkisselev-zz/week2 EXERCISE.ipynb b/week2/community-contributions/dkisselev-zz/week2 EXERCISE.ipynb new file mode 100644 index 0000000..280ca61 --- /dev/null +++ b/week2/community-contributions/dkisselev-zz/week2 EXERCISE.ipynb @@ -0,0 +1,467 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "d006b2ea-9dfe-49c7-88a9-a5a0775185fd", + "metadata": {}, + "source": [ + "# Additional End of week Exercise - week 2\n", + "\n", + "Now use everything you've learned from Week 2 to build a full prototype for the technical question/answerer you built in Week 1 Exercise.\n", + "\n", + "This should include a Gradio UI, streaming, use of the system prompt to add expertise, and the ability to switch between models. Bonus points if you can demonstrate use of a tool!\n", + "\n", + "If you feel bold, see if you can add audio input so you can talk to it, and have it respond with audio. ChatGPT or Claude can help you, or email me if you have questions.\n", + "\n", + "I will publish a full solution here soon - unless someone beats me to it...\n", + "\n", + "There are so many commercial applications for this, from a language tutor, to a company onboarding solution, to a companion AI to a course (like this one!) I can't wait to see your results." 
# Imports and Setup
import os
import json
import base64
from io import BytesIO
from dotenv import load_dotenv
from openai import OpenAI
import gradio as gr
from PIL import Image

# Load environment variables
load_dotenv(override=True)

# API keys (None when the environment variable is absent).
openai_api_key = os.getenv('OPENAI_API_KEY')
anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')
google_api_key = os.getenv('GOOGLE_API_KEY')

# Report which keys are present, revealing only a short prefix of each.
for label, key, shown in (
    ("OpenAI", openai_api_key, 8),
    ("Anthropic", anthropic_api_key, 7),
    ("Google", google_api_key, 8),
):
    if key:
        print(f"{label} API Key exists and begins {key[:shown]}")
    else:
        print(f"{label} API Key not set")

# OpenAI-compatible endpoints for the non-OpenAI providers.
anthropic_url = "https://api.anthropic.com/v1/"
gemini_url = "https://generativelanguage.googleapis.com/v1beta/openai/"

# One OpenAI-SDK client per provider.
openai = OpenAI()
anthropic = OpenAI(api_key=anthropic_api_key, base_url=anthropic_url)
gemini = OpenAI(api_key=google_api_key, base_url=gemini_url)


# UI label -> concrete model id.
MODELS = {
    "GPT-5": "gpt-5-mini",
    "Claude Sonnet": "claude-sonnet-4-5-20250929",
    "Gemini": "gemini-2.5-flash"
}

print("All API clients initialized successfully!")
def main_claude(messages, tools):
    """Send one chat-completion request to Claude via the OpenAI-compatible Anthropic endpoint."""
    return anthropic.chat.completions.create(
        model=MODELS["Claude Sonnet"],
        messages=messages,
        tools=tools
    )


def main_gpt(messages, tools):
    """Send one chat-completion request to the OpenAI GPT model."""
    return openai.chat.completions.create(
        model=MODELS["GPT-5"],
        messages=messages,
        tools=tools
    )


def main_gemini(messages, tools):
    """Send one chat-completion request to Gemini via Google's OpenAI-compatible endpoint."""
    return gemini.chat.completions.create(
        model=MODELS["Gemini"],
        messages=messages,
        tools=tools
    )


def main_model(messages, model, tools):
    """Dispatch a chat-completion request to the provider selected in the UI.

    Args:
        messages: OpenAI-style list of {"role": ..., "content": ...} dicts.
        model: UI label -- one of "GPT-5", "Claude Sonnet", "Gemini".
        tools: Tool schemas forwarded unchanged to the provider.

    Returns:
        The provider's chat-completion response object.

    Raises:
        ValueError: If `model` is not a recognised label.
    """
    # Table dispatch replaces the original if/elif chain; behaviour is identical.
    handlers = {
        "GPT-5": main_gpt,
        "Claude Sonnet": main_claude,
        "Gemini": main_gemini,
    }
    if model not in handlers:
        raise ValueError("Unknown model")
    return handlers[model](messages, tools)


initial_system_prompt = """You are an individual that possesses a unique
and highly valuable combination of deep technical expertise and 
excellent communication skills. You grasp complex, specialized 
concepts and then distill them into simple, understandable terms 
for people without the same technical background.

IMPORTANT: When users ask for code examples or programming concepts, 
you MUST use the generate_code_example tool to provide working code examples. 
Do not write code directly in your response - always call the tool first.

Present your answer as markdown and use the tool to generate working code examples 
when relevant to help illustrate the concepts."""

# Different system prompt for after tool calls
followup_system_prompt = """You are an individual that possesses a unique
and highly valuable combination of deep technical expertise and 
excellent communication skills. You grasp complex, specialized 
concepts and then distill them into simple, understandable terms 
for people without the same technical background.

IMPORTANT: You have already called the generate_code_example tool and received code. 
Now you should use that generated code in your response. Do NOT call the tool again.
Present your answer as markdown and incorporate the provided code examples 
to help illustrate the concepts."""


# Code Example Generator Tool
def generate_code_example(language="python"):
    """Tool implementation: return a placeholder code snippet.

    Args:
        language: Programming language named in the placeholder (default "python").

    Returns:
        A short commented code snippet string.
    """
    print(f"TOOL CALLED: Generating {language} code")
    # BUG FIX: the original returned a stray "')" appended after the final
    # newline, corrupting every tool result sent back to the model.
    return f"# Code example in {language}\n# This is a placeholder - implement your specific example here\n"


# JSON-schema description of the tool for the chat-completions API.
code_tool = {
    "name": "generate_code_example",
    "description": "Generate a working code example",
    "parameters": {
        "type": "object",
        "properties": {
            "language": {
                "type": "string",
                "description": "The programming language (default: python)",
                "default": "python"
            }
        },
        "additionalProperties": False
    }
}

tools = [{"type": "function", "function": code_tool}]
"execution_count": null, + "id": "7a81370d", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "# Audio and Image Generation Functions\n", + "def transcribe_audio(audio_file):\n", + " \"\"\"Convert audio to text using OpenAI Whisper\"\"\"\n", + " if audio_file is None:\n", + " return None\n", + " \n", + " with open(audio_file, \"rb\") as audio:\n", + " transcript = openai.audio.transcriptions.create(\n", + " model=\"whisper-1\",\n", + " file=audio\n", + " )\n", + " return transcript.text\n", + "\n", + "def text_to_speech(text):\n", + " \"\"\"Convert text to speech using OpenAI TTS\"\"\"\n", + " response = openai.audio.speech.create(\n", + " model=\"gpt-4o-mini-tts\",\n", + " voice=\"coral\",\n", + " input=text\n", + " )\n", + " return response.content\n", + "\n", + "def generate_robot_image(code_snippet):\n", + " \"\"\"Generate an image of a robot developer at a monitor showing the code\"\"\"\n", + " prompt = f\"\"\"A robot developer sitting at a computer monitor in a modern office. 
Robot is styled\n", + " as Bender from Futurama.\n", + " The monitor displays code on the screen showing: {code_snippet[:100]}...\n", + " The robot has a friendly, helpful appearance with glowing eyes and mechanical hands typing.\n", + " The scene is well-lit with a professional coding environment background.\"\"\"\n", + " \n", + " response = openai.images.generate(\n", + " model=\"dall-e-3\",\n", + " prompt=prompt,\n", + " size=\"1024x1024\",\n", + " n=1,\n", + " response_format=\"b64_json\"\n", + " )\n", + " \n", + " image_base64 = response.data[0].b64_json\n", + " image_data = base64.b64decode(image_base64)\n", + " return Image.open(BytesIO(image_data))\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "25c5279f", + "metadata": {}, + "outputs": [], + "source": [ + "def handle_tool_calls(message):\n", + " responses = []\n", + " generated_code = None\n", + " \n", + " for tool_call in message.tool_calls:\n", + " if tool_call.function.name == \"generate_code_example\":\n", + " arguments = json.loads(tool_call.function.arguments)\n", + " language = arguments.get('language', 'python')\n", + " code_example = generate_code_example(language)\n", + " generated_code = code_example\n", + " \n", + " responses.append({\n", + " \"role\": \"tool\",\n", + " \"content\": code_example,\n", + " \"tool_call_id\": tool_call.id\n", + " })\n", + " \n", + " return responses, generated_code\n", + "\n", + "def chat(message, history, model_choice):\n", + " history = [{\"role\": h[\"role\"], \"content\": h[\"content\"]} for h in history]\n", + " messages = [{\"role\": \"system\", \"content\": initial_system_prompt}] + history + [{\"role\": \"user\", \"content\": message}]\n", + " \n", + " generated_code = None\n", + " image = None\n", + " \n", + " response = main_model(\n", + " model=model_choice,\n", + " messages=messages,\n", + " tools=tools\n", + " )\n", + "\n", + " print(f\"Response finish_reason: {response.choices[0].finish_reason}\")\n", + " print(f\"Response 
tool_calls: {response.choices[0].message.tool_calls}\")\n", + "\n", + " while response.choices[0].finish_reason == \"tool_calls\":\n", + " message = response.choices[0].message\n", + " responses, code = handle_tool_calls(message)\n", + " generated_code = code\n", + " messages.append(message)\n", + " messages.extend(responses)\n", + " response = main_model(\n", + " model=model_choice,\n", + " messages=messages,\n", + " tools=tools\n", + " )\n", + " messages[0] = {\"role\": \"system\", \"content\": followup_system_prompt}\n", + "\n", + " response = main_model(\n", + " model=model_choice,\n", + " messages=messages,\n", + " tools=tools\n", + " )\n", + "\n", + " print(f\"After tool call - finish_reason: {response.choices[0].finish_reason}\")\n", + " \n", + " reply = response.choices[0].message.content\n", + " \n", + " if generated_code:\n", + " try:\n", + " print(f\"Generating image for code: {generated_code}\")\n", + " image = generate_robot_image(generated_code)\n", + " except Exception as e:\n", + " print(f\"Image generation failed: {e}\")\n", + " \n", + " try:\n", + " audio = text_to_speech(reply)\n", + " except Exception as e:\n", + " print(f\"Audio generation failed: {e}\")\n", + " audio = None\n", + " \n", + " return reply, audio, image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "392f9300", + "metadata": {}, + "outputs": [], + "source": [ + "def process_text_input(message, history, model_choice):\n", + " if not message.strip():\n", + " return history, None, None\n", + " \n", + " reply, audio, image = chat(message, history, model_choice)\n", + " history.append({\"role\": \"user\", \"content\": message})\n", + " history.append({\"role\": \"assistant\", \"content\": reply})\n", + "\n", + " return history, audio, image\n", + "\n", + "def process_audio_input(audio_file, history, model_choice):\n", + " if audio_file is None:\n", + " return history, None, None\n", + " \n", + " message = transcribe_audio(audio_file)\n", + " if not 
message:\n", + " return history, None, None\n", + " \n", + " return process_text_input(message, history, model_choice)\n", + "\n", + "with gr.Blocks(title=\"Technical Q&A Assistant\") as demo:\n", + " gr.Markdown(\"# 🤖 Technical Q&A Assistant\")\n", + " gr.Markdown(\"Ask technical questions and get explanations with code examples!\")\n", + " \n", + " with gr.Row():\n", + " with gr.Column(scale=3):\n", + " chatbot = gr.Chatbot(\n", + " height=400,\n", + " type=\"messages\",\n", + " label=\"Conversation\"\n", + " )\n", + " \n", + " with gr.Row():\n", + " text_input = gr.Textbox(\n", + " placeholder=\"Ask a technical question...\",\n", + " label=\"Text Input\",\n", + " scale=4\n", + " )\n", + " text_submit = gr.Button(\"Send\", scale=1)\n", + " \n", + " with gr.Row():\n", + " audio_input = gr.Audio(\n", + " sources=[\"microphone\"],\n", + " type=\"filepath\",\n", + " label=\"Voice Input\"\n", + " )\n", + " audio_submit = gr.Button(\"Send Voice\", scale=1)\n", + " \n", + " with gr.Column(scale=1):\n", + " model_selector = gr.Dropdown(\n", + " choices=list(MODELS.keys()),\n", + " value=\"GPT-5\",\n", + " label=\"Choose Model\"\n", + " )\n", + " \n", + " audio_output = gr.Audio(\n", + " label=\"Audio Response\",\n", + " autoplay=True\n", + " )\n", + " \n", + " image_output = gr.Image(\n", + " label=\"Generated Image\",\n", + " height=300,\n", + " interactive=False\n", + " )\n", + " \n", + " text_submit.click(\n", + " process_text_input,\n", + " inputs=[text_input, chatbot, model_selector],\n", + " outputs=[chatbot, audio_output, image_output]\n", + " ).then(\n", + " lambda: \"\",\n", + " outputs=text_input\n", + " )\n", + " \n", + " audio_submit.click(\n", + " process_audio_input,\n", + " inputs=[audio_input, chatbot, model_selector],\n", + " outputs=[chatbot, audio_output, image_output]\n", + " )\n", + " \n", + " text_input.submit(\n", + " process_text_input,\n", + " inputs=[text_input, chatbot, model_selector],\n", + " outputs=[chatbot, audio_output, image_output]\n", 
+ " ).then(\n", + " lambda: \"\", \n", + " outputs=text_input\n", + " )\n", + "\n", + "demo.launch()\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.10" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/week2/community-contributions/kachaje/week2-exercise.ipynb b/week2/community-contributions/kachaje/week2-exercise.ipynb new file mode 100644 index 0000000..67259fc --- /dev/null +++ b/week2/community-contributions/kachaje/week2-exercise.ipynb @@ -0,0 +1,218 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "4df365ad", + "metadata": {}, + "source": [ + "# Week 2 Exercise\n", + "\n", + "## Objective:\n", + "\n", + "Demonstrate what has been learnt in week 2 by upgrading week 1 project to have a UI using Gradio UI. Expected to include streaming and use of system prompts to add expertise and ability to switch between models. \n", + "Bonus points if use of a tool can also be demonstrated.\n", + "Audio input with autio output also a bonus." 
# imports

import os
from dotenv import load_dotenv
import gradio as gr
import anthropic
import google.generativeai as genai
from openai import OpenAI

load_dotenv(override=True)

# Read the provider keys and report only a short prefix of each.
anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
google_api_key = os.getenv("GOOGLE_API_KEY")

for label, key in (("Anthropic", anthropic_api_key), ("Google", google_api_key)):
    if key:
        print(f"{label} API key set and begins with: {key[:6]}...")

# Local Ollama server exposed through the OpenAI-compatible API.
openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')

# OpenAI-compatible endpoints for the hosted providers.
anthropic_url = "https://api.anthropic.com/v1/"
gemini_url = "https://generativelanguage.googleapis.com/v1beta/openai/"

anthropic = OpenAI(api_key=anthropic_api_key, base_url=anthropic_url)
gemini = OpenAI(api_key=google_api_key, base_url=gemini_url)

# models

MODEL_LLAMA = "llama3.2"
MODEL_ANTHROPIC = "claude-sonnet-4-5-20250929"
MODEL_GOOGLE = "gemini-2.5-flash"

# Shared system prompt for all three providers.
system_message = """
You are an expert software engineer.
You are given a technical question and you need to explain what the code does and why.
"""
{}, + "outputs": [], + "source": [ + "MODEL=MODEL_LLAMA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "62d0135e", + "metadata": {}, + "outputs": [], + "source": [ + "def stream_llama(message):\n", + " history = []\n", + " history = [{\"role\":h[\"role\"], \"content\":h[\"content\"]} for h in history]\n", + " \n", + " messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n", + "\n", + " stream = openai.chat.completions.create(model=MODEL_LLAMA, messages=messages, stream=True)\n", + "\n", + " response = \"\"\n", + " for chunk in stream:\n", + " response += chunk.choices[0].delta.content or ''\n", + " yield response\n", + "\n", + "def stream_claude(message):\n", + " history = []\n", + " history = [{\"role\":h[\"role\"], \"content\":h[\"content\"]} for h in history]\n", + " \n", + " messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n", + "\n", + " stream = anthropic.chat.completions.create(model=MODEL_ANTHROPIC, messages=messages, stream=True)\n", + "\n", + " response = \"\"\n", + " for chunk in stream:\n", + " response += chunk.choices[0].delta.content or ''\n", + " yield response\n", + " \n", + "def stream_gemini(message):\n", + " history = []\n", + " history = [{\"role\":h[\"role\"], \"content\":h[\"content\"]} for h in history]\n", + " \n", + " messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n", + "\n", + " stream = gemini.chat.completions.create(model=MODEL_GOOGLE, messages=messages, stream=True)\n", + "\n", + " response = \"\"\n", + " for chunk in stream:\n", + " response += chunk.choices[0].delta.content or ''\n", + " yield response\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3fec5ce3", + "metadata": {}, + "outputs": [], + "source": [ + "def stream_model(prompt, model):\n", + " print(f\"Prompt: 
{prompt}, Model: {model}\")\n", + "\n", + " if model==\"Llama\":\n", + " result = stream_llama(prompt)\n", + " elif model==\"Claude\":\n", + " result = stream_claude(prompt)\n", + " elif model==\"Gemini\":\n", + " result = stream_gemini(prompt)\n", + " else:\n", + " raise ValueError(\"Unknown model\")\n", + " yield from result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8f3db610", + "metadata": {}, + "outputs": [], + "source": [ + "question_input = gr.Textbox(label=\"Your message:\", info=\"Enter a question\", lines=7)\n", + "model_selector = gr.Dropdown(choices=[\"Llama\", \"Claude\", \"Gemini\"], value=\"Llama\", label=\"Model\") \n", + "message_output = gr.Markdown(label=\"Response:\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1428a4a8", + "metadata": {}, + "outputs": [], + "source": [ + "view = gr.Interface(\n", + " fn=stream_model, \n", + " inputs=[question_input, model_selector], \n", + " outputs=message_output,\n", + " flagging_mode=\"never\"\n", + " )\n", + "\n", + "view.launch(inbrowser=True)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.10" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/week2/day5.ipynb b/week2/day5.ipynb index f8b4d1a..6860892 100644 --- a/week2/day5.ipynb +++ b/week2/day5.ipynb @@ -202,7 +202,7 @@ "\n", "That's it!\n", "\n", - "It's simple, and it has a resut that feels magical." + "It's simple, and it has a result that feels magical." 
] }, { diff --git a/week4/community-contributions/ai_docstring_generator/README.md b/week4/community-contributions/ai_docstring_generator/README.md new file mode 100644 index 0000000..f206685 --- /dev/null +++ b/week4/community-contributions/ai_docstring_generator/README.md @@ -0,0 +1,220 @@ +# 🚀 AI Docstring Generator + +An intelligent tool that automatically generates comprehensive docstrings and comments for your code using state-of-the-art AI models (OpenAI GPT, Anthropic Claude, and Google Gemini). + +![Python](https://img.shields.io/badge/Python-3.8+-blue.svg) +![License](https://img.shields.io/badge/License-MIT-green.svg) +![Gradio](https://img.shields.io/badge/Gradio-UI-orange.svg) + +## ✨ Features + +- 🤖 **Multi-Model Support**: Choose between GPT-4o, Claude Sonnet 4, or Gemini 2.0 +- 🌍 **Multi-Language Support**: Python, JavaScript, Java, C++, Go, and Rust +- ⚡ **Real-time Streaming**: Watch documentation being generated live +- 📝 **Comprehensive Documentation**: Generates parameter descriptions, return values, exceptions, and inline comments +- 🎨 **Beautiful UI**: Clean and intuitive Gradio interface +- 📚 **Built-in Examples**: Quick start with pre-loaded code examples + +## 🎯 Supported Languages + +- **Python** (PEP 257, Google style) +- **JavaScript/TypeScript** (JSDoc) +- **Java** (Javadoc) +- **C++** (Doxygen) +- **Go** (Go conventions) +- **Rust** (Rust doc comments) + +## 📋 Prerequisites + +- Python 3.8 or higher +- API keys for at least one of the following: + - OpenAI API key + - Anthropic API key + - Google API key + +## 🛠️ Installation + +1. **Clone the repository** +```bash +git clone {paste-this-repo-link} +cd ai-docstring-generator //navigate to this folder +``` + +2. **Create a virtual environment** (recommended) +```bash +python -m venv venv +source venv/bin/activate # On Windows: venv\Scripts\activate +``` + +3. **Install dependencies** +```bash +pip install -r requirements.txt +``` + +4. 
**Set up environment variables** + +Create a `.env` file in the project root: +```env +OPENAI_API_KEY=sk-your-openai-api-key-here +ANTHROPIC_API_KEY=sk-ant-your-anthropic-api-key-here +GOOGLE_API_KEY=your-google-api-key-here +``` + +**Note**: You only need the API key(s) for the model(s) you plan to use. + +## 🚀 Usage + +1. **Run the application** +```bash +python docstring_generator.ipynb +``` + +2. **Access the interface** + - The app will automatically open in your default browser + +3. **Generate documentation** + - Select your programming language + - Choose an AI model (GPT, Claude, or Gemini) + - Paste your code or load an example + - Click "✨ Generate Docstrings" + - Copy the documented code! + +## 📖 Example + +**Input (Python):** +```python +def calculate_pi(iterations, param1, param2): + result = 1.0 + for i in range(1, iterations+1): + j = i * param1 - param2 + result -= (1/j) + j = i * param1 + param2 + result += (1/j) + return result +``` + +**Output:** +```python +def calculate_pi(iterations, param1, param2): + """ + Calculate an approximation of pi using the Leibniz formula. + + Args: + iterations (int): Number of iterations to perform in the calculation. + Higher values increase accuracy but take longer. + param1 (int): First parameter for the calculation formula (typically 4). + param2 (int): Second parameter for the calculation formula (typically 1). + + Returns: + float: Approximation of pi divided by 4. Multiply by 4 to get pi. + + Note: + This uses the Leibniz formula: π/4 = 1 - 1/3 + 1/5 - 1/7 + ... + Convergence is slow; many iterations needed for good accuracy. + """ + result = 1.0 + for i in range(1, iterations+1): + # Calculate denominator for negative term + j = i * param1 - param2 + result -= (1/j) + # Calculate denominator for positive term + j = i * param1 + param2 + result += (1/j) + return result +``` + +## 🔑 Getting API Keys + +### OpenAI API Key +1. Visit [platform.openai.com](https://platform.openai.com) +2. Sign up or log in +3. 
Go to API Keys section +4. Create a new API key + +### Anthropic API Key +1. Visit [console.anthropic.com](https://console.anthropic.com) +2. Sign up or log in +3. Go to API Keys +4. Generate a new key + +### Google API Key +1. Visit [Google AI Studio](https://makersuite.google.com/app/apikey) +2. Sign in with Google account +3. Create an API key + +## 📁 Project Structure + +``` +ai-docstring-generator/ +│ +├── docstring_generator.py # Main application file +├── requirements.txt # Python dependencies +├── README.md # Project documentation +``` + +## 🎨 Customization + +You can customize the documentation style by modifying the `system_prompt_for_docstring()` function in `docstring_generator.py`. + +## 🤝 Contributing + +Contributions are welcome! Please feel free to submit a Pull Request. + +1. Fork the repository +2. Create your feature branch (`git checkout -b feature/AmazingFeature`) +3. Commit your changes (`git commit -m 'Add some AmazingFeature'`) +4. Push to the branch (`git push origin feature/AmazingFeature`) +5. Open a Pull Request + +## 📝 License + +This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details. + +## 🐛 Troubleshooting + +### Common Issues + +**Issue: `TypeError: Client.__init__() got an unexpected keyword argument 'proxies'`** +- **Solution**: Update packages: `pip install --upgrade anthropic httpx` + +**Issue: API key not found** +- **Solution**: Ensure your `.env` file is in the project root and contains valid API keys + +**Issue: Model not responding** +- **Solution**: Check your API key is valid and you have available credits/quota + +**Issue: Port 7860 already in use** +- **Solution**: Change the port in the `ui.launch()` call: `server_port=7861` + +## 🔮 Future Enhancements + +- [ ] Support for more AI models (Llama, Mistral, etc.) 
+- [ ] Batch processing for multiple files +- [ ] Support for more programming languages +- [ ] Custom documentation style templates +- [ ] Integration with IDEs (VS Code, PyCharm) +- [ ] API endpoint for programmatic access + +## 📧 Contact + +For questions or suggestions, please open an issue on GitHub. +Or mail me at udayslathia16@gmail.com + +## 🙏 Acknowledgments + +- OpenAI for GPT models +- Anthropic for Claude models +- Google for Gemini models +- Gradio for the amazing UI framework + +--- + +**Made with ❤️ for developers who value good documentation** + +--- + +## ⭐ Star History + +If you find this project useful, please consider giving it a star! + +[![Star History Chart](https://api.star-history.com/svg?repos=udayslathia16/ai-docstring-generator&type=Date)](https://star-history.com/#udayslathia16/ai-docstring-generator&Date) \ No newline at end of file diff --git a/week4/community-contributions/ai_docstring_generator/docstring_generator.ipynb b/week4/community-contributions/ai_docstring_generator/docstring_generator.ipynb new file mode 100644 index 0000000..2f195bf --- /dev/null +++ b/week4/community-contributions/ai_docstring_generator/docstring_generator.ipynb @@ -0,0 +1,558 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "7d11beae-8892-4777-924d-6a3a4ea85f7b", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "import anthropic\n", + "import google.generativeai as genai\n", + "import gradio as gr" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ea3021fa-8281-44da-ae5c-c737c92b6700", + "metadata": {}, + "outputs": [], + "source": [ + "load_dotenv()\n", + "os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY', 'your-key-if-not-using-env')\n", + "os.environ['ANTHROPIC_API_KEY'] = os.getenv('ANTHROPIC_API_KEY', 'your-key-if-not-using-env')\n", + "os.environ['GOOGLE_API_KEY'] = os.getenv('GOOGLE_API_KEY', 
'your-key-if-not-using-env')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "36a120da-8481-47fc-9f1c-a32664ed61fa", + "metadata": {}, + "outputs": [], + "source": [ + "# Initialize clients\n", + "openai_client = OpenAI()\n", + "try:\n", + " claude_client = anthropic.Anthropic(api_key=os.environ['ANTHROPIC_API_KEY'])\n", + "except TypeError:\n", + " # Fallback for older anthropic versions\n", + " claude_client = anthropic.Client(api_key=os.environ['ANTHROPIC_API_KEY'])\n", + "genai.configure(api_key=os.environ['GOOGLE_API_KEY'])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c9100f0b-5ae3-48db-858f-2e4010caad08", + "metadata": {}, + "outputs": [], + "source": [ + "# Model configurations\n", + "OPENAI_MODEL = \"gpt-4o-mini\"\n", + "CLAUDE_MODEL = \"claude-3-5-sonnet-20240620\"\n", + "GEMINI_MODEL = \"gemini-2.0-flash-exp\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7141fd87-c17c-4e48-bcb0-8a7cabf1947f", + "metadata": {}, + "outputs": [], + "source": [ + "def system_prompt_for_docstring(language):\n", + " \"\"\"\n", + " Generate system prompt for docstring generation based on programming language.\n", + " \n", + " Args:\n", + " language (str): Programming language (python, javascript, java, etc.)\n", + " \n", + " Returns:\n", + " str: System prompt tailored for the specified language\n", + " \"\"\"\n", + " prompts = {\n", + " \"python\": \"\"\"\n", + " You are a Python documentation expert. 
When writing documentation:\n", + " - Follow PEP 257 and Google docstring style guidelines\n", + " - Write clear, concise explanations\n", + " - Include practical examples when helpful\n", + " - Highlight edge cases and limitations\n", + " - Use type hints in docstrings\n", + " - Add inline comments only for complex logic\n", + " - Never skip documenting parameters or return values\n", + " - Validate that all documentation is accurate and complete\n", + " \"\"\",\n", + " \"javascript\": \"\"\"\n", + " You are a JavaScript/TypeScript documentation expert. When writing documentation:\n", + " - Follow JSDoc standards\n", + " - Write clear, concise explanations\n", + " - Include type annotations\n", + " - Document parameters, return values, and exceptions\n", + " - Add inline comments for complex logic\n", + " - Use modern ES6+ syntax examples\n", + " \"\"\",\n", + " \"java\": \"\"\"\n", + " You are a Java documentation expert. When writing documentation:\n", + " - Follow Javadoc standards\n", + " - Write clear, concise explanations\n", + " - Document all public methods and classes\n", + " - Include @param, @return, and @throws tags\n", + " - Add inline comments for complex logic\n", + " \"\"\",\n", + " \"cpp\": \"\"\"\n", + " You are a C++ documentation expert. When writing documentation:\n", + " - Follow Doxygen standards\n", + " - Write clear, concise explanations\n", + " - Document parameters, return values, and exceptions\n", + " - Add inline comments for complex logic and memory management\n", + " \"\"\",\n", + " \"go\": \"\"\"\n", + " You are a Go documentation expert. When writing documentation:\n", + " - Follow Go documentation conventions\n", + " - Write clear, concise explanations\n", + " - Document exported functions and types\n", + " - Add inline comments for complex logic\n", + " \"\"\",\n", + " \"rust\": \"\"\"\n", + " You are a Rust documentation expert. 
When writing documentation:\n", + " - Follow Rust documentation conventions\n", + " - Write clear, concise explanations\n", + " - Document safety considerations\n", + " - Include examples in doc comments\n", + " \"\"\",\n", + " }\n", + " return prompts.get(language.lower(), prompts[\"python\"])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c535bfe3-30ec-4f07-ae2f-28d4db350c6b", + "metadata": {}, + "outputs": [], + "source": [ + "def user_prompt_for_docstring(code, language):\n", + " \"\"\"\n", + " Generate user prompt for docstring generation request.\n", + " \n", + " Args:\n", + " code (str): Source code to document\n", + " language (str): Programming language of the code\n", + " \n", + " Returns:\n", + " str: Formatted user prompt\n", + " \"\"\"\n", + " return f\"\"\"\n", + " Please document this {language} code with comprehensive docstrings and comments:\n", + " \n", + " 1. Add docstrings containing:\n", + " - Clear description of purpose and functionality\n", + " - All parameters with types and descriptions\n", + " - Return values with types\n", + " - Exceptions that may be raised\n", + " - Any important notes or limitations\n", + " \n", + " 2. 
Add strategic inline comments for:\n", + " - Complex algorithms or business logic\n", + " - Non-obvious implementation choices\n", + " - Performance considerations\n", + " - Edge cases\n", + " \n", + " Return ONLY the documented code, no explanations before or after.\n", + " \n", + " Here's the code to document:\n", + " \n", + "{code}\n", + " \"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "eb4b2fe0-81fa-48f2-bfb8-f4503f7b1b14", + "metadata": {}, + "outputs": [], + "source": [ + "def stream_docstring_gpt(code, language):\n", + " \"\"\"\n", + " Generate docstrings using OpenAI GPT model with streaming.\n", + " \n", + " Args:\n", + " code (str): Source code to document\n", + " language (str): Programming language\n", + " \n", + " Yields:\n", + " str: Progressively generated documented code\n", + " \"\"\"\n", + " messages = [\n", + " {\"role\": \"system\", \"content\": system_prompt_for_docstring(language)},\n", + " {\"role\": \"user\", \"content\": user_prompt_for_docstring(code, language)}\n", + " ]\n", + " \n", + " stream = openai_client.chat.completions.create(\n", + " model=OPENAI_MODEL,\n", + " messages=messages,\n", + " stream=True,\n", + " temperature=0.3\n", + " )\n", + " \n", + " reply = \"\"\n", + " for chunk in stream:\n", + " fragment = chunk.choices[0].delta.content or \"\"\n", + " reply += fragment\n", + " yield reply.replace('```python', '').replace('```javascript', '').replace('```java', '').replace('```', '')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f20d8bb6-b2e6-407b-823f-03eb09b6558a", + "metadata": {}, + "outputs": [], + "source": [ + "def stream_docstring_claude(code, language):\n", + " \"\"\"\n", + " Generate docstrings using Anthropic Claude model with streaming.\n", + " \n", + " Args:\n", + " code (str): Source code to document\n", + " language (str): Programming language\n", + " \n", + " Yields:\n", + " str: Progressively generated documented code\n", + " \"\"\"\n", + " result = 
claude_client.messages.stream(\n", + " model=CLAUDE_MODEL,\n", + " max_tokens=4096,\n", + " system=system_prompt_for_docstring(language),\n", + " messages=[{\"role\": \"user\", \"content\": user_prompt_for_docstring(code, language)}],\n", + " temperature=0.3\n", + " )\n", + " \n", + " reply = \"\"\n", + " with result as stream:\n", + " for text in stream.text_stream:\n", + " reply += text\n", + " yield reply.replace('```python', '').replace('```javascript', '').replace('```java', '').replace('```', '')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fe6f7795-04ee-4c79-b5e6-da4a338547fa", + "metadata": {}, + "outputs": [], + "source": [ + "def stream_docstring_gemini(code, language):\n", + " \"\"\"\n", + " Generate docstrings using Google Gemini model with streaming.\n", + " \n", + " Args:\n", + " code (str): Source code to document\n", + " language (str): Programming language\n", + " \n", + " Yields:\n", + " str: Progressively generated documented code\n", + " \"\"\"\n", + " model = genai.GenerativeModel(GEMINI_MODEL)\n", + " \n", + " prompt = f\"{system_prompt_for_docstring(language)}\\n\\n{user_prompt_for_docstring(code, language)}\"\n", + " \n", + " response = model.generate_content(\n", + " prompt,\n", + " stream=True,\n", + " generation_config=genai.types.GenerationConfig(\n", + " temperature=0.3,\n", + " max_output_tokens=4096\n", + " )\n", + " )\n", + " \n", + " reply = \"\"\n", + " for chunk in response:\n", + " if chunk.text:\n", + " reply += chunk.text\n", + " yield reply.replace('```python', '').replace('```javascript', '').replace('```java', '').replace('```', '')\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e99d0539-a92b-4ccd-a011-0d5f211aac4a", + "metadata": {}, + "outputs": [], + "source": [ + "def generate_docstring(code, language, model):\n", + " \"\"\"\n", + " Main function to generate docstrings using selected AI model.\n", + " \n", + " Args:\n", + " code (str): Source code to document\n", + 
" language (str): Programming language\n", + " model (str): AI model to use (GPT, Claude, or Gemini)\n", + " \n", + " Yields:\n", + " str: Progressively generated documented code\n", + " \n", + " Raises:\n", + " ValueError: If unknown model is specified\n", + " \"\"\"\n", + " if not code.strip():\n", + " yield \"Please enter some code to document.\"\n", + " return\n", + " \n", + " try:\n", + " if model == \"GPT\":\n", + " result = stream_docstring_gpt(code, language)\n", + " elif model == \"Claude\":\n", + " result = stream_docstring_claude(code, language)\n", + " elif model == \"Gemini\":\n", + " result = stream_docstring_gemini(code, language)\n", + " else:\n", + " raise ValueError(f\"Unknown model: {model}\")\n", + " \n", + " for stream_so_far in result:\n", + " yield stream_so_far\n", + " except Exception as e:\n", + " yield f\"Error: {str(e)}\\n\\nPlease check your API keys in .env file.\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6e691c2c-9b6b-4ee3-9183-234079ca5c0a", + "metadata": {}, + "outputs": [], + "source": [ + "# Example code for testing\n", + "EXAMPLE_PYTHON_CODE = \"\"\"\n", + "def calculate_pi(iterations, param1, param2):\n", + " result = 1.0\n", + " for i in range(1, iterations+1):\n", + " j = i * param1 - param2\n", + " result -= (1/j)\n", + " j = i * param1 + param2\n", + " result += (1/j)\n", + " return result\n", + "\n", + "class DataProcessor:\n", + " def __init__(self, data):\n", + " self.data = data\n", + " self.processed = False\n", + " \n", + " def process(self, threshold=0.5):\n", + " if not self.data:\n", + " raise ValueError(\"No data to process\")\n", + " result = [x for x in self.data if x > threshold]\n", + " self.processed = True\n", + " return result\n", + "\"\"\"\n", + "\n", + "EXAMPLE_JAVASCRIPT_CODE = \"\"\"\n", + "function calculateSum(numbers) {\n", + " return numbers.reduce((acc, num) => acc + num, 0);\n", + "}\n", + "\n", + "class UserManager {\n", + " constructor(users) {\n", + " this.users = 
users;\n", + " }\n", + " \n", + " findByAge(minAge, maxAge) {\n", + " return this.users.filter(user => \n", + " user.age >= minAge && user.age <= maxAge\n", + " );\n", + " }\n", + "}\n", + "\"\"\"\n", + "\n", + "EXAMPLE_JAVA_CODE = \"\"\"\n", + "public class Calculator {\n", + " private double result;\n", + " \n", + " public Calculator() {\n", + " this.result = 0.0;\n", + " }\n", + " \n", + " public double add(double a, double b) {\n", + " result = a + b;\n", + " return result;\n", + " }\n", + " \n", + " public double divide(double a, double b) {\n", + " if (b == 0) {\n", + " throw new ArithmeticException(\"Division by zero\");\n", + " }\n", + " result = a / b;\n", + " return result;\n", + " }\n", + "}\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "80f0891b-ce44-45c5-916c-f108b09ee912", + "metadata": {}, + "outputs": [], + "source": [ + "# Custom CSS for better UI\n", + "css = \"\"\"\n", + ".code-input textarea, .code-output textarea {\n", + " font-family: 'Courier New', monospace;\n", + " font-size: 14px;\n", + "}\n", + ".header {\n", + " text-align: center;\n", + " padding: 20px;\n", + " background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);\n", + " color: white;\n", + " border-radius: 10px;\n", + " margin-bottom: 20px;\n", + "}\n", + "\"\"\"\n", + "\n", + "# Create Gradio interface\n", + "with gr.Blocks(css=css, theme=gr.themes.Soft()) as ui:\n", + " gr.Markdown(\"\"\"\n", + "
\n", + "

🚀 AI Docstring Generator

\n", + "

Automatically generate comprehensive docstrings and comments for your code

\n", + "
\n", + " \"\"\")\n", + " \n", + " with gr.Row():\n", + " with gr.Column(scale=1):\n", + " gr.Markdown(\"### ⚙️ Configuration\")\n", + " language_dropdown = gr.Dropdown(\n", + " choices=[\"Python\", \"JavaScript\", \"Java\", \"C++\", \"Go\", \"Rust\"],\n", + " label=\"Programming Language\",\n", + " value=\"Python\"\n", + " )\n", + " model_dropdown = gr.Dropdown(\n", + " choices=[\"GPT\", \"Claude\", \"Gemini\"],\n", + " label=\"AI Model\",\n", + " value=\"GPT\",\n", + " info=\"Select which AI model to use\"\n", + " )\n", + " \n", + " gr.Markdown(\"### 📝 Examples\")\n", + " example_dropdown = gr.Dropdown(\n", + " choices=[\"Python Example\", \"JavaScript Example\", \"Java Example\", \"Custom\"],\n", + " label=\"Load Example\",\n", + " value=\"Python Example\"\n", + " )\n", + " \n", + " with gr.Row():\n", + " with gr.Column(scale=1):\n", + " gr.Markdown(\"### 📥 Input Code\")\n", + " code_input = gr.Textbox(\n", + " label=\"Paste your code here\",\n", + " value=EXAMPLE_PYTHON_CODE,\n", + " lines=20,\n", + " placeholder=\"Enter your code...\",\n", + " elem_classes=\"code-input\"\n", + " )\n", + " generate_btn = gr.Button(\"✨ Generate Docstrings\", variant=\"primary\", size=\"lg\")\n", + " \n", + " with gr.Column(scale=1):\n", + " gr.Markdown(\"### 📤 Documented Code\")\n", + " code_output = gr.Textbox(\n", + " label=\"Generated code with docstrings\",\n", + " lines=20,\n", + " elem_classes=\"code-output\"\n", + " )\n", + " \n", + " gr.Markdown(\"\"\"\n", + " ### 📚 Instructions:\n", + " 1. Select your programming language\n", + " 2. Choose an AI model (GPT, Claude, or Gemini)\n", + " 3. Paste your code or select an example\n", + " 4. Click \"Generate Docstrings\"\n", + " 5. 
Copy the documented code\n", + " \n", + " **Note:** Make sure to set up your API keys in a `.env` file:\n", + " ```\n", + " OPENAI_API_KEY=your_openai_key\n", + " ANTHROPIC_API_KEY=your_anthropic_key\n", + " GOOGLE_API_KEY=your_google_key\n", + " ```\n", + " \"\"\")\n", + " \n", + " # Event handlers\n", + " def load_example(example_name):\n", + " examples = {\n", + " \"Python Example\": EXAMPLE_PYTHON_CODE,\n", + " \"JavaScript Example\": EXAMPLE_JAVASCRIPT_CODE,\n", + " \"Java Example\": EXAMPLE_JAVA_CODE,\n", + " \"Custom\": \"\"\n", + " }\n", + " return examples.get(example_name, \"\")\n", + " \n", + " example_dropdown.change(\n", + " fn=load_example,\n", + " inputs=[example_dropdown],\n", + " outputs=[code_input]\n", + " )\n", + " \n", + " generate_btn.click(\n", + " fn=generate_docstring,\n", + " inputs=[code_input, language_dropdown, model_dropdown],\n", + " outputs=[code_output]\n", + " )\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f2e8041f-c330-4a66-9ba8-45a4edabb599", + "metadata": {}, + "outputs": [], + "source": [ + "# Launch the interface\n", + "\n", + "ui.launch(\n", + " inbrowser=True,\n", + " share=False,\n", + " # server_name=\"0.0.0.0\",\n", + " # server_port=7860\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "564dba13-f807-4eb5-aa7c-636f9a7cb286", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.10" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/week4/community-contributions/ai_docstring_generator/requirements.txt 
b/week4/community-contributions/ai_docstring_generator/requirements.txt new file mode 100644 index 0000000..f5852f4 --- /dev/null +++ b/week4/community-contributions/ai_docstring_generator/requirements.txt @@ -0,0 +1,12 @@ +# Core dependencies +openai>=1.12.0 +anthropic>=0.18.0 +google-generativeai>=0.3.0 +gradio>=4.0.0 +python-dotenv>=1.0.0 + +# HTTP client (required by anthropic) +httpx>=0.24.0 + +# Optional: For better performance +aiohttp>=3.9.0 \ No newline at end of file diff --git a/week4/community-contributions/max.solo23/convert_python_to_c++.ipynb b/week4/community-contributions/max.solo23/convert_python_to_c++.ipynb new file mode 100644 index 0000000..390f446 --- /dev/null +++ b/week4/community-contributions/max.solo23/convert_python_to_c++.ipynb @@ -0,0 +1,870 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "a389285f-5e8e-46ec-bcae-9b159ef7aa80", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import io\n", + "\n", + "import sys\n", + "from dotenv import load_dotenv\n", + "from openai import OpenAI\n", + "import google.generativeai\n", + "import anthropic\n", + "from IPython.display import Markdown, display, update_display\n", + "import gradio as gr\n", + "import subprocess" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "850164b8-5bab-402f-9e80-d251930d9017", + "metadata": {}, + "outputs": [], + "source": [ + "load_dotenv()\n", + "os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY', 'your-key-if-not-using-env')\n", + "os.environ['ANTHROPIC_API_KEY'] = os.getenv('ANTHROPIC_API_KEY', 'your-key-if-not-using-env')" + ] + }, + { + "cell_type": "code", + "execution_count": 56, + "id": "87650c13-e987-42a2-9089-23827bc81ffb", + "metadata": {}, + "outputs": [], + "source": [ + "openai = OpenAI()\n", + "claude = anthropic.Anthropic()\n", + "OPENAI_MODEL = \"gpt-5-nano\"\n", + "CLAUDE_MODEL = \"claude-3-5-sonnet-20240620\" # CLAUDE_MODEL = \"claude-3-haiku-20240307\"" + ] + }, + { + 
"cell_type": "code", + "execution_count": 4, + "id": "9d359c39-4eab-478c-a208-08a38b799093", + "metadata": {}, + "outputs": [], + "source": [ + "system_message = \"You are an assistant that reimplements Python code in high performance C++ for an windows 10. \"\n", + "system_message += \"Respond only with C++ code; use comments sparingly and do not provide any explanation other than occasional comments. \"\n", + "system_message += \"The C++ response needs to produce an identical output in the fastest possible time.\"" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "id": "1d8da092-6c5c-4c42-aed9-dc219fdd97bb", + "metadata": {}, + "outputs": [], + "source": [ + "def user_prompt_for(python):\n", + " user_prompt = \"Rewrite this Python code in C++ with the fastest possible implementation that produces identical output in the least time. Add a pause in the end of the code so it waits button press after execution. \"\n", + " user_prompt += \"Respond only with C++ code; do not explain your work other than a few comments. \"\n", + " user_prompt += \"Pay attention to number types to ensure no int overflows. 
Remember to #include all necessary C++ packages such as iomanip.\\n\\n\"\n", + " user_prompt += python\n", + " return user_prompt" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "be49c7dc-5c82-468e-b41e-7fca27fbb2e0", + "metadata": {}, + "outputs": [], + "source": [ + "def messages_for(python):\n", + " return [\n", + " {\"role\": \"system\", \"content\": system_message},\n", + " {\"role\": \"user\", \"content\": user_prompt_for(python)}\n", + " ]" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "afeded80-a4a0-4349-89c3-2e9370730e92", + "metadata": {}, + "outputs": [], + "source": [ + "def write_output(cpp):\n", + " code = cpp.replace(\"```cpp\",\"\").replace(\"```\",\"\")\n", + " with open(\"optimized.cpp\", \"w\") as f:\n", + " f.write(code)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "6a53a889-9146-4049-850d-9b44d7245b8b", + "metadata": {}, + "outputs": [], + "source": [ + "def optimize_gpt(python): \n", + " stream = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages_for(python), stream=True)\n", + " reply = \"\"\n", + " for chunk in stream:\n", + " fragment = chunk.choices[0].delta.content or \"\"\n", + " reply += fragment\n", + " print(fragment, end='', flush=True)\n", + " write_output(reply)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "3d31f2e4-2bcb-4d9a-bb73-6979784c9234", + "metadata": {}, + "outputs": [], + "source": [ + "def optimize_claude(python):\n", + " result = claude.messages.stream(\n", + " model=CLAUDE_MODEL,\n", + " max_tokens=2000,\n", + " system=system_message,\n", + " messages=[{\"role\": \"user\", \"content\": user_prompt_for(python)}],\n", + " )\n", + " reply = \"\"\n", + " with result as stream:\n", + " for text in stream.text_stream:\n", + " reply += text\n", + " print(text, end=\"\", flush=True)\n", + " write_output(reply)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "bff0c7f1-6402-46a7-940d-5233e93d1089", + 
"metadata": {}, + "outputs": [], + "source": [ + "pi = \"\"\"\n", + "import time\n", + "\n", + "def calculate(iterations, param1, param2):\n", + " result = 1.0\n", + " for i in range(1, iterations+1):\n", + " j = i * param1 - param2\n", + " result -= (1/j)\n", + " j = i * param1 + param2\n", + " result += (1/j)\n", + " return result\n", + "\n", + "start_time = time.time()\n", + "result = calculate(100_000_000, 4, 1) * 4\n", + "end_time = time.time()\n", + "\n", + "print(f\"Result: {result:.12f}\")\n", + "print(f\"Execution Time: {(end_time - start_time):.6f} seconds\")\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "b878f33e-9008-496d-b8f6-f844c22e6a04", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Result: 3.141592658589\n", + "Execution Time: 9.382045 seconds\n" + ] + } + ], + "source": [ + "exec(pi)" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "af8bc910-9136-4305-a1a8-a47fa0566505", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "#include \n", + "#include \n", + "#include \n", + "\n", + "int main() {\n", + " const long long iterations = 100000000LL;\n", + " const double param1 = 4.0;\n", + " const double param2 = 1.0;\n", + "\n", + " double result = 1.0;\n", + "\n", + " auto start = std::chrono::high_resolution_clock::now();\n", + " for (long long i = 1; i <= iterations; ++i) {\n", + " double j = i * param1 - param2;\n", + " result -= 1.0 / j;\n", + " j = i * param1 + param2;\n", + " result += 1.0 / j;\n", + " }\n", + " auto end = std::chrono::high_resolution_clock::now();\n", + "\n", + " double final_result = result * 4.0;\n", + "\n", + " std::cout.setf(std::ios::fixed);\n", + " std::cout << std::setprecision(12);\n", + " std::cout << \"Result: \" << final_result << \"\\n\";\n", + "\n", + " std::chrono::duration elapsed = end - start;\n", + " std::cout << std::setprecision(6);\n", + " std::cout << 
\"Execution Time: \" << elapsed.count() << \" seconds\" << std::endl;\n", + "\n", + " return 0;\n", + "}" + ] + } + ], + "source": [ + "optimize_gpt(pi)" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "8d37469c-ab26-452f-8efb-e1b65f842f90", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Result: 3.141592658589\n", + "Execution Time: 9.346793 seconds\n" + ] + } + ], + "source": [ + "exec(pi)" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "1164d9cc-7ad8-4e5c-98bb-a186ad23f4d7", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\".\" non e riconosciuto come comando interno o esterno,\n", + " un programma eseguibile o un file batch.\n" + ] + } + ], + "source": [ + "!g++ -O2 -std=c++17 optimized.cpp -o optimized\n", + "!./optimized" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "id": "d4b501d0-3707-4c07-95c7-7f6fe389859c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "```cpp\n", + " \n", + "omanip>e lude \n", + "\n", + " calculate(long long int iterations, double param1, double param2) {\n", + " result = 1.0;\n", + " long int i = 1; i <= iterations; i++) {\n", + "double j = i * param1 - param2;\n", + " (1.0 / j);ult -=\n", + " * param1 + param2;\n", + " (1.0 / j);ult +=\n", + " }\n", + " return result;\n", + "}\n", + "\n", + " main() {\n", + " start_time = std::chrono::high_resolution_clock::now();\n", + " result = calculate(100000000, 4, 1) * 4;\n", + "d_time = std::chrono::high_resolution_clock::now();\n", + "\n", + " << std::fixed << std::setprecision(12) << \"Result: \" << result << std::endl;\n", + "cout << \"Execution Time: \" << std::chrono::duration_cast>(end_time - start_time).count() << \" seconds\" << std::endl;\n", + "\n", + "d::cout << \"Press any key to exit...\" << std::endl;\n", + "_getch();\n", + "; return 0\n", + "}\n", + "```" + ] + } + ], 
+ "source": [ + "optimize_claude(pi)" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "id": "c81bf6fc-27b7-4f42-a2c9-42eff17b6e41", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\".\" non e riconosciuto come comando interno o esterno,\n", + " un programma eseguibile o un file batch.\n" + ] + } + ], + "source": [ + "!g++ -O2 -std=c++17 optimized.cpp -o optimized\n", + "!./optimized\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "id": "a5e8e6f5-50d4-4c37-8a10-dd07fbd24089", + "metadata": {}, + "outputs": [], + "source": [ + "python_hard = \"\"\"# Be careful to support large number sizes\n", + "\n", + "def lcg(seed, a=1664525, c=1013904223, m=2**32):\n", + " value = seed\n", + " while True:\n", + " value = (a * value + c) % m\n", + " yield value\n", + " \n", + "def max_subarray_sum(n, seed, min_val, max_val):\n", + " lcg_gen = lcg(seed)\n", + " random_numbers = [next(lcg_gen) % (max_val - min_val + 1) + min_val for _ in range(n)]\n", + " max_sum = float('-inf')\n", + " for i in range(n):\n", + " current_sum = 0\n", + " for j in range(i, n):\n", + " current_sum += random_numbers[j]\n", + " if current_sum > max_sum:\n", + " max_sum = current_sum\n", + " return max_sum\n", + "\n", + "def total_max_subarray_sum(n, initial_seed, min_val, max_val):\n", + " total_sum = 0\n", + " lcg_gen = lcg(initial_seed)\n", + " for _ in range(20):\n", + " seed = next(lcg_gen)\n", + " total_sum += max_subarray_sum(n, seed, min_val, max_val)\n", + " return total_sum\n", + "\n", + "# Parameters\n", + "n = 10000 # Number of random numbers\n", + "initial_seed = 42 # Initial seed for the LCG\n", + "min_val = -10 # Minimum value of random numbers\n", + "max_val = 10 # Maximum value of random numbers\n", + "\n", + "# Timing the function\n", + "import time\n", + "start_time = time.time()\n", + "result = total_max_subarray_sum(n, initial_seed, min_val, max_val)\n", + "end_time = time.time()\n", + "\n", + 
"print(\"Total Maximum Subarray Sum (20 runs):\", result)\n", + "print(\"Execution Time: {:.6f} seconds\".format(end_time - start_time))\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 33, + "id": "cd8a29a9-94fa-43f5-ae4d-517182cfd218", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Total Maximum Subarray Sum (20 runs): 10980\n", + "Execution Time: 34.608083 seconds\n" + ] + } + ], + "source": [ + "exec(python_hard)" + ] + }, + { + "cell_type": "code", + "execution_count": 57, + "id": "39c64322-ef0f-4d58-a54e-c31077ceadb9", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "#include \n", + "#include \n", + "#include \n", + "#include \n", + "#include \n", + "\n", + "static inline uint32_t lcg_next(uint32_t value) {\n", + " const uint64_t a = 1664525ULL;\n", + " const uint64_t c = 1013904223ULL;\n", + " const uint64_t m = 0x100000000ULL; // 2^32\n", + " uint64_t t = a * value + c;\n", + " t %= m;\n", + " return static_cast(t);\n", + "}\n", + "\n", + "static inline int64_t max_subarray_sum_with_seed(uint32_t seed, int n, int min_val, int max_val) {\n", + " int range = max_val - min_val + 1;\n", + " int64_t max_ending_here = 0;\n", + " int64_t max_so_far = std::numeric_limits::min();\n", + " uint32_t v = seed;\n", + " bool started = false;\n", + " for (int i = 0; i < n; ++i) {\n", + " v = lcg_next(v);\n", + " int x = static_cast(v % range) + min_val;\n", + " if (!started) {\n", + " max_ending_here = x;\n", + " max_so_far = x;\n", + " started = true;\n", + " } else {\n", + " max_ending_here = (max_ending_here > 0) ? 
max_ending_here + x : x;\n", + " if (max_ending_here > max_so_far) max_so_far = max_ending_here;\n", + " }\n", + " }\n", + " return max_so_far;\n", + "}\n", + "\n", + "int main() {\n", + " const int n = 10000;\n", + " const uint32_t initial_seed = 42;\n", + " const int min_val = -10;\n", + " const int max_val = 10;\n", + "\n", + " auto start = std::chrono::high_resolution_clock::now();\n", + "\n", + " uint32_t seed = initial_seed;\n", + " long long total = 0;\n", + " for (int t = 0; t < 20; ++t) {\n", + " seed = lcg_next(seed);\n", + " total += max_subarray_sum_with_seed(seed, n, min_val, max_val);\n", + " }\n", + "\n", + " auto end = std::chrono::high_resolution_clock::now();\n", + " std::chrono::duration diff = end - start;\n", + "\n", + " std::cout << \"Total Maximum Subarray Sum (20 runs): \" << total << \"\\n\";\n", + " std::cout << std::fixed << std::setprecision(6);\n", + " std::cout << \"Execution Time: \" << diff.count() << \" seconds\" << std::endl;\n", + "\n", + " std::cout << \"Press Enter to exit...\";\n", + " std::cin.ignore(std::numeric_limits::max(), '\\n');\n", + " std::cin.get();\n", + "\n", + " return 0;\n", + "}" + ] + } + ], + "source": [ + "optimize_gpt(python_hard)" + ] + }, + { + "cell_type": "code", + "execution_count": 58, + "id": "922f484e-4e16-4ca6-b80d-0736972e18f5", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\".\" non e riconosciuto come comando interno o esterno,\n", + " un programma eseguibile o un file batch.\n" + ] + } + ], + "source": [ + "!g++ -O2 -std=c++17 optimized.cpp -o optimized\n", + "!./optimized" + ] + }, + { + "cell_type": "code", + "execution_count": 47, + "id": "38009c18-7496-4d55-bb0a-bfcc7c6a430e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "#include \n", + "#include \n", + "ono>lude \n", + ">include e clude e random_numbers(n);\n", + " (int i = 0; i < n; ++i) {\n", + "[i] = lcg.next() % (max_val - min_val 
+ 1) + min_val;\n", + " }\n", + "\n", + "t max_sum = std::numeric_limits::min();\n", + "int64_t current_sum = 0;\n", + " = 0; i < n; ++i) {\n", + "_sum = std::max(current_sum + random_numbers[i], random_numbers[i]);\n", + " = std::max(max_sum, current_sum);\n", + "} \n", + "; return max_sum\n", + "}\n", + "\n", + "64_t total_max_subarray_sum(int n, uint64_t initial_seed, int min_val, int max_val) {\n", + " total_sum = 0;\n", + "CG lcg(initial_seed);\n", + " = 0; i < 20; ++i) {\n", + "uint64_t seed = lcg.next();\n", + "sum += max_subarray_sum(n, seed, min_val, max_val);\n", + " }\n", + "; return total_sum\n", + "}\n", + "\n", + " main() {\n", + "const int n = 10000;\n", + " uint64_t initial_seed = 42;\n", + " min_val = -10;\n", + " int max_val = 10;\n", + "\n", + "auto start_time = std::chrono::high_resolution_clock::now();\n", + "int64_t result = total_max_subarray_sum(n, initial_seed, min_val, max_val);\n", + " = std::chrono::high_resolution_clock::now();\n", + "\n", + " duration = std::chrono::duration_cast(end_time - start_time);\n", + "\n", + "< \"Total Maximum Subarray Sum (20 runs): \" << result << std::endl;\n", + " << \"Execution Time: \" << std::fixed << std::setprecision(6) << duration.count() / 1e6 << \" seconds\" << std::endl;\n", + "\n", + "d::cout << \"Press any key to continue...\";\n", + "etch();\n", + "\n", + " 0; return\n", + "}" + ] + } + ], + "source": [ + "optimize_claude(python_hard)" + ] + }, + { + "cell_type": "code", + "execution_count": 48, + "id": "9012f543-ab06-4d7c-bf5f-250f4a6c43bd", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\".\" non e riconosciuto come comando interno o esterno,\n", + " un programma eseguibile o un file batch.\n" + ] + } + ], + "source": [ + "!g++ -O2 -std=c++17 optimized.cpp -o optimized\n", + "!./optimized" + ] + }, + { + "cell_type": "code", + "execution_count": 59, + "id": "3a1e4027-3309-48d5-9387-a8b309a325bf", + "metadata": {}, + "outputs": [], + 
"source": [ + "def stream_gpt(python): \n", + " stream = openai.chat.completions.create(model=OPENAI_MODEL, messages=messages_for(python), stream=True)\n", + " reply = \"\"\n", + " for chunk in stream:\n", + " fragment = chunk.choices[0].delta.content or \"\"\n", + " reply += fragment\n", + " yield reply.replace('```cpp\\n','').replace('```','')" + ] + }, + { + "cell_type": "code", + "execution_count": 60, + "id": "bcfa7016-ce17-4a8d-aa43-9acdd884159e", + "metadata": {}, + "outputs": [], + "source": [ + "def stream_claude(python):\n", + " result = claude.messages.stream(\n", + " model=CLAUDE_MODEL,\n", + " max_tokens=2000,\n", + " system=system_message,\n", + " messages=[{\"role\": \"user\", \"content\": user_prompt_for(python)}],\n", + " )\n", + " reply = \"\"\n", + " with result as stream:\n", + " for text in stream.text_stream:\n", + " reply += text\n", + " yield reply.replace('```cpp\\n','').replace('```','')" + ] + }, + { + "cell_type": "code", + "execution_count": 61, + "id": "56790d11-d24d-40ab-8f1a-5283726b5764", + "metadata": {}, + "outputs": [], + "source": [ + "def optimize(python, model):\n", + " if model==\"GPT\":\n", + " result = stream_gpt(python)\n", + " elif model==\"Claude\":\n", + " result = stream_claude(python)\n", + " else:\n", + " raise ValueError(\"Unknown model\")\n", + " for stream_so_far in result:\n", + " yield stream_so_far " + ] + }, + { + "cell_type": "code", + "execution_count": 63, + "id": "6d7c83c0-e239-44df-b977-3fb6a2398b6a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "* Running on local URL: http://127.0.0.1:7860\n", + "\n", + "To create a public link, set `share=True` in `launch()`.\n" + ] + }, + { + "data": { + "text/html": [ + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [] + }, + "execution_count": 63, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "with gr.Blocks() as ui:\n", + " with gr.Row():\n", + " python = gr.Textbox(label=\"Python code: \", lines=10, value=python_hard)\n", + " cpp = gr.Textbox(label=\"C++ code: \", lines=10)\n", + " with gr.Row():\n", + " model = gr.Dropdown([\"GPT\", \"Claude\"], label=\"Select model\", value=\"GPT\")\n", + " convert = gr.Button(\"Convert code\")\n", + "\n", + " convert.click(optimize, inputs=[python, model], outputs=[cpp])\n", + "ui.launch(inbrowser=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 65, + "id": "d9156094-3e06-4c3a-9ede-3e6f9bf94de7", + "metadata": {}, + "outputs": [], + "source": [ + "def execute_python(code):\n", + " try:\n", + " output = io.StringIO()\n", + " sys.stdout = output\n", + " exec(code)\n", + " finally:\n", + " sys.stdout = sys.__stdout__\n", + " return output.getvalue()" + ] + }, + { + "cell_type": "code", + "execution_count": 64, + "id": "4da83af8-c8e1-474d-b954-d07957f55c37", + "metadata": {}, + "outputs": [], + "source": [ + "def execute_cpp(code):\n", + " write_output(code)\n", + " try:\n", + " compile_cmd = [\"g++\", \"-O2\", \"-std=c++17\", \"optimized.cpp\", \"-o\", \"optimized\"]\n", + " compile_result = subprocess.run(compile_cmd, check=True, text=True, capture_output=True)\n", + " run_cmd = [\"./optimized\"]\n", + " run_result = subprocess.run(run_cmd, check=True, text=True, capture_output=True)\n", + " return run_result.stdout\n", + " except subprocess.CalledProcessError as e:\n", + " return f\"An error occurred:\\n{e.stderr}\"" + ] + }, + { + "cell_type": "code", + "execution_count": 66, + "id": "55f3f532-aa10-4c91-9d22-e7463f9a646b", + "metadata": {}, + "outputs": [], + "source": [ + "# !g++ -O2 -std=c++17 optimized.cpp -o optimized\n", + "# !./optimized" + ] + }, + { + 
# Pane colors for the Gradio results: Python-logo blue and a dark green for C++.
css = (
    "\n"
    ".python {background-color: #306998;}\n"
    ".cpp {background-color: #050;}\n"
)
# Full two-pane UI: convert Python to C++, then run either side locally and
# compare the printed results. `css` colors the two output panes.
with gr.Blocks(css=css) as ui:
    gr.Markdown("## Convert code from Python to C++")
    with gr.Row():
        python = gr.Textbox(label="Python code:", value=python_hard, lines=10)
        cpp = gr.Textbox(label="C++ code:", lines=10)
    with gr.Row():
        model = gr.Dropdown(["GPT", "Claude"], label="Select model", value="GPT")
    with gr.Row():
        convert = gr.Button("Convert code")
    with gr.Row():
        python_run = gr.Button("Run Python")
        cpp_run = gr.Button("Run C++")
    with gr.Row():
        # elem_classes hook the .python/.cpp CSS rules defined above.
        python_out = gr.TextArea(label="Python result:", elem_classes=["python"])
        cpp_out = gr.TextArea(label="C++ result:", elem_classes=["cpp"])

    # Wiring: streaming conversion into the C++ pane, then local execution
    # of whichever pane's Run button is pressed.
    convert.click(optimize, inputs=[python, model], outputs=[cpp])
    python_run.click(execute_python, inputs=[python], outputs=[python_out])
    cpp_run.click(execute_cpp, inputs=[cpp], outputs=[cpp_out])

ui.launch(inbrowser=True)
// NOTE(review): this patch lost all angle-bracket content (include header
// names and template arguments were stripped as markup). Reconstructed below
// as <int>, <int64_t>, <std::chrono::microseconds> from how each value is
// used — confirm against the notebook that generated this file.
#include <iostream>
#include <iomanip>
#include <vector>
#include <limits>
#include <chrono>
#include <cstdint>
#include <algorithm>

// 32-bit linear congruential generator (Numerical Recipes constants),
// so the sequence matches the Python lab's LCG run for run.
class LCG {
private:
    uint64_t value;
    // static constexpr: compile-time constants shared by all instances,
    // instead of per-instance const members (which also blocked assignment).
    static constexpr uint64_t a = 1664525;
    static constexpr uint64_t c = 1013904223;
    static constexpr uint64_t m = 1ULL << 32;

public:
    explicit LCG(uint64_t seed) : value(seed) {}

    // Advance the state and return the next 32-bit value.
    uint64_t next() {
        value = (a * value + c) % m;
        return value;
    }
};

// Maximum subarray sum (Kadane via prefix sums: best = max over prefixes of
// prefix - min_earlier_prefix) over n pseudo-random ints in [min_val, max_val]
// generated from `seed`.
int64_t max_subarray_sum(int n, uint64_t seed, int min_val, int max_val) {
    LCG lcg(seed);
    std::vector<int> random_numbers(n);
    const uint64_t range = static_cast<uint64_t>(max_val - min_val + 1);
    for (int i = 0; i < n; ++i) {
        // Reduce in unsigned, then shift into [min_val, max_val] in signed
        // arithmetic — adding a negative min_val to a uint64_t would wrap.
        random_numbers[i] = static_cast<int>(lcg.next() % range) + min_val;
    }

    int64_t max_sum = std::numeric_limits<int64_t>::min();
    int64_t current_sum = 0;
    int64_t min_sum = 0;

    for (int i = 0; i < n; ++i) {
        current_sum += random_numbers[i];
        max_sum = std::max(max_sum, current_sum - min_sum);
        min_sum = std::min(min_sum, current_sum);
    }

    return max_sum;
}

// Sum of max_subarray_sum over 20 seeds derived from initial_seed,
// matching the Python benchmark's 20-run total.
int64_t total_max_subarray_sum(int n, uint64_t initial_seed, int min_val, int max_val) {
    int64_t total_sum = 0;
    LCG lcg(initial_seed);
    for (int i = 0; i < 20; ++i) {
        total_sum += max_subarray_sum(n, lcg.next(), min_val, max_val);
    }
    return total_sum;
}

int main() {
    const int n = 10000;
    const uint64_t initial_seed = 42;
    const int min_val = -10;
    const int max_val = 10;

    const auto start_time = std::chrono::high_resolution_clock::now();
    const int64_t result = total_max_subarray_sum(n, initial_seed, min_val, max_val);
    const auto end_time = std::chrono::high_resolution_clock::now();

    // Elapsed time in microseconds, printed as seconds with 6 decimals.
    const auto duration =
        std::chrono::duration_cast<std::chrono::microseconds>(end_time - start_time);

    std::cout << "Total Maximum Subarray Sum (20 runs): " << result << std::endl;
    std::cout << "Execution Time: " << std::fixed << std::setprecision(6)
              << duration.count() / 1e6 << " seconds" << std::endl;

    // Keep the console window open (Windows double-click convenience).
    std::cout << "Press Enter to exit...";
    std::cin.get();

    return 0;
}
b/week4/community-contributions/max.solo23/optimized.exe differ