diff --git a/week1/community-contributions/Invoke LLM model from AWS Bedrock.ipynb b/week1/community-contributions/Invoke LLM model from AWS Bedrock.ipynb new file mode 100644 index 0000000..6948253 --- /dev/null +++ b/week1/community-contributions/Invoke LLM model from AWS Bedrock.ipynb @@ -0,0 +1,167 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 4, + "id": "9138adfe-71b0-4db2-a08f-dd9e472fdd63", + "metadata": {}, + "outputs": [], + "source": [ + "import pandas as pd\n", + "import numpy as np\n", + "import boto3" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "15d71dd6-cc03-485e-8a34-7a33ed5dee0e", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "markdown", + "id": "1358921d-173b-4d5d-828c-b6c3726a5eb3", + "metadata": {}, + "source": [ + "#### Connect to bedrock models" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "b3827087-182f-48be-8b59-b2741f8ded44", + "metadata": {}, + "outputs": [], + "source": [ + "import json" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "94c11534-6847-4e4a-b8e4-8066e0cc6aca", + "metadata": {}, + "outputs": [], + "source": [ + "# Use the Conversation API to send a text message to Amazon Nova.\n", + "\n", + "import boto3\n", + "from botocore.exceptions import ClientError\n", + "\n", + "# Create a Bedrock Runtime client in the AWS Region you want to use.\n", + "client = boto3.client(\"bedrock-runtime\", region_name=\"us-east-1\")\n", + "\n", + "# Set the model ID, e.g., Amazon Nova Lite.\n", + "model_id = \"amazon.nova-lite-v1:0\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9a8ad65f-abaa-475c-892c-2e2b4e668f5d", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "ac20bb00-e93f-4a95-a1de-dd2688bce591", + "metadata": {}, + "outputs": [], + "source": [ + "# Start a conversation with the user message.\n", + "user_message = \"\"\"\n", + "List the best parks to see in London with number of google ratings and value ie. 4.5 out of 5 etc. \n", + "Give number of ratings and give output in table form\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "a29f0055-48c4-4f25-b33f-cde1eaf755c5", + "metadata": {}, + "outputs": [], + "source": [ + "conversation = [\n", + " {\n", + " \"role\": \"user\",\n", + " \"content\": [{\"text\": user_message}],\n", + " }\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0e68b2d5-4d43-4b80-8574-d3c847b33661", + "metadata": {}, + "outputs": [], + "source": [ + "try:\n", + " # Send the message to the model, using a basic inference configuration.\n", + " response = client.converse(\n", + " modelId=model_id,\n", + " messages=conversation,\n", + " inferenceConfig={\"maxTokens\": 512, \"temperature\": 0.5, \"topP\": 0.9},\n", + " )\n", + "\n", + " # Extract and print the response text.\n", + " response_text = response[\"output\"][\"message\"][\"content\"][0][\"text\"]\n", + " print(response_text)\n", + "\n", + "except (ClientError, Exception) as e:\n", + " print(f\"ERROR: Can't invoke '{model_id}'. 
Reason: {e}\")\n", + " exit(1)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8ed16ee7-3f09-4780-8dfc-d1c5f3cffdbe", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7f8c7a18-0907-430d-bfe7-86ecb8933bfd", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2183994b-cde5-45b0-b18b-37be3277d73b", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/week1/community-contributions/day2 EXERCISE_priithvi.ipynb b/week1/community-contributions/day2 EXERCISE_priithvi.ipynb new file mode 100644 index 0000000..3542cb2 --- /dev/null +++ b/week1/community-contributions/day2 EXERCISE_priithvi.ipynb @@ -0,0 +1,1029 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "d15d8294-3328-4e07-ad16-8a03e9bbfdb9", + "metadata": {}, + "source": [ + "# Welcome to your first assignment!\n", + "\n", + "Instructions are below. Please give this a try, and look in the solutions folder if you get stuck (or feel free to ask me!)" + ] + }, + { + "cell_type": "markdown", + "id": "ada885d9-4d42-4d9b-97f0-74fbbbfe93a9", + "metadata": {}, + "source": [ + "\n", + " \n", + " \n", + " \n", + " \n", + "
\n", + "**Just before we get to the assignment --**\n", + "\n", + "I thought I'd take a second to point you at this page of useful resources for the course. This includes links to all the slides.  \n", + "https://edwarddonner.com/2024/11/13/llm-engineering-resources/  \n", + "Please keep this bookmarked, and I'll continue to add more useful links there over time.\n", + "
" + ] + }, + { + "cell_type": "markdown", + "id": "6e9fa1fc-eac5-4d1d-9be4-541b3f2b3458", + "metadata": {}, + "source": [ + "# HOMEWORK EXERCISE ASSIGNMENT\n", + "\n", + "Upgrade the day 1 project to summarize a webpage to use an Open Source model running locally via Ollama rather than OpenAI\n", + "\n", + "You'll be able to use this technique for all subsequent projects if you'd prefer not to use paid APIs.\n", + "\n", + "**Benefits:**\n", + "1. No API charges - open-source\n", + "2. Data doesn't leave your box\n", + "\n", + "**Disadvantages:**\n", + "1. Significantly less power than Frontier Model\n", + "\n", + "## Recap on installation of Ollama\n", + "\n", + "Simply visit [ollama.com](https://ollama.com) and install!\n", + "\n", + "Once complete, the ollama server should already be running locally. \n", + "If you visit: \n", + "[http://localhost:11434/](http://localhost:11434/)\n", + "\n", + "You should see the message `Ollama is running`. \n", + "\n", + "If not, bring up a new Terminal (Mac) or Powershell (Windows) and enter `ollama serve` \n", + "And in another Terminal (Mac) or Powershell (Windows), enter `ollama pull llama3.2` \n", + "Then try [http://localhost:11434/](http://localhost:11434/) again.\n", + "\n", + "If Ollama is slow on your machine, try using `llama3.2:1b` as an alternative. Run `ollama pull llama3.2:1b` from a Terminal or Powershell, and change the code below from `MODEL = \"llama3.2\"` to `MODEL = \"llama3.2:1b\"`" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "4e2a9393-7767-488e-a8bf-27c12dca35bd", + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "\n", + "import requests\n", + "from bs4 import BeautifulSoup\n", + "from IPython.display import Markdown, display" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "29ddd15d-a3c5-4f4e-a678-873f56162724", + "metadata": {}, + "outputs": [], + "source": [ + "# Constants\n", + "\n", + "OLLAMA_API = \"http://localhost:11434/api/chat\"\n", + "HEADERS = {\"Content-Type\": \"application/json\"}\n", + "MODEL = \"llama3.2\"\n", + "MODEL = \"tinyllama:latest\"" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "dac0a679-599c-441f-9bf2-ddc73d35b940", + "metadata": {}, + "outputs": [], + "source": [ + "# Create a messages list using the same format that we used for OpenAI\n", + "\n", + "messages = [\n", + " {\"role\": \"user\", \"content\": \"Summarize this website: cnn.com\"}\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "id": "7bb9c624-14f0-4945-a719-8ddb64f66f47", + "metadata": {}, + "outputs": [], + "source": [ + "payload = {\n", + " \"model\": MODEL,\n", + " \"messages\": messages,\n", + " \"stream\": False\n", + " }" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "id": "479ff514-e8bd-4985-a572-2ea28bb4fa40", + "metadata": {}, + "outputs": [], + "source": [ + "# Let's just make sure the model is loaded\n", + "\n", + "# !ollama pull llama3.2" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "id": "42b9f644-522d-4e05-a691-56e7658c0ea9", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "This website provides up-to-date and comprehensive news, analysis, and opinion articles from CNN on a variety of topics, including politics, business, entertainment, sports, and international affairs. 
It offers a personalized feed based on your interests and browsing history to provide you with relevant content tailored to your preferences.\n" + ] + } + ], + "source": [ + "# If this doesn't work for any reason, try the 2 versions in the following cells\n", + "# And double check the instructions in the 'Recap on installation of Ollama' at the top of this lab\n", + "# And if none of that works - contact me!\n", + "\n", + "response = requests.post(OLLAMA_API, json=payload, headers=HEADERS)\n", + "print(response.json()['message']['content'])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3d042059-333e-4723-a48c-8a1a71fd6aab", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "99d0c1a2-52b2-4cb3-9d67-6d5931847f8c", + "metadata": {}, + "outputs": [], + "source": [ + "response = requests.post(OLLAMA_API, json = payload, headers = HEADERS)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "e8bd28b9-545b-4806-8a25-93b0208b7939", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The capital of France is Paris, and the current name was adopted in 1968. The previous names include:\n", + "\n", + "1. Paris (1947-1968)\n", + "2. Ville de Paris (1802-1803)\n", + "3. Ville d'Ay (1799-1801)\n", + "4. Ville nouvelle d'Ay (1755-1799)\n", + "\n", + "The capital of France is named after the city of Paris, and the city has had various names throughout history.\n" + ] + } + ], + "source": [ + "print(response.json()['message']['content'])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d1687b5e-b6d3-4922-9f56-7d9a07b01874", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 28, + "id": "478c89e6-490f-4e67-835d-eaadeb9baeef", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'C:\\\\Users\\\\Prithvi\\\\Downloads\\\\Practice\\\\Udemy - LLM Engineering\\\\llm_engineering\\\\week1'" + ] + }, + "execution_count": 28, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import os\n", + "os.getcwd()" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "id": "b2ade1d2-bf4d-431e-84b3-2ecbfee6db98", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 32, + "id": "261e27a3-12dd-4258-b198-3212009ffe17", + "metadata": {}, + "outputs": [], + "source": [ + "from openai import OpenAI" + ] + }, + { + "cell_type": "code", + "execution_count": 37, + "id": "78027c03-9382-459b-b76f-9712f09f4c92", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 38, + "id": "c7f416b7-6d19-4b83-a343-3b2ed8e32eec", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 63, + "id": "e5ab1fcb-6e62-4805-9a95-b21f748c2294", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 64, + "id": "aade8a9d-e7b3-4985-9087-cb32d1ae816e", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 65, + "id": "463b0bdf-72f8-433e-b0ed-c0172b6ecedd", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 66, + "id": "ab9af96a-b039-4c9b-ac25-edc4da0236ea", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 67, + "id": 
"a1f46f0b-f406-4929-acf0-65d9c0bc084f", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "ChatCompletion(id='chatcmpl-79', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Website {url}: Designed for Helping AI Assistants!\\n\\nIntroducing a user-friendly platform designed to assist Artificial Intelligence (AI) AIs with their daily tasks. If you are an AI assistant seeking a convenient and stress-free solution, look no further than the latest addition to the growing array of AI service platforms on the market today! Features include chatbots, virtual assistants, automated customer support, and more to help you stay at the forefront of your industry while minimizing the amount of time and effort required of you. So what are you waiting for? Join thousands of other users in experiencing the next level of efficiency and productivity with the newest AI service platform – all because it's been tailored specifically to help you optimize the way you work! Discover the secret to working smarter, not harder today! Visit www.aiassistant.com for all the details in one place!\", refusal=None, role='assistant', annotations=None, audio=None, function_call=None, tool_calls=None))], created=1752444823, model='tinyllama', object='chat.completion', service_tier=None, system_fingerprint='fp_ollama', usage=CompletionUsage(completion_tokens=206, prompt_tokens=42, total_tokens=248, completion_tokens_details=None, prompt_tokens_details=None))" + ] + }, + "execution_count": 67, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "response" + ] + }, + { + "cell_type": "code", + "execution_count": 68, + "id": "db231836-3df6-4784-8cbb-64dc1c4c8d76", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Website {url}: Designed for Helping AI Assistants!\n", + "\n", + "Introducing a user-friendly platform designed to assist Artificial Intelligence (AI) AIs with their daily tasks. If you are an AI assistant seeking a convenient and stress-free solution, look no further than the latest addition to the growing array of AI service platforms on the market today! Features include chatbots, virtual assistants, automated customer support, and more to help you stay at the forefront of your industry while minimizing the amount of time and effort required of you. So what are you waiting for? Join thousands of other users in experiencing the next level of efficiency and productivity with the newest AI service platform – all because it's been tailored specifically to help you optimize the way you work! Discover the secret to working smarter, not harder today! 
Visit www.aiassistant.com for all the details in one place!\n" + ] + } + ], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 75, + "id": "1f4468db-5c15-49a9-956d-acf7fda236a3", + "metadata": {}, + "outputs": [], + "source": [ + "def summarizewebsite(url):\n", + " api_key = os.getenv('OPENAI_API_KEY')\n", + " model = 'tinyllama'\n", + " openai = OpenAI(base_url=\"http://localhost:11434/v1\", api_key=\"ollama\")\n", + " message = f\"Summarize the website {url}\"\n", + " messages = [\n", + " {\"role\": \"user\",\n", + " \"content\": message}\n", + " ]\n", + " response = openai.chat.completions.create(model= model, messages=messages)\n", + " \n", + " print(response.choices[0].message.content)" + ] + }, + { + "cell_type": "code", + "execution_count": 107, + "id": "4dd35627-474f-409c-8c12-75859a3e5fa9", + "metadata": {}, + "outputs": [], + "source": [ + "url = \"cnn.com\"\n", + "url = \"https://en.wikipedia.org/wiki/Newton%27s_method\"" + ] + }, + { + "cell_type": "code", + "execution_count": 108, + "id": "6b91489f-ef8d-4c7f-b00d-e2d5be531167", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "A highly versatile and powerful algorithm called Newton's method, commonly used in scientific computing and engineering, is the subject of interest on this article available online at wikiPedia. The method helps you solve complex numerical problems while ensuring smooth convergence to a steady-state solution, with accuracy dependent on certain criteria. By using Newton's method, scientists and engineers can tackle problems in fields as diverse as astrophysics, mechanical engineering, and economics, among others.\n" + ] + } + ], + "source": [ + "summarizewebsite(url)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f703192a-950d-4c1a-b857-6198b52d2d56", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 82, + "id": "01e532b9-2989-4b12-92fb-5f8e73fb455d", + "metadata": {}, + "outputs": [], + "source": [ + "def top5words(url):\n", + " api_key = os.getenv('OPENAI_API_KEY')\n", + " model = 'tinyllama'\n", + " openai = OpenAI(base_url=\"http://localhost:11434/v1\", api_key=\"ollama\")\n", + " message = f\"Give top recurring words in the website {url}\"\n", + " messages = [\n", + " {\"role\": \"user\",\n", + " \"content\": message}\n", + " ]\n", + " response = openai.chat.completions.create(model= model, messages=messages)\n", + " \n", + " print(response.choices[0].message.content)" + ] + }, + { + "cell_type": "code", + "execution_count": 83, + "id": "9012765a-53be-431f-9f66-78f8769f637c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "1. The\n", + "2. CNN, 3. News, 4. U.S., 5. Headline, 6. Politics, 7. Channel, 8. World, 9. Newsroom, 10. Usa, 11. Story, 12. Online, 13. Coverage, 14. Topics, 15. Head\n", + "6. 
CNN\n" + ] + } + ], + "source": [ + "top5words(url)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e74dcd65-c3ae-4ca9-8db9-a2bcfda540e2", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "480edd39-71e4-442e-909e-491ad0bdd08c", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "20871627-fb66-478f-afdc-9aa479536caa", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "markdown", + "id": "6a021f13-d6a1-4b96-8e18-4eae49d876fe", + "metadata": {}, + "source": [ + "# Introducing the ollama package\n", + "\n", + "And now we'll do the same thing, but using the elegant ollama python package instead of a direct HTTP call.\n", + "\n", + "Under the hood, it's making the same call as above to the ollama server running at localhost:11434" + ] + }, + { + "cell_type": "code", + "execution_count": 84, + "id": "7745b9c4-57dc-4867-9180-61fa5db55eb8", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Introducing {website}, your all-inclusive source for everything you need to know about {target}. Whether you're a beginner or a seasoned pro, {website} will offer an unrivaled level of expertise in your chosen field. From comprehensive product reviews and detailed tutorials to the latest industry news and expert insights into {target}, you can expect nothing less than the best in quality content and exceptional value when it comes to learning about {target}. So whether you're looking for an easy-to-follow DIY tutorial or a deep dive into the inner workings of {target}, {website} is your one-stop-shop for all things related to {target}.\n" + ] + } + ], + "source": [ + "import ollama\n", + "\n", + "response = ollama.chat(model=MODEL, messages=messages)\n", + "print(response['message']['content'])" + ] + }, + { + "cell_type": "code", + "execution_count": 85, + "id": "89f8d84d-faad-4e58-89b1-bb5b1cea6007", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'tinyllama:latest'" + ] + }, + "execution_count": 85, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "MODEL" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "34ec140e-454c-4057-86b3-198ab4fdea10", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 91, + "id": "9ad2b34f-8019-4dfd-8d38-2f7acf302e91", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 92, + "id": "0ac0ce16-5bf1-4887-9be7-26e09d017f63", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 97, + "id": "d35d04f8-afa2-4956-98d6-39038f3a79d0", + "metadata": {}, + "outputs": [], + "source": [ + "url = \"https://en.wikipedia.org/wiki/Machine_learning\"" + ] + }, + { + "cell_type": "code", + "execution_count": 98, + "id": "5343a8a5-6517-4fde-9dbc-8d218acdc5a0", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[{'role': 'user', 'content': 'Summarize the website {url}'}]" + ] + }, + "execution_count": 98, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "messages" + ] + }, + { + "cell_type": "code", + "execution_count": 99, + "id": "e5eeb6c3-f8e6-4668-8fb4-d9005f8cfc53", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + 
"https://en.wikipedia.org/wiki/Machine_learning\n" + ] + } + ], + "source": [ + "print(f\"{url}\")" + ] + }, + { + "cell_type": "code", + "execution_count": 94, + "id": "ee7972b1-a42e-4e79-b022-95c3fb311bed", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "ChatResponse(model='tinyllama:latest', created_at='2025-07-13T22:31:56.2219165Z', done=True, done_reason='stop', total_duration=6339979700, load_duration=38978400, prompt_eval_count=42, prompt_eval_duration=48457800, eval_count=88, eval_duration=6248768400, message=Message(role='assistant', content=\"Introducing {company} - your reliable AI assistant! With a range of useful features and benefits, {company} is here to help you tackle even the toughest tasks with ease. From automating repetitive tasks to providing personalized recommendations, our AI technology is designed to improve your productivity and overall workflow. So what are you waiting for? Start implementing {company}'s innovative solutions today!\", thinking=None, images=None, tool_calls=None))" + ] + }, + "execution_count": 94, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 95, + "id": "c463309d-6a7c-45fa-9ae8-dcadf00fdc6f", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\"Introducing {company} - your reliable AI assistant! With a range of useful features and benefits, {company} is here to help you tackle even the toughest tasks with ease. From automating repetitive tasks to providing personalized recommendations, our AI technology is designed to improve your productivity and overall workflow. So what are you waiting for? Start implementing {company}'s innovative solutions today!\"" + ] + }, + "execution_count": 95, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7438585a-ed00-475e-88e4-a81b93e50516", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 109, + "id": "84bad4ab-e076-4f42-ae56-725165e2ff0f", + "metadata": {}, + "outputs": [], + "source": [ + "def sumwebsite(url):\n", + " message = f\"Summarize the website {url}\"\n", + " messages = [\n", + " {\"role\": \"user\", \"content\": message}\n", + " ]\n", + " response = ollama.chat(model = MODEL, messages= messages)\n", + " print(response.message.content)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8d1268a2-a57f-4b80-a6b8-f4329ff8144b", + "metadata": {}, + "outputs": [], + "source": [ + "url = \"https://en.wikipedia.org/wiki/Newton%27s_method\"\n", + "url: \"https://stockanalysis.com/stocks/smci/\"" + ] + }, + { + "cell_type": "code", + "execution_count": 117, + "id": "8948bd49-7211-4f43-88fc-88070a564d6c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The website https://en.wikipedia.org/wiki/Newton's_method is a comprehensive and detailed information hub that covers all aspects of this well-known scientific method, including its origin, history, significance, applications in various fields, and recent developments. It provides in-depth analysis and explanations of the key steps involved in the method, as well as the limitations and potential implications for future research. 
Overall, the website offers a user-friendly and visually appealing resource that is easy to navigate and useful for students, professionals, and anyone interested in learning more about Newton's method.\n" + ] + } + ], + "source": [ + "sumwebsite(url)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6d13fbdb-1951-4495-b901-cc494fc5d3ef", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "00a67f36-0511-4709-9f77-2c8d23d31d10", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d796e21e-e34d-409e-a86f-9ad5e85874ad", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "markdown", + "id": "a4704e10-f5fb-4c15-a935-f046c06fb13d", + "metadata": {}, + "source": [ + "## Alternative approach - using OpenAI python library to connect to Ollama" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "23057e00-b6fc-4678-93a9-6b31cb704bff", + "metadata": {}, + "outputs": [], + "source": [ + "# There's actually an alternative approach that some people might prefer\n", + "# You can use the OpenAI client python library to call Ollama:\n", + "\n", + "from openai import OpenAI\n", + "ollama_via_openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')\n", + "\n", + "response = ollama_via_openai.chat.completions.create(\n", + " model=MODEL,\n", + " messages=messages\n", + ")\n", + "\n", + "print(response.choices[0].message.content)" + ] + }, + { + "cell_type": "markdown", + "id": "9f9e22da-b891-41f6-9ac9-bd0c0a5f4f44", + "metadata": {}, + "source": [ + "## Are you confused about why that works?\n", + "\n", + "It seems strange, right? We just used OpenAI code to call Ollama?? What's going on?!\n", + "\n", + "Here's the scoop:\n", + "\n", + "The python class `OpenAI` is simply code written by OpenAI engineers that makes calls over the internet to an endpoint. \n", + "\n", + "When you call `openai.chat.completions.create()`, this python code just makes a web request to the following url: \"https://api.openai.com/v1/chat/completions\"\n", + "\n", + "Code like this is known as a \"client library\" - it's just wrapper code that runs on your machine to make web requests. The actual power of GPT is running on OpenAI's cloud behind this API, not on your computer!\n", + "\n", + "OpenAI was so popular, that lots of other AI providers provided identical web endpoints, so you could use the same approach.\n", + "\n", + "So Ollama has an endpoint running on your local box at http://localhost:11434/v1/chat/completions \n", + "And in week 2 we'll discover that lots of other providers do this too, including Gemini and DeepSeek.\n", + "\n", + "And then the team at OpenAI had a great idea: they can extend their client library so you can specify a different 'base url', and use their library to call any compatible API.\n", + "\n", + "That's it!\n", + "\n", + "So when you say: `ollama_via_openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')` \n", + "Then this will make the same endpoint calls, but to Ollama instead of OpenAI." + ] + }, + { + "cell_type": "markdown", + "id": "bc7d1de3-e2ac-46ff-a302-3b4ba38c4c90", + "metadata": {}, + "source": [ + "## Also trying the amazing reasoning model DeepSeek\n", + "\n", + "Here we use the version of DeepSeek-reasoner that's been distilled to 1.5B. 
\n", + "This is actually a 1.5B variant of Qwen that has been fine-tuned using synethic data generated by Deepseek R1.\n", + "\n", + "Other sizes of DeepSeek are [here](https://ollama.com/library/deepseek-r1) all the way up to the full 671B parameter version, which would use up 404GB of your drive and is far too large for most!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cf9eb44e-fe5b-47aa-b719-0bb63669ab3d", + "metadata": {}, + "outputs": [], + "source": [ + "!ollama pull deepseek-r1:1.5b" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "800a66be-f9dc-421c-8dc9-03860ad2368c", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1d3d554b-e00d-4c08-9300-45e073950a76", + "metadata": {}, + "outputs": [], + "source": [ + "# This may take a few minutes to run! You should then see a fascinating \"thinking\" trace inside tags, followed by some decent definitions\n", + "\n", + "response = ollama_via_openai.chat.completions.create(\n", + " model=\"deepseek-r1:1.5b\",\n", + " messages=[{\"role\": \"user\", \"content\": \"Please give definitions of some core concepts behind LLMs: a neural network, attention and the transformer\"}]\n", + ")\n", + "\n", + "print(response.choices[0].message.content)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "34e63aec-beb8-4c4b-b9a0-6740312ac620", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 119, + "id": "c9f2cfec-4b77-47b8-a7c5-58374e6cda37", + "metadata": {}, + "outputs": [], + "source": [ + "def summarizewebsite(url, model):\n", + " api_key = os.getenv('OPENAI_API_KEY')\n", + " # model = 'tinyllama'\n", + " # model = 'tinyllama'\n", + " openai = OpenAI(base_url=\"http://localhost:11434/v1\", api_key=\"ollama\")\n", + " message = f\"Summarize the website {url}\"\n", + " messages = [\n", + " {\"role\": \"user\",\n", + " \"content\": message}\n", + " ]\n", + " response = openai.chat.completions.create(model= model, messages=messages)\n", + " \n", + " print(response.choices[0].message.content)" + ] + }, + { + "cell_type": "code", + "execution_count": 121, + "id": "f14b5cc5-e93b-4251-a548-b740f56bd060", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Scikit-Learn is a Python library for machine learning and data engineering, specifically designed for dealing with datasets. It offers a powerful SGD iteration algorithm in its \"LinearKernelRegressor\" class, enabling quicker and more efficient learning of linear models. 
The page https://scikit-learn.org/stable/modules/sgd.html provides users with details about how to use this algorithm for regression tasks.\n" + ] + } + ], + "source": [ + "summarizewebsite(url = \"https://scikit-learn.org/stable/modules/sgd.html\", model = \"tinyllama\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7fd6146c-b648-404a-bfb6-3d11e5855a05", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f79eaae1-3ad8-40e8-bc53-ee3a6fed68e8", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "markdown", + "id": "1622d9bb-5c68-4d4e-9ca4-b492c751f898", + "metadata": {}, + "source": [ + "# NOW the exercise for you\n", + "\n", + "Take the code from day1 and incorporate it here, to build a website summarizer that uses Llama 3.2 running locally instead of OpenAI; use either of the above approaches." + ] + }, + { + "cell_type": "code", + "execution_count": 139, + "id": "be2507cf-eb7b-47ad-bae0-a279cbb8e724", + "metadata": {}, + "outputs": [], + "source": [ + "from IPython.display import display, HTML, Image, Markdown" + ] + }, + { + "cell_type": "code", + "execution_count": 140, + "id": "b3d22349-b754-4f68-9148-5bbfc48b26a9", + "metadata": {}, + "outputs": [], + "source": [ + "def extracthtml(url):\n", + " response = requests.get(url)\n", + " if response.status_code == 200:\n", + " html = response.text\n", + " soup = BeautifulSoup(html, \"html.parser\")\n", + " for i in soup(['script', 'style']):\n", + " i.decompose()\n", + " text = soup.get_text()\n", + " # Clean up: remove leading/trailing whitespace on each line\n", + " lines = (line.strip() for line in text.splitlines())\n", + " # Remove empty lines and join into final text\n", + " human_readable_text = '\\n'.join(line for line in lines if line)\n", + " else:\n", + " print(f\"Failed to parse. 
Status code: {response.status_code}\")\n", + " return human_readable_text" + ] + }, + { + "cell_type": "code", + "execution_count": 141, + "id": "47af576a-d7d2-4a4e-bb7c-1638fdacfd31", + "metadata": {}, + "outputs": [], + "source": [ + "url = \"https://timesofindia.com\"" + ] + }, + { + "cell_type": "code", + "execution_count": 142, + "id": "43dbfab6-ae71-46c9-85f3-09f21a712462", + "metadata": {}, + "outputs": [], + "source": [ + "out = extracthtml(url)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1163f739-308e-4098-94b6-a4a3eb89d24b", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9d61d816-e6da-4e86-9184-4b29d3287da2", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 151, + "id": "ec12e1cb-bc1c-4749-8c77-ecfca1d6f096", + "metadata": {}, + "outputs": [], + "source": [ + "url = \"https://scikit-learn.org/stable/modules/sgd.html\"" + ] + }, + { + "cell_type": "code", + "execution_count": 152, + "id": "10d113ed-535b-435a-a5fb-d893025c3e9e", + "metadata": {}, + "outputs": [], + "source": [ + "def sumwebsite(url, MODEL):\n", + " message = f\"Summarize the website {url}\"\n", + " messages = [\n", + " {\"role\": \"user\", \"content\": message}\n", + " ]\n", + " response = ollama.chat(model = MODEL, messages= messages)\n", + " print(response.message.content)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6de38216-6d1c-48c4-877b-86d403f4e0f8", + "metadata": {}, + "outputs": [], + "source": [ + "sumwebsite(url, \"tinyllama\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ff20943b-2f9a-4211-830a-a53f09a57e7b", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "67c861d9-b5a4-4cf1-ae17-139e61e21d76", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +}
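
Note on the final exercise in the second notebook: a locally running model cannot fetch URLs on its own, so sending only the address (as sumwebsite and summarizewebsite do above) leaves the model to invent a summary rather than describe the actual page. A minimal sketch of the day-1 approach the exercise asks for, assuming the ollama package and the same requests/BeautifulSoup extraction used in extracthtml above; the function names, system prompt wording, User-Agent header, and 4,000-character truncation are illustrative choices, not part of the submitted notebooks.

import requests
from bs4 import BeautifulSoup
import ollama

MODEL = "llama3.2"  # or "tinyllama:latest" / "llama3.2:1b" on smaller machines

def extract_text(url):
    # Same idea as extracthtml above: fetch the page and strip scripts/styles.
    response = requests.get(url, headers={"User-Agent": "Mozilla/5.0"})
    response.raise_for_status()
    soup = BeautifulSoup(response.text, "html.parser")
    for tag in soup(["script", "style"]):
        tag.decompose()
    lines = (line.strip() for line in soup.get_text().splitlines())
    return "\n".join(line for line in lines if line)

def summarize(url, model=MODEL):
    # Send the page *contents* to the local model, not just the URL.
    page_text = extract_text(url)[:4000]  # illustrative truncation to keep the prompt small
    messages = [
        {"role": "system", "content": "You summarize website contents in short markdown."},
        {"role": "user", "content": f"Summarize the following page:\n\n{page_text}"},
    ]
    response = ollama.chat(model=model, messages=messages)
    return response["message"]["content"]

print(summarize("https://en.wikipedia.org/wiki/Newton%27s_method"))

The same messages list works unchanged with the alternative approach shown in the notebook, i.e. the OpenAI client pointed at Ollama via OpenAI(base_url="http://localhost:11434/v1", api_key="ollama").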