Merge pull request #569 from Rwothoromo/community-contributions-branch
Rwothoromo - Community contributions branch
This commit is contained in:
484
week1/community-contributions/rwothoromo/day1.ipynb
Normal file
484
week1/community-contributions/rwothoromo/day1.ipynb
Normal file
@@ -0,0 +1,484 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "d15d8294-3328-4e07-ad16-8a03e9bbfdb9",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# How to run a cell\n",
|
||||||
|
"\n",
|
||||||
|
"Press `Shift` + `Return` to run a Cell.\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "4e2a9393-7767-488e-a8bf-27c12dca35bd",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# imports\n",
|
||||||
|
"\n",
|
||||||
|
"import os, requests, time\n",
|
||||||
|
"from dotenv import load_dotenv\n",
|
||||||
|
"from bs4 import BeautifulSoup\n",
|
||||||
|
"from IPython.display import Markdown, display\n",
|
||||||
|
"from openai import OpenAI\n",
|
||||||
|
"\n",
|
||||||
|
"# Load environment variables in a file called .env\n",
|
||||||
|
"load_dotenv(override=True)\n",
|
||||||
|
"api_key = os.getenv('OPENAI_API_KEY')\n",
|
||||||
|
"\n",
|
||||||
|
"# Check the key\n",
|
||||||
|
"if not api_key:\n",
|
||||||
|
" print(\"No API key was found\")\n",
|
||||||
|
"else:\n",
|
||||||
|
" print(\"API key found and looks good so far!\")\n",
|
||||||
|
"\n",
|
||||||
|
"# Instantiate an OpenAI object\n",
|
||||||
|
"openai = OpenAI()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "442fc84b-0815-4f40-99ab-d9a5da6bda91",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Make a test call to a Frontier model (Open AI) to get started:"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "a58394bf-1e45-46af-9bfd-01e24da6f49a",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"message = \"Hello, GPT! Holla back to this space probe!\"\n",
|
||||||
|
"response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=[{\"role\":\"user\", \"content\":message}])\n",
|
||||||
|
"print(response.choices[0].message.content)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "2aa190e5-cb31-456a-96cc-db109919cd78",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Summarization project"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "c5e793b2-6775-426a-a139-4848291d0463",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# Some websites need proper headers when fetching them:\n",
|
||||||
|
"headers = {\n",
|
||||||
|
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
|
||||||
|
"}\n",
|
||||||
|
"\n",
|
||||||
|
"\"\"\"\n",
|
||||||
|
"A class to represent a Webpage\n",
|
||||||
|
"\"\"\"\n",
|
||||||
|
"class Website:\n",
|
||||||
|
"\n",
|
||||||
|
" def __init__(self, url):\n",
|
||||||
|
" \"\"\"\n",
|
||||||
|
" Create this Website object from the given url using the BeautifulSoup library\n",
|
||||||
|
" \"\"\"\n",
|
||||||
|
" self.url = url\n",
|
||||||
|
" response = requests.get(url, headers=headers)\n",
|
||||||
|
" soup = BeautifulSoup(response.content, 'html.parser')\n",
|
||||||
|
" self.title = soup.title.string if soup.title else \"No title found\"\n",
|
||||||
|
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
|
||||||
|
" irrelevant.decompose()\n",
|
||||||
|
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "2ef960cf-6dc2-4cda-afb3-b38be12f4c97",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# Summarize website content\n",
|
||||||
|
"website = Website(\"https://rwothoromo.wordpress.com/\")\n",
|
||||||
|
"# print(website.title, \"\\n\", website.text)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "abdb8417-c5dc-44bc-9bee-2e059d162699",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# A system prompt tells a model like GPT-4o what task they are performing and what tone they should use\n",
|
||||||
|
"# A user prompt is the conversation starter that they should reply to\n",
|
||||||
|
"\n",
|
||||||
|
"system_prompt = \"You are an assistant that analyzes the contents of a given website, \\\n",
|
||||||
|
"and returns a brief summary, ignoring text that might be navigation-related. \\\n",
|
||||||
|
"Respond in markdown.\""
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "f0275b1b-7cfe-4f9d-abfa-7650d378da0c",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# A function that writes a User Prompt that asks for summaries of websites:\n",
|
||||||
|
"\n",
|
||||||
|
"def user_prompt_for(website):\n",
|
||||||
|
" user_prompt = f\"You are looking at a website titled {website.title}\"\n",
|
||||||
|
" user_prompt += \"\\nThe contents of this website is as follows; \\\n",
|
||||||
|
"please provide a short summary of this website in markdown. \\\n",
|
||||||
|
"If it includes news or announcements, then summarize these too.\\n\\n\"\n",
|
||||||
|
" user_prompt += website.text\n",
|
||||||
|
" return user_prompt"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "26448ec4-5c00-4204-baec-7df91d11ff2e",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"print(user_prompt_for(website))"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "f25dcd35-0cd0-4235-9f64-ac37ed9eaaa5",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# The API from OpenAI expects to receive messages in a particular structure. Many of the other APIs share this structure:\n",
|
||||||
|
"messages = [\n",
|
||||||
|
" {\"role\": \"system\", \"content\": \"You are a snarky assistant\"}, # system message\n",
|
||||||
|
" {\"role\": \"user\", \"content\": \"What is 2 + 2?\"}, # user message\n",
|
||||||
|
"]\n",
|
||||||
|
"response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=messages)\n",
|
||||||
|
"print(response.choices[0].message.content)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "0134dfa4-8299-48b5-b444-f2a8c3403c88",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# To build useful messages for GPT-4o-mini\n",
|
||||||
|
"\n",
|
||||||
|
"def messages_for(website):\n",
|
||||||
|
" return [\n",
|
||||||
|
" {\"role\": \"system\", \"content\": system_prompt},\n",
|
||||||
|
" {\"role\": \"user\", \"content\": user_prompt_for(website)}\n",
|
||||||
|
" ]\n",
|
||||||
|
"\n",
|
||||||
|
"messages_for(website)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "905b9919-aba7-45b5-ae65-81b3d1d78e34",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# Call the OpenAI API.\n",
|
||||||
|
"\n",
|
||||||
|
"url = \"https://rwothoromo.wordpress.com/\"\n",
|
||||||
|
"website = Website(url)\n",
|
||||||
|
"\n",
|
||||||
|
"def summarize(website):\n",
|
||||||
|
" response = openai.chat.completions.create(\n",
|
||||||
|
" model = \"gpt-4o-mini\",\n",
|
||||||
|
" messages = messages_for(website)\n",
|
||||||
|
" )\n",
|
||||||
|
" return response.choices[0].message.content"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "05e38d41-dfa4-4b20-9c96-c46ea75d9fb5",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"summarize(website)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "3d926d59-450e-4609-92ba-2d6f244f1342",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# A function to display this nicely in the Jupyter output, using markdown\n",
|
||||||
|
"\n",
|
||||||
|
"summary = summarize(website)\n",
|
||||||
|
"def display_summary(summary):\n",
|
||||||
|
" display(Markdown(summary))"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "3018853a-445f-41ff-9560-d925d1774b2f",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"display_summary(summary)\n",
|
||||||
|
"# display_summary(summarize(Website(\"https://edwarddonner.com\")))\n",
|
||||||
|
"# display_summary(summarize(Website(\"https://cnn.com\")))\n",
|
||||||
|
"# display_summary(summarize(Website(\"https://anthropic.com\")))"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "5a904323-acd9-4c8e-9a17-70df76184590",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# Websites protected with CloudFront (and similar) or with JavaScript need a Selenium or Playwright implementation. They return 403\n",
|
||||||
|
"\n",
|
||||||
|
"# display_summary(summarize(Website(\"https://openai.com\")))"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "139ad985",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# To generate the above summary, use selenium\n",
|
||||||
|
"\n",
|
||||||
|
"from selenium import webdriver\n",
|
||||||
|
"from selenium.webdriver.chrome.service import Service\n",
|
||||||
|
"from selenium.webdriver.common.by import By\n",
|
||||||
|
"from selenium.webdriver.support.ui import WebDriverWait\n",
|
||||||
|
"from selenium.webdriver.support import expected_conditions as EC\n",
|
||||||
|
"\n",
|
||||||
|
"class WebsiteSelenium:\n",
|
||||||
|
" def __init__(self, url):\n",
|
||||||
|
" self.url = url\n",
|
||||||
|
" self.title = \"No title found\"\n",
|
||||||
|
" self.text = \"\"\n",
|
||||||
|
"\n",
|
||||||
|
" # Configure Chrome options (headless mode is recommended for server environments)\n",
|
||||||
|
" chrome_options = webdriver.ChromeOptions()\n",
|
||||||
|
" chrome_options.add_argument(\"--headless\") # Run Chrome in headless mode (without a UI)\n",
|
||||||
|
" chrome_options.add_argument(\"--no-sandbox\") # Required for running as root in some environments\n",
|
||||||
|
" chrome_options.add_argument(\"--disable-dev-shm-usage\") # Overcomes limited resource problems\n",
|
||||||
|
"\n",
|
||||||
|
" # Path to your WebDriver executable (e.g., chromedriver)\n",
|
||||||
|
" # Make sure to replace this with the actual path to your chromedriver\n",
|
||||||
|
" # You might need to download it from: https://chromedriver.chromium.org/downloads and place it in a drivers dir\n",
|
||||||
|
" service = Service('./drivers/chromedriver-mac-x64/chromedriver')\n",
|
||||||
|
"\n",
|
||||||
|
" driver = None\n",
|
||||||
|
" try:\n",
|
||||||
|
" driver = webdriver.Chrome(service=service, options=chrome_options)\n",
|
||||||
|
" driver.get(url)\n",
|
||||||
|
"\n",
|
||||||
|
" # Wait for the page to load and dynamic content to render\n",
|
||||||
|
" # You might need to adjust the wait condition based on the website\n",
|
||||||
|
" WebDriverWait(driver, 10).until(\n",
|
||||||
|
" EC.presence_of_element_located((By.TAG_NAME, \"body\"))\n",
|
||||||
|
" )\n",
|
||||||
|
" time.sleep(3) # Give more time for JavaScript to execute\n",
|
||||||
|
"\n",
|
||||||
|
" # Get the page source after dynamic content has loaded\n",
|
||||||
|
" soup = BeautifulSoup(driver.page_source, 'html.parser')\n",
|
||||||
|
"\n",
|
||||||
|
" self.title = soup.title.string if soup.title else \"No title found\"\n",
|
||||||
|
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
|
||||||
|
" irrelevant.decompose()\n",
|
||||||
|
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n",
|
||||||
|
"\n",
|
||||||
|
" except Exception as e:\n",
|
||||||
|
" print(f\"Error accessing {url} with Selenium: {e}\")\n",
|
||||||
|
" finally:\n",
|
||||||
|
" if driver:\n",
|
||||||
|
" driver.quit() # Always close the browser\n",
|
||||||
|
"\n",
|
||||||
|
"display_summary(summarize(WebsiteSelenium(\"https://openai.com\")))"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "130d4572",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import asyncio\n",
|
||||||
|
"from playwright.async_api import async_playwright\n",
|
||||||
|
"import nest_asyncio\n",
|
||||||
|
"\n",
|
||||||
|
"# Apply nest_asyncio to allow asyncio.run in Jupyter\n",
|
||||||
|
"nest_asyncio.apply()\n",
|
||||||
|
"\n",
|
||||||
|
"class WebsitePlaywright:\n",
|
||||||
|
" def __init__(self, url):\n",
|
||||||
|
" self.url = url\n",
|
||||||
|
" self.title = \"No title found\"\n",
|
||||||
|
" self.text = \"\"\n",
|
||||||
|
" asyncio.run(self._fetch_content())\n",
|
||||||
|
"\n",
|
||||||
|
" async def _fetch_content(self):\n",
|
||||||
|
" async with async_playwright() as p:\n",
|
||||||
|
" browser = None\n",
|
||||||
|
" try:\n",
|
||||||
|
" browser = await p.chromium.launch(headless=True)\n",
|
||||||
|
" page = await browser.new_page()\n",
|
||||||
|
"\n",
|
||||||
|
" # Increase timeout for navigation and other operations\n",
|
||||||
|
" await page.goto(self.url, timeout=60000) # Wait up to 60 seconds for navigation\n",
|
||||||
|
" print(f\"Accessing {self.url} with Playwright - goto()\")\n",
|
||||||
|
"\n",
|
||||||
|
" # You might need to adjust or add more specific waits\n",
|
||||||
|
" await page.wait_for_load_state('domcontentloaded', timeout=60000) # Wait for basic HTML\n",
|
||||||
|
" # await page.wait_for_load_state('networkidle', timeout=60000) # Wait for network activity to settle\n",
|
||||||
|
" await page.wait_for_selector('div.duration-short', timeout=60000) # instead of networkidle\n",
|
||||||
|
" await page.wait_for_selector('body', timeout=60000) # Wait for the body to be present\n",
|
||||||
|
" await asyncio.sleep(5) # Give a bit more time for final rendering\n",
|
||||||
|
"\n",
|
||||||
|
" content = await page.content()\n",
|
||||||
|
" soup = BeautifulSoup(content, 'html.parser')\n",
|
||||||
|
"\n",
|
||||||
|
" self.title = soup.title.string if soup.title else \"No title found\"\n",
|
||||||
|
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
|
||||||
|
" irrelevant.decompose()\n",
|
||||||
|
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n",
|
||||||
|
" print(f\"Accessed {self.url} with Playwright\")\n",
|
||||||
|
"\n",
|
||||||
|
" except Exception as e:\n",
|
||||||
|
" print(f\"Error accessing {self.url} with Playwright: {e}\")\n",
|
||||||
|
" finally:\n",
|
||||||
|
" if browser:\n",
|
||||||
|
" await browser.close()\n",
|
||||||
|
"\n",
|
||||||
|
"display_summary(summarize(WebsitePlaywright(\"https://openai.com/\")))"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "00743dac-0e70-45b7-879a-d7293a6f68a6",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# Step 1: Create your prompts\n",
|
||||||
|
"\n",
|
||||||
|
"system_prompt = \"You are a professional assistant. Review this conversation and provide a comprehensive summary. Also, suggest how much better the conversation could have gone:\"\n",
|
||||||
|
"user_prompt = \"\"\"\n",
|
||||||
|
"\n",
|
||||||
|
"Dear Email Contact,\n",
|
||||||
|
"\n",
|
||||||
|
"I hope this message finds you well.\n",
|
||||||
|
"I would like to share that I have proficiency in front-end design tools, particularly Figma, react and Angular. At this stage, I am keenly interested in finding opportunities to apply these skills professionally.\n",
|
||||||
|
"\n",
|
||||||
|
"If you are aware of any companies, projects, or platforms seeking enterprise in front-end design, I would be grateful for any advice or recommendations you might kindly provide.\n",
|
||||||
|
"\n",
|
||||||
|
"Thank you very much for your time and consideration.\n",
|
||||||
|
"\n",
|
||||||
|
"Hello Job Seeker,\n",
|
||||||
|
"\n",
|
||||||
|
"I hope you are doing well.\n",
|
||||||
|
"\n",
|
||||||
|
"The last role (3 months gig) I saw was looking for a junior PHP Developer. Does your CV include that?\n",
|
||||||
|
"\n",
|
||||||
|
"Hello Email Contact,\n",
|
||||||
|
"Thank you for your feedback.\n",
|
||||||
|
"Yes my CV has PHP as one of my skill set. Can I share it with you?\n",
|
||||||
|
"\n",
|
||||||
|
"Email Contact: They said \"It's late. Interviews were on Monday\"\n",
|
||||||
|
"\n",
|
||||||
|
"Hello Email Contact\n",
|
||||||
|
"\n",
|
||||||
|
"Thanks for the update. When you hear of any opportunity please let me know.\n",
|
||||||
|
"\n",
|
||||||
|
"Email Contact: For now, check out https://refactory.academy/courses/refactory-apprenticeship/\n",
|
||||||
|
"\"\"\"\n",
|
||||||
|
"\n",
|
||||||
|
"# Step 2: Make the messages list\n",
|
||||||
|
"\n",
|
||||||
|
"messages = [\n",
|
||||||
|
" {\"role\": \"system\", \"content\": system_prompt},\n",
|
||||||
|
" {\"role\": \"user\", \"content\": user_prompt},\n",
|
||||||
|
"]\n",
|
||||||
|
"\n",
|
||||||
|
"# Step 3: Call OpenAI\n",
|
||||||
|
"\n",
|
||||||
|
"response = openai.chat.completions.create(\n",
|
||||||
|
" model = \"gpt-4o-mini\",\n",
|
||||||
|
" messages = messages\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"# Step 4: print the result\n",
|
||||||
|
"\n",
|
||||||
|
"print(response.choices[0].message.content)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "4b583226-9b13-4990-863a-86517a5ccfec",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# To perform summaries using a model running locally\n",
|
||||||
|
"import ollama\n",
|
||||||
|
"\n",
|
||||||
|
"# OLLAMA_API = \"http://localhost:11434/api/chat\"\n",
|
||||||
|
"# HEADERS = {\"Content-Type\": \"application/json\"}\n",
|
||||||
|
"MODEL = \"llama3.2\"\n",
|
||||||
|
"\n",
|
||||||
|
"def summarize_with_local_model(url):\n",
|
||||||
|
" website = Website(url)\n",
|
||||||
|
" messages = messages_for(website)\n",
|
||||||
|
" response = ollama.chat(\n",
|
||||||
|
" model=MODEL,\n",
|
||||||
|
" messages=messages,\n",
|
||||||
|
" stream=False # just get the results, don't stream them\n",
|
||||||
|
" )\n",
|
||||||
|
" return response['message']['content']\n",
|
||||||
|
"\n",
|
||||||
|
"display(Markdown(summarize_with_local_model(\"https://rwothoromo.wordpress.com/\")))"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3 (ipykernel)",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python3"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.11.7"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 5
|
||||||
|
}
|
||||||
477
week1/community-contributions/rwothoromo/day5.ipynb
Normal file
477
week1/community-contributions/rwothoromo/day5.ipynb
Normal file
@@ -0,0 +1,477 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "a98030af-fcd1-4d63-a36e-38ba053498fa",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# A full business solution\n",
|
||||||
|
"\n",
|
||||||
|
"## Now we will take our project from Day 1 to the next level\n",
|
||||||
|
"\n",
|
||||||
|
"### BUSINESS CHALLENGE:\n",
|
||||||
|
"\n",
|
||||||
|
"Create a product that builds a Brochure for a company to be used for prospective clients, investors and potential recruits.\n",
|
||||||
|
"\n",
|
||||||
|
"We will be provided a company name and their primary website.\n",
|
||||||
|
"\n",
|
||||||
|
"See the end of this notebook for examples of real-world business applications.\n",
|
||||||
|
"\n",
|
||||||
|
"And remember: I'm always available if you have problems or ideas! Please do reach out."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "d5b08506-dc8b-4443-9201-5f1848161363",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# imports\n",
|
||||||
|
"# If these fail, please check you're running from an 'activated' environment with (llms) in the command prompt\n",
|
||||||
|
"\n",
|
||||||
|
"import os\n",
|
||||||
|
"import requests\n",
|
||||||
|
"import json\n",
|
||||||
|
"from typing import List\n",
|
||||||
|
"from dotenv import load_dotenv\n",
|
||||||
|
"from bs4 import BeautifulSoup\n",
|
||||||
|
"from IPython.display import Markdown, display, update_display\n",
|
||||||
|
"from openai import OpenAI"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "fc5d8880-f2ee-4c06-af16-ecbc0262af61",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# Initialize and constants\n",
|
||||||
|
"\n",
|
||||||
|
"load_dotenv(override=True)\n",
|
||||||
|
"api_key = os.getenv('OPENAI_API_KEY')\n",
|
||||||
|
"\n",
|
||||||
|
"if api_key and api_key.startswith('sk-proj-') and len(api_key)>10:\n",
|
||||||
|
" print(\"API key looks good so far\")\n",
|
||||||
|
"else:\n",
|
||||||
|
" print(\"There might be a problem with your API key? Please visit the troubleshooting notebook!\")\n",
|
||||||
|
" \n",
|
||||||
|
"MODEL = 'gpt-4o-mini'\n",
|
||||||
|
"openai = OpenAI()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "106dd65e-90af-4ca8-86b6-23a41840645b",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# A class to represent a Webpage\n",
|
||||||
|
"\n",
|
||||||
|
"# Some websites need you to use proper headers when fetching them:\n",
|
||||||
|
"headers = {\n",
|
||||||
|
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
|
||||||
|
"}\n",
|
||||||
|
"\n",
|
||||||
|
"class Website:\n",
|
||||||
|
" \"\"\"\n",
|
||||||
|
" A utility class to represent a Website that we have scraped, now with links\n",
|
||||||
|
" \"\"\"\n",
|
||||||
|
"\n",
|
||||||
|
" def __init__(self, url):\n",
|
||||||
|
" self.url = url\n",
|
||||||
|
" response = requests.get(url, headers=headers)\n",
|
||||||
|
" self.body = response.content\n",
|
||||||
|
" soup = BeautifulSoup(self.body, 'html.parser')\n",
|
||||||
|
" self.title = soup.title.string if soup.title else \"No title found\"\n",
|
||||||
|
" if soup.body:\n",
|
||||||
|
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
|
||||||
|
" irrelevant.decompose()\n",
|
||||||
|
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n",
|
||||||
|
" else:\n",
|
||||||
|
" self.text = \"\"\n",
|
||||||
|
" links = [link.get('href') for link in soup.find_all('a')]\n",
|
||||||
|
" self.links = [link for link in links if link]\n",
|
||||||
|
"\n",
|
||||||
|
" def get_contents(self):\n",
|
||||||
|
" return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\""
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "e30d8128-933b-44cc-81c8-ab4c9d86589a",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"ed = Website(\"https://edwarddonner.com\")\n",
|
||||||
|
"ed.links"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "1771af9c-717a-4fca-bbbe-8a95893312c3",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## First step: Have GPT-4o-mini figure out which links are relevant\n",
|
||||||
|
"\n",
|
||||||
|
"### Use a call to gpt-4o-mini to read the links on a webpage, and respond in structured JSON. \n",
|
||||||
|
"It should decide which links are relevant, and replace relative links such as \"/about\" with \"https://company.com/about\". \n",
|
||||||
|
"We will use \"one shot prompting\" in which we provide an example of how it should respond in the prompt.\n",
|
||||||
|
"\n",
|
||||||
|
"This is an excellent use case for an LLM, because it requires nuanced understanding. Imagine trying to code this without LLMs by parsing and analyzing the webpage - it would be very hard!\n",
|
||||||
|
"\n",
|
||||||
|
"Sidenote: there is a more advanced technique called \"Structured Outputs\" in which we require the model to respond according to a spec. We cover this technique in Week 8 during our autonomous Agentic AI project."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "6957b079-0d96-45f7-a26a-3487510e9b35",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"link_system_prompt = \"You are provided with a list of links found on a webpage. \\\n",
|
||||||
|
"You are able to decide which of the links would be most relevant to include in a brochure about the company, \\\n",
|
||||||
|
"such as links to an About page, or a Company page, or Careers/Jobs pages.\\n\"\n",
|
||||||
|
"link_system_prompt += \"You should respond in JSON as in this example:\"\n",
|
||||||
|
"link_system_prompt += \"\"\"\n",
|
||||||
|
"{\n",
|
||||||
|
" \"links\": [\n",
|
||||||
|
" {\"type\": \"about page\", \"url\": \"https://full.url/goes/here/about\"},\n",
|
||||||
|
" {\"type\": \"careers page\", \"url\": \"https://another.full.url/careers\"}\n",
|
||||||
|
" ]\n",
|
||||||
|
"}\n",
|
||||||
|
"\"\"\"\n",
|
||||||
|
"link_system_prompt += \"And this example:\"\n",
|
||||||
|
"link_system_prompt += \"\"\"\n",
|
||||||
|
"{\n",
|
||||||
|
" \"links\": [\n",
|
||||||
|
" {\"type\": \"for-you page\", \"url\": \"https://full.url/goes/here/services\"},\n",
|
||||||
|
" {\"type\": \"speak-to-a-human page\", \"url\": \"https://another.full.url/contact-us\"}\n",
|
||||||
|
" ]\n",
|
||||||
|
"}\n",
|
||||||
|
"\"\"\""
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "b97e4068-97ed-4120-beae-c42105e4d59a",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"print(link_system_prompt)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "8e1f601b-2eaf-499d-b6b8-c99050c9d6b3",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def get_links_user_prompt(website):\n",
|
||||||
|
" user_prompt = f\"Here is the list of links on the website of {website.url} - \"\n",
|
||||||
|
" user_prompt += \"please decide which of these are relevant web links for a brochure about the company, respond with the full https URL in JSON format. \\\n",
|
||||||
|
"Do not include Terms of Service, Privacy, email links.\\n\"\n",
|
||||||
|
" user_prompt += \"Links (some might be relative links):\\n\"\n",
|
||||||
|
" user_prompt += \"\\n\".join(website.links)\n",
|
||||||
|
" return user_prompt"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "6bcbfa78-6395-4685-b92c-22d592050fd7",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"print(get_links_user_prompt(ed))"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "a29aca19-ca13-471c-a4b4-5abbfa813f69",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def get_links(url):\n",
|
||||||
|
" website = Website(url)\n",
|
||||||
|
" response = openai.chat.completions.create(\n",
|
||||||
|
" model=MODEL,\n",
|
||||||
|
" messages=[\n",
|
||||||
|
" {\"role\": \"system\", \"content\": link_system_prompt},\n",
|
||||||
|
" {\"role\": \"user\", \"content\": get_links_user_prompt(website)}\n",
|
||||||
|
" ],\n",
|
||||||
|
" response_format={\"type\": \"json_object\"}\n",
|
||||||
|
" )\n",
|
||||||
|
" result = response.choices[0].message.content\n",
|
||||||
|
" return json.loads(result)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "74a827a0-2782-4ae5-b210-4a242a8b4cc2",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# Anthropic has made their site harder to scrape, so I'm using HuggingFace.\n",
|
||||||
|
"\n",
|
||||||
|
"# anthropic = Website(\"https://anthropic.com\")\n",
|
||||||
|
"# anthropic.links\n",
|
||||||
|
"# get_links(\"https://anthropic.com\")\n",
|
||||||
|
"huggingface = Website(\"https://huggingface.co\")\n",
|
||||||
|
"huggingface.links"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "d3d583e2-dcc4-40cc-9b28-1e8dbf402924",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"get_links(\"https://huggingface.co\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "0d74128e-dfb6-47ec-9549-288b621c838c",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Second step: make the brochure!\n",
|
||||||
|
"\n",
|
||||||
|
"Assemble all the details into another prompt to GPT4-o"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "85a5b6e2-e7ef-44a9-bc7f-59ede71037b5",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def get_all_details(url):\n",
|
||||||
|
" result = \"Landing page:\\n\"\n",
|
||||||
|
" result += Website(url).get_contents()\n",
|
||||||
|
" links = get_links(url)\n",
|
||||||
|
" print(\"Found links:\", links)\n",
|
||||||
|
" for link in links[\"links\"]:\n",
|
||||||
|
" result += f\"\\n\\n{link['type']}\\n\"\n",
|
||||||
|
" result += Website(link[\"url\"]).get_contents()\n",
|
||||||
|
" return result"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "5099bd14-076d-4745-baf3-dac08d8e5ab2",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"print(get_all_details(\"https://huggingface.co\"))"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "9b863a55-f86c-4e3f-8a79-94e24c1a8cf2",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# system_prompt = \"You are an assistant that analyzes the contents of several relevant pages from a company website \\\n",
|
||||||
|
"# and creates a short brochure about the company for prospective customers, investors and recruits. Respond in markdown.\\\n",
|
||||||
|
"# Include details of company culture, customers and careers/jobs if you have the information.\"\n",
|
||||||
|
"\n",
|
||||||
|
"# Or uncomment the lines below for a more humorous brochure - this demonstrates how easy it is to incorporate 'tone':\n",
|
||||||
|
"\n",
|
||||||
|
"system_prompt = \"You are an assistant that analyzes the contents of several relevant pages from a company website \\\n",
|
||||||
|
"and creates a short humorous, entertaining, jokey brochure about the company for prospective customers, investors and recruits. Respond in markdown.\\\n",
|
||||||
|
"Include details of company culture, customers and careers/jobs if you have the information.\"\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "6ab83d92-d36b-4ce0-8bcc-5bb4c2f8ff23",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def get_brochure_user_prompt(company_name, url):\n",
|
||||||
|
" user_prompt = f\"You are looking at a company called: {company_name}\\n\"\n",
|
||||||
|
" user_prompt += f\"Here are the contents of its landing page and other relevant pages; use this information to build a short brochure of the company in markdown.\\n\"\n",
|
||||||
|
" user_prompt += f\"Keep the details brief or concise, factoring in that they would be printed on a simple hand-out flyer.\\n\"\n",
|
||||||
|
" user_prompt += get_all_details(url)\n",
|
||||||
|
" user_prompt = user_prompt[:5_000] # Truncate if more than 5,000 characters\n",
|
||||||
|
" return user_prompt"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "cd909e0b-1312-4ce2-a553-821e795d7572",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"get_brochure_user_prompt(\"HuggingFace\", \"https://huggingface.co\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "e44de579-4a1a-4e6a-a510-20ea3e4b8d46",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def create_brochure(company_name, url):\n",
|
||||||
|
" response = openai.chat.completions.create(\n",
|
||||||
|
" model=MODEL,\n",
|
||||||
|
" messages=[\n",
|
||||||
|
" {\"role\": \"system\", \"content\": system_prompt},\n",
|
||||||
|
" {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n",
|
||||||
|
" ],\n",
|
||||||
|
" )\n",
|
||||||
|
" result = response.choices[0].message.content\n",
|
||||||
|
" # display(Markdown(result))\n",
|
||||||
|
" # print(result)\n",
|
||||||
|
" return result"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "0029e063-0c07-4712-82d9-536ec3579e80",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def translate_brochure(brochure, language):\n",
|
||||||
|
" system_prompt_for_language = \"You're an expert in \" + language + \". Translate the brochure!\"\n",
|
||||||
|
" response = openai.chat.completions.create(\n",
|
||||||
|
" model=MODEL,\n",
|
||||||
|
" messages=[\n",
|
||||||
|
" {\"role\": \"system\", \"content\": system_prompt_for_language},\n",
|
||||||
|
" {\"role\": \"user\", \"content\": brochure}\n",
|
||||||
|
" ],\n",
|
||||||
|
" )\n",
|
||||||
|
" result = response.choices[0].message.content\n",
|
||||||
|
" display(Markdown(result))"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "e093444a-9407-42ae-924a-145730591a39",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"create_brochure(\"HuggingFace\", \"https://huggingface.co\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "f8371bf5-c4c0-4e52-9a2a-066d994b0510",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"brochure = create_brochure(\"Paint and Sip Uganda\", \"https://paintandsipuganda.com/\")\n",
|
||||||
|
"# translate_brochure(brochure, \"Spanish\")\n",
|
||||||
|
"translate_brochure(brochure, \"Swahili\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "34e03db6-61d0-4fc5-bf66-4f679b9befde",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"create_brochure(\"Wabeh\", \"https://wabeh.com/\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "61eaaab7-0b47-4b29-82d4-75d474ad8d18",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Finally - a minor improvement\n",
|
||||||
|
"\n",
|
||||||
|
"With a small adjustment, we can change this so that the results stream back from OpenAI,\n",
|
||||||
|
"with the familiar typewriter animation"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "51db0e49-f261-4137-aabe-92dd601f7725",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def stream_brochure(company_name, url):\n",
|
||||||
|
" stream = openai.chat.completions.create(\n",
|
||||||
|
" model=MODEL,\n",
|
||||||
|
" messages=[\n",
|
||||||
|
" {\"role\": \"system\", \"content\": system_prompt},\n",
|
||||||
|
" {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n",
|
||||||
|
" ],\n",
|
||||||
|
" stream=True\n",
|
||||||
|
" )\n",
|
||||||
|
" \n",
|
||||||
|
" response = \"\"\n",
|
||||||
|
" display_handle = display(Markdown(\"\"), display_id=True)\n",
|
||||||
|
" for chunk in stream:\n",
|
||||||
|
" response += chunk.choices[0].delta.content or ''\n",
|
||||||
|
" response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n",
|
||||||
|
" update_display(Markdown(response), display_id=display_handle.display_id)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "56bf0ae3-ee9d-4a72-9cd6-edcac67ceb6d",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"stream_brochure(\"HuggingFace\", \"https://huggingface.co\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "fdb3f8d8-a3eb-41c8-b1aa-9f60686a653b",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# Try changing the system prompt to the humorous version when you make the Brochure for Hugging Face:\n",
|
||||||
|
"\n",
|
||||||
|
"stream_brochure(\"HuggingFace\", \"https://huggingface.co\")"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3 (ipykernel)",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python3"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.11.7"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 5
|
||||||
|
}
|
||||||
240
week1/community-contributions/rwothoromo/week1 EXERCISE.ipynb
Normal file
240
week1/community-contributions/rwothoromo/week1 EXERCISE.ipynb
Normal file
@@ -0,0 +1,240 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "fe12c203-e6a6-452c-a655-afb8a03a4ff5",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# End of week 1 exercise\n",
|
||||||
|
"\n",
|
||||||
|
"To demonstrate your familiarity with OpenAI API, and also Ollama, build a tool that takes a technical question, \n",
|
||||||
|
"and responds with an explanation. This is a tool that you will be able to use yourself during the course!"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "c1070317-3ed9-4659-abe3-828943230e03",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# imports\n",
|
||||||
|
"\n",
|
||||||
|
"import re, requests, ollama\n",
|
||||||
|
"from bs4 import BeautifulSoup\n",
|
||||||
|
"from IPython.display import Markdown, display, update_display\n",
|
||||||
|
"from openai import OpenAI"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "4a456906-915a-4bfd-bb9d-57e505c5093f",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# constants\n",
|
||||||
|
"\n",
|
||||||
|
"MODEL_GPT = 'gpt-4o-mini'\n",
|
||||||
|
"MODEL_LLAMA = 'llama3.2'"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "a8d7923c-5f28-4c30-8556-342d7c8497c1",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# set up environment\n",
|
||||||
|
"\n",
|
||||||
|
"headers = {\n",
|
||||||
|
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
|
||||||
|
"}\n",
|
||||||
|
"\n",
|
||||||
|
"class Website:\n",
|
||||||
|
"\n",
|
||||||
|
" def __init__(self, url):\n",
|
||||||
|
" \"\"\"\n",
|
||||||
|
" Create this Website object from the given url using the BeautifulSoup library\n",
|
||||||
|
" \"\"\"\n",
|
||||||
|
" self.url = url\n",
|
||||||
|
" response = requests.get(url, headers=headers)\n",
|
||||||
|
" soup = BeautifulSoup(response.content, 'html.parser')\n",
|
||||||
|
" self.title = soup.title.string if soup.title else \"No title found\"\n",
|
||||||
|
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
|
||||||
|
" irrelevant.decompose()\n",
|
||||||
|
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n",
|
||||||
|
"\n",
|
||||||
|
"openai = OpenAI()\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "3f0d0137-52b0-47a8-81a8-11a90a010798",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# here is the question; type over this to ask something new\n",
|
||||||
|
"\n",
|
||||||
|
"# question = \"\"\"\n",
|
||||||
|
"# Please explain what this code does and why:\n",
|
||||||
|
"# yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n",
|
||||||
|
"# \"\"\"\n",
|
||||||
|
"\n",
|
||||||
|
"# question = \"\"\"\n",
|
||||||
|
"# Please explain what this code does and why:\n",
|
||||||
|
"# yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n",
|
||||||
|
"# Popular dev site https://projecteuler.net/\n",
|
||||||
|
"# \"\"\"\n",
|
||||||
|
"\n",
|
||||||
|
"# question = \"\"\"\n",
|
||||||
|
"# Who is Blessed Goodteam (https://www.linkedin.com/in/blessed-goodteam-49b3ab30a)? \\\n",
|
||||||
|
"# How relevant is her work at Paint and Sip Uganda (https://paintandsipuganda.com/). \\\n",
|
||||||
|
"# What can we learn from her?\n",
|
||||||
|
"# \"\"\"\n",
|
||||||
|
"\n",
|
||||||
|
"question = \"\"\"\n",
|
||||||
|
"How good at Software Development is Elijah Rwothoromo? \\\n",
|
||||||
|
"He has a Wordpress site https://rwothoromo.wordpress.com/. \\\n",
|
||||||
|
"He also has a LinkedIn profile https://www.linkedin.com/in/rwothoromoelaijah/. \\\n",
|
||||||
|
"What can we learn from him?\n",
|
||||||
|
"\"\"\"\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "e14fd3a1-0aca-4794-a0e0-57458e111fc9",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# Process URLs in the question to improve the prompt\n",
|
||||||
|
"\n",
|
||||||
|
"# Extract all URLs from the question string using regular expressions\n",
|
||||||
|
"urls = re.findall(r'https?://[^\\s)]+', question)\n",
|
||||||
|
"# print(urls)\n",
|
||||||
|
"\n",
|
||||||
|
"if len(urls) > 0:\n",
|
||||||
|
" \n",
|
||||||
|
" # Fetch the content for each URL using the Website class\n",
|
||||||
|
" scraped_content = []\n",
|
||||||
|
" for url in urls:\n",
|
||||||
|
" print(f\"Scraping: {url}\")\n",
|
||||||
|
" try:\n",
|
||||||
|
" site = Website(url)\n",
|
||||||
|
" content = f\"Content from {url}:\\n---\\n{site.text}\\n---\\n\" # delimiter ---\n",
|
||||||
|
" scraped_content.append(content)\n",
|
||||||
|
" except Exception as e:\n",
|
||||||
|
" print(f\"Could not scrape {url}: {e}\")\n",
|
||||||
|
" scraped_content.append(f\"Could not retrieve content from {url}.\\n\")\n",
|
||||||
|
" \n",
|
||||||
|
" # Combine all the scraped text into one string\n",
|
||||||
|
" all_scraped_text = \"\\n\".join(scraped_content)\n",
|
||||||
|
" \n",
|
||||||
|
" # Update the question with the scraped content\n",
|
||||||
|
" updated_question = f\"\"\"\n",
|
||||||
|
" Based on the following information, please answer the user's original question.\n",
|
||||||
|
" \n",
|
||||||
|
" --- TEXT FROM WEBSITES ---\n",
|
||||||
|
" {all_scraped_text}\n",
|
||||||
|
" --- END TEXT FROM WEBSITES ---\n",
|
||||||
|
" \n",
|
||||||
|
" --- ORIGINAL QUESTION ---\n",
|
||||||
|
" {question}\n",
|
||||||
|
" \"\"\"\n",
|
||||||
|
"else:\n",
|
||||||
|
" updated_question = question\n",
|
||||||
|
"\n",
|
||||||
|
"# print(updated_question)\n",
|
||||||
|
"\n",
|
||||||
|
"# system prompt to be more accurate for AI to just analyze the provided text.\n",
|
||||||
|
"system_prompt = \"You are an expert assistant. \\\n",
|
||||||
|
"Analyze the user's question and the provided text from relevant websites to synthesize a comprehensive answer in markdown format.\\\n",
|
||||||
|
"Provide a short summary, ignoring text that might be navigation-related.\"\n",
|
||||||
|
"\n",
|
||||||
|
"# Create the messages list with the newly updated prompt\n",
|
||||||
|
"messages = [\n",
|
||||||
|
" {\"role\": \"system\", \"content\": system_prompt},\n",
|
||||||
|
" {\"role\": \"user\", \"content\": updated_question},\n",
|
||||||
|
"]\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "60ce7000-a4a5-4cce-a261-e75ef45063b4",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# Get gpt-4o-mini to answer, with streaming\n",
|
||||||
|
"\n",
|
||||||
|
"def get_gpt_response(question):\n",
|
||||||
|
" stream = openai.chat.completions.create(\n",
|
||||||
|
" model=MODEL_GPT,\n",
|
||||||
|
" messages=messages,\n",
|
||||||
|
" stream=True\n",
|
||||||
|
" )\n",
|
||||||
|
" \n",
|
||||||
|
" response = \"\"\n",
|
||||||
|
" display_handle = display(Markdown(\"\"), display_id=True)\n",
|
||||||
|
" for chunk in stream:\n",
|
||||||
|
" response += chunk.choices[0].delta.content or ''\n",
|
||||||
|
" response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n",
|
||||||
|
" update_display(Markdown(response), display_id=display_handle.display_id)\n",
|
||||||
|
"\n",
|
||||||
|
"get_gpt_response(question)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "8f7c8ea8-4082-4ad0-8751-3301adcf6538",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# Get Llama 3.2 to answer\n",
|
||||||
|
"\n",
|
||||||
|
"def get_llama_response(question):\n",
|
||||||
|
" response = ollama.chat(\n",
|
||||||
|
" model=MODEL_LLAMA,\n",
|
||||||
|
" messages=messages,\n",
|
||||||
|
" stream=False # just get the results, don't stream them\n",
|
||||||
|
" )\n",
|
||||||
|
" return response['message']['content']\n",
|
||||||
|
"\n",
|
||||||
|
"display(Markdown(get_llama_response(question)))"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "157d5bb3-bed7-4fbd-9a5d-f2a14aaac869",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": []
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3 (ipykernel)",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python3"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.11.7"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 5
|
||||||
|
}
|
||||||
Reference in New Issue
Block a user