{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "fe12c203-e6a6-452c-a655-afb8a03a4ff5",
   "metadata": {},
   "source": [
    "# End of week 1 exercise\n",
    "\n",
    "To demonstrate your familiarity with the OpenAI API, and also with Ollama, build a tool that takes a technical question\n",
    "and responds with an explanation. This is a tool that you will be able to use yourself during the course!"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c1070317-3ed9-4659-abe3-828943230e03",
   "metadata": {},
   "outputs": [],
   "source": [
    "# imports\n",
    "\n",
    "import re\n",
    "\n",
    "import ollama\n",
    "import requests\n",
    "from bs4 import BeautifulSoup\n",
    "from IPython.display import Markdown, display, update_display\n",
    "from openai import OpenAI"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4a456906-915a-4bfd-bb9d-57e505c5093f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# constants\n",
    "\n",
    "MODEL_GPT = 'gpt-4o-mini'\n",
    "MODEL_LLAMA = 'llama3.2'"
   ]
  },
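  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a5d8c3f2-9e1b-4c6a-8f7d-0b1c2d3e4f5a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# The Llama cell further down needs the model available locally. If you have\n",
    "# not pulled it yet, uncomment and run this (assumes the Ollama CLI is\n",
    "# installed and the Ollama server is running):\n",
    "\n",
    "# !ollama pull llama3.2"
   ]
  },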
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a8d7923c-5f28-4c30-8556-342d7c8497c1",
   "metadata": {},
   "outputs": [],
   "source": [
    "# set up environment\n",
    "\n",
    "from dotenv import load_dotenv\n",
    "\n",
    "# assumes your OPENAI_API_KEY lives in a .env file; OpenAI() reads it from the environment\n",
    "load_dotenv()\n",
    "\n",
    "headers = {\n",
    "    \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
    "}\n",
    "\n",
    "class Website:\n",
    "\n",
    "    def __init__(self, url):\n",
    "        \"\"\"\n",
    "        Create this Website object from the given url using the BeautifulSoup library\n",
    "        \"\"\"\n",
    "        self.url = url\n",
    "        response = requests.get(url, headers=headers)\n",
    "        soup = BeautifulSoup(response.content, 'html.parser')\n",
    "        self.title = soup.title.string if soup.title else \"No title found\"\n",
    "        # guard against pages with no <body> (e.g. non-HTML responses)\n",
    "        if soup.body:\n",
    "            for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
    "                irrelevant.decompose()\n",
    "            self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n",
    "        else:\n",
    "            self.text = \"\"\n",
    "\n",
    "openai = OpenAI()\n"
   ]
  },
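  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b7a3d9e1-4c2f-4a6b-9e8d-1f2a3b4c5d6e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional sanity check for the Website class before running the full flow\n",
    "# (uncomment to try; assumes network access, and https://example.com is just\n",
    "# a placeholder URL, not part of the exercise)\n",
    "\n",
    "# site = Website(\"https://example.com\")\n",
    "# print(site.title)\n",
    "# print(site.text[:200])"
   ]
  },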
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3f0d0137-52b0-47a8-81a8-11a90a010798",
   "metadata": {},
   "outputs": [],
   "source": [
    "# here is the question; type over this to ask something new\n",
    "\n",
    "# question = \"\"\"\n",
    "# Please explain what this code does and why:\n",
    "# yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n",
    "# Popular dev site https://projecteuler.net/\n",
    "# \"\"\"\n",
    "\n",
    "question = \"\"\"\n",
    "How good is Elijah Rwothoromo at Software Development? \\\n",
    "He has a WordPress site https://rwothoromo.wordpress.com/. \\\n",
    "He also has a LinkedIn profile https://www.linkedin.com/in/rwothoromoelaijah/. \\\n",
    "What can we learn from him?\n",
    "\"\"\"\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e14fd3a1-0aca-4794-a0e0-57458e111fc9",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Process URLs in the question to improve the prompt\n",
    "\n",
    "# Extract all URLs from the question string using regular expressions\n",
    "urls = re.findall(r'https?://[^\\s)]+', question)\n",
    "# print(urls)\n",
    "\n",
    "if urls:\n",
    "\n",
    "    # Fetch the content for each URL using the Website class\n",
    "    scraped_content = []\n",
    "    for url in urls:\n",
    "        print(f\"Scraping: {url}\")\n",
    "        try:\n",
    "            site = Website(url)\n",
    "            content = f\"Content from {url}:\\n---\\n{site.text}\\n---\\n\"  # delimited with ---\n",
    "            scraped_content.append(content)\n",
    "        except Exception as e:\n",
    "            print(f\"Could not scrape {url}: {e}\")\n",
    "            scraped_content.append(f\"Could not retrieve content from {url}.\\n\")\n",
    "\n",
    "    # Combine all the scraped text into one string\n",
    "    all_scraped_text = \"\\n\".join(scraped_content)\n",
    "\n",
    "    # Update the question with the scraped content\n",
    "    updated_question = f\"\"\"\n",
    "    Based on the following information, please answer the user's original question.\n",
    "\n",
    "    --- TEXT FROM WEBSITES ---\n",
    "    {all_scraped_text}\n",
    "    --- END TEXT FROM WEBSITES ---\n",
    "\n",
    "    --- ORIGINAL QUESTION ---\n",
    "    {question}\n",
    "    \"\"\"\n",
    "else:\n",
    "    updated_question = question\n",
    "\n",
    "# print(updated_question)\n",
    "\n",
    "# System prompt nudging the model to analyze only the provided text\n",
    "system_prompt = \"You are an expert assistant. \\\n",
    "Analyze the user's question and the provided text from relevant websites to synthesize a comprehensive answer in markdown format. \\\n",
    "Provide a short summary, ignoring text that might be navigation-related.\"\n",
    "\n",
    "# Create the messages list with the newly updated prompt\n",
    "messages = [\n",
    "    {\"role\": \"system\", \"content\": system_prompt},\n",
    "    {\"role\": \"user\", \"content\": updated_question},\n",
    "]\n"
   ]
  },
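  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c4f8e2a6-7b1d-4e3c-8a9f-2b3c4d5e6f7a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional: a quick illustration of the URL regex used above, on a made-up\n",
    "# sample string (not part of the exercise). The [^\\s)]+ class stops matching\n",
    "# at whitespace or a closing parenthesis, so bracketed URLs stay clean.\n",
    "\n",
    "sample = \"See (https://projecteuler.net/) and https://rwothoromo.wordpress.com/ for examples.\"\n",
    "print(re.findall(r'https?://[^\\s)]+', sample))\n",
    "# expected: ['https://projecteuler.net/', 'https://rwothoromo.wordpress.com/']"
   ]
  },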
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "60ce7000-a4a5-4cce-a261-e75ef45063b4",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Get gpt-4o-mini to answer, with streaming\n",
    "\n",
    "def get_gpt_response(messages):\n",
    "    stream = openai.chat.completions.create(\n",
    "        model=MODEL_GPT,\n",
    "        messages=messages,\n",
    "        stream=True\n",
    "    )\n",
    "\n",
    "    response = \"\"\n",
    "    display_handle = display(Markdown(\"\"), display_id=True)\n",
    "    for chunk in stream:\n",
    "        response += chunk.choices[0].delta.content or ''\n",
    "        # strip code fences and the word 'markdown' that models often wrap around answers\n",
    "        response = response.replace(\"```\", \"\").replace(\"markdown\", \"\")\n",
    "        update_display(Markdown(response), display_id=display_handle.display_id)\n",
    "\n",
    "get_gpt_response(messages)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8f7c8ea8-4082-4ad0-8751-3301adcf6538",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Get Llama 3.2 to answer\n",
    "\n",
    "def get_llama_response(messages):\n",
    "    response = ollama.chat(\n",
    "        model=MODEL_LLAMA,\n",
    "        messages=messages,\n",
    "        stream=False  # just get the result, don't stream it\n",
    "    )\n",
    "    return response['message']['content']\n",
    "\n",
    "display(Markdown(get_llama_response(messages)))"
   ]
  },
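  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d9b2c5e8-1a4f-4c7b-9d2e-3c4d5e6f7a8b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional: ollama.chat can also stream, mirroring the GPT helper above.\n",
    "# A minimal sketch (uncomment to try; it calls the local model again):\n",
    "\n",
    "# stream = ollama.chat(model=MODEL_LLAMA, messages=messages, stream=True)\n",
    "# for chunk in stream:\n",
    "#     print(chunk['message']['content'], end='')"
   ]
  },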
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fa1e9987-7b6d-49c1-9a81-b1a92aceea72",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}