Merge pull request #608 from Oluwaseyi-A/week2-community-contrib

Add week2 notebooks to community-contributions
This commit is contained in:
Ed Donner
2025-08-23 10:10:12 +01:00
committed by GitHub
2 changed files with 710 additions and 0 deletions


@@ -0,0 +1,456 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "9905f163-759f-474b-8f7a-7d14da0df44d",
"metadata": {},
"source": [
"### BUSINESS CHALLENGE: Using Multi-shot Prompting\n",
"#### Day 5\n",
"\n",
"Create a product that builds a Brochure for a company to be used for prospective clients, investors and potential recruits.\n",
"\n",
"We will be provided a company name and their primary website."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "a0895f24-65ff-4624-8ae0-15d2d400d8f0",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"# If these fail, please check you're running from an 'activated' environment with (llms) in the command prompt\n",
"\n",
"import os\n",
"import requests\n",
"import json\n",
"from typing import List\n",
"from dotenv import load_dotenv\n",
"from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display, update_display\n",
"from openai import OpenAI\n",
"import gradio as gr"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "7794aa70-5962-4669-b86f-b53639f4f9ea",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"OpenAI API Key exists and begins sk-proj-\n",
"Anthropic API Key exists and begins sk-ant-\n",
"Google API Key exists and begins AIzaSyCf\n"
]
}
],
"source": [
"# Initialize and constants\n",
"\n",
"# Load environment variables in a file called .env\n",
"# Print the key prefixes to help with any debugging\n",
"\n",
"load_dotenv(override=True)\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
"google_api_key = os.getenv('GOOGLE_API_KEY')\n",
"\n",
"if openai_api_key:\n",
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
"else:\n",
" print(\"OpenAI API Key not set\")\n",
" \n",
"if anthropic_api_key:\n",
" print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n",
"else:\n",
" print(\"Anthropic API Key not set\")\n",
"\n",
"if google_api_key:\n",
" print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n",
"else:\n",
" print(\"Google API Key not set\")"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "cfb690e2-4940-4dc8-8f32-5c2dab3c19da",
"metadata": {},
"outputs": [],
"source": [
"# Connect to OpenAI\n",
"\n",
"openai = OpenAI()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "81022472-755e-4a87-bd5d-58babb09e94b",
"metadata": {},
"outputs": [],
"source": [
"gpt_model = \"gpt-4.1-mini\"\n",
"claude_model = \"claude-3-5-haiku-latest\"\n",
"gemini_model = \"gemini-2.5-flash\""
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "63bf8631-2746-4255-bec1-522855d3e812",
"metadata": {},
"outputs": [],
"source": [
"# A class to represent a Webpage\n",
"\n",
"# Some websites need you to use proper headers when fetching them:\n",
"headers = {\n",
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
"}\n",
"\n",
"class Website:\n",
" \"\"\"\n",
" A utility class to represent a Website that we have scraped, now with links\n",
" \"\"\"\n",
"\n",
" def __init__(self, url):\n",
" self.url = url\n",
" response = requests.get(url, headers=headers)\n",
" self.body = response.content\n",
" soup = BeautifulSoup(self.body, 'html.parser')\n",
" self.title = soup.title.string if soup.title else \"No title found\"\n",
" if soup.body:\n",
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
" irrelevant.decompose()\n",
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n",
" else:\n",
" self.text = \"\"\n",
" links = [link.get('href') for link in soup.find_all('a')]\n",
" self.links = [link for link in links if link]\n",
"\n",
" def get_contents(self):\n",
" return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\""
]
},
{
"cell_type": "markdown",
"id": "1e7bb527-e769-4245-bb91-ae65e64593ff",
"metadata": {},
"source": [
"## First step: Have LLM figure out which links are relevant\n",
"\n",
"### Use a call to the LLM to read the links on a webpage, and respond in structured JSON. "
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "1ce303ae-b967-4261-aadc-02dafa54db4a",
"metadata": {},
"outputs": [],
"source": [
"link_system_prompt = \"You are provided with a list of links found on a webpage. \\\n",
"You are able to decide which of the links would be most relevant to include in a brochure about the company, \\\n",
"such as links to an About page, or a Company page, or Careers/Jobs pages.\\n\"\n",
"link_system_prompt += \"You should respond in JSON as in this example:\"\n",
"link_system_prompt += \"\"\"\n",
"{\n",
" \"links\": [\n",
" {\"type\": \"about page\", \"url\": \"https://full.url/goes/here/about\"},\n",
" {\"type\": \"careers page\", \"url\": \"https://another.full.url/careers\"}\n",
" ]\n",
"}\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "d24a4c0c-a1d1-4897-b2a7-4128d25c2e08",
"metadata": {},
"outputs": [],
"source": [
"def get_links_user_prompt(website):\n",
" user_prompt = f\"Here is the list of links on the website of {website.url} - \"\n",
" user_prompt += \"please decide which of these are relevant web links for a brochure about the company, respond with the full https URL in JSON format. \\\n",
"Do not include Terms of Service, Privacy, email links.\\n\"\n",
" user_prompt += \"Links (some might be relative links):\\n\"\n",
" user_prompt += \"\\n\".join(website.links)\n",
" return user_prompt"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "8103fc11-5bc0-41c4-8c97-502c9e96429c",
"metadata": {},
"outputs": [],
"source": [
"def get_links(url, model): # 1st inference\n",
" website = Website(url)\n",
" response = openai.chat.completions.create(\n",
" model=model,\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": link_system_prompt},\n",
" {\"role\": \"user\", \"content\": get_links_user_prompt(website)}\n",
" ],\n",
" response_format={\"type\": \"json_object\"}\n",
" )\n",
" result = response.choices[0].message.content\n",
" return json.loads(result)"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "dc84a695-515d-4292-9a95-818f4fe3d20e",
"metadata": {},
"outputs": [],
"source": [
"huggingface = Website(\"https://huggingface.co\")"
]
},
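{
"cell_type": "code",
"execution_count": null,
"id": "3f0b8c2e-7c41-4a8a-9f5e-6d2a1b9c4e77",
"metadata": {},
"outputs": [],
"source": [
"# Optional sanity check (a sketch, assuming the fetch above succeeded):\n",
"# inspect what was scraped, then uncomment the get_links call to run the first inference\n",
"\n",
"print(huggingface.title)\n",
"print(huggingface.links[:10])\n",
"# get_links(\"https://huggingface.co\", gpt_model)"
]
},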
{
"cell_type": "markdown",
"id": "91896908-1632-41fc-9b8b-39a7638d8dd1",
"metadata": {},
"source": [
"## Second step: make the brochure!\n",
"\n",
"Assemble all the details into another prompt to GPT4-o"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "ab7c54e3-e654-4b1f-8671-09194b628aa0",
"metadata": {},
"outputs": [],
"source": [
"def get_all_details(url, model): # 1st inference wrapper\n",
" result = \"Landing page:\\n\"\n",
" result += Website(url).get_contents()\n",
" links = get_links(url, model) # inference\n",
" # print(\"Found links:\", links)\n",
" for link in links[\"links\"]:\n",
" result += f\"\\n\\n{link['type']}\\n\"\n",
" result += Website(link[\"url\"]).get_contents()\n",
" return result"
]
},
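{
"cell_type": "code",
"execution_count": null,
"id": "5e9d7a21-8c3f-4b6e-a2d4-1f0c9b8e6a53",
"metadata": {},
"outputs": [],
"source": [
"# Optional preview (assumes the site is reachable): this fetches the landing page,\n",
"# runs the link-selection inference, then fetches each selected page, so it can take a while\n",
"\n",
"# print(get_all_details(\"https://huggingface.co\", gpt_model)[:2000])"
]
},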
{
"cell_type": "code",
"execution_count": 11,
"id": "ea9f54d1-a248-4c56-a1de-6633193de5bf",
"metadata": {},
"outputs": [],
"source": [
"system_prompt = \"You are an assistant that analyzes the contents of several relevant pages from a company website \\\n",
"and creates a short humorous, entertaining, jokey brochure about the company for prospective customers, investors and recruits. Respond in markdown.\\\n",
"Include details of company culture, customers and careers/jobs if you have the information.\""
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "13412c85-badd-4d79-a5ac-8283e4bb832f",
"metadata": {},
"outputs": [],
"source": [
"def get_brochure_user_prompt(company_name, url, model):\n",
" user_prompt = f\"You are looking at a company called: {company_name}\\n\"\n",
" user_prompt += f\"Here are the contents of its landing page and other relevant pages; use this information to build a short brochure of the company.\\n\"\n",
" user_prompt += get_all_details(url, model) # inference wrapper\n",
" user_prompt = user_prompt[:5_000] # Truncate if more than 5,000 characters\n",
" return user_prompt"
]
},
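{
"cell_type": "code",
"execution_count": null,
"id": "b7c4e1f9-2a6d-4d8b-9e3a-4c5f8d2a7b10",
"metadata": {},
"outputs": [],
"source": [
"# Optional: preview the (truncated) user prompt that will be sent to the selected model\n",
"\n",
"# print(get_brochure_user_prompt(\"HuggingFace\", \"https://huggingface.co\", gpt_model)[:1_000])"
]
},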
{
"cell_type": "code",
"execution_count": 13,
"id": "107a2100-3f7d-4f16-8ba7-b5da602393c6",
"metadata": {},
"outputs": [],
"source": [
"def stream_gpt(company_name, url):\n",
" stream = openai.chat.completions.create(\n",
" model=gpt_model,\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url, gpt_model)}\n",
" ],\n",
" stream=True\n",
" )\n",
" \n",
" result = \"\"\n",
" for chunk in stream:\n",
" result += chunk.choices[0].delta.content or \"\"\n",
" yield result"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "eaf61e44-537a-41ff-a82c-9525df8abc83",
"metadata": {},
"outputs": [],
"source": [
"claude_via_openai_client = OpenAI(\n",
" api_key=anthropic_api_key,\n",
" base_url=\"https://api.anthropic.com/v1\" \n",
")\n",
"\n",
"def stream_claude(company_name, url):\n",
" result = claude_via_openai_client.chat.completions.create(\n",
" model=claude_model,\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url, claude_model)}\n",
" ],\n",
" stream=True\n",
" )\n",
" \n",
" response = \"\"\n",
" with result as stream:\n",
" for text in stream.text_stream:\n",
" response += text or \"\"\n",
" yield response"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "93e75fca-e54e-4637-86f1-4acc04b04d65",
"metadata": {},
"outputs": [],
"source": [
"gemini_via_openai_client = OpenAI(\n",
" api_key=google_api_key, \n",
" base_url=\"https://generativelanguage.googleapis.com/v1beta/openai/\"\n",
")\n",
"\n",
"def stream_gemini(company_name, url):\n",
" result = gemini_via_openai_client.chat.completions.create(\n",
" model=gemini_model,\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url, gemini_model)}\n",
" ],\n",
" stream=True\n",
" )\n",
" \n",
" response = \"\"\n",
" with result as stream:\n",
" for text in stream.text_stream:\n",
" response += text or \"\"\n",
" yield response"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "26cbe9b5-3603-49a1-a676-75c7ddaacdb8",
"metadata": {},
"outputs": [],
"source": [
"# stream_gpt(\"HuggingFace\", \"https://huggingface.co\")"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "f19be4c0-71a1-427e-b3dc-e1896e2c078b",
"metadata": {},
"outputs": [],
"source": [
"def stream_model(company_name, url, model):\n",
" yield \"\"\n",
" if model==\"GPT\":\n",
" result = stream_gpt(company_name, url)\n",
" elif model==\"Claude\":\n",
" result = stream_claude(company_name, url)\n",
" elif model==\"Gemini\":\n",
" result = stream_gemini(company_name, url)\n",
" else:\n",
" raise ValueError(\"Unknown model\")\n",
" yield from result"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "ab510f66-b25c-4c25-92d0-e3c735b8b5fa",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7871\n",
"* To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7871/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"view = gr.Interface(\n",
" fn=stream_model,\n",
" inputs=[gr.Textbox(label=\"Company\"), gr.Textbox(label=\"URL\"), gr.Dropdown([\"GPT\", \n",
" # \"Claude\", #TODO\n",
" # \"Gemini\"\n",
" ], label=\"Select model\", value=\"GPT\")],\n",
" outputs=[gr.Markdown(label=\"Response:\")],\n",
" flagging_mode=\"never\"\n",
")\n",
"view.launch()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}


@@ -0,0 +1,254 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "10c54e52-3d1c-48cc-a0f6-efda6d90fbbb",
"metadata": {},
"source": [
"# Pitting LLMs Against Each Other\n",
"Three LLMs, namely OpenAIs GPT, Anthropics Claude, and Googles Gemini, go head-to-head in a three-way conversational debate."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "40677b08-18e9-4a88-a103-5b50d2bbecff",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import anthropic\n",
"from IPython.display import Markdown, display, update_display\n",
"import google.generativeai"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "df5a52ba-ea13-4dbf-a695-e1398a484cc8",
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables in a file called .env\n",
"# Print the key prefixes to help with any debugging\n",
"\n",
"load_dotenv(override=True)\n",
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
"google_api_key = os.getenv('GOOGLE_API_KEY')\n",
"\n",
"if openai_api_key:\n",
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
"else:\n",
" print(\"OpenAI API Key not set\")\n",
" \n",
"if anthropic_api_key:\n",
" print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n",
"else:\n",
" print(\"Anthropic API Key not set\")\n",
"\n",
"if google_api_key:\n",
" print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n",
"else:\n",
" print(\"Google API Key not set\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1ededc77-2672-4e27-b1c8-11f6f8ff8970",
"metadata": {},
"outputs": [],
"source": [
"# Connect to OpenAI, Anthropic, Gemini\n",
"\n",
"openai = OpenAI()\n",
"\n",
"# claude = anthropic.Anthropic()\n",
"\n",
"# google.generativeai.configure()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3b311279-5993-4226-ae08-991e974230fb",
"metadata": {},
"outputs": [],
"source": [
"# Let's make a conversation between GPT-4.1-mini and Claude-3.5-haiku and Gemini\n",
"\n",
"gpt_model = \"gpt-4.1-mini\"\n",
"claude_model = \"claude-3-5-haiku-latest\"\n",
"gemini_model = \"gemini-2.5-flash\"\n",
"\n",
"gpt_system = \"You are a chatbot in a conversation with 2 other chatbots; \\\n",
"debate which of you is the best.\"\n",
"\n",
"claude_system = \"You are a chatbot in a conversation with 2 other chatbots; \\\n",
"debate which of you is the best.\"\n",
"\n",
"gemini_system = \"You are a chatbot in a conversation with 2 other chatbots; \\\n",
"debate which of you is the best.\"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "85bdfab1-6602-46b3-a1d2-bdb36880d9d6",
"metadata": {},
"outputs": [],
"source": [
"def alex_prompt():\n",
" user_prompt = f\"\"\"\n",
" You are Alex, in conversation with Blake and Charlie.\n",
" The conversation so far is as follows:\n",
" {format_conversation()}\n",
" Now with this, respond with what you would like to say next, as Alex.\n",
" \"\"\"\n",
" return user_prompt\n",
"\n",
"def blake_prompt():\n",
" user_prompt = f\"\"\"\n",
" You are Blake, in conversation with Alex and Charlie.\n",
" The conversation so far is as follows:\n",
" {format_conversation()}\n",
" Now with this, respond with what you would like to say next, as Blake.\n",
" \"\"\"\n",
" return user_prompt\n",
"\n",
"def charlie_prompt():\n",
" user_prompt = f\"\"\"\n",
" You are Charlie, in conversation with Alex and Blake.\n",
" The conversation so far is as follows:\n",
" {format_conversation()}\n",
" Now with this, respond with what you would like to say next, as Charlie.\n",
" \"\"\"\n",
" return user_prompt\n",
"\n",
"# Shared conversation history\n",
"conversation = []\n",
"\n",
"def format_conversation():\n",
" return \"\\n\".join(conversation)"
]
},
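{
"cell_type": "code",
"execution_count": null,
"id": "9a2b4c6d-8e1f-4a3b-b5c7-d9e0f1a2b3c4",
"metadata": {},
"outputs": [],
"source": [
"# Optional: preview the prompt each speaker receives (the shared history is empty until the loop below runs)\n",
"\n",
"# print(alex_prompt())"
]
},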
{
"cell_type": "code",
"execution_count": null,
"id": "6f7c745d-7d75-468b-93ac-7a1d95f2e047",
"metadata": {},
"outputs": [],
"source": [
"def alex_says():\n",
" response = openai.chat.completions.create(\n",
" model=gpt_model,\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": gpt_system},\n",
" {\"role\": \"user\", \"content\": alex_prompt()}\n",
" ],\n",
" )\n",
" result = response.choices[0].message.content\n",
" return result"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6e28f4c9-0297-4762-a3ea-b961e0d6d980",
"metadata": {},
"outputs": [],
"source": [
"gemini_via_openai_client = OpenAI(\n",
" api_key=google_api_key, \n",
" base_url=\"https://generativelanguage.googleapis.com/v1beta/openai/\"\n",
")\n",
"\n",
"def blake_says():\n",
" response = gemini_via_openai_client.chat.completions.create(\n",
" model=gemini_model,\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": gemini_system},\n",
" {\"role\": \"user\", \"content\": blake_prompt()}\n",
" ],\n",
" )\n",
" result = response.choices[0].message.content\n",
" return result"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "363b70bf-d3e2-4d05-8a3e-ec5d54460e96",
"metadata": {},
"outputs": [],
"source": [
"claude_via_openai_client = OpenAI(\n",
" api_key=anthropic_api_key,\n",
" base_url=\"https://api.anthropic.com/v1\" \n",
")\n",
"\n",
"def charlie_says():\n",
" response = claude_via_openai_client.chat.completions.create(\n",
" model=claude_model, \n",
" messages=[\n",
" {\"role\": \"system\", \"content\": claude_system},\n",
" {\"role\": \"user\", \"content\": charlie_prompt()}\n",
" ],\n",
" )\n",
" result = response.choices[0].message.content\n",
" return result\n"
]
},
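{
"cell_type": "code",
"execution_count": null,
"id": "4d8e2f6a-1b3c-4e5d-a7f9-0c1d2e3f4a5b",
"metadata": {},
"outputs": [],
"source": [
"# Re-running the debate? Clear the shared history first so the speakers start fresh\n",
"\n",
"conversation.clear()"
]
},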
{
"cell_type": "code",
"execution_count": null,
"id": "c017eb8c-1709-4ac1-8f17-92c3a6cdbfc0",
"metadata": {},
"outputs": [],
"source": [
"# The three models engage in a longer interaction with history.\n",
"\n",
"for i in range(5):\n",
" alex_next = alex_says()\n",
" print(f\"Alex (GPT):\\n{alex_next}\\n\")\n",
" conversation.append(f\"Alex: {alex_next}\")\n",
" \n",
" blake_next = blake_says()\n",
" print(f\"Blake (Gemini):\\n{blake_next}\\n\")\n",
" conversation.append(f\"Blake: {blake_next}\")\n",
"\n",
" charlie_next = charlie_says()\n",
" print(f\"Charlie (Claude):\\n{charlie_next}\\n\")\n",
" conversation.append(f\"Charlie: {charlie_next}\") \n",
"\n",
" # break"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}