Add week1 contributions

This commit is contained in:
lisekarimi
2025-06-05 16:20:51 +02:00
parent 5782ca2b43
commit ba1b3b702f
3 changed files with 869 additions and 0 deletions

View File

@@ -0,0 +1,357 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"id": "53211323-6a09-452a-b471-98e22d92bfc2",
"metadata": {},
"source": [
"# 🌐 WebPage Summarizer\n",
"---\n",
"- 🌍 **Task:** Summarizing webpage content using AI. \n",
"- 🧠 **Model:** OpenAI's ``gpt-4o-mini`` and ``llama3.2:3b`` for text summarization. \n",
"- 🕵️‍♂️ **Data Extraction:** Selenium for handling both static and JavaScript-rendered websites. \n",
"- 📌 **Output Format:** Markdown-formatted summaries. \n",
"- 🔗 **Scope:** Processes only the given webpage URL (not the entire site). \n",
"- 🚀 **Tools:** Python, Requests, Selenium, BeautifulSoup, OpenAI API, Ollama. \n",
"- 🧑‍💻 **Skill Level:** Beginner.\n",
"\n",
"🛠️ Requirements\n",
"- ⚙️ Hardware: ✅ CPU is sufficient — no GPU required\n",
"- 🔑 OpenAI API Key (for GPT model)\n",
"- Install Ollama and pull llama3.2:3b or another lightweight model\n",
"- Google Chrome browser installed\n",
"\n",
"**✨ This script handles both JavaScript and non-JavaScript websites using Selenium with Chrome WebDriver for reliable content extraction from modern web applications.**\n",
"\n",
"Let's get started and automate website summarization! 🚀\n",
"\n",
"![](https://github.com/lisekarimi/lexo/blob/main/assets/01_basic_llm_project.jpg?raw=true)\n",
"\n",
"---\n",
"📢 Find more LLM notebooks on my [GitHub repository](https://github.com/lisekarimi/lexo)"
]
},
{
"cell_type": "markdown",
"id": "d70aa4b0",
"metadata": {},
"source": [
"## 🛠️ Environment Setup & Dependencies"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ebf2fa36",
"metadata": {},
"outputs": [],
"source": [
"%pip install selenium webdriver-manager"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1dcf1d9d-c540-4900-b14e-ad36a28fc822",
"metadata": {},
"outputs": [],
"source": [
"# ===========================\n",
"# System & Environment\n",
"# ===========================\n",
"import os\n",
"from dotenv import load_dotenv\n",
"\n",
"# ===========================\n",
"# Web Scraping\n",
"# ===========================\n",
"import time\n",
"from bs4 import BeautifulSoup\n",
"from selenium import webdriver\n",
"from selenium.webdriver.chrome.options import Options\n",
"from selenium.webdriver.common.by import By\n",
"from selenium.webdriver.support.ui import WebDriverWait\n",
"from selenium.webdriver.support import expected_conditions as EC\n",
"\n",
"# ===========================\n",
"# AI-related\n",
"# ===========================\n",
"from IPython.display import Markdown, display\n",
"from openai import OpenAI\n",
"import ollama"
]
},
{
"cell_type": "markdown",
"id": "cc20642b",
"metadata": {},
"source": [
"## 🔐 Model Configuration & Authentication"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8598c299-05ca-492e-b085-6bcc2f7dda0d",
"metadata": {},
"outputs": [],
"source": [
"load_dotenv(override=True)\n",
"api_key = os.getenv('OPENAI_API_KEY')\n",
"\n",
"if not api_key:\n",
" raise ValueError(\"OPENAI_API_KEY not found in environment variables\")\n",
"\n",
"print(\"✅ API key loaded successfully!\")\n",
"openai = OpenAI()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8098defb",
"metadata": {},
"outputs": [],
"source": [
"MODEL_OPENAI = \"gpt-4o-mini\"\n",
"MODEL_OLLAMA = \"llama3.2:3b\""
]
},
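{
"cell_type": "markdown",
"id": "7f3a9c21",
"metadata": {},
"source": [
"Optional: a quick check that the Ollama daemon is running and the model is pulled (`ollama.list()` enumerates local models). If `llama3.2:3b` is missing, pull it from a terminal first."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8e4b0d32",
"metadata": {},
"outputs": [],
"source": [
"# Optional check: list locally available Ollama models (assumes the Ollama daemon is running).\n",
"# If llama3.2:3b is not listed, run `ollama pull llama3.2:3b` in a terminal.\n",
"print(ollama.list())"
]
},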
{
"cell_type": "markdown",
"id": "2bd1d83f",
"metadata": {},
"source": [
"## 🌐 Web Scraping Infrastructure"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c6fe5114",
"metadata": {},
"outputs": [],
"source": [
"class WebsiteCrawler:\n",
" def __init__(self, url):\n",
" self.url = url\n",
" self.title = \"\"\n",
" self.text = \"\"\n",
" self.scrape()\n",
"\n",
" def scrape(self):\n",
" try:\n",
" # Chrome options\n",
" chrome_options = Options()\n",
" chrome_options.add_argument(\"--headless\")\n",
" chrome_options.add_argument(\"--no-sandbox\")\n",
" chrome_options.add_argument(\"--disable-dev-shm-usage\")\n",
" chrome_options.add_argument(\"--disable-gpu\")\n",
" chrome_options.add_argument(\"--window-size=1920,1080\")\n",
" chrome_options.add_argument(\"--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36\")\n",
"\n",
" # Try to find Chrome\n",
" chrome_paths = [\n",
" r\"C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe\",\n",
" r\"C:\\Program Files (x86)\\Google\\Chrome\\Application\\chrome.exe\",\n",
" r\"C:\\Users\\{}\\AppData\\Local\\Google\\Chrome\\Application\\chrome.exe\".format(os.getenv('USERNAME')),\n",
" ]\n",
"\n",
" chrome_binary = None\n",
" for path in chrome_paths:\n",
" if os.path.exists(path):\n",
" chrome_binary = path\n",
" break\n",
"\n",
" if chrome_binary:\n",
" chrome_options.binary_location = chrome_binary\n",
"\n",
" # Create driver\n",
" driver = webdriver.Chrome(options=chrome_options)\n",
" driver.set_page_load_timeout(30)\n",
"\n",
" print(f\"🔍 Loading: {self.url}\")\n",
" driver.get(self.url)\n",
"\n",
" # Wait for page to load\n",
" time.sleep(5)\n",
"\n",
" # Try to wait for main content\n",
" try:\n",
" WebDriverWait(driver, 10).until(\n",
" EC.presence_of_element_located((By.TAG_NAME, \"main\"))\n",
" )\n",
" except Exception:\n",
" try:\n",
" WebDriverWait(driver, 10).until(\n",
" EC.presence_of_element_located((By.TAG_NAME, \"body\"))\n",
" )\n",
" except Exception:\n",
" pass # Continue anyway\n",
"\n",
" # Get title and page source\n",
" self.title = driver.title\n",
" page_source = driver.page_source\n",
" driver.quit()\n",
"\n",
" print(f\"✅ Page loaded: {self.title}\")\n",
"\n",
" # Parse with BeautifulSoup\n",
" soup = BeautifulSoup(page_source, 'html.parser')\n",
"\n",
" # Remove unwanted elements\n",
" for element in soup([\"script\", \"style\", \"img\", \"input\", \"button\", \"nav\", \"footer\", \"header\"]):\n",
" element.decompose()\n",
"\n",
" # Get main content\n",
" main = soup.find('main') or soup.find('article') or soup.find('.content') or soup.find('body')\n",
" if main:\n",
" self.text = main.get_text(separator=\"\\n\", strip=True)\n",
" else:\n",
" self.text = soup.get_text(separator=\"\\n\", strip=True)\n",
"\n",
" # Clean up text\n",
" lines = [line.strip() for line in self.text.split('\\n') if line.strip() and len(line.strip()) > 2]\n",
" self.text = '\\n'.join(lines[:200]) # Limit to first 200 lines\n",
"\n",
" print(f\"📄 Extracted {len(self.text)} characters\")\n",
"\n",
" except Exception as e:\n",
" print(f\"❌ Error occurred: {e}\")\n",
" self.title = \"Error occurred\"\n",
" self.text = \"Could not scrape website content\""
]
},
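{
"cell_type": "markdown",
"id": "9a5c1e43",
"metadata": {},
"source": [
"Optional smoke test: a minimal sketch of `WebsiteCrawler` (the URL below is just a placeholder) to confirm Selenium and Chrome are wired up before running the summarizers."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0b6d2f54",
"metadata": {},
"outputs": [],
"source": [
"# Smoke test: scrape a simple static page and show what was extracted.\n",
"# 'https://example.com' is a placeholder; swap in any URL.\n",
"test_crawler = WebsiteCrawler('https://example.com')\n",
"print(test_crawler.title)\n",
"print(test_crawler.text[:300])"
]
},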
{
"cell_type": "markdown",
"id": "d727feff",
"metadata": {},
"source": [
"## 🧠 Prompt Engineering & Templates"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "02e3a673-a8a1-4101-a441-3816f7ab9e4d",
"metadata": {},
"outputs": [],
"source": [
"system_prompt = \"You are an assistant that analyzes the contents of a website \\\n",
"and provides a short summary, ignoring text that might be navigation related. \\\n",
"Respond in markdown.\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "86bb80f9-9e7c-4825-985f-9b83fe50839f",
"metadata": {},
"outputs": [],
"source": [
"def user_prompt_for(website):\n",
" user_prompt = f\"You are looking at a website titled {website.title}\"\n",
" user_prompt += \"\\nThe contents of this website is as follows; please provide a short summary of this website in markdown. If it includes news or announcements, then summarize these too.\\n\\n\"\n",
" user_prompt += website.text\n",
" return user_prompt"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "89998b18-77aa-4aaf-a137-f0d078d61f75",
"metadata": {},
"outputs": [],
"source": [
"def messages_for(website):\n",
" return [\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": user_prompt_for(website)}\n",
" ]"
]
},
{
"cell_type": "markdown",
"id": "cde36d4f",
"metadata": {},
"source": [
"## 📝 Summarization "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5636affe",
"metadata": {},
"outputs": [],
"source": [
"def summarize_gpt(url):\n",
" \"\"\"Scrape website and summarize with GPT\"\"\"\n",
" site = WebsiteCrawler(url)\n",
"\n",
" if \"Error occurred\" in site.title or len(site.text) < 50:\n",
" print(f\"❌ Failed to scrape meaningful content from {url}\")\n",
" return\n",
"\n",
" print(\"🤖 Creating summary...\")\n",
"\n",
" # Create summary\n",
" response = openai.chat.completions.create(\n",
" model=MODEL_OPENAI,\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": user_prompt_for(site)}\n",
" ]\n",
" )\n",
"\n",
" web_summary = response.choices[0].message.content\n",
" display(Markdown(web_summary))\n",
"\n",
"summarize_gpt('https://openai.com')\n",
"# summarize_gpt('https://stripe.com')\n",
"# summarize_gpt('https://vercel.com')\n",
"# summarize_gpt('https://react.dev')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "90b9a8f8-0c1c-40c8-a4b3-e8e1fcd29df5",
"metadata": {},
"outputs": [],
"source": [
"def summarize_ollama(url):\n",
" website = WebsiteCrawler(url)\n",
" response = ollama.chat(\n",
" model=MODEL_OLLAMA,\n",
" messages=messages_for(website))\n",
" display(Markdown(response['message']['content'])) # Generate and display output\n",
"\n",
"summarize_ollama('https://github.com')\n",
"# summarize_ollama('https://nextjs.org')"
]
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,370 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"id": "dc8af57c-23a9-452e-9fc3-0e5027edda14",
"metadata": {},
"source": [
"# AI-powered Brochure Generator\n",
"---\n",
"- 🌍 Task: Generate a company brochure using its name and website for clients, investors, and recruits.\n",
"- 🧠 Model: Toggle `USE_OPENAI` to switch between OpenAI and Ollama models\n",
"- 🕵️‍♂️ Data Extraction: Scraping website content and filtering key links (About, Products, Careers, Contact).\n",
"- 📌 Output Format: a Markdown-formatted brochure streamed in real-time.\n",
"- 🚀 Tools: BeautifulSoup, OpenAI API, and IPython display, ollama.\n",
"- 🧑‍💻 Skill Level: Intermediate.\n",
"\n",
"🛠️ Requirements\n",
"- ⚙️ Hardware: ✅ CPU is sufficient — no GPU required\n",
"- 🔑 OpenAI API Key \n",
"- Install Ollama and pull llama3.2:3b or another lightweight model\n",
"---\n",
"📢 Find more LLM notebooks on my [GitHub repository](https://github.com/lisekarimi/lexo)"
]
},
{
"cell_type": "markdown",
"id": "ec869f2c",
"metadata": {},
"source": [
"## 🧩 System Design Overview\n",
"\n",
"### Class Structure\n",
"\n",
"![](https://github.com/lisekarimi/lexo/blob/main/assets/02_brochure_class_diagram.png?raw=true)\n",
"\n",
"This code consists of three main classes:\n",
"\n",
"1. **`Website`**: \n",
" - Scrapes and processes webpage content. \n",
" - Extracts **text** and **links** from a given URL. \n",
"\n",
"2. **`LLMClient`**: \n",
" - Handles interactions with **OpenAI or Ollama (`llama3`, `deepseek`, `qwen`)**. \n",
" - Uses `get_relevant_links()` to filter webpage links. \n",
" - Uses `generate_brochure()` to create and stream a Markdown-formatted brochure. \n",
"\n",
"3. **`BrochureGenerator`**: \n",
" - Uses `Website` to scrape the main webpage and relevant links. \n",
" - Uses `LLMClient` to filter relevant links and generate a brochure. \n",
" - Calls `generate()` to run the entire process.\n",
"\n",
"### Workflow\n",
"\n",
"1. **`main()`** initializes `BrochureGenerator` and calls `generate()`. \n",
"2. **`generate()`** calls **`LLMClient.get_relevant_links()`** to extract relevant links using **LLM (OpenAI/Ollama)**. \n",
"3. **`Website` scrapes the webpage**, extracting **text and links** from the given URL. \n",
"4. **Relevant links are re-scraped** using `Website` to collect additional content. \n",
"5. **All collected content is passed to `LLMClient.generate_brochure()`**. \n",
"6. **`LLMClient` streams the generated brochure** using **OpenAI or Ollama**. \n",
"7. **The final brochure is displayed in Markdown format.**\n",
"\n",
"![](https://github.com/lisekarimi/lexo/blob/main/assets/02_brochure_process.png?raw=true)\n",
"\n",
"\n",
"### Intermediate reasoning\n",
"\n",
"In this workflow, we have intermediate reasoning because the LLM is called twice:\n",
"\n",
"1. **First LLM call**: Takes raw links → filters/selects relevant ones (reasoning step).\n",
"2. **Second LLM call**: Takes selected content → generates final brochure.\n",
"\n",
"🧠 **LLM output becomes LLM input** — thats intermediate reasoning.\n",
"\n",
"![](https://github.com/lisekarimi/lexo/blob/main/assets/02_llm_intermd_reasoning.png?raw=true)"
]
},
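{
"cell_type": "markdown",
"id": "1c7e3a65",
"metadata": {},
"source": [
"Before the full implementation, here is a minimal, self-contained sketch of that chained pattern (it assumes `OPENAI_API_KEY` is set in your `.env`; the toy link list is made up): the first call's output is fed straight into the second call's prompt."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2d8f4b76",
"metadata": {},
"outputs": [],
"source": [
"# Minimal sketch of intermediate reasoning: two chained LLM calls.\n",
"# Assumes OPENAI_API_KEY is set in .env; the link list below is a made-up example.\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"\n",
"load_dotenv()\n",
"client = OpenAI()\n",
"\n",
"# Call 1 (reasoning step): filter raw links down to brochure-worthy ones.\n",
"step1 = client.chat.completions.create(\n",
"    model='gpt-4o-mini',\n",
"    messages=[{'role': 'user', 'content': 'From these links, list only the ones useful for a company brochure: /about, /login, /careers, /privacy'}],\n",
")\n",
"selected = step1.choices[0].message.content  # LLM output...\n",
"\n",
"# Call 2: the first call's output becomes part of the next prompt.\n",
"step2 = client.chat.completions.create(\n",
"    model='gpt-4o-mini',\n",
"    messages=[{'role': 'user', 'content': f'In one sentence, describe what a brochure built from these pages would cover: {selected}'}],\n",
")\n",
"print(step2.choices[0].message.content)  # ...becomes LLM input"
]
},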
{
"cell_type": "markdown",
"id": "4b286461-35ee-4bc5-b07d-af554923e36d",
"metadata": {},
"source": [
"## 📦 Import Libraries"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3fe5670c-5146-474b-9e75-484210533f55",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import requests\n",
"import json\n",
"import ollama\n",
"from dotenv import load_dotenv\n",
"from bs4 import BeautifulSoup\n",
"from IPython.display import display, Markdown, update_display\n",
"from openai import OpenAI"
]
},
{
"cell_type": "markdown",
"id": "f3e23181-1e66-410d-a910-1fb4230f8088",
"metadata": {},
"source": [
"## 🧠 Define the Model\n",
"\n",
"The user can switch between OpenAI and Ollama by changing a single variable (`USE_OPENAI`). The model selection is dynamic."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "fa2bd452-0cf4-4fec-9542-e1c86584c23f",
"metadata": {},
"outputs": [],
"source": [
"# Load API key\n",
"load_dotenv()\n",
"api_key = os.getenv('OPENAI_API_KEY')\n",
"if not api_key or not api_key.startswith('sk-'):\n",
" raise ValueError(\"Invalid OpenAI API key. Check your .env file.\")\n",
"\n",
"# Define the model dynamically\n",
"USE_OPENAI = True # True to use openai and False to use Ollama\n",
"MODEL = 'gpt-4o-mini' if USE_OPENAI else 'llama3.2:3b'\n",
"\n",
"openai_client = OpenAI() if USE_OPENAI else None"
]
},
{
"cell_type": "markdown",
"id": "4fd997b7-1b89-4817-b53a-078164f5f71f",
"metadata": {},
"source": [
"## 🏗️ Define Classes"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "aed1af59-8b8f-4add-98dc-a9f1b5b511a5",
"metadata": {},
"outputs": [],
"source": [
"headers = {\n",
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
"}\n",
"\n",
"class Website:\n",
" \"\"\"\n",
" A utility class to scrape and process website content.\n",
" \"\"\"\n",
" def __init__(self, url):\n",
" self.url = url\n",
" response = requests.get(url, headers=headers)\n",
" soup = BeautifulSoup(response.content, 'html.parser')\n",
" self.title = soup.title.string if soup.title else \"No title found\"\n",
" self.text = self.extract_text(soup)\n",
" self.links = self.extract_links(soup)\n",
"\n",
" def extract_text(self, soup):\n",
" if soup.body:\n",
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
" irrelevant.decompose()\n",
" return soup.body.get_text(separator=\"\\n\", strip=True)\n",
" return \"\"\n",
"\n",
" def extract_links(self, soup):\n",
" links = [link.get('href') for link in soup.find_all('a')]\n",
" return [link for link in links if link and 'http' in link]\n",
"\n",
" def get_contents(self):\n",
" return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\""
]
},
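{
"cell_type": "markdown",
"id": "3e9a5c87",
"metadata": {},
"source": [
"Optional sanity check: a tiny sketch with a placeholder URL that prints the scraped title and the first few extracted links."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4f0b6d98",
"metadata": {},
"outputs": [],
"source": [
"# Sanity check: scrape one page and peek at what Website extracted.\n",
"# The URL is a placeholder; any publicly reachable site works.\n",
"site = Website('https://example.com')\n",
"print(site.title)\n",
"print(site.links[:5])"
]
},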
{
"cell_type": "code",
"execution_count": null,
"id": "ea04dc7e-ff4c-4113-83b7-0bddcf5072b9",
"metadata": {},
"outputs": [],
"source": [
"class LLMClient:\n",
" def __init__(self, model=MODEL):\n",
" self.model = model\n",
"\n",
" def get_relevant_links(self, website):\n",
" link_system_prompt = \"\"\"\n",
" You are given a list of links from a company website.\n",
" Select only relevant links for a brochure (About, Company, Careers, Products, Contact).\n",
" Exclude login, terms, privacy, and emails.\n",
"\n",
" ### **Instructions**\n",
" - Return **only valid JSON**.\n",
" - **Do not** include explanations, comments, or Markdown.\n",
" - Example output:\n",
" {\n",
" \"links\": [\n",
" {\"type\": \"about\", \"url\": \"https://company.com/about\"},\n",
" {\"type\": \"contact\", \"url\": \"https://company.com/contact\"},\n",
" {\"type\": \"product\", \"url\": \"https://company.com/products\"}\n",
" ]\n",
" }\n",
" \"\"\"\n",
"\n",
" user_prompt = f\"\"\"\n",
" Here is the list of links on the website of {website.url}:\n",
" Please identify the relevant web links for a company brochure. Respond in JSON format.\n",
" Do not include login, terms of service, privacy, or email links.\n",
" Links (some might be relative links):\n",
" {', '.join(website.links)}\n",
" \"\"\"\n",
"\n",
" if USE_OPENAI:\n",
" response = openai_client.chat.completions.create(\n",
" model=self.model,\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": link_system_prompt},\n",
" {\"role\": \"user\", \"content\": user_prompt}\n",
" ]\n",
" )\n",
" return json.loads(response.choices[0].message.content.strip())\n",
" else:\n",
" response = ollama.chat(\n",
" model=self.model,\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": link_system_prompt},\n",
" {\"role\": \"user\", \"content\": user_prompt}\n",
" ]\n",
" )\n",
" result = response.get(\"message\", {}).get(\"content\", \"\").strip()\n",
" try:\n",
" return json.loads(result) # Attempt to parse JSON\n",
" except json.JSONDecodeError:\n",
" print(\"Error: Response is not valid JSON\")\n",
" return {\"links\": []} # Return empty list if parsing fails\n",
"\n",
"\n",
" def generate_brochure(self, company_name, content, language):\n",
" system_prompt = \"\"\"\n",
" You are a professional translator and writer who creates fun and engaging brochures.\n",
" Your task is to read content from a companys website and write a short, humorous, joky,\n",
" and entertaining brochure for potential customers, investors, and job seekers.\n",
" Include details about the companys culture, customers, and career opportunities if available.\n",
" Respond in Markdown format.\n",
" \"\"\"\n",
"\n",
" user_prompt = f\"\"\"\n",
" Create a fun brochure for '{company_name}' using the following content:\n",
" {content[:5000]}\n",
" Respond in {language} only, and format your response correctly in Markdown.\n",
" Do NOT escape characters or return extra backslashes.\n",
" \"\"\"\n",
"\n",
" if USE_OPENAI:\n",
" response_stream = openai_client.chat.completions.create(\n",
" model=self.model,\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": user_prompt}\n",
" ],\n",
" stream=True\n",
" )\n",
" response = \"\"\n",
" display_handle = display(Markdown(\"\"), display_id=True)\n",
" for chunk in response_stream:\n",
" response += chunk.choices[0].delta.content or ''\n",
" response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n",
" update_display(Markdown(response), display_id=display_handle.display_id)\n",
" else:\n",
" response_stream = ollama.chat(\n",
" model=self.model,\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": user_prompt}\n",
" ],\n",
" stream=True\n",
" )\n",
" display_handle = display(Markdown(\"\"), display_id=True)\n",
" full_text = \"\"\n",
" for chunk in response_stream:\n",
" if \"message\" in chunk:\n",
" content = chunk[\"message\"][\"content\"] or \"\"\n",
" full_text += content\n",
" update_display(Markdown(full_text), display_id=display_handle.display_id)\n"
]
},
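{
"cell_type": "markdown",
"id": "5a1c7e09",
"metadata": {},
"source": [
"Optional: inspect the intermediate reasoning step on its own before running the full pipeline (the URL mirrors the demo in `main()` below; `demo_site` and `demo_links` are just illustrative names)."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6b2d8f10",
"metadata": {},
"outputs": [],
"source": [
"# Inspect which links the LLM deems brochure-worthy before generating anything.\n",
"# The URL matches the demo target used in main() further down.\n",
"demo_site = Website('https://www.toureiffel.paris/fr')\n",
"demo_links = LLMClient().get_relevant_links(demo_site)\n",
"print(json.dumps(demo_links, indent=2))"
]
},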
{
"cell_type": "code",
"execution_count": null,
"id": "1c69651f-e004-421e-acc5-c439e57a8762",
"metadata": {},
"outputs": [],
"source": [
"class BrochureGenerator:\n",
" \"\"\"\n",
" Main class to generate a company brochure.\n",
" \"\"\"\n",
" def __init__(self, company_name, url, language='English'):\n",
" self.company_name = company_name\n",
" self.url = url\n",
" self.language = language\n",
" self.website = Website(url)\n",
" self.llm_client = LLMClient()\n",
"\n",
" def generate(self):\n",
" links = self.llm_client.get_relevant_links(self.website)\n",
" content = self.website.get_contents()\n",
"\n",
" for link in links['links']:\n",
" linked_website = Website(link['url'])\n",
" content += f\"\\n\\n{link['type']}:\\n\"\n",
" content += linked_website.get_contents()\n",
"\n",
" self.llm_client.generate_brochure(self.company_name, content, self.language)\n"
]
},
{
"cell_type": "markdown",
"id": "1379d39d",
"metadata": {},
"source": [
"## 📝 Generate Brochure"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1a63519a-1981-477b-9de1-f1ff9be94201",
"metadata": {},
"outputs": [],
"source": [
"def main():\n",
" company_name = \"Tour Eiffel\"\n",
" url = \"https://www.toureiffel.paris/fr\"\n",
" language = \"French\"\n",
"\n",
" generator = BrochureGenerator(company_name, url, language)\n",
" generator.generate()\n",
"\n",
"if __name__ == \"__main__\":\n",
" main()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,142 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"id": "6e907206-4c13-4698-91c6-9ca1c32be8e7",
"metadata": {},
"source": [
"# TechExplainAI\n",
"---\n",
"\n",
"AI-driven tool that provides concise, structured explanations for technical questions and code snippets.\n",
"\n",
"- 🌍 Task: AI-powered technical explanation generator\n",
"- 🧠 Model: OpenAI's `GPT-4o-mini`, Ollama's `llama3.2:3b`\n",
"- 📌 Output Format: Markdown with real-time streaming\n",
"- 🧑‍💻 Skill Level: Beginner\n",
"- 🔄 Interaction Mode: User enters a technical question → AI generates a structured, concise explanation\n",
"- 🎯 Purpose: Quickly explain technical concepts and Python code snippets\n",
"- 🔧 Customization: Users can modify the models, prompts, and formatting as needed\n",
"\n",
"🛠️ Requirements\n",
"- ⚙️ Hardware: ✅ CPU is sufficient — no GPU required\n",
"- 🔑 OpenAI API Key\n",
"- Install Ollama and pull llama3.2:3b or another lightweight model\n",
"\n",
"---\n",
"📢 Find more LLM notebooks on my [GitHub repository](https://github.com/lisekarimi/lexo)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f743c87a-ed80-43d5-84ad-c78c8bdacb09",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import openai\n",
"import ollama\n",
"from dotenv import load_dotenv\n",
"from IPython.display import display, Markdown, update_display\n",
"\n",
"# Load environment variables\n",
"load_dotenv(override=True)\n",
"\n",
"# Set up OpenAI API key\n",
"OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')\n",
"if not OPENAI_API_KEY:\n",
" raise ValueError(\"Please set your OpenAI API key in environment variables.\")\n",
"\n",
"# Constants\n",
"MODEL_GPT = \"gpt-4o-mini\"\n",
"MODEL_LLAMA = \"llama3.2:3b\"\n",
"\n",
"# Prompt user for question (until input is provided)\n",
"while True:\n",
" question = input(\"Hello, I am your personal technical tutor. Enter your question: \").strip()\n",
" if question:\n",
" break # Proceed only if a valid question is entered\n",
" print(\"Question cannot be empty. Please enter a question.\")\n",
"\n",
"# Common user prompt\n",
"user_prompt = f\"\"\"\n",
"Please give a detailed explanation to the following question: {question}.\n",
"Be less verbose.\n",
"Provide a clear and concise explanation without unnecessary elaboration.\n",
"\"\"\"\n",
"\n",
"# Common system prompt\n",
"system_prompt = \"\"\"\n",
"You are a helpful AI assistant that explains Python code in a clear and concise manner. Provide structured explanations and examples when necessary.\n",
"Be less verbose.\n",
"\"\"\"\n",
"\n",
"def ask_openai():\n",
" \"\"\"Gets response from OpenAI's GPT model with streaming.\"\"\"\n",
" print(\"\\n\\n\\n🚀🤖🚀 Response from OpenAI GPT-4o-mini 🚀🤖🚀\")\n",
" client = openai.OpenAI(api_key=OPENAI_API_KEY)\n",
" response_stream = client.chat.completions.create(\n",
" model=MODEL_GPT,\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": user_prompt}\n",
" ],\n",
" stream=True\n",
" )\n",
" response = \"\"\n",
" display_handle = display(Markdown(\"\"), display_id=True)\n",
" for chunk in response_stream:\n",
" response += chunk.choices[0].delta.content or ''\n",
" response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n",
" update_display(Markdown(response), display_id=display_handle.display_id)\n",
"\n",
"def ask_ollama():\n",
" \"\"\"Gets response from Ollama's Llama 3.2 model with streaming.\"\"\"\n",
" print(\"\\n\\n\\n🔥✨🔥 Response from Llama 3.2 🔥✨🔥\\n\")\n",
" response = ollama.chat(\n",
" model=MODEL_LLAMA,\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": user_prompt}\n",
" ],\n",
" stream=True\n",
" )\n",
"\n",
" display_handle = display(Markdown(\"\"), display_id=True)\n",
" full_text = \"\"\n",
" for chunk in response:\n",
" if \"message\" in chunk:\n",
" content = chunk[\"message\"][\"content\"] or \"\"\n",
" full_text += content\n",
" update_display(Markdown(full_text), display_id=display_handle.display_id)\n",
"\n",
"# Call the functions\n",
"ask_openai()\n",
"ask_ollama()\n"
]
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.7"
}
},
"nbformat": 4,
"nbformat_minor": 5
}