Merge branch 'ed-donner:main' into main
This commit is contained in:
@@ -0,0 +1,240 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9964872b-225d-4ced-93e4-fc5b279ec2ed",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Webpage English summarizer with user inputs (url, ollama-based LLM) "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4e49d399-d18c-4c91-8abc-cf3289e11e2f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# imports\n",
|
||||
"\n",
|
||||
"import os\n",
|
||||
"import requests\n",
|
||||
"# from dotenv import load_dotenv\n",
|
||||
"from bs4 import BeautifulSoup\n",
|
||||
"from IPython.display import Markdown, display\n",
|
||||
"from openai import OpenAI\n",
|
||||
"import ollama, time\n",
|
||||
"from tqdm import tqdm"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "46e7d809-248d-41b8-80e1-36b210041581",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Define system prompt.\n",
|
||||
"\n",
|
||||
"system_prompt = \"You are an assistant that analyzes the contents of a website \\\n",
|
||||
"and provides a detailed summary, ignoring text that might be navigation related. \\\n",
|
||||
"Respond in markdown, in English.\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e8bf237f-591f-4c32-9415-5d5d4e2522b8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# A function that writes a User Prompt that asks for summaries of websites:\n",
|
||||
"\n",
|
||||
"def user_prompt_for(website):\n",
|
||||
" user_prompt = f\"You are looking at a website titled {website.title}\"\n",
|
||||
" user_prompt += \"\\nThe contents of this website is as follows; \\\n",
|
||||
"please provide a detailed summary of this website in markdown. \\\n",
|
||||
"If it includes news or announcements, then summarize these too.\\n\\n\"\n",
|
||||
" user_prompt += website.text\n",
|
||||
" return user_prompt"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7d39ee6d-c670-41ba-a0b8-debd55bda8e3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# See how this function creates exactly the format above\n",
|
||||
"\n",
|
||||
"def messages_for(website):\n",
|
||||
" return [\n",
|
||||
" {\"role\": \"system\", \"content\": system_prompt},\n",
|
||||
" {\"role\": \"user\", \"content\": user_prompt_for(website)}\n",
|
||||
" ]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "43e28ff5-2def-4a47-acdd-2e06c0666956",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Constants\n",
|
||||
"\n",
|
||||
"OLLAMA_API = \"http://localhost:11434/api/chat\"\n",
|
||||
"HEADERS = {\"Content-Type\": \"application/json\"}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "32f4f481-81a3-479d-817b-4e754d9af46d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# A class to represent a Webpage\n",
|
||||
"# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n",
|
||||
"\n",
|
||||
"# Some websites need you to use proper headers when fetching them:\n",
|
||||
"headers = HEADERS\n",
|
||||
"\n",
|
||||
"class Website:\n",
|
||||
"\n",
|
||||
" def __init__(self, url):\n",
|
||||
" \"\"\"\n",
|
||||
" Create this Website object from the given url using the BeautifulSoup library\n",
|
||||
" \"\"\"\n",
|
||||
" self.url = url\n",
|
||||
" response = requests.get(url, headers=headers)\n",
|
||||
" soup = BeautifulSoup(response.content, 'html.parser')\n",
|
||||
" self.title = soup.title.string if soup.title else \"No title found\"\n",
|
||||
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
|
||||
" irrelevant.decompose()\n",
|
||||
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f81cfd17-8208-4192-a59f-485ff3ea74e4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# And now: call the ollama API wrapper and return the relevant component of the response\n",
|
||||
"\n",
|
||||
"def summarize(url):\n",
|
||||
" website = Website(url)\n",
|
||||
" response = ollama.chat(\n",
|
||||
" model=MODEL,\n",
|
||||
" messages = messages_for(website)\n",
|
||||
" )\n",
|
||||
" return response['message']['content']"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7a9eedc6-2183-473d-84ca-b10d40e2a1e6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Ask the user the name of the url address\n",
|
||||
"\n",
|
||||
"url= str(input(\"\"\"\n",
|
||||
"Please provide a valid url address:\n",
|
||||
"https://\"\"\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "5d012de2-0ef2-43db-9f51-fc7f989c3642",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Ask the user to select a valid model\n",
|
||||
"\n",
|
||||
"MODEL= str(input(\"\"\"\n",
|
||||
"Please select a LLM:\n",
|
||||
"(examples: llama3.2, deepseek-r1:1.5b)\n",
|
||||
"\"\"\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "1ac8c02e-4a62-448b-a231-8c6f65891811",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Let's just make sure the model is loaded\n",
|
||||
"\n",
|
||||
"!ollama pull {MODEL}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "0544541f-11a8-4eb7-8eb6-bc032ed6d0d1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print('url: https://{0}\\nModel= {1}'.format(url, MODEL))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "45518950-f2c9-43af-b897-4fe8fe48dfd8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"summary = summarize('https://'+ url)\n",
|
||||
"for summ in tqdm(summary):\n",
|
||||
" time.sleep(0.01)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "02c0c15e-216d-47c7-843d-ac27af02820b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"display(Markdown(summary))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "985a3689-5827-4b15-b8d5-276f9b292afd",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.11"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
269
week1/community-contributions/W1D5_Code_instructor.ipynb
Normal file
269
week1/community-contributions/W1D5_Code_instructor.ipynb
Normal file
@@ -0,0 +1,269 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0e5dc476-e3c9-49bd-934a-35dbe0d55b13",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# End of week 1 exercise (with user input(question, model)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "353fba18-a9b4-4ba8-be7e-f3e3c37521ff",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# imports\n",
|
||||
"\n",
|
||||
"import os\n",
|
||||
"import requests\n",
|
||||
"from dotenv import load_dotenv\n",
|
||||
"from bs4 import BeautifulSoup\n",
|
||||
"from IPython.display import Markdown, display, update_display\n",
|
||||
"from openai import OpenAI\n",
|
||||
"import ollama"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "be2b859d-b3d2-41f7-8666-28ecde26e3b8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# set up environment and constants\n",
|
||||
"load_dotenv(override=True)\n",
|
||||
"api_key = os.getenv('OPENAI_API_KEY')\n",
|
||||
"\n",
|
||||
"if api_key and api_key.startswith('sk-proj-') and len(api_key)>10:\n",
|
||||
" print(\"API key looks good so far\")\n",
|
||||
"else:\n",
|
||||
" print(\"There might be a problem with your API key? Please visit the troubleshooting notebook!\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c1b2b694-11a1-4d2a-8e34-d1fb02617fa3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"system_prompt = \"You are an expert coder with educational skills for beginners. \\\n",
|
||||
"You are able to explain, debbug or generate code in Python, R or bash, and to provide examples of use case if applicable. \\\n",
|
||||
"Please add references to relevant sources if available. If not, do not invent.\\n\"\n",
|
||||
"system_prompt += \"this is an example of a response:\"\n",
|
||||
"system_prompt += \"\"\"\n",
|
||||
"Sure! Here’s the explanation in plain text format, suitable for Markdown:\n",
|
||||
"\n",
|
||||
"# Explanation of the Code\n",
|
||||
"\n",
|
||||
"### Code:\n",
|
||||
"```python\n",
|
||||
"full_name = lambda first, last: f'Full name: {first.title()} {last.title()}'\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"### Explanation:\n",
|
||||
"\n",
|
||||
"1. **Lambda Function:**\n",
|
||||
" - The keyword `lambda` is used to create a small, one-line anonymous function (a function without a name).\n",
|
||||
" - It takes two parameters: `first` (for the first name) and `last` (for the last name).\n",
|
||||
"\n",
|
||||
"2. **String Formatting (`f-string`):**\n",
|
||||
" - `f'Full name: {first.title()} {last.title()}'` is a formatted string (f-string).\n",
|
||||
" - It inserts the values of `first` and `last` into the string while applying `.title()` to capitalize the first letter of each name.\n",
|
||||
"\n",
|
||||
"3. **Assigning the Function:**\n",
|
||||
" - The lambda function is assigned to the variable `full_name`, so we can use `full_name()` like a regular function.\n",
|
||||
"\n",
|
||||
"### How to Use It:\n",
|
||||
"Now, let’s call this function and see what it does.\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"print(full_name(\"john\", \"doe\"))\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"#### Output:\n",
|
||||
"```\n",
|
||||
"Full name: John Doe\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"### What Happens:\n",
|
||||
"- `\"john\"` becomes `\"John\"` (because `.title()` capitalizes the first letter).\n",
|
||||
"- `\"doe\"` becomes `\"Doe\"`.\n",
|
||||
"- The output is `\"Full name: John Doe\"`.\n",
|
||||
"\n",
|
||||
"### Summary:\n",
|
||||
"This is a simple way to create a function that formats a full name while ensuring proper capitalization. You could write the same function using `def` like this:\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"def full_name(first, last):\n",
|
||||
" return f'Full name: {first.title()} {last.title()}'\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"Both versions work the same way, but the `lambda` version is more compact.\n",
|
||||
"\n",
|
||||
"### Reference(s):\n",
|
||||
"To deepen your understanding of the code snippet involving Python's lambda functions here is a resource you might find helpful:\n",
|
||||
"\n",
|
||||
"Ref. **Python Lambda Functions:**\n",
|
||||
" - The official Python documentation provides an in-depth explanation of lambda expressions, including their syntax and use cases.\n",
|
||||
" - [Lambda Expressions](https://docs.python.org/3/tutorial/controlflow.html#lambda-expressions)\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"You can copy and paste this into any Markdown file or viewer. Let me know if you need further modifications! 😊\n",
|
||||
"\"\"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f7225ab0-5ade-4c93-839c-3c80b0b23c37",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# display(Markdown(system_prompt))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "07fa2506-4b24-4a53-9f3f-500b4cbcb10a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# user question\n",
|
||||
"default_question= \"\"\"\n",
|
||||
"Please explain what this code does and why:\n",
|
||||
"yield from {book.get('author') from book in books if book.get('author')}\n",
|
||||
"\"\"\"\n",
|
||||
"user_question= str(input(\"What code do you want me to explain?/n(Press 'Enter' for an example)\"))\n",
|
||||
"\n",
|
||||
"if user_question== '':\n",
|
||||
" question= default_question\n",
|
||||
" print(default_question)\n",
|
||||
"else:\n",
|
||||
" question= \"Please explain what this code does and why:\\n\" + user_question"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a6749065-fb8a-4f9f-8297-3cd33abd97bd",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(question)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f48df06c-edb7-4a05-9e56-910854dad0c7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# user model\n",
|
||||
"model_number= input(\"\"\"\n",
|
||||
"Please enter the number of the model you want to use from the list below:\n",
|
||||
"1 GPT-4o Mini\n",
|
||||
"2 Llama 3.2\n",
|
||||
"3 DeepSeek R1\n",
|
||||
"4 Qwen 2.5\n",
|
||||
"\"\"\")\n",
|
||||
"try:\n",
|
||||
" if int(model_number)==1:\n",
|
||||
" model= 'gpt-4o-mini'\n",
|
||||
" elif int(model_number)==2:\n",
|
||||
" model= 'llama3.2'\n",
|
||||
" elif int(model_number)==3:\n",
|
||||
" model= 'deepseek-r1:1.5b'\n",
|
||||
" elif int(model_number)==4:\n",
|
||||
" model= 'qwen2.5:3b'\n",
|
||||
" else:\n",
|
||||
" model= ''\n",
|
||||
" print(\"please provide only a number from the list\")\n",
|
||||
"except:\n",
|
||||
" model=''\n",
|
||||
" print(\"Please provide a number or press 'Enter' to finish\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "aeb6e4e5-fb63-4192-bb74-0b015dfedfb7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# print(model)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "fffa6021-d3f8-4855-a694-bed6d651791f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"messages=[\n",
|
||||
" {\"role\": \"system\", \"content\": system_prompt},\n",
|
||||
" {\"role\": \"user\", \"content\": question}\n",
|
||||
" ]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "835374a4-3df5-4f28-82e3-6bc70514df16",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"if int(model_number)==1:\n",
|
||||
" openai= OpenAI()\n",
|
||||
" stream = openai.chat.completions.create(\n",
|
||||
" model=model,\n",
|
||||
" messages=messages,\n",
|
||||
" stream= True\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" response = \"\"\n",
|
||||
" print(\"The following answer will be generated by {0} LLM\".format(model))\n",
|
||||
" display_handle = display(Markdown(\"\"), display_id=True)\n",
|
||||
" for chunk in stream:\n",
|
||||
" response += chunk.choices[0].delta.content or ''\n",
|
||||
" response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n",
|
||||
" update_display(Markdown(response), display_id=display_handle.display_id)\n",
|
||||
"elif int(model_number)==2 or 3 or 4:\n",
|
||||
" !ollama pull {model}\n",
|
||||
" print(\"\\n\\nThe following answer will be generated by {0} LLM\\n\\n\".format(model))\n",
|
||||
" response = ollama.chat(\n",
|
||||
" model=model,\n",
|
||||
" messages = messages)\n",
|
||||
" result= response['message']['content']\n",
|
||||
" display(Markdown(result))"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.11"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
273
week1/community-contributions/Week1-Day2-Ollama-Exercise.ipynb
Normal file
273
week1/community-contributions/Week1-Day2-Ollama-Exercise.ipynb
Normal file
@@ -0,0 +1,273 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "fad31e32-2e42-42ae-ae63-c15d90292839",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# First Project\n",
|
||||
"Ollama -> Summary\n",
|
||||
"huggingface_hub -> \"facebook/m2m100_418M\" for translation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "5fb79a20-a455-4d27-91a1-91958af786c1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install transformers datasets torch\n",
|
||||
"!pip install huggingface_hub"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e95ac7f2-5192-4f83-acf3-61df30cd3109",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# imports\n",
|
||||
"import requests\n",
|
||||
"from bs4 import BeautifulSoup\n",
|
||||
"import json\n",
|
||||
"import ollama"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "12276d74-0e79-4e66-9135-1c9d1a80b943",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class Website:\n",
|
||||
" def __init__(self, url):\n",
|
||||
" self.url = url\n",
|
||||
" response = requests.get(url)\n",
|
||||
" soup = BeautifulSoup(response.content, 'html.parser')\n",
|
||||
" self.title = soup.title.string if soup.title else \"No title found\"\n",
|
||||
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
|
||||
" irrelevant.decompose()\n",
|
||||
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n",
|
||||
"\n",
|
||||
"huggingface_url = \"https://huggingface.co/learn/ml-for-3d-course\"\n",
|
||||
"huggingface_website = Website(huggingface_url)\n",
|
||||
"\n",
|
||||
"huggingface_data = {\n",
|
||||
" \"title\": huggingface_website.title,\n",
|
||||
" \"text\": huggingface_website.text\n",
|
||||
"}\n",
|
||||
"print(huggingface_data)\n",
|
||||
"\n",
|
||||
"with open('ml_for_3d_course_data.json', 'w') as f:\n",
|
||||
" json.dump(huggingface_data, f)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7d74c85c-3e09-4514-bde4-4cafc4910c52",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# huggingface_data 'text' value\n",
|
||||
"huggingface_text = huggingface_data['text']\n",
|
||||
"\n",
|
||||
"# Summary\n",
|
||||
"response_summary = ollama.chat(model=\"llama3.2:latest\", messages=[{\"role\": \"user\", \"content\": f\"Summarize the following text: {huggingface_text}\"}])\n",
|
||||
"print(response_summary)\n",
|
||||
"\n",
|
||||
"# print summary\n",
|
||||
"summary_huggingface_text = response_summary.message['content']\n",
|
||||
"print(\"Summary Text:\", summary_huggingface_text)\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d13764d5-cb76-46c5-bbe6-d132b31a9ea6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# HuggingFace Translation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "08405038-4115-487f-9efc-de58572453c1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class Website:\n",
|
||||
" url: str\n",
|
||||
" title: str\n",
|
||||
" text: str\n",
|
||||
"\n",
|
||||
" def __init__(self, url):\n",
|
||||
" self.url = url\n",
|
||||
" response = requests.get(url)\n",
|
||||
" soup = BeautifulSoup(response.content, 'html.parser')\n",
|
||||
" self.title = soup.title.string if soup.title else \"No title found\"\n",
|
||||
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
|
||||
" irrelevant.decompose()\n",
|
||||
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n",
|
||||
"\n",
|
||||
"url = \"https://huggingface.co/learn/ml-for-3d-course\"\n",
|
||||
"website = Website(url)\n",
|
||||
"print(website.title) \n",
|
||||
"print(website.text[:1000])\n",
|
||||
"\n",
|
||||
"data = {\n",
|
||||
" \"title\": website.title,\n",
|
||||
" \"text\": website.text\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"with open('ml_for_3d_course_data.json', 'w') as f:\n",
|
||||
" json.dump(data, f)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "0632352f-4b16-4125-83bf-f3cc3aabd659",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(data)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a85f8625-725d-4d7f-8cb7-8da4276f81cf",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install sacremoses"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c800cea4-f4a4-4e41-9637-31ff11afb256",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer\n",
|
||||
"\n",
|
||||
"# Load the M2M100 model and tokenizer\n",
|
||||
"model_name = \"facebook/m2m100_418M\"\n",
|
||||
"model = M2M100ForConditionalGeneration.from_pretrained(model_name)\n",
|
||||
"tokenizer = M2M100Tokenizer.from_pretrained(model_name)\n",
|
||||
"\n",
|
||||
"# Load the saved JSON file\n",
|
||||
"with open('ml_for_3d_course_data.json', 'r') as f:\n",
|
||||
" data = json.load(f)\n",
|
||||
"\n",
|
||||
"# Extract text from the loaded data\n",
|
||||
"text = data[\"text\"]\n",
|
||||
"\n",
|
||||
"# Set the source language to English and target language to Korean\n",
|
||||
"source_lang = \"en\"\n",
|
||||
"target_lang = \"ko\"\n",
|
||||
"\n",
|
||||
"# Set the language for tokenizer (important for M2M100)\n",
|
||||
"tokenizer.src_lang = source_lang\n",
|
||||
"tokenizer.tgt_lang = target_lang\n",
|
||||
"\n",
|
||||
"# Split text into smaller chunks if it's too large\n",
|
||||
"# This step ensures we don't exceed the model's maximum length (512 tokens)\n",
|
||||
"max_input_length = 512\n",
|
||||
"chunks = [text[i:i+max_input_length] for i in range(0, len(text), max_input_length)]\n",
|
||||
"\n",
|
||||
"print(chunks)\n",
|
||||
"# Initialize a list to hold the translated text\n",
|
||||
"translated_chunks = []\n",
|
||||
"\n",
|
||||
"# Iterate through each chunk and translate it\n",
|
||||
"for chunk in chunks:\n",
|
||||
" # Tokenize the chunk\n",
|
||||
" encoded = tokenizer(chunk, return_tensors=\"pt\", padding=True, truncation=True, max_length=512)\n",
|
||||
"\n",
|
||||
" # Generate translation from the model, forcing the output to be in Korean\n",
|
||||
" generated_tokens = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id(target_lang), max_length=512)\n",
|
||||
"\n",
|
||||
" # Decode the translated tokens to text\n",
|
||||
" translated_text = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]\n",
|
||||
" translated_chunks.append(translated_text)\n",
|
||||
"\n",
|
||||
"# Combine all translated chunks back together\n",
|
||||
"final_translated_text = ' '.join(translated_chunks)\n",
|
||||
"print(\"Translated Text:\", final_translated_text)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "ffe0f264-a588-422f-a6e1-b60504d1e02c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import json\n",
|
||||
"import requests\n",
|
||||
"\n",
|
||||
"# Ollama API URL 설정\n",
|
||||
"ollama_url = \"http://localhost:11411/v1/models/facebook/m2m100_418M/generate\"\n",
|
||||
"\n",
|
||||
"# 저장된 JSON 파일 로드\n",
|
||||
"with open('ml_for_3d_course_data.json', 'r') as f:\n",
|
||||
" data = json.load(f)\n",
|
||||
"\n",
|
||||
"# 텍스트 추출\n",
|
||||
"course_text = data[\"text\"]\n",
|
||||
"\n",
|
||||
"# 번역할 소스 언어 및 타겟 언어 설정\n",
|
||||
"source_language = \"en\"\n",
|
||||
"target_language = \"ko\"\n",
|
||||
"\n",
|
||||
"# 데이터 준비\n",
|
||||
"payload = {\n",
|
||||
" \"input_text\": course_text,\n",
|
||||
" \"src_lang\": source_language,\n",
|
||||
" \"tgt_lang\": target_language\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"# API 호출\n",
|
||||
"response = requests.post(ollama_url, json=payload)\n",
|
||||
"\n",
|
||||
"# 응답 확인\n",
|
||||
"if response.status_code == 200:\n",
|
||||
" translated_course_text = response.json().get(\"translated_text\", \"Translation failed\")\n",
|
||||
" print(\"Translated Course Text:\", translated_course_text)\n",
|
||||
"else:\n",
|
||||
" print(f\"Error {response.status_code}: {response.text}\")\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.11"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -234,7 +234,7 @@
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "llms",
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
@@ -252,5 +252,5 @@
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
"nbformat_minor": 4
|
||||
}
|
||||
|
||||
611
week1/community-contributions/day1-master-chef.ipynb
Normal file
611
week1/community-contributions/day1-master-chef.ipynb
Normal file
@@ -0,0 +1,611 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d15d8294-3328-4e07-ad16-8a03e9bbfdb9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# YOUR FIRST LAB\n",
|
||||
"### Please read this section. This is valuable to get you prepared, even if it's a long read -- it's important stuff.\n",
|
||||
"\n",
|
||||
"## Your first Frontier LLM Project\n",
|
||||
"\n",
|
||||
"Let's build a useful LLM solution - in a matter of minutes.\n",
|
||||
"\n",
|
||||
"By the end of this course, you will have built an autonomous Agentic AI solution with 7 agents that collaborate to solve a business problem. All in good time! We will start with something smaller...\n",
|
||||
"\n",
|
||||
"Our goal is to code a new kind of Web Browser. Give it a URL, and it will respond with a summary. The Reader's Digest of the internet!!\n",
|
||||
"\n",
|
||||
"Before starting, you should have completed the setup for [PC](../SETUP-PC.md) or [Mac](../SETUP-mac.md) and you hopefully launched this jupyter lab from within the project root directory, with your environment activated.\n",
|
||||
"\n",
|
||||
"## If you're new to Jupyter Lab\n",
|
||||
"\n",
|
||||
"Welcome to the wonderful world of Data Science experimentation! Once you've used Jupyter Lab, you'll wonder how you ever lived without it. Simply click in each \"cell\" with code in it, such as the cell immediately below this text, and hit Shift+Return to execute that cell. As you wish, you can add a cell with the + button in the toolbar, and print values of variables, or try out variations. \n",
|
||||
"\n",
|
||||
"I've written a notebook called [Guide to Jupyter](Guide%20to%20Jupyter.ipynb) to help you get more familiar with Jupyter Labs, including adding Markdown comments, using `!` to run shell commands, and `tqdm` to show progress.\n",
|
||||
"\n",
|
||||
"## If you're new to the Command Line\n",
|
||||
"\n",
|
||||
"Please see these excellent guides: [Command line on PC](https://chatgpt.com/share/67b0acea-ba38-8012-9c34-7a2541052665) and [Command line on Mac](https://chatgpt.com/canvas/shared/67b0b10c93a081918210723867525d2b). \n",
|
||||
"\n",
|
||||
"## If you'd prefer to work in IDEs\n",
|
||||
"\n",
|
||||
"If you're more comfortable in IDEs like VSCode or Pycharm, they both work great with these lab notebooks too. \n",
|
||||
"If you'd prefer to work in VSCode, [here](https://chatgpt.com/share/676f2e19-c228-8012-9911-6ca42f8ed766) are instructions from an AI friend on how to configure it for the course.\n",
|
||||
"\n",
|
||||
"## If you'd like to brush up your Python\n",
|
||||
"\n",
|
||||
"I've added a notebook called [Intermediate Python](Intermediate%20Python.ipynb) to get you up to speed. But you should give it a miss if you already have a good idea what this code does: \n",
|
||||
"`yield from {book.get(\"author\") for book in books if book.get(\"author\")}`\n",
|
||||
"\n",
|
||||
"## I am here to help\n",
|
||||
"\n",
|
||||
"If you have any problems at all, please do reach out. \n",
|
||||
"I'm available through the platform, or at ed@edwarddonner.com, or at https://www.linkedin.com/in/eddonner/ if you'd like to connect (and I love connecting!) \n",
|
||||
"And this is new to me, but I'm also trying out X/Twitter at [@edwarddonner](https://x.com/edwarddonner) - if you're on X, please show me how it's done 😂 \n",
|
||||
"\n",
|
||||
"## More troubleshooting\n",
|
||||
"\n",
|
||||
"Please see the [troubleshooting](troubleshooting.ipynb) notebook in this folder to diagnose and fix common problems. At the very end of it is a diagnostics script with some useful debug info.\n",
|
||||
"\n",
|
||||
"## If this is old hat!\n",
|
||||
"\n",
|
||||
"If you're already comfortable with today's material, please hang in there; you can move swiftly through the first few labs - we will get much more in depth as the weeks progress.\n",
|
||||
"\n",
|
||||
"<table style=\"margin: 0; text-align: left;\">\n",
|
||||
" <tr>\n",
|
||||
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
|
||||
" <img src=\"../important.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
|
||||
" </td>\n",
|
||||
" <td>\n",
|
||||
" <h2 style=\"color:#900;\">Please read - important note</h2>\n",
|
||||
" <span style=\"color:#900;\">The way I collaborate with you may be different to other courses you've taken. I prefer not to type code while you watch. Rather, I execute Jupyter Labs, like this, and give you an intuition for what's going on. My suggestion is that you carefully execute this yourself, <b>after</b> watching the lecture. Add print statements to understand what's going on, and then come up with your own variations. If you have a Github account, use this to showcase your variations. Not only is this essential practice, but it demonstrates your skills to others, including perhaps future clients or employers...</span>\n",
|
||||
" </td>\n",
|
||||
" </tr>\n",
|
||||
"</table>\n",
|
||||
"<table style=\"margin: 0; text-align: left;\">\n",
|
||||
" <tr>\n",
|
||||
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
|
||||
" <img src=\"../resources.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
|
||||
" </td>\n",
|
||||
" <td>\n",
|
||||
" <h2 style=\"color:#f71;\">Treat these labs as a resource</h2>\n",
|
||||
" <span style=\"color:#f71;\">I push updates to the code regularly. When people ask questions or have problems, I incorporate it in the code, adding more examples or improved commentary. As a result, you'll notice that the code below isn't identical to the videos. Everything from the videos is here; but in addition, I've added more steps and better explanations, and occasionally added new models like DeepSeek. Consider this like an interactive book that accompanies the lectures.\n",
|
||||
" </span>\n",
|
||||
" </td>\n",
|
||||
" </tr>\n",
|
||||
"</table>\n",
|
||||
"<table style=\"margin: 0; text-align: left;\">\n",
|
||||
" <tr>\n",
|
||||
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
|
||||
" <img src=\"../business.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
|
||||
" </td>\n",
|
||||
" <td>\n",
|
||||
" <h2 style=\"color:#181;\">Business value of these exercises</h2>\n",
|
||||
" <span style=\"color:#181;\">A final thought. While I've designed these notebooks to be educational, I've also tried to make them enjoyable. We'll do fun things like have LLMs tell jokes and argue with each other. But fundamentally, my goal is to teach skills you can apply in business. I'll explain business implications as we go, and it's worth keeping this in mind: as you build experience with models and techniques, think of ways you could put this into action at work today. Please do contact me if you'd like to discuss more or if you have ideas to bounce off me.</span>\n",
|
||||
" </td>\n",
|
||||
" </tr>\n",
|
||||
"</table>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4e2a9393-7767-488e-a8bf-27c12dca35bd",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# imports\n",
|
||||
"\n",
|
||||
"import os\n",
|
||||
"import requests\n",
|
||||
"from dotenv import load_dotenv\n",
|
||||
"from bs4 import BeautifulSoup\n",
|
||||
"from IPython.display import Markdown, display\n",
|
||||
"from openai import OpenAI\n",
|
||||
"\n",
|
||||
"# If you get an error running this cell, then please head over to the troubleshooting notebook!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6900b2a8-6384-4316-8aaa-5e519fca4254",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Connecting to OpenAI\n",
|
||||
"\n",
|
||||
"The next cell is where we load in the environment variables in your `.env` file and connect to OpenAI.\n",
|
||||
"\n",
|
||||
"## Troubleshooting if you have problems:\n",
|
||||
"\n",
|
||||
"Head over to the [troubleshooting](troubleshooting.ipynb) notebook in this folder for step by step code to identify the root cause and fix it!\n",
|
||||
"\n",
|
||||
"If you make a change, try restarting the \"Kernel\" (the python process sitting behind this notebook) by Kernel menu >> Restart Kernel and Clear Outputs of All Cells. Then try this notebook again, starting at the top.\n",
|
||||
"\n",
|
||||
"Or, contact me! Message me or email ed@edwarddonner.com and we will get this to work.\n",
|
||||
"\n",
|
||||
"Any concerns about API costs? See my notes in the README - costs should be minimal, and you can control it at every point. You can also use Ollama as a free alternative, which we discuss during Day 2."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7b87cadb-d513-4303-baee-a37b6f938e4d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Load environment variables in a file called .env\n",
|
||||
"\n",
|
||||
"load_dotenv(override=True)\n",
|
||||
"api_key = os.getenv('OPENAI_API_KEY')\n",
|
||||
"\n",
|
||||
"# Check the key\n",
|
||||
"\n",
|
||||
"if not api_key:\n",
|
||||
" print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n",
|
||||
"elif not api_key.startswith(\"sk-proj-\"):\n",
|
||||
" print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n",
|
||||
"elif api_key.strip() != api_key:\n",
|
||||
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n",
|
||||
"else:\n",
|
||||
" print(\"API key found and looks good so far!\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "019974d9-f3ad-4a8a-b5f9-0a3719aea2d3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"openai = OpenAI()\n",
|
||||
"\n",
|
||||
"# If this doesn't work, try Kernel menu >> Restart Kernel and Clear Outputs Of All Cells, then run the cells from the top of this notebook down.\n",
|
||||
"# If it STILL doesn't work (horrors!) then please see the Troubleshooting notebook in this folder for full instructions"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "442fc84b-0815-4f40-99ab-d9a5da6bda91",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Let's make a quick call to a Frontier model to get started, as a preview!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a58394bf-1e45-46af-9bfd-01e24da6f49a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# To give you a preview -- calling OpenAI with these messages is this easy. Any problems, head over to the Troubleshooting notebook.\n",
|
||||
"\n",
|
||||
"message = \"Hello, GPT! This is my first ever message to you! Hi!\"\n",
|
||||
"response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=[{\"role\":\"user\", \"content\":message}])\n",
|
||||
"print(response.choices[0].message.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2aa190e5-cb31-456a-96cc-db109919cd78",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## OK onwards with our first project"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c5e793b2-6775-426a-a139-4848291d0463",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# A class to represent a Webpage\n",
|
||||
"# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n",
|
||||
"\n",
|
||||
"# Some websites need you to use proper headers when fetching them:\n",
|
||||
"headers = {\n",
|
||||
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"class Website:\n",
|
||||
"\n",
|
||||
" def __init__(self, url):\n",
|
||||
" \"\"\"\n",
|
||||
" Create this Website object from the given url using the BeautifulSoup library\n",
|
||||
" \"\"\"\n",
|
||||
" self.url = url\n",
|
||||
" response = requests.get(url, headers=headers)\n",
|
||||
" soup = BeautifulSoup(response.content, 'html.parser')\n",
|
||||
" self.title = soup.title.string if soup.title else \"No title found\"\n",
|
||||
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
|
||||
" irrelevant.decompose()\n",
|
||||
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "2ef960cf-6dc2-4cda-afb3-b38be12f4c97",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Let's try one out. Change the website and add print statements to follow along.\n",
|
||||
"\n",
|
||||
"ed = Website(\"https://edwarddonner.com\")\n",
|
||||
"print(ed.title)\n",
|
||||
"print(ed.text)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6a478a0c-2c53-48ff-869c-4d08199931e1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Types of prompts\n",
|
||||
"\n",
|
||||
"You may know this already - but if not, you will get very familiar with it!\n",
|
||||
"\n",
|
||||
"Models like GPT4o have been trained to receive instructions in a particular way.\n",
|
||||
"\n",
|
||||
"They expect to receive:\n",
|
||||
"\n",
|
||||
"**A system prompt** that tells them what task they are performing and what tone they should use\n",
|
||||
"\n",
|
||||
"**A user prompt** -- the conversation starter that they should reply to"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "abdb8417-c5dc-44bc-9bee-2e059d162699",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Define our system prompt - you can experiment with this later, changing the last sentence to 'Respond in markdown in Spanish.'\n",
|
||||
"\n",
|
||||
"system_prompt = \"You are an assistant that analyzes the contents of a website \\\n",
|
||||
"and provides a short summary, ignoring text that might be navigation related. \\\n",
|
||||
"Respond in markdown.\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f0275b1b-7cfe-4f9d-abfa-7650d378da0c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# A function that writes a User Prompt that asks for summaries of websites:\n",
|
||||
"\n",
|
||||
"def user_prompt_for(website):\n",
|
||||
" user_prompt = f\"You are looking at a website titled {website.title}\"\n",
|
||||
" user_prompt += \"\\nThe contents of this website is as follows; \\\n",
|
||||
"please provide a short summary of this website in markdown. \\\n",
|
||||
"If it includes news or announcements, then summarize these too.\\n\\n\"\n",
|
||||
" user_prompt += website.text\n",
|
||||
" return user_prompt"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "26448ec4-5c00-4204-baec-7df91d11ff2e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(user_prompt_for(ed))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ea211b5f-28e1-4a86-8e52-c0b7677cadcc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Messages\n",
|
||||
"\n",
|
||||
"The API from OpenAI expects to receive messages in a particular structure.\n",
|
||||
"Many of the other APIs share this structure:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"[\n",
|
||||
" {\"role\": \"system\", \"content\": \"system message goes here\"},\n",
|
||||
" {\"role\": \"user\", \"content\": \"user message goes here\"}\n",
|
||||
"]\n",
|
||||
"```\n",
|
||||
"To give you a preview, the next 2 cells make a rather simple call - we won't stretch the mighty GPT (yet!)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f25dcd35-0cd0-4235-9f64-ac37ed9eaaa5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"messages = [\n",
|
||||
" {\"role\": \"system\", \"content\": \"You are a snarky assistant\"},\n",
|
||||
" {\"role\": \"user\", \"content\": \"What is 2 + 2?\"}\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "21ed95c5-7001-47de-a36d-1d6673b403ce",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# To give you a preview -- calling OpenAI with system and user messages:\n",
|
||||
"\n",
|
||||
"response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=messages)\n",
|
||||
"print(response.choices[0].message.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d06e8d78-ce4c-4b05-aa8e-17050c82bb47",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## And now let's build useful messages for GPT-4o-mini, using a function"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "0134dfa4-8299-48b5-b444-f2a8c3403c88",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# See how this function creates exactly the format above\n",
|
||||
"\n",
|
||||
"def messages_for(website):\n",
|
||||
" return [\n",
|
||||
" {\"role\": \"system\", \"content\": system_prompt},\n",
|
||||
" {\"role\": \"user\", \"content\": user_prompt_for(website)}\n",
|
||||
" ]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "36478464-39ee-485c-9f3f-6a4e458dbc9c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Try this out, and then try for a few more websites\n",
|
||||
"\n",
|
||||
"messages_for(ed)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "16f49d46-bf55-4c3e-928f-68fc0bf715b0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Time to bring it together - the API for OpenAI is very simple!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "905b9919-aba7-45b5-ae65-81b3d1d78e34",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# And now: call the OpenAI API. You will get very familiar with this!\n",
|
||||
"\n",
|
||||
"def summarize(url):\n",
|
||||
" website = Website(url)\n",
|
||||
" response = openai.chat.completions.create(\n",
|
||||
" model = \"gpt-4o-mini\",\n",
|
||||
" messages = messages_for(website)\n",
|
||||
" )\n",
|
||||
" return response.choices[0].message.content"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "05e38d41-dfa4-4b20-9c96-c46ea75d9fb5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"summarize(\"https://edwarddonner.com\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "3d926d59-450e-4609-92ba-2d6f244f1342",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# A function to display this nicely in the Jupyter output, using markdown\n",
|
||||
"\n",
|
||||
"def display_summary(url):\n",
|
||||
" summary = summarize(url)\n",
|
||||
" display(Markdown(summary))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "3018853a-445f-41ff-9560-d925d1774b2f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"display_summary(\"https://edwarddonner.com\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b3bcf6f4-adce-45e9-97ad-d9a5d7a3a624",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Let's try more websites\n",
|
||||
"\n",
|
||||
"Note that this will only work on websites that can be scraped using this simplistic approach.\n",
|
||||
"\n",
|
||||
"Websites that are rendered with Javascript, like React apps, won't show up. See the community-contributions folder for a Selenium implementation that gets around this. You'll need to read up on installing Selenium (ask ChatGPT!)\n",
|
||||
"\n",
|
||||
"Also Websites protected with CloudFront (and similar) may give 403 errors - many thanks Andy J for pointing this out.\n",
|
||||
"\n",
|
||||
"But many websites will work just fine!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "45d83403-a24c-44b5-84ac-961449b4008f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"display_summary(\"https://cnn.com\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "75e9fd40-b354-4341-991e-863ef2e59db7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"display_summary(\"https://anthropic.com\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c951be1a-7f1b-448f-af1f-845978e47e2c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<table style=\"margin: 0; text-align: left;\">\n",
|
||||
" <tr>\n",
|
||||
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
|
||||
" <img src=\"../business.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
|
||||
" </td>\n",
|
||||
" <td>\n",
|
||||
" <h2 style=\"color:#181;\">Business applications</h2>\n",
|
||||
" <span style=\"color:#181;\">In this exercise, you experienced calling the Cloud API of a Frontier Model (a leading model at the frontier of AI) for the first time. We will be using APIs like OpenAI at many stages in the course, in addition to building our own LLMs.\n",
|
||||
"\n",
|
||||
"More specifically, we've applied this to Summarization - a classic Gen AI use case to make a summary. This can be applied to any business vertical - summarizing the news, summarizing financial performance, summarizing a resume in a cover letter - the applications are limitless. Consider how you could apply Summarization in your business, and try prototyping a solution.</span>\n",
|
||||
" </td>\n",
|
||||
" </tr>\n",
|
||||
"</table>\n",
|
||||
"\n",
|
||||
"<table style=\"margin: 0; text-align: left;\">\n",
|
||||
" <tr>\n",
|
||||
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
|
||||
" <img src=\"../important.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
|
||||
" </td>\n",
|
||||
" <td>\n",
|
||||
" <h2 style=\"color:#900;\">Before you continue - now try yourself</h2>\n",
|
||||
" <span style=\"color:#900;\">Use the cell below to make your own simple commercial example. Stick with the summarization use case for now. Here's an idea: write something that will take the contents of an email, and will suggest an appropriate short subject line for the email. That's the kind of feature that might be built into a commercial email tool.</span>\n",
|
||||
" </td>\n",
|
||||
" </tr>\n",
|
||||
"</table>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "00743dac-0e70-45b7-879a-d7293a6f68a6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Step 1: Create your prompts\n",
|
||||
"\n",
|
||||
"system_prompt = \"You are an head chef of a michelin star restaurant who has a diverse skillset \\\n",
|
||||
"and loves to teach new and interesting recepies for homechefs. Given input of several ingredients \\\n",
|
||||
"provide step by step instruction of what could be cooked for any cuisine of your choice. Respond in markdown.\"\n",
|
||||
"\n",
|
||||
"user_prompt = \"\"\"\n",
|
||||
"You are a Michelin-starred head chef with a passion for teaching home chefs. \n",
|
||||
"I have the following ingredients: \n",
|
||||
"\n",
|
||||
"**[Chicken breast, Bell peppers, cherry tomatoes, spinach, Basmati rice,\n",
|
||||
"Garlic, basil, black pepper, smoked paprika]** \n",
|
||||
"\n",
|
||||
"Can you provide a step-by-step recipe using these ingredients? You can choose any cuisine that best fits them. \n",
|
||||
"Please include cooking times, techniques, and any chef tips for enhancing flavors. \n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"# Step 2: Make the messages list\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" {\"role\": \"system\", \"content\": system_prompt},\n",
|
||||
" {\"role\": \"user\", \"content\": user_prompt}\n",
|
||||
" ]\n",
|
||||
"\n",
|
||||
"# Step 3: Call OpenAI\n",
|
||||
"\n",
|
||||
"response = openai.chat.completions.create(\n",
|
||||
" model = \"gpt-4o-mini\",\n",
|
||||
" messages = messages\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Step 4: print the result\n",
|
||||
"def display_summary(summary):\n",
|
||||
" display(Markdown(summary))\n",
|
||||
"display_summary(response.choices[0].message.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "36ed9f14-b349-40e9-a42c-b367e77f8bda",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## An extra exercise for those who enjoy web scraping\n",
|
||||
"\n",
|
||||
"You may notice that if you try `display_summary(\"https://openai.com\")` - it doesn't work! That's because OpenAI has a fancy website that uses Javascript. There are many ways around this that some of you might be familiar with. For example, Selenium is a hugely popular framework that runs a browser behind the scenes, renders the page, and allows you to query it. If you have experience with Selenium, Playwright or similar, then feel free to improve the Website class to use them. In the community-contributions folder, you'll find an example Selenium solution from a student (thank you!)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "eeab24dc-5f90-4570-b542-b0585aca3eb6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Sharing your code\n",
|
||||
"\n",
|
||||
"I'd love it if you share your code afterwards so I can share it with others! You'll notice that some students have already made changes (including a Selenium implementation) which you will find in the community-contributions folder. If you'd like to add your changes to that folder, submit a Pull Request with your new versions in that folder and I'll merge your changes.\n",
|
||||
"\n",
|
||||
"If you're not an expert with git (and I am not!) then GPT has given some nice instructions on how to submit a Pull Request. It's a bit of an involved process, but once you've done it once it's pretty clear. As a pro-tip: it's best if you clear the outputs of your Jupyter notebooks (Edit >> Clean outputs of all cells, and then Save) for clean notebooks.\n",
|
||||
"\n",
|
||||
"Here are good instructions courtesy of an AI friend: \n",
|
||||
"https://chatgpt.com/share/677a9cb5-c64c-8012-99e0-e06e88afd293"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f4484fcf-8b39-4c3f-9674-37970ed71988",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.11"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -0,0 +1,152 @@
|
||||
{
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": []
|
||||
},
|
||||
"kernelspec": {
|
||||
"name": "python3",
|
||||
"display_name": "Python 3"
|
||||
},
|
||||
"language_info": {
|
||||
"name": "python"
|
||||
}
|
||||
},
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"# Getting MOM from call transcripts"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "99Z21wE7xpKS"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"Import necessary libraries"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "YZMeexE8M_Pp"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# imports\n",
|
||||
"\n",
|
||||
"import os\n",
|
||||
"import requests\n",
|
||||
"\n",
|
||||
"from bs4 import BeautifulSoup\n",
|
||||
"from IPython.display import Markdown, display\n",
|
||||
"from openai import OpenAI\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "u5DCVg0Mxj5T"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"id": "i0V11JQ2az-C"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Load environment variables in a file called .env\n",
|
||||
"\n",
|
||||
"# The code below can be uncommented if using a .env file\n",
|
||||
"\n",
|
||||
"#from dotenv import load_dotenv\n",
|
||||
"#load_dotenv(override=True)\n",
|
||||
"#api_key = os.getenv('OPENAI_API_KEY')\n",
|
||||
"\n",
|
||||
"#I am using google colab to import api_key\n",
|
||||
"from google.colab import userdata\n",
|
||||
"api_key=userdata.get('gemini_api')\n",
|
||||
"\n",
|
||||
"# Check the key\n",
|
||||
"if not api_key:\n",
|
||||
" print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n",
|
||||
"elif not api_key.startswith(\"sk-proj-\"):\n",
|
||||
" print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n",
|
||||
"elif api_key.strip() != api_key:\n",
|
||||
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n",
|
||||
"else:\n",
|
||||
" print(\"API key found and looks good so far!\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# A class to represent Transcript\n",
|
||||
"from pathlib import Path\n",
|
||||
"class Transcript:\n",
|
||||
" def __init__(self, file_path):\n",
|
||||
" self.file_path=file_path\n",
|
||||
" self.content=Path(file_path).read_text(encoding='utf-8')\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "j6UTsnTEyWZ-"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"# Source of the text file -\"https://raw.githubusercontent.com/GeminiLn/EarningsCall_Dataset/refs/heads/master/3M%20Company_20170425/Text.txt\"\n",
|
||||
"path = '/content/Text.txt' # Specify the path of file you want to use - format should be .txt\n",
|
||||
"t=Transcript(path)\n"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "hquePU_mzZ7s"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"\n",
|
||||
"system_prompt = \"You are expert at taking Meeting Notes & given the below transcript , create an MOM (Minutes of meeting)\""
|
||||
],
|
||||
"metadata": {
|
||||
"id": "ex5DB7M8L7KT"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"source": [
|
||||
"from google import genai\n",
|
||||
"from google.genai import types\n",
|
||||
"\n",
|
||||
"client = genai.Client(api_key=api_key)\n",
|
||||
"\n",
|
||||
"response = client.models.generate_content(\n",
|
||||
" model=\"gemini-2.0-flash\",\n",
|
||||
" config=types.GenerateContentConfig(\n",
|
||||
" system_instruction=system_prompt,\n",
|
||||
" max_output_tokens=500,\n",
|
||||
" temperature=0.1\n",
|
||||
" ),\n",
|
||||
" contents=t.content,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"print(response.text)"
|
||||
],
|
||||
"metadata": {
|
||||
"id": "wcpJ34qfMKmV"
|
||||
},
|
||||
"execution_count": null,
|
||||
"outputs": []
|
||||
}
|
||||
]
|
||||
}
|
||||
87
week1/community-contributions/day1_michelin_start_cook.ipynb
Normal file
87
week1/community-contributions/day1_michelin_start_cook.ipynb
Normal file
@@ -0,0 +1,87 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "44aba2a0-c6eb-4fc1-a5cc-0a8f8679dbb8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Michelin-star cook..."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d4d58124-5e9a-4f5a-9e0a-ff74f43896a8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# imports\n",
|
||||
"\n",
|
||||
"import os\n",
|
||||
"import requests\n",
|
||||
"from dotenv import load_dotenv\n",
|
||||
"from bs4 import BeautifulSoup\n",
|
||||
"from IPython.display import Markdown, display\n",
|
||||
"from openai import OpenAI\n",
|
||||
"\n",
|
||||
"# Load environment variables in a file called .env\n",
|
||||
"\n",
|
||||
"load_dotenv(override=True)\n",
|
||||
"api_key = os.getenv('OPENAI_API_KEY')\n",
|
||||
"\n",
|
||||
"openai = OpenAI()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "67dc3099-2ccc-4ee8-8ff2-0dbbe4ae2fcb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"system_prompt = \"You are a professional chef in a Michelin-star restaurant. You will help me cook restaurant-style dishes using the ingredients I have left in my refrigerator.\\\n",
|
||||
"You will provide detailed instructions with precise times and measurements in grams and include calorie information for raw ingredients, not cooked ones.\\\n",
|
||||
"Add the caloric information at the end. Your responses should be formatted in Markdown.\"\n",
|
||||
"\n",
|
||||
"user_prompt = \"\"\"\n",
|
||||
"Help me with a recipe using the ingredients I have left in the refrigerator. I have spinach, eggs, pasta, rice, chicken, beef, carrots, potatoes, butter, milk, cheese, tomatoes, red peppers, and all spices in the pantry.\\n\\n\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"messages = [\n",
|
||||
" {\"role\": \"system\", \"content\": system_prompt},\n",
|
||||
" {\"role\": \"user\", \"content\": user_prompt},\n",
|
||||
"]\n",
|
||||
" \n",
|
||||
"response = openai.chat.completions.create(\n",
|
||||
" model = \"gpt-4o-mini\",\n",
|
||||
" messages = messages\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"# Step 4: print the result in markdown format\n",
|
||||
"pretty_response = Markdown(response.choices[0].message.content)\n",
|
||||
"display(pretty_response)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.11"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -0,0 +1,127 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "0a512c2a-55e7-40e1-ab17-88b7034ca09a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Imports\n",
|
||||
"import openai\n",
|
||||
"import os\n",
|
||||
"from dotenv import load_dotenv\n",
|
||||
"from openai import OpenAI\n",
|
||||
"from IPython.display import Markdown, display"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "1aa8dd82-6b5e-4dbd-a2ee-8367e796a51f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"load_dotenv(override=True)\n",
|
||||
"api_key = os.getenv('OPENAI_API_KEY')\n",
|
||||
"\n",
|
||||
"# Check the key\n",
|
||||
"\n",
|
||||
"if not api_key:\n",
|
||||
" print(\"No API key was found - head over to the troubleshooting notebook!\")\n",
|
||||
"elif not api_key.startswith(\"sk-proj-\"):\n",
|
||||
" print(\"An API key was found, but it doesn't start sk-proj... make sure you using the right key (Check troubleshooting notebook)\")\n",
|
||||
"elif api_key.strip() != api_key:\n",
|
||||
" print(\"An API key was found, but it looks like white space was found in beginning or end. (Check troubleshooting notebook)\")\n",
|
||||
"else:\n",
|
||||
" print(\"API key found and looks good so far!\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "2acd579b-846c-4aa6-ba6c-1cc1a5a2eeb6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Input the system prompt\n",
|
||||
"system_prompt = \"\"\"you are top notched AI music expert that have knowledge of all genres, songs, and artists. You need to google search lyrics. You have the following rules:\\\n",
|
||||
"1. Carefully break down what type of recommendation the user wants and the context.\\\n",
|
||||
"2. If asked to recommend genres similar to a song or artists please identify the top 3 genres.\\\n",
|
||||
"3. If asked to recommend artists from songs or genres then recommend the top 5 artists.\n",
|
||||
"4. If asked to recommend songs from genres or artist than recommend the top 10 songs.\n",
|
||||
"5. If asked for a general recommendation give them the top 5 songs based off of context.\\\n",
|
||||
"6. Be flexible and adaptable with recommendations and consider the context the user might ask.\n",
|
||||
"7. always respond in markdown.\n",
|
||||
"\"\"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "3c1cf212-538c-4e9a-8da5-337bd7b6197c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# music recommender function\n",
|
||||
"def music_recommender(user_prompt):\n",
|
||||
" messages = [\n",
|
||||
" {\"role\": \"system\", \"content\": system_prompt},\n",
|
||||
" {\"role\": \"user\", \"content\": user_prompt}\n",
|
||||
" ]\n",
|
||||
" \n",
|
||||
" response = openai.chat.completions.create(\n",
|
||||
" model=\"gpt-4\",\n",
|
||||
" messages=messages,\n",
|
||||
" max_tokens=300\n",
|
||||
" )\n",
|
||||
" \n",
|
||||
" return response.choices[0].message.content"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4f277561-af8b-4715-90e7-6ebaadeb15d0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# User prompt (Change this to fit your needs!)\n",
|
||||
"user_prompt = \"Can you recommend me songs from Taylor Swift\"\n",
|
||||
"\n",
|
||||
"# Example usage\n",
|
||||
"response = music_recommender(user_prompt)\n",
|
||||
"display(Markdown(response))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "bb869d36-de14-4e46-9087-223d6b257efa",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.11"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
444
week1/community-contributions/gradio_testcase_automation.ipynb
Normal file
444
week1/community-contributions/gradio_testcase_automation.ipynb
Normal file
@@ -0,0 +1,444 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "it1JLoxrSqO1",
|
||||
"metadata": {
|
||||
"id": "it1JLoxrSqO1"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install openai python-docx python-dotenv gradio openpyxl"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "950a084a-7f92-4669-af62-f07cb121da56",
|
||||
"metadata": {
|
||||
"id": "950a084a-7f92-4669-af62-f07cb121da56"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import json\n",
|
||||
"from dotenv import load_dotenv\n",
|
||||
"#from IPython.display import Markdown, display, update_display\n",
|
||||
"from openai import OpenAI\n",
|
||||
"from docx import Document"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d0548135-ef16-4102-a55a-cea888a51c29",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import pandas as pd\n",
|
||||
"import re\n",
|
||||
"import gradio as gr"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "ab9f734f-ed6f-44f6-accb-594f9ca4843d",
|
||||
"metadata": {
|
||||
"id": "ab9f734f-ed6f-44f6-accb-594f9ca4843d"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class ReqDoc:\n",
|
||||
" def __init__(self, file_path):\n",
|
||||
" self.file_path = file_path\n",
|
||||
"\n",
|
||||
" def extract(self):\n",
|
||||
" \"\"\"\n",
|
||||
" Reads the content of a .docx file and returns the paragraphs as a list of strings.\n",
|
||||
" \"\"\"\n",
|
||||
" try:\n",
|
||||
" # Check if the file exists\n",
|
||||
" if not os.path.exists(self.file_path):\n",
|
||||
" raise FileNotFoundError(f\"The file {self.file_path} was not found.\")\n",
|
||||
"\n",
|
||||
" # Attempt to open and read the document\n",
|
||||
" doc = Document(self.file_path)\n",
|
||||
" text = \"\\n\".join([paragraph.text for paragraph in doc.paragraphs])\n",
|
||||
" return text\n",
|
||||
"\n",
|
||||
" except FileNotFoundError as fnf_error:\n",
|
||||
" print(fnf_error)\n",
|
||||
" return None\n",
|
||||
" except Exception as e:\n",
|
||||
" print(f\"An error occurred: {e}\")\n",
|
||||
" return None\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "008f485a-5718-48f6-b408-06eb6d59d7f9",
|
||||
"metadata": {
|
||||
"id": "008f485a-5718-48f6-b408-06eb6d59d7f9"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Initialize and constants\n",
|
||||
"load_dotenv(override=True)\n",
|
||||
"api_key = os.getenv('OPENAI_API_KEY')\n",
|
||||
"\n",
|
||||
"if api_key and api_key.startswith('sk-proj') and len(api_key)>10:\n",
|
||||
" print(\"API key looks good!\")\n",
|
||||
"else:\n",
|
||||
" print(\"There might be a problem with your API key. Please check!\")\n",
|
||||
" \n",
|
||||
"MODEL = 'gpt-4o-mini'\n",
|
||||
"openai = OpenAI()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b6110ff3-74bc-430a-8051-7d86a216f0fb",
|
||||
"metadata": {
|
||||
"id": "b6110ff3-74bc-430a-8051-7d86a216f0fb"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#Set up system prompt for extracting just the requirements from the document\n",
|
||||
"\n",
|
||||
"req_doc_system_prompt = \"You are provided with a complete requirements specifications document. \\\n",
|
||||
"You are able to decide which content from that document are related to actual requirements, identify each requirement as \\\n",
|
||||
"functional or non-functional and list them all.\\n\"\n",
|
||||
"req_doc_system_prompt += \"If the document is empty or do not contain requirements or if you cannot extract them, please respond as such.\\\n",
|
||||
"Do not make up your own requirements. \\n\"\n",
|
||||
"req_doc_system_prompt += \"You should respond in JSON as in this example:\"\n",
|
||||
"req_doc_system_prompt += \"\"\"\n",
|
||||
"{\n",
|
||||
" \"requirements\": [\n",
|
||||
" {\"RequirementNo\": \"FR-01\", \"Requirement Description\": \"description of this functional requirement goes here\"},\n",
|
||||
" {\"RequirementNo\": \"FR-02\": \"Requirement Description\": \"description of this functional requirement goes here\"},\n",
|
||||
" {\"RequirementNo\": \"NFR-01\": \"Requirement Description\": \"description of this non-functional requirement goes here\"},\n",
|
||||
" {\"RequirementNo\": \"NFR-02\": \"Requirement Description\": \"description of this non-functional requirement goes here\"}\n",
|
||||
" ]\n",
|
||||
"}\n",
|
||||
"\"\"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "20460e45-c1b7-4dc4-ab07-932235c19895",
|
||||
"metadata": {
|
||||
"id": "20460e45-c1b7-4dc4-ab07-932235c19895"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#Set up user prompt, sending in the requirements doc as input and calling the ReqDoc.extract function. Key to note here is the explicit instructions to\n",
|
||||
"#respond in JSON format.\n",
|
||||
"\n",
|
||||
"def req_doc_user_prompt(doc):\n",
|
||||
" user_prompt = \"Here is the contents from a requirement document.\\n\"\n",
|
||||
" user_prompt += f\"{doc.extract()} \\n\"\n",
|
||||
" user_prompt += \"Please scan through the document and extract only the actual requirements. For example, ignore sections or \\\n",
|
||||
"paragraphs such as Approvers, table of contents and similar sections which are not really requirements.\\\n",
|
||||
"You must respond in a JSON format\"\n",
|
||||
" user_prompt += \"If the content is empty, respond that there are no valid requirements you could extract and ask for a proper document.\\n\"\n",
|
||||
" user_prompt = user_prompt[:25_000] # Truncate if more than 25,000 characters\n",
|
||||
" return user_prompt"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "3a9f0f84-69a0-4971-a545-5bb40c2f9891",
|
||||
"metadata": {
|
||||
"id": "3a9f0f84-69a0-4971-a545-5bb40c2f9891"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#Function to call chatgpt-4o-mini model with the user and system prompts set above and returning the json formatted result obtained from chatgpt\n",
|
||||
"def get_requirements(doc):\n",
|
||||
" reqdoc = ReqDoc(doc)\n",
|
||||
" response = openai.chat.completions.create(\n",
|
||||
" model=MODEL,\n",
|
||||
" messages=[\n",
|
||||
" {\"role\": \"system\", \"content\": req_doc_system_prompt},\n",
|
||||
" {\"role\": \"user\", \"content\": req_doc_user_prompt(reqdoc)}\n",
|
||||
" ],\n",
|
||||
" response_format={\"type\": \"json_object\"}\n",
|
||||
" )\n",
|
||||
" result = response.choices[0].message.content\n",
|
||||
" return json.loads(result)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f9bb04ef-78d3-4e0f-9ed1-59a961a0663e",
|
||||
"metadata": {
|
||||
"id": "f9bb04ef-78d3-4e0f-9ed1-59a961a0663e"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#Uncomment and run this if you want to see the extracted requriements in json format.\n",
|
||||
"#get_requirements(\"reqdoc.docx\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1fe8618c-1dfe-4030-bad8-405731294c93",
|
||||
"metadata": {
|
||||
"id": "1fe8618c-1dfe-4030-bad8-405731294c93"
|
||||
},
|
||||
"source": [
|
||||
"### Next, we will make another call to gpt-4o-mini"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "db2c1eb3-7740-43a4-9c0b-37b7e70c739b",
|
||||
"metadata": {
|
||||
"id": "db2c1eb3-7740-43a4-9c0b-37b7e70c739b"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#Set up system prompt to ask for test cases in table format\n",
|
||||
"system_prompt = \"You are an assitant that receives a list of functional and non functional requirements in JSON format. You are the expert in generating unit test cases for each requirement. \\\n",
|
||||
"You will create as many different test cases as needed for each requirement and produce a result in a table. Order the table by requirement No. Provide clear details on test case pass criteria. \\\n",
|
||||
"The table will contain the following columns. \\\n",
|
||||
"1.S No\\\n",
|
||||
"2.Requirement No\\\n",
|
||||
"3.Requirement Description\\\n",
|
||||
"4.Test Case ID\\\n",
|
||||
"5.Test case summary\\\n",
|
||||
"6.Test case description\\\n",
|
||||
"7.Success criteria \\n\"\n",
|
||||
"system_prompt += \"If you are provided with an empty list, ask for a proper requirement doc\\n\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c4cd2bdf-e1bd-43ff-85fa-760ba39ed8c5",
|
||||
"metadata": {
|
||||
"id": "c4cd2bdf-e1bd-43ff-85fa-760ba39ed8c5"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Set up user prompt passing in the req doc file. This in turn will call the get_requirements function, which will make a call to chatgpt.\n",
|
||||
"\n",
|
||||
"def get_testcase_user_prompt(reqdoc):\n",
|
||||
" user_prompt = \"You are looking at the following list of requirements. \\n\"\n",
|
||||
" user_prompt += f\"{get_requirements(reqdoc)}\\n\"\n",
|
||||
" user_prompt += \"Prepare unit test cases for each of these requirements in a table and send that table as response. \\n\"\n",
|
||||
" user_prompt += user_prompt[:25000]\n",
|
||||
" return user_prompt"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "5b2a2b46-9d9c-416c-b189-3007b4d26d76",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#This is the 2nd call to chatgpt to get test cases. display(Markdown) will take care of producing a neatly formatted table output.\n",
|
||||
"def create_testcase_doc_gradio(response, is_response_ready, is_cleared, file_input):\n",
|
||||
" if is_cleared or file_input == None: # Prevent OpenAI call if \"Clear\" was clicked\n",
|
||||
" return \"\", False\n",
|
||||
" stream = openai.chat.completions.create(\n",
|
||||
" model=MODEL,\n",
|
||||
" messages=[\n",
|
||||
" {\"role\": \"system\", \"content\": system_prompt},\n",
|
||||
" {\"role\": \"user\", \"content\": get_testcase_user_prompt(file_input)}\n",
|
||||
" ],\n",
|
||||
" stream=True\n",
|
||||
" )\n",
|
||||
" #Modified for Gradio\n",
|
||||
" result = \"\"\n",
|
||||
" for chunk in stream:\n",
|
||||
" result += chunk.choices[0].delta.content or \"\"\n",
|
||||
" #print(result)\n",
|
||||
" yield result, False"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "2bb96a11-063e-4b20-9880-71fa9ea4d3f7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Define this variable and then pass js=force_dark_mode when creating the Interface\n",
|
||||
"force_dark_mode = \"\"\"\n",
|
||||
"function refresh() {\n",
|
||||
" const url = new URL(window.location);\n",
|
||||
" if (url.searchParams.get('__theme') !== 'dark') {\n",
|
||||
" url.searchParams.set('__theme', 'dark');\n",
|
||||
" window.location.href = url.href;\n",
|
||||
" }\n",
|
||||
"}\n",
|
||||
"\"\"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "5c81c766-9613-4614-b88d-410654672b89",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def show_or_hide_save_button(response, is_response_ready, is_cleared):\n",
|
||||
" if is_cleared or response == None:\n",
|
||||
" return \"\", False\n",
|
||||
" table_pattern = r\"(\\|.+\\|[\\r\\n]+)+\"\n",
|
||||
" table_match = re.search(table_pattern, response)\n",
|
||||
" if table_match:\n",
|
||||
" return response, True #(response, is_response_ready)\n",
|
||||
" else:\n",
|
||||
" return response, False #(response, is_response_ready)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a5f5d8e7-d29c-4f40-8d57-a9911bb7c47e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def extract_table_from_markdown(response):\n",
|
||||
" # Regular expression to match Markdown tables\n",
|
||||
" table_pattern = r\"(\\|.+\\|[\\r\\n]+)+\"\n",
|
||||
" table_match = re.search(table_pattern, response)\n",
|
||||
"\n",
|
||||
" if table_match:\n",
|
||||
" table_data = table_match.group(0)\n",
|
||||
" # Process the table into a format pandas can read\n",
|
||||
" rows = table_data.strip().split(\"\\n\")\n",
|
||||
" data = [row.split(\"|\")[1:-1] for row in rows] # Split columns by '|'\n",
|
||||
"\n",
|
||||
" # Convert to DataFrame\n",
|
||||
" df = pd.DataFrame(data[1:], columns=data[0]) # First row is the header\n",
|
||||
"\n",
|
||||
" # Save to Excel\n",
|
||||
" output_file = \"test_cases.xlsx\"\n",
|
||||
" df.to_excel(output_file, index=False)\n",
|
||||
"\n",
|
||||
" return output_file\n",
|
||||
" else:\n",
|
||||
" return None"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c1380b11-3e28-40de-ab1a-93a5fd73cf81",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def extract_and_save_button(response, is_cleared):\n",
|
||||
" if is_cleared:\n",
|
||||
" return None # Do nothing if the file was cleared\n",
|
||||
" # This function will be triggered when the user clicks \"Save as Excel\"\n",
|
||||
" output_file = extract_table_from_markdown(response)\n",
|
||||
" if output_file:\n",
|
||||
" return output_file\n",
|
||||
" else:\n",
|
||||
" return \"No table found in the provided input.\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "3a532b42-9f81-4c75-8be4-e40d621a6b35",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Gradio interface\n",
|
||||
"with gr.Blocks(js=force_dark_mode) as demo:\n",
|
||||
" gr.HTML(\"<h2 style='text-align: center; color: white;'>📄 Test case automation</h2>\")\n",
|
||||
" with gr.Row():\n",
|
||||
" file_input = gr.File(label=\"Upload your requirements docx file\", file_types=[\".docx\"])\n",
|
||||
" with gr.Row():\n",
|
||||
" response = gr.Markdown()\n",
|
||||
" # Button to save the table as Excel file (optional)\n",
|
||||
" save_button = gr.Button(\"Download Table as Excel\", visible=False)\n",
|
||||
" file_output = gr.File(label=\"Download Excel File\", visible=False) \n",
|
||||
" # State variable to track if response is ready\n",
|
||||
" is_response_ready = gr.State(False)\n",
|
||||
" with gr.Row():\n",
|
||||
" clear_button = gr.Button(\"Clear\")\n",
|
||||
" # State variable to track if clear button is clicked\n",
|
||||
" is_cleared = gr.State(False)\n",
|
||||
"\n",
|
||||
" # Function to show \"Processing...\" message\n",
|
||||
" def show_processing(is_cleared, file_input):\n",
|
||||
" if is_cleared or file_input==None:\n",
|
||||
" return None, False, is_cleared, file_input # Do nothing if the file was cleared\n",
|
||||
" #return gr.HTML(\"<h6 style='text-align: left; color: #ffffffff;'>⌛ Processing your file... Please wait!</h6>\"), False, is_cleared, file_input\n",
|
||||
" return \"⌛ Processing your file... Please wait!\", False, is_cleared, file_input\n",
|
||||
" \n",
|
||||
" # Trigger response only if the file was uploaded and not cleared\n",
|
||||
" file_input.change(\n",
|
||||
" lambda _: False, # Directly set is_cleared to False\n",
|
||||
" inputs=[file_input],\n",
|
||||
" outputs=[is_cleared]\n",
|
||||
" ).then(\n",
|
||||
" show_processing, inputs=[is_cleared, file_input], outputs=[response, is_response_ready, is_cleared, file_input]\n",
|
||||
" ).then(\n",
|
||||
" create_testcase_doc_gradio, inputs=[response, is_response_ready, is_cleared, file_input], outputs=[response, is_response_ready]\n",
|
||||
" ).then(\n",
|
||||
" show_or_hide_save_button, inputs=[response, is_response_ready, is_cleared], outputs=[response, is_response_ready]\n",
|
||||
" ).then(\n",
|
||||
" lambda _, ready: (gr.update(visible=ready), gr.update(visible=ready)), inputs=[response, is_response_ready], outputs=[save_button,file_output])\n",
|
||||
"\n",
|
||||
" #.then() passes the previous function outputs as inputs to the next function\n",
|
||||
"\n",
|
||||
" # Button action to extract and save table as an Excel file\n",
|
||||
" save_button.click(extract_and_save_button, inputs=[response, is_cleared], outputs=file_output)\n",
|
||||
" \n",
|
||||
" # Clear button resets both file and output while setting is_cleared to True\n",
|
||||
" clear_button.click(lambda: (None, None, None, True), inputs=None, outputs=[file_input, file_output, response, is_cleared]) \n",
|
||||
"\n",
|
||||
"# Launch Gradio app\n",
|
||||
"demo.launch(share=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "cd5314b2-ee91-49bd-9d40-558775d44382",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"provenance": []
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.11"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
632
week1/community-contributions/tweet-generate-from-alt-text.ipynb
Normal file
632
week1/community-contributions/tweet-generate-from-alt-text.ipynb
Normal file
@@ -0,0 +1,632 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d15d8294-3328-4e07-ad16-8a03e9bbfdb9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# YOUR FIRST LAB\n",
|
||||
"## Please read this. This is super-critical to get you prepared; there's no fluff here!\n",
|
||||
"\n",
|
||||
"## Your first Frontier LLM Project\n",
|
||||
"\n",
|
||||
"Let's build a useful LLM solution - in a matter of minutes.\n",
|
||||
"\n",
|
||||
"By the end of this course, you will have built an autonomous Agentic AI solution with 7 agents that collaborate to solve a business problem. All in good time! We will start with something smaller...\n",
|
||||
"\n",
|
||||
"Our goal is to code a new kind of Web Browser. Give it a URL, and it will respond with a summary. The Reader's Digest of the internet!!\n",
|
||||
"\n",
|
||||
"Before starting, you should have completed the setup for [PC](../SETUP-PC.md) or [Mac](../SETUP-mac.md) and you hopefully launched this jupyter lab from within the project root directory, with your environment activated.\n",
|
||||
"\n",
|
||||
"## If you're new to Jupyter Lab\n",
|
||||
"\n",
|
||||
"Welcome to the wonderful world of Data Science experimentation! Once you've used Jupyter Lab, you'll wonder how you ever lived without it. Simply click in each \"cell\" with code in it, such as the cell immediately below this text, and hit Shift+Return to execute that cell. As you wish, you can add a cell with the + button in the toolbar, and print values of variables, or try out variations. \n",
|
||||
"\n",
|
||||
"I've written a notebook called [Guide to Jupyter](Guide%20to%20Jupyter.ipynb) to help you get more familiar with Jupyter Labs, including adding Markdown comments, using `!` to run shell commands, and `tqdm` to show progress.\n",
|
||||
"\n",
|
||||
"## If you're new to the Command Line\n",
|
||||
"\n",
|
||||
"Please see these excellent guides: [Command line on PC](https://chatgpt.com/share/67b0acea-ba38-8012-9c34-7a2541052665) and [Command line on Mac](https://chatgpt.com/canvas/shared/67b0b10c93a081918210723867525d2b). \n",
|
||||
"Linux people, something tells me you could teach _me_ a thing or two about the command line!\n",
|
||||
"\n",
|
||||
"## If you'd prefer to work in IDEs\n",
|
||||
"\n",
|
||||
"If you're more comfortable in IDEs like VSCode or Pycharm, they both work great with these lab notebooks too. \n",
|
||||
"If you'd prefer to work in VSCode, [here](https://chatgpt.com/share/676f2e19-c228-8012-9911-6ca42f8ed766) are instructions from an AI friend on how to configure it for the course.\n",
|
||||
"\n",
|
||||
"## If you'd like to brush up your Python\n",
|
||||
"\n",
|
||||
"I've added a notebook called [Intermediate Python](Intermediate%20Python.ipynb) to get you up to speed. But you should give it a miss if you already have a good idea what this code does: \n",
|
||||
"`yield from {book.get(\"author\") for book in books if book.get(\"author\")}`\n",
|
||||
"\n",
|
||||
"## I am here to help\n",
|
||||
"\n",
|
||||
"If you have any problems at all, please do reach out. \n",
|
||||
"I'm available through the platform, or at ed@edwarddonner.com, or at https://www.linkedin.com/in/eddonner/ if you'd like to connect (and I love connecting!) \n",
|
||||
"And this is new to me, but I'm also trying out X/Twitter at [@edwarddonner](https://x.com/edwarddonner) - if you're on X, please show me how it's done 😂 \n",
|
||||
"\n",
|
||||
"## More troubleshooting\n",
|
||||
"\n",
|
||||
"Please see the [troubleshooting](troubleshooting.ipynb) notebook in this folder to diagnose and fix common problems. At the very end of it is a diagnostics script with some useful debug info.\n",
|
||||
"\n",
|
||||
"## If this is old hat!\n",
|
||||
"\n",
|
||||
"If you're already comfortable with today's material, please hang in there; you can move swiftly through the first few labs - we will get much more in depth as the weeks progress.\n",
|
||||
"\n",
|
||||
"<table style=\"margin: 0; text-align: left;\">\n",
|
||||
" <tr>\n",
|
||||
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
|
||||
" <img src=\"../important.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
|
||||
" </td>\n",
|
||||
" <td>\n",
|
||||
" <h2 style=\"color:#900;\">Please read - important note</h2>\n",
|
||||
" <span style=\"color:#900;\">The way I collaborate with you may be different to other courses you've taken. I prefer not to type code while you watch. Rather, I execute Jupyter Labs, like this, and give you an intuition for what's going on. My suggestion is that you carefully execute this yourself, <b>after</b> watching the lecture. Add print statements to understand what's going on, and then come up with your own variations. If you have a Github account, use this to showcase your variations. Not only is this essential practice, but it demonstrates your skills to others, including perhaps future clients or employers...</span>\n",
|
||||
" </td>\n",
|
||||
" </tr>\n",
|
||||
"</table>\n",
|
||||
"<table style=\"margin: 0; text-align: left;\">\n",
|
||||
" <tr>\n",
|
||||
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
|
||||
" <img src=\"../resources.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
|
||||
" </td>\n",
|
||||
" <td>\n",
|
||||
" <h2 style=\"color:#f71;\">Treat these labs as a resource</h2>\n",
|
||||
" <span style=\"color:#f71;\">I push updates to the code regularly. When people ask questions or have problems, I incorporate it in the code, adding more examples or improved commentary. As a result, you'll notice that the code below isn't identical to the videos. Everything from the videos is here; but in addition, I've added more steps and better explanations, and occasionally added new models like DeepSeek. Consider this like an interactive book that accompanies the lectures.\n",
|
||||
" </span>\n",
|
||||
" </td>\n",
|
||||
" </tr>\n",
|
||||
"</table>\n",
|
||||
"<table style=\"margin: 0; text-align: left;\">\n",
|
||||
" <tr>\n",
|
||||
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
|
||||
" <img src=\"../business.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
|
||||
" </td>\n",
|
||||
" <td>\n",
|
||||
" <h2 style=\"color:#181;\">Business value of these exercises</h2>\n",
|
||||
" <span style=\"color:#181;\">A final thought. While I've designed these notebooks to be educational, I've also tried to make them enjoyable. We'll do fun things like have LLMs tell jokes and argue with each other. But fundamentally, my goal is to teach skills you can apply in business. I'll explain business implications as we go, and it's worth keeping this in mind: as you build experience with models and techniques, think of ways you could put this into action at work today. Please do contact me if you'd like to discuss more or if you have ideas to bounce off me.</span>\n",
|
||||
" </td>\n",
|
||||
" </tr>\n",
|
||||
"</table>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4e2a9393-7767-488e-a8bf-27c12dca35bd",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# imports\n",
|
||||
"\n",
|
||||
"import os\n",
|
||||
"import requests\n",
|
||||
"from dotenv import load_dotenv\n",
|
||||
"from bs4 import BeautifulSoup\n",
|
||||
"from IPython.display import Markdown, display\n",
|
||||
"from openai import OpenAI\n",
|
||||
"\n",
|
||||
"# If you get an error running this cell, then please head over to the troubleshooting notebook!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6900b2a8-6384-4316-8aaa-5e519fca4254",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Connecting to OpenAI\n",
|
||||
"\n",
|
||||
"The next cell is where we load in the environment variables in your `.env` file and connect to OpenAI.\n",
|
||||
"\n",
|
||||
"## Troubleshooting if you have problems:\n",
|
||||
"\n",
|
||||
"Head over to the [troubleshooting](troubleshooting.ipynb) notebook in this folder for step by step code to identify the root cause and fix it!\n",
|
||||
"\n",
|
||||
"If you make a change, try restarting the \"Kernel\" (the python process sitting behind this notebook) by Kernel menu >> Restart Kernel and Clear Outputs of All Cells. Then try this notebook again, starting at the top.\n",
|
||||
"\n",
|
||||
"Or, contact me! Message me or email ed@edwarddonner.com and we will get this to work.\n",
|
||||
"\n",
|
||||
"Any concerns about API costs? See my notes in the README - costs should be minimal, and you can control it at every point. You can also use Ollama as a free alternative, which we discuss during Day 2."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7b87cadb-d513-4303-baee-a37b6f938e4d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Load environment variables in a file called .env\n",
|
||||
"\n",
|
||||
"load_dotenv(override=True)\n",
|
||||
"api_key = os.getenv('OPENAI_API_KEY')\n",
|
||||
"\n",
|
||||
"# Check the key\n",
|
||||
"\n",
|
||||
"if not api_key:\n",
|
||||
" print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n",
|
||||
"elif not api_key.startswith(\"sk-proj-\"):\n",
|
||||
" print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n",
|
||||
"elif api_key.strip() != api_key:\n",
|
||||
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n",
|
||||
"else:\n",
|
||||
" print(\"API key found and looks good so far!\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "019974d9-f3ad-4a8a-b5f9-0a3719aea2d3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import httpx\n",
|
||||
"openai = OpenAI(http_client=httpx.Client(verify=False))\n",
|
||||
"# If this doesn't work, try Kernel menu >> Restart Kernel and Clear Outputs Of All Cells, then run the cells from the top of this notebook down.\n",
|
||||
"# If it STILL doesn't work (horrors!) then please see the Troubleshooting notebook in this folder for full instructions"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "442fc84b-0815-4f40-99ab-d9a5da6bda91",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Let's make a quick call to a Frontier model to get started, as a preview!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a58394bf-1e45-46af-9bfd-01e24da6f49a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# To give you a preview -- calling OpenAI with these messages is this easy. Any problems, head over to the Troubleshooting notebook.\n",
|
||||
"\n",
|
||||
"message = \"Hello, GPT! This is my first ever message to you! Hi!\"\n",
|
||||
"response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=[{\"role\":\"user\", \"content\":message}])\n",
|
||||
"print(response.choices[0].message.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2aa190e5-cb31-456a-96cc-db109919cd78",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## OK onwards with our first project"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c5e793b2-6775-426a-a139-4848291d0463",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# A class to represent a Webpage\n",
|
||||
"# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n",
|
||||
"\n",
|
||||
"# Some websites need you to use proper headers when fetching them:\n",
|
||||
"headers = {\n",
|
||||
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"class Website:\n",
|
||||
"\n",
|
||||
" def __init__(self, url):\n",
|
||||
" \"\"\"\n",
|
||||
" Create this Website object from the given url using the BeautifulSoup library\n",
|
||||
" \"\"\"\n",
|
||||
" self.url = url\n",
|
||||
" requests.packages.urllib3.disable_warnings()\n",
|
||||
" response = requests.get(url, headers=headers, verify=False)\n",
|
||||
" soup = BeautifulSoup(response.content, 'html.parser')\n",
|
||||
" self.title = soup.title.string if soup.title else \"No title found\"\n",
|
||||
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
|
||||
" irrelevant.decompose()\n",
|
||||
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "2ef960cf-6dc2-4cda-afb3-b38be12f4c97",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Let's try one out. Change the website and add print statements to follow along.\n",
|
||||
"ed = Website(\"http://edwarddonner.com\")\n",
|
||||
"print(ed.title)\n",
|
||||
"print(ed.text)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6a478a0c-2c53-48ff-869c-4d08199931e1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Types of prompts\n",
|
||||
"\n",
|
||||
"You may know this already - but if not, you will get very familiar with it!\n",
|
||||
"\n",
|
||||
"Models like GPT4o have been trained to receive instructions in a particular way.\n",
|
||||
"\n",
|
||||
"They expect to receive:\n",
|
||||
"\n",
|
||||
"**A system prompt** that tells them what task they are performing and what tone they should use\n",
|
||||
"\n",
|
||||
"**A user prompt** -- the conversation starter that they should reply to"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "abdb8417-c5dc-44bc-9bee-2e059d162699",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Define our system prompt - you can experiment with this later, changing the last sentence to 'Respond in markdown in Spanish.'\n",
|
||||
"\n",
|
||||
"system_prompt = \"You are an assistant that analyzes the contents of a website \\\n",
|
||||
"and provides a short summary, ignoring text that might be navigation related. \\\n",
|
||||
"Respond in markdown.\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f0275b1b-7cfe-4f9d-abfa-7650d378da0c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# A function that writes a User Prompt that asks for summaries of websites:\n",
|
||||
"\n",
|
||||
"def user_prompt_for(website):\n",
|
||||
" user_prompt = f\"You are looking at a website titled {website.title}\"\n",
|
||||
" user_prompt += \"\\nThe contents of this website is as follows; \\\n",
|
||||
"please provide a short summary of this website in markdown. \\\n",
|
||||
"If it includes news or announcements, then summarize these too.\\n\\n\"\n",
|
||||
" user_prompt += website.text\n",
|
||||
" return user_prompt"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "26448ec4-5c00-4204-baec-7df91d11ff2e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(user_prompt_for(ed))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ea211b5f-28e1-4a86-8e52-c0b7677cadcc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Messages\n",
|
||||
"\n",
|
||||
"The API from OpenAI expects to receive messages in a particular structure.\n",
|
||||
"Many of the other APIs share this structure:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"[\n",
|
||||
" {\"role\": \"system\", \"content\": \"system message goes here\"},\n",
|
||||
" {\"role\": \"user\", \"content\": \"user message goes here\"}\n",
|
||||
"]\n",
|
||||
"```\n",
|
||||
"To give you a preview, the next 2 cells make a rather simple call - we won't stretch the mighty GPT (yet!)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f25dcd35-0cd0-4235-9f64-ac37ed9eaaa5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"messages = [\n",
|
||||
" {\"role\": \"system\", \"content\": \"You are a snarky assistant\"},\n",
|
||||
" {\"role\": \"user\", \"content\": \"What is 2 + 2?\"}\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "21ed95c5-7001-47de-a36d-1d6673b403ce",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# To give you a preview -- calling OpenAI with system and user messages:\n",
|
||||
"\n",
|
||||
"response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=messages)\n",
|
||||
"print(response.choices[0].message.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d06e8d78-ce4c-4b05-aa8e-17050c82bb47",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## And now let's build useful messages for GPT-4o-mini, using a function"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "0134dfa4-8299-48b5-b444-f2a8c3403c88",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# See how this function creates exactly the format above\n",
|
||||
"\n",
|
||||
"def messages_for(website):\n",
|
||||
" return [\n",
|
||||
" {\"role\": \"system\", \"content\": system_prompt},\n",
|
||||
" {\"role\": \"user\", \"content\": user_prompt_for(website)}\n",
|
||||
" ]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "36478464-39ee-485c-9f3f-6a4e458dbc9c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Try this out, and then try for a few more websites\n",
|
||||
"\n",
|
||||
"messages_for(ed)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "16f49d46-bf55-4c3e-928f-68fc0bf715b0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Time to bring it together - the API for OpenAI is very simple!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "905b9919-aba7-45b5-ae65-81b3d1d78e34",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# And now: call the OpenAI API. You will get very familiar with this!\n",
|
||||
"\n",
|
||||
"def summarize(url):\n",
|
||||
" website = Website(url)\n",
|
||||
" response = openai.chat.completions.create(\n",
|
||||
" model = \"gpt-4o-mini\",\n",
|
||||
" messages = messages_for(website)\n",
|
||||
" )\n",
|
||||
" return response.choices[0].message.content"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "05e38d41-dfa4-4b20-9c96-c46ea75d9fb5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"summarize(\"https://edwarddonner.com\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "3d926d59-450e-4609-92ba-2d6f244f1342",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# A function to display this nicely in the Jupyter output, using markdown\n",
|
||||
"\n",
|
||||
"def display_summary(url):\n",
|
||||
" summary = summarize(url)\n",
|
||||
" display(Markdown(summary))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "3018853a-445f-41ff-9560-d925d1774b2f",
|
||||
"metadata": {
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"display_summary(\"https://edwarddonner.com\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f8a34db6-9c2f-4f5e-95b4-62090d7b591b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"display_summary(\"https://openai.com\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b3bcf6f4-adce-45e9-97ad-d9a5d7a3a624",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Let's try more websites\n",
|
||||
"\n",
|
||||
"Note that this will only work on websites that can be scraped using this simplistic approach.\n",
|
||||
"\n",
|
||||
"Websites that are rendered with Javascript, like React apps, won't show up. See the community-contributions folder for a Selenium implementation that gets around this. You'll need to read up on installing Selenium (ask ChatGPT!)\n",
|
||||
"\n",
|
||||
"Also Websites protected with CloudFront (and similar) may give 403 errors - many thanks Andy J for pointing this out.\n",
|
||||
"\n",
|
||||
"But many websites will work just fine!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "45d83403-a24c-44b5-84ac-961449b4008f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"display_summary(\"https://cnn.com\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "75e9fd40-b354-4341-991e-863ef2e59db7",
|
||||
"metadata": {
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"display_summary(\"https://anthropic.com\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c951be1a-7f1b-448f-af1f-845978e47e2c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<table style=\"margin: 0; text-align: left;\">\n",
|
||||
" <tr>\n",
|
||||
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
|
||||
" <img src=\"../business.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
|
||||
" </td>\n",
|
||||
" <td>\n",
|
||||
" <h2 style=\"color:#181;\">Business applications</h2>\n",
|
||||
" <span style=\"color:#181;\">In this exercise, you experienced calling the Cloud API of a Frontier Model (a leading model at the frontier of AI) for the first time. We will be using APIs like OpenAI at many stages in the course, in addition to building our own LLMs.\n",
|
||||
"\n",
|
||||
"More specifically, we've applied this to Summarization - a classic Gen AI use case to make a summary. This can be applied to any business vertical - summarizing the news, summarizing financial performance, summarizing a resume in a cover letter - the applications are limitless. Consider how you could apply Summarization in your business, and try prototyping a solution.</span>\n",
|
||||
" </td>\n",
|
||||
" </tr>\n",
|
||||
"</table>\n",
|
||||
"\n",
|
||||
"<table style=\"margin: 0; text-align: left;\">\n",
|
||||
" <tr>\n",
|
||||
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
|
||||
" <img src=\"../important.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
|
||||
" </td>\n",
|
||||
" <td>\n",
|
||||
" <h2 style=\"color:#900;\">Before you continue - now try yourself</h2>\n",
|
||||
" <span style=\"color:#900;\">Use the cell below to make your own simple commercial example. Stick with the summarization use case for now. Here's an idea: write something that will take the contents of an email, and will suggest an appropriate short subject line for the email. That's the kind of feature that might be built into a commercial email tool.</span>\n",
|
||||
" </td>\n",
|
||||
" </tr>\n",
|
||||
"</table>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "00743dac-0e70-45b7-879a-d7293a6f68a6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# A small exercise to feed the LLM with image alt text and return a funny tweet.\n",
|
||||
"\n",
|
||||
"# Step 1: Create your prompts\n",
|
||||
"import json\n",
|
||||
"system_prompt = \"You are a meme lord. You like tweeting funny and hilarious comments on images. To understand the image you would be given alt text on the image.\"\n",
|
||||
"class website:\n",
|
||||
" def __init__(self,url):\n",
|
||||
" self.url = url\n",
|
||||
" requests.packages.urllib3.disable_warnings()\n",
|
||||
" response = requests.get(url, headers=headers, verify=False)\n",
|
||||
" html_content = response.content\n",
|
||||
" soup = BeautifulSoup(html_content, 'html.parser')\n",
|
||||
" image_tags = soup.find_all('img')\n",
|
||||
" self.image_urls = [img['src'] for img in image_tags if img.get('src')]\n",
|
||||
" self.image_alt = [img['alt'] if img.get('alt') else \"\" for img in image_tags]\n",
|
||||
"\n",
|
||||
"    # Restricting to the first 4 images only (NOTE: assumes at least 4 images exist — range(4) below will fail otherwise; confirm).\n",
|
||||
" if self.image_urls:\n",
|
||||
" self.images = {self.image_urls[i]:self.image_alt[i] for i in range(4)}\n",
|
||||
" else:\n",
|
||||
" self.images = {}\n",
|
||||
" \n",
|
||||
"\n",
|
||||
"def user_prompt_for(website):\n",
|
||||
" user_prompt = f\"Following are images with their alt-text:\"\n",
|
||||
" user_prompt += json.dumps(website.images)\n",
|
||||
" user_prompt += \"\\n Give me a markdown layout with tables for each image where each image is given its own row, with the image itself on the left and funny tweet on the right.\"\n",
|
||||
" return user_prompt\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Step 2: Make the messages list\n",
|
||||
"page = website(\"https://www.pexels.com/\")\n",
|
||||
"user_prompt = user_prompt_for(page)\n",
|
||||
"messages = [{\"role\":\"system\",\"content\":system_prompt},{\"role\":\"user\", \"content\":user_prompt}] # fill this in\n",
|
||||
"\n",
|
||||
"# Step 3: Call OpenAI\n",
|
||||
"response = openai.chat.completions.create(\n",
|
||||
" model = \"gpt-4o-mini\",\n",
|
||||
" messages = messages\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"# Step 4: print the result\n",
|
||||
"display(Markdown((response.choices[0].message.content)))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "36ed9f14-b349-40e9-a42c-b367e77f8bda",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## An extra exercise for those who enjoy web scraping\n",
|
||||
"\n",
|
||||
"You may notice that if you try `display_summary(\"https://openai.com\")` - it doesn't work! That's because OpenAI has a fancy website that uses Javascript. There are many ways around this that some of you might be familiar with. For example, Selenium is a hugely popular framework that runs a browser behind the scenes, renders the page, and allows you to query it. If you have experience with Selenium, Playwright or similar, then feel free to improve the Website class to use them. In the community-contributions folder, you'll find an example Selenium solution from a student (thank you!)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "eeab24dc-5f90-4570-b542-b0585aca3eb6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Sharing your code\n",
|
||||
"\n",
|
||||
"I'd love it if you share your code afterwards so I can share it with others! You'll notice that some students have already made changes (including a Selenium implementation) which you will find in the community-contributions folder. If you'd like to add your changes to that folder, submit a Pull Request with your new versions in that folder and I'll merge your changes.\n",
|
||||
"\n",
|
||||
"If you're not an expert with git (and I am not!) then GPT has given some nice instructions on how to submit a Pull Request. It's a bit of an involved process, but once you've done it once it's pretty clear. As a pro-tip: it's best if you clear the outputs of your Jupyter notebooks (Edit >> Clean outputs of all cells, and then Save) for clean notebooks.\n",
|
||||
"\n",
|
||||
"Here are good instructions courtesy of an AI friend: \n",
|
||||
"https://chatgpt.com/share/677a9cb5-c64c-8012-99e0-e06e88afd293"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f4484fcf-8b39-4c3f-9674-37970ed71988",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.11"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
229
week1/community-contributions/website-summarizer-by-tithi.ipynb
Normal file
229
week1/community-contributions/website-summarizer-by-tithi.ipynb
Normal file
@@ -0,0 +1,229 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "29ddd15d-a3c5-4f4e-a678-873f56162724",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import requests\n",
|
||||
"from bs4 import BeautifulSoup\n",
|
||||
"from IPython.display import Markdown, display\n",
|
||||
"import ollama"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "479ff514-e8bd-4985-a572-2ea28bb4fa40",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest â ‹ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest â ™ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest â ¹ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest â ¸ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest â ¼ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest â ´ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest â ¦ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest â § \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest â ‡ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest â <C3A2> \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest \u001b[K\n",
|
||||
"pulling 2bada8a74506... 100% ▕████████████████â–<C3A2> 4.7 GB \u001b[K\n",
|
||||
"pulling 66b9ea09bd5b... 100% ▕████████████████â–<C3A2> 68 B \u001b[K\n",
|
||||
"pulling eb4402837c78... 100% ▕████████████████â–<C3A2> 1.5 KB \u001b[K\n",
|
||||
"pulling 832dd9e00a68... 100% ▕████████████████â–<C3A2> 11 KB \u001b[K\n",
|
||||
"pulling 2f15b3218f05... 100% ▕████████████████â–<C3A2> 487 B \u001b[K\n",
|
||||
"verifying sha256 digest \u001b[K\n",
|
||||
"writing manifest \u001b[K\n",
|
||||
"success \u001b[K\u001b[?25h\u001b[?2026l\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Let's just make sure the model is loaded\n",
|
||||
"\n",
|
||||
"!ollama pull qwen2.5\n",
|
||||
"MODEL = \"qwen2.5\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "6de38216-6d1c-48c4-877b-86d403f4e0f8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"headers = {\n",
|
||||
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"class Website:\n",
|
||||
"\n",
|
||||
" def __init__(self, url):\n",
|
||||
" \"\"\"\n",
|
||||
" Create this Website object from the given url using the BeautifulSoup library\n",
|
||||
" \"\"\"\n",
|
||||
" self.url = url\n",
|
||||
" response = requests.get(url, headers=headers)\n",
|
||||
" soup = BeautifulSoup(response.content, 'html.parser')\n",
|
||||
" self.title = soup.title.string if soup.title else \"No title found\"\n",
|
||||
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
|
||||
" irrelevant.decompose()\n",
|
||||
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "a531b8f6-d4f8-4140-b54d-bcf280bd7a99",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"system_prompt = \"You are an assistant that analyzes the contents of a website \\\n",
|
||||
"and provides a short summary, ignoring text that might be navigation related. \\\n",
|
||||
"Respond in markdown.\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "6b46ff43-4817-431e-8335-8d2cc9957910",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def user_prompt_for(website):\n",
|
||||
" user_prompt = f\"You are looking at a website titled {website.title}\"\n",
|
||||
" user_prompt += \"\\nThe contents of this website is as follows; \\\n",
|
||||
"please provide a summary of this website in markdown. \\\n",
|
||||
"If it includes news or announcements, then summarize these too.(only if they are present)\\n\\n\"\n",
|
||||
" user_prompt += website.text\n",
|
||||
" return user_prompt"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "13a3a001-5d91-4269-ab60-493bbf35bda4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def messages_for(website):\n",
|
||||
" return [\n",
|
||||
" {\"role\": \"system\", \"content\": system_prompt},\n",
|
||||
" {\"role\": \"user\", \"content\": user_prompt_for(website)}\n",
|
||||
" ]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "c61ad738-9395-415d-b88b-d4a70d4331aa",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def summarize(url):\n",
|
||||
" website = Website(url)\n",
|
||||
" response = ollama.chat(model=MODEL, messages=messages_for(website))\n",
|
||||
" return response['message']['content']"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "bdbcfa75-980b-4542-872d-af8b20546b5d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'```markdown\\n# Tailwind CSS Cheat Sheet Summary\\n\\nThis website serves as a comprehensive guide for developers using Tailwind CSS, providing quick access to commonly used utility classes and configurations. The content is organized into sections such as typography, layout, colors, shadows, and more, making it easy for users to find specific styles or settings.\\n\\n- **Typography**: Includes various font sizes, weights, line heights, and other typographic utilities.\\n- **Layout**: Features columns, grid, flexbox, spacing, and responsive design utilities.\\n- **Colors**: Lists predefined color palettes and utility classes for color manipulation.\\n- **Shadows**: Provides options to add depth and dimension to elements through shadow effects.\\n- **Other Sections**: Covers forms, animations, and more, with concise descriptions and examples.\\n\\nThe site is designed to be a one-stop reference tool, allowing developers to quickly apply Tailwind CSS styles without having to consult the official documentation every time.\\n```'"
|
||||
]
|
||||
},
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"summarize(\"https://www.creative-tim.com/twcomponents/cheatsheet/\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "817e6f73-1abe-4f79-9010-f4264e0f324a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def display_summary(url):\n",
|
||||
" summary = summarize(url)\n",
|
||||
" display(Markdown(summary))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"id": "504c19cf-9add-4a78-a028-fe2710e0604d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/markdown": [
|
||||
"# Summary\n",
|
||||
"\n",
|
||||
"**Home Page:**\n",
|
||||
"- The website is titled \"Home - Edward Donner\" and introduces Ed, who enjoys coding, experimenting with large language models (LLMs), DJing, and engaging in Hacker News.\n",
|
||||
"- He co-founded Nebula.io, an AI company focusing on helping people discover their potential. The platform uses proprietary LLMs for talent discovery and has been patented.\n",
|
||||
"\n",
|
||||
"**News/Announcements:**\n",
|
||||
"- **January 23, 2025:** LLM Workshop – Hands-on with Agents\n",
|
||||
"- **December 21, 2024:** Welcome, SuperDataScientists!\n",
|
||||
"- **November 13, 2024:** Mastering AI and LLM Engineering – Resources\n",
|
||||
"- **October 16, 2024:** From Software Engineer to AI Data Scientist – resources\n",
|
||||
"\n",
|
||||
"**Connect Section:**\n",
|
||||
"- Provides ways to get in touch with Ed, including email, LinkedIn, Twitter, Facebook, and a newsletter subscription form.\n",
|
||||
"\n",
|
||||
"**Additional Content:**\n",
|
||||
"- **Connect Four:** Describes it as an arena where LLMs compete against each other.\n",
|
||||
"- **About Page:** Further details about Ed's background and Nebula.io."
|
||||
],
|
||||
"text/plain": [
|
||||
"<IPython.core.display.Markdown object>"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"display_summary('https://edwarddonner.com')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "20d621cb-6bfb-41a6-bd98-a51ef0a8b158",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.11"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
148
week1/community-contributions/week1 exercise - my AI tutor.ipynb
Normal file
148
week1/community-contributions/week1 exercise - my AI tutor.ipynb
Normal file
@@ -0,0 +1,148 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f38e9ebb-453d-4b40-84f6-bc3e9bf4d7ef",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# imports\n",
|
||||
"\n",
|
||||
"import os\n",
|
||||
"import requests\n",
|
||||
"import json\n",
|
||||
"import ollama\n",
|
||||
"from typing import List\n",
|
||||
"from dotenv import load_dotenv\n",
|
||||
"from bs4 import BeautifulSoup\n",
|
||||
"from IPython.display import Markdown, display, update_display\n",
|
||||
"from openai import OpenAI\n",
|
||||
"\n",
|
||||
"# constants\n",
|
||||
"\n",
|
||||
"MODEL_GPT = 'gpt-4o-mini'\n",
|
||||
"MODEL_LLAMA = 'llama3.2'\n",
|
||||
"OLLAMA_API = \"http://localhost:11434/api/chat\"\n",
|
||||
"HEADERS = {\"Content-Type\": \"application/json\"}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f367c5bb-80a2-4d78-8f27-823f5dafe7c0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# set up environment\n",
|
||||
"\n",
|
||||
"load_dotenv(override=True)\n",
|
||||
"api_key = os.getenv('OPENAI_API_KEY')\n",
|
||||
"openai = OpenAI()\n",
|
||||
"\n",
|
||||
"# System prompt for the AI TECHNICAL LLM AND PYTHON TUTOR\n",
|
||||
"\n",
|
||||
"system_prompt = \"You are an EXPERT in AI, LLMS and Python \\\n",
|
||||
"Provide the answer with examples ALWAYS when necessary. \\\n",
|
||||
"If you do not know the answer just say 'I don't know the answer' \\\n",
|
||||
"Respond in markdown in Spanish.\"\n",
|
||||
"\n",
|
||||
"# messages\n",
|
||||
"def messages_for(question):\n",
|
||||
" return [\n",
|
||||
" {\"role\": \"system\", \"content\": system_prompt},\n",
|
||||
" {\"role\": \"user\", \"content\": question}\n",
|
||||
" ]\n",
|
||||
"\n",
|
||||
"# here is the question; type over this to ask something new\n",
|
||||
"\n",
|
||||
"question = \"\"\"\n",
|
||||
"Please explain what this code does and why:\n",
|
||||
"yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n",
|
||||
"\"\"\"\n",
|
||||
"question = question[:5_000] # Truncate if more than 5,000 characters"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a90d726d-d494-401f-9cd6-0260f5c781e0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# METHODS TO DISPLAY\n",
|
||||
"def display_summary_ollama(question):\n",
|
||||
" response = ollama.chat(\n",
|
||||
" model = MODEL_LLAMA,\n",
|
||||
" messages = messages_for(question)\n",
|
||||
" ) \n",
|
||||
" summary = response['message']['content']\n",
|
||||
" display(Markdown(summary))\n",
|
||||
"\n",
|
||||
"def display_summary_gpt(question):\n",
|
||||
" stream = openai.chat.completions.create(\n",
|
||||
" model = MODEL_GPT,\n",
|
||||
" messages = messages_for(question),\n",
|
||||
" stream=True\n",
|
||||
" )\n",
|
||||
" response = \"\"\n",
|
||||
" display_handle = display(Markdown(\"\"), display_id=True)\n",
|
||||
" for chunk in stream:\n",
|
||||
" response += chunk.choices[0].delta.content or ''\n",
|
||||
" response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n",
|
||||
" update_display(Markdown(response), display_id=display_handle.display_id)\n",
|
||||
" \n",
|
||||
"def display_summary(llm, question):\n",
|
||||
" if llm.startswith(\"llama3.2\"):\n",
|
||||
" display_summary_ollama(question)\n",
|
||||
" else:\n",
|
||||
" display_summary_gpt(question)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4e993b6d-8fee-43f3-9e36-f86701a5cc57",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Get gpt-4o-mini to answer, with streaming\n",
|
||||
"\n",
|
||||
"display_summary(MODEL_GPT, question)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "31f6283a-ee57-415e-9a57-83d07261b7f9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Get Llama 3.2 to answer\n",
|
||||
"\n",
|
||||
"display_summary(MODEL_LLAMA, question)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.11"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -0,0 +1,464 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d15d8294-3328-4e07-ad16-8a03e9bbfdb9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# MY !FIRST LAB\n",
|
||||
"\n",
|
||||
"### Script will take a stackoverflow issue and summarize it as a technical tutorial. \n",
|
||||
"\n",
|
||||
"Example links to use: \n",
|
||||
" \n",
|
||||
"https://stackoverflow.com/questions/14220321/how-do-i-return-the-response-from-an-asynchronous-call \n",
|
||||
"https://stackoverflow.com/questions/60174/how-can-i-prevent-sql-injection-in-php\n",
|
||||
"https://stackoverflow.com/questions/1732348/regex-match-open-tags-except-xhtml-self-contained-tags\n",
|
||||
"\n",
|
||||
"*Note: Issues must be answered preferably by a lot of users.*\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "e2fd67f3-6441-4fee-b19c-7c91e6188348",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"website = 'https://stackoverflow.com/questions/60174/how-can-i-prevent-sql-injection-in-php'"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "4e2a9393-7767-488e-a8bf-27c12dca35bd",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# imports\n",
|
||||
"\n",
|
||||
"import os\n",
|
||||
"import requests\n",
|
||||
"from dotenv import load_dotenv\n",
|
||||
"from bs4 import BeautifulSoup\n",
|
||||
"from IPython.display import Markdown, display\n",
|
||||
"from openai import OpenAI\n",
|
||||
"\n",
|
||||
"# If you get an error running this cell, then please head over to the troubleshooting notebook!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "7b87cadb-d513-4303-baee-a37b6f938e4d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Load environment variables in a file called .env\n",
|
||||
"\n",
|
||||
"load_dotenv(override=True)\n",
|
||||
"api_key = os.getenv('OPENAI_API_KEY')\n",
|
||||
"\n",
|
||||
"# Check the key\n",
|
||||
"\n",
|
||||
"if not api_key:\n",
|
||||
" print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n",
|
||||
"elif not api_key.startswith(\"sk-proj-\"):\n",
|
||||
" print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n",
|
||||
"elif api_key.strip() != api_key:\n",
|
||||
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n",
|
||||
"else:\n",
|
||||
" print(\"API key found and looks good so far!\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "019974d9-f3ad-4a8a-b5f9-0a3719aea2d3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"openai = OpenAI()\n",
|
||||
"\n",
|
||||
"# If this doesn't work, try Kernel menu >> Restart Kernel and Clear Outputs Of All Cells, then run the cells from the top of this notebook down.\n",
|
||||
"# If it STILL doesn't work (horrors!) then please see the Troubleshooting notebook in this folder for full instructions"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "c5e793b2-6775-426a-a139-4848291d0463",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# A class to represent a Webpage\n",
|
||||
"# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n",
|
||||
"\n",
|
||||
"# Some websites need you to use proper headers when fetching them:\n",
|
||||
"headers = {\n",
|
||||
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"class Website:\n",
|
||||
"\n",
|
||||
" def __init__(self, url):\n",
|
||||
" \"\"\"\n",
|
||||
" Create this Website object from the given url using the BeautifulSoup library\n",
|
||||
" \"\"\"\n",
|
||||
" self.url = url\n",
|
||||
" response = requests.get(url, headers=headers)\n",
|
||||
" soup = BeautifulSoup(response.content, 'html.parser')\n",
|
||||
" self.title = soup.title.string if soup.title else \"No title found\"\n",
|
||||
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
|
||||
" irrelevant.decompose()\n",
|
||||
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "2ef960cf-6dc2-4cda-afb3-b38be12f4c97",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"mysql - How can I prevent SQL injection in PHP? - Stack Overflow\n",
|
||||
"Skip to main content\n",
|
||||
"Stack Overflow\n",
|
||||
"About\n",
|
||||
"Products\n",
|
||||
"OverflowAI\n",
|
||||
"Stack Overflow for Teams\n",
|
||||
"Where developers & technologists share private knowledge with c\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Let's try one out. Change the website and add print statements to follow along.\n",
|
||||
"\n",
|
||||
"website_content = Website(website)\n",
|
||||
"print(website_content.title[:100])\n",
|
||||
"print(website_content.text[:150])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "6a478a0c-2c53-48ff-869c-4d08199931e1",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Types of prompts\n",
|
||||
"\n",
|
||||
"You may know this already - but if not, you will get very familiar with it!\n",
|
||||
"\n",
|
||||
"Models like GPT4o have been trained to receive instructions in a particular way.\n",
|
||||
"\n",
|
||||
"They expect to receive:\n",
|
||||
"\n",
|
||||
"**A system prompt** that tells them what task they are performing and what tone they should use\n",
|
||||
"\n",
|
||||
"**A user prompt** -- the conversation starter that they should reply to"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "268cb127-ec40-4016-9436-94a1ae10a1c6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Define our system prompt - you can experiment with this later, changing the last sentence to 'Respond in markdown in Spanish.\"\n",
|
||||
"\n",
|
||||
"system_prompt = \"You are a technical writer that analyzes the contents of a stackoverflow website issue containing a question and answer \\\n",
|
||||
"and provides a summary in the form of a technical tutorial , ignoring text that might be navigation related. \\\n",
|
||||
"Respond in markdown.\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "f0275b1b-7cfe-4f9d-abfa-7650d378da0c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# A function that writes a User Prompt that asks for summaries of websites:\n",
|
||||
"\n",
|
||||
"def user_prompt_for(website):\n",
|
||||
" user_prompt = f\"You are looking at a website titled {website.title}\"\n",
|
||||
" user_prompt += f\"\"\" \n",
|
||||
"\n",
|
||||
" You are looking at a website titled {website_content.title}\n",
|
||||
"\n",
|
||||
"    Create a technical tutorial based on the following Stack Overflow content:\n",
|
||||
" \n",
|
||||
" {website_content.text}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
" The tutorial should include an introduction, problem statement, solution steps, and conclusion.\n",
|
||||
"    Tutorial should be in markdown format.\n",
|
||||
" \"\"\"\n",
|
||||
" user_prompt += website.text\n",
|
||||
" return user_prompt"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "26448ec4-5c00-4204-baec-7df91d11ff2e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"You are looking at a website titled mysql - How can I prevent SQL injection in PHP? - Stack Overflow \n",
|
||||
"\n",
|
||||
" You are looking at a website titled mysql - How can I prevent SQL injection in PHP? - Stack Overflow\n",
|
||||
"\n",
|
||||
" Create a technical tutorial baswebsite_content on the following Stack Overflow content:\n",
|
||||
"\n",
|
||||
" Skip to main content\n",
|
||||
"Stack Overflow\n",
|
||||
"About\n",
|
||||
"Products\n",
|
||||
"OverflowAI\n",
|
||||
"Stack Overflow for Teams\n",
|
||||
"Where developers & technologists share private knowledge with coworkers\n",
|
||||
"Advertising & Talent\n",
|
||||
"Reach devs & t\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(user_prompt_for(website_content)[:500])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ea211b5f-28e1-4a86-8e52-c0b7677cadcc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Messages\n",
|
||||
"\n",
|
||||
"The API from OpenAI expects to receive messages in a particular structure.\n",
|
||||
"Many of the other APIs share this structure:\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"[\n",
|
||||
" {\"role\": \"system\", \"content\": \"system message goes here\"},\n",
|
||||
" {\"role\": \"user\", \"content\": \"user message goes here\"}\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"To give you a preview, the next 2 cells make a rather simple call - we won't stretch the mighty GPT (yet!)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d06e8d78-ce4c-4b05-aa8e-17050c82bb47",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## And now let's build useful messages for GPT-4o-mini, using a function"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "0134dfa4-8299-48b5-b444-f2a8c3403c88",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# See how this function creates exactly the format above\n",
|
||||
"\n",
|
||||
"def messages_for(website):\n",
|
||||
" return [\n",
|
||||
" {\"role\": \"system\", \"content\": system_prompt},\n",
|
||||
" {\"role\": \"user\", \"content\": user_prompt_for(website)}\n",
|
||||
" ]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "16f49d46-bf55-4c3e-928f-68fc0bf715b0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Time to bring it together - the API for OpenAI is very simple!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "905b9919-aba7-45b5-ae65-81b3d1d78e34",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# And now: call the OpenAI API. You will get very familiar with this!\n",
|
||||
"\n",
|
||||
"def summarize(url):\n",
|
||||
" website = Website(url)\n",
|
||||
" response = openai.chat.completions.create(\n",
|
||||
" model = \"gpt-4o-mini\",\n",
|
||||
" messages = messages_for(website)\n",
|
||||
" )\n",
|
||||
" return response.choices[0].message.content"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "3d926d59-450e-4609-92ba-2d6f244f1342",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# A function to display this nicely in the Jupyter output, using markdown\n",
|
||||
"\n",
|
||||
"def display_summary(url):\n",
|
||||
" summary = summarize(url)\n",
|
||||
" display(Markdown(summary))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "0a6970cc-bed8-4759-a312-3b81236c2f4e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/markdown": [
|
||||
"```markdown\n",
|
||||
"# How to Prevent SQL Injection in PHP\n",
|
||||
"\n",
|
||||
"## Introduction\n",
|
||||
"SQL injection is a serious security vulnerability that can allow an attacker to interfere with the queries that your application makes to the database. By exploiting this vulnerability, an attacker can gain unauthorized access to sensitive data, manipulate data, and even execute administrative operations on the database. This tutorial will guide you on how to prevent SQL injection in your PHP applications through various best practices.\n",
|
||||
"\n",
|
||||
"## Problem Statement\n",
|
||||
"Consider the following PHP code that is vulnerable to SQL injection:\n",
|
||||
"\n",
|
||||
"```php\n",
|
||||
"$unsafe_variable = $_POST['user_input']; \n",
|
||||
"mysql_query(\"INSERT INTO `table` (`column`) VALUES ('$unsafe_variable')\");\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"If a user were to input something like `value'); DROP TABLE table;--`, the query would become:\n",
|
||||
"\n",
|
||||
"```sql\n",
|
||||
"INSERT INTO `table` (`column`) VALUES('value'); DROP TABLE table;--');\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"This inserts an unwanted SQL command leading to disastrous effects on the database.\n",
|
||||
"\n",
|
||||
"## Solution Steps\n",
|
||||
"\n",
|
||||
"### 1. Use Prepared Statements\n",
|
||||
"The best method to prevent SQL injection is to use prepared statements with parameterized queries. This separates SQL logic from data, ensuring that user input is treated as data, not executable code.\n",
|
||||
"\n",
|
||||
"#### Using PDO\n",
|
||||
"Here's how to use PDO in PHP:\n",
|
||||
"\n",
|
||||
"```php\n",
|
||||
"$dsn = 'mysql:dbname=dbtest;host=127.0.0.1;charset=utf8mb4';\n",
|
||||
"$dbConnection = new PDO($dsn, 'user', 'password');\n",
|
||||
"$dbConnection->setAttribute(PDO::ATTR_ERRMODE, PDO::ERRMODE_EXCEPTION);\n",
|
||||
"$dbConnection->setAttribute(PDO::ATTR_EMULATE_PREPARES, false);\n",
|
||||
"\n",
|
||||
"$stmt = $dbConnection->prepare('SELECT * FROM users WHERE name = :name');\n",
|
||||
"$stmt->execute(['name' => $name]);\n",
|
||||
"\n",
|
||||
"foreach ($stmt as $row) {\n",
|
||||
" // Process row\n",
|
||||
"}\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"#### Using MySQLi\n",
|
||||
"If you're using MySQLi, the syntax is slightly different:\n",
|
||||
"\n",
|
||||
"```php\n",
|
||||
"$dbConnection = new mysqli('127.0.0.1', 'username', 'password', 'test');\n",
|
||||
"$dbConnection->set_charset('utf8mb4');\n",
|
||||
"\n",
|
||||
"$stmt = $dbConnection->prepare('SELECT * FROM users WHERE name = ?');\n",
|
||||
"$stmt->bind_param('s', $name); // 's' stands for string\n",
|
||||
"$stmt->execute();\n",
|
||||
"$result = $stmt->get_result();\n",
|
||||
"\n",
|
||||
"while ($row = $result->fetch_assoc()) {\n",
|
||||
" // Process row\n",
|
||||
"}\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"### 2. Properly Configure the Database Connection\n",
|
||||
"When using PDO, ensure that emulated prepared statements are disabled. This is essential for real prepared statements to take effect.\n",
|
||||
"\n",
|
||||
"Example configuration:\n",
|
||||
"```php\n",
|
||||
"$dbConnection->setAttribute(PDO::ATTR_EMULATE_PREPARES, false);\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"### 3. Validate Input Data\n",
|
||||
"In addition to using prepared statements, you should validate and sanitize user inputs. Implementing whitelist validation can help by ensuring only expected values are processed.\n",
|
||||
"\n",
|
||||
"For example, if you expect a sorting direction:\n",
|
||||
"```php\n",
|
||||
"$dir = !empty($_GET['dir']) && $_GET['dir'] === 'DESC' ? 'DESC' : 'ASC';\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"### 4. Limit Database Permissions\n",
|
||||
"Restrict database user permissions to the minimum required for their role. For example, a user who only needs to read data should not have permissions to delete or alter it.\n",
|
||||
"\n",
|
||||
"```sql\n",
|
||||
"GRANT SELECT ON database TO 'username'@'localhost';\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"### 5. Regularly Update Your Codebase\n",
|
||||
"Keep libraries and the PHP version you are using up-to-date. Deprecated functions and libraries often contain vulnerabilities that can be exploited.\n",
|
||||
"\n",
|
||||
"## Conclusion\n",
|
||||
"Preventing SQL injection in PHP applications requires a proactive approach. Using prepared statements ensures user input is handled securely, while validating data and limiting permissions fortifies your application against potential attacks. By implementing these best practices, you can significantly reduce the risk of SQL injection vulnerabilities in your applications.\n",
|
||||
"\n",
|
||||
"For more in-depth information on SQL injection prevention techniques, consult the [OWASP SQL Injection Prevention Cheat Sheet](https://owasp.org/www-community/attacks/SQL_Injection).\n",
|
||||
"```"
|
||||
],
|
||||
"text/plain": [
|
||||
"<IPython.core.display.Markdown object>"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"display_summary(website)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.11"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -0,0 +1,63 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
import os
|
||||
import argparse
|
||||
from dotenv import load_dotenv
|
||||
from openai import OpenAI
|
||||
|
||||
def load_openai_key():
    """Load the OpenAI API key from a local .env file and report on it.

    Returns:
        A human-readable status string: either an "Error: ..." message
        describing the first problem found with the key, or a success
        message. (Project keys are expected to start with 'sk-proj-'.)
    """
    # Read variables from a .env file, overriding any already-set values.
    load_dotenv(override=True)
    key = os.getenv('OPENAI_API_KEY')

    # Report the first validation failure, if any; otherwise success.
    if not key:
        return "Error: No API key was found!"
    if not key.startswith("sk-proj-"):
        return "Error: An API key was found, but it doesn't start sk-proj-; please check you're using the right key"
    if key.strip() != key:
        return "Error: An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them!"
    return "API key found and looks good so far!"
|
||||
|
||||
def ask_llm(client, model, user_prompt):
    """Generate a children's bedtime story about the given subject.

    Args:
        client: an OpenAI-compatible client exposing ``chat.completions.create``.
        model: name of the chat model to use.
        user_prompt: the story subject supplied by the user.

    Returns:
        The generated story text (content of the first completion choice).
    """
    # Fixed typos from the original prompt ("begining" -> "beginning",
    # "shoukd" -> "should") and capitalized the opening sentence so the
    # model receives clean instructions.
    system_prompt = """
    You are a writing assistant with an expertise in children's stories.
    Write a bedtime story inspired by the subject below.
    The story should have a beginning, middle, and end.
    The story should be appropriate for children ages 5-8 and have a positive message.
    I should be able to read the entire story in about 3 minutes.
    """
    response = client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ],
    )
    return response.choices[0].message.content
|
||||
|
||||
def main():
    """CLI entry point: parse arguments, pick a provider, print a story.

    Usage: storyteller.py {openai,ollama} subject [--model MODEL]
    """
    parser = argparse.ArgumentParser(description="AI Bedtime Storyteller")
    parser.add_argument("provider", choices=["openai", "ollama"], help="AI provider to use")
    parser.add_argument("subject", help="What do you want the story to be about?")
    # NOTE: the original computed `required=` by calling parse_known_args()
    # while still building the parser, which aborted with a confusing error
    # whenever the provider argument was absent (e.g. `--help` with no args).
    # Validate after parsing instead.
    parser.add_argument("--model", help="Model to use for Ollama (required if provider is 'ollama')")

    args = parser.parse_args()

    if args.provider == "openai":
        # The original discarded this status string, silently proceeding
        # with a missing/malformed key; surface the problem instead.
        key_status = load_openai_key()
        if key_status.startswith("Error"):
            return key_status
        client = OpenAI()
        model = "gpt-4o-mini"
    elif args.provider == "ollama":
        if not args.model:
            # Same effect as argparse's `required`: usage message + exit code 2.
            parser.error("--model is required when provider is 'ollama'")
        client = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')
        model = args.model
    else:
        # Unreachable thanks to `choices`, kept as a defensive guard.
        return "Error: invalid provider!"

    user_prompt = args.subject

    result = ask_llm(client, model, user_prompt)
    print("AI Response:", result)


if __name__ == "__main__":
    main()
|
||||
|
||||
@@ -0,0 +1,180 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "fe12c203-e6a6-452c-a655-afb8a03a4ff5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# End of week 1 exercise\n",
|
||||
"\n",
|
||||
"To demonstrate your familiarity with OpenAI API, and also Ollama, build a tool that takes a technical question, \n",
|
||||
"and responds with an explanation. This is a tool that you will be able to use yourself during the course!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c1070317-3ed9-4659-abe3-828943230e03",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# imports\n",
|
||||
"import os\n",
|
||||
"import requests\n",
|
||||
"import json\n",
|
||||
"from typing import List\n",
|
||||
"from dotenv import load_dotenv\n",
|
||||
"from bs4 import BeautifulSoup\n",
|
||||
"from IPython.display import Markdown, display, update_display\n",
|
||||
"from openai import OpenAI\n",
|
||||
"import ollama"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4a456906-915a-4bfd-bb9d-57e505c5093f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# constants\n",
|
||||
"MODEL_GPT = 'gpt-4o-mini'\n",
|
||||
"MODEL_LLAMA = 'llama3.2'"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a8d7923c-5f28-4c30-8556-342d7c8497c1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# set up environment\n",
|
||||
"load_dotenv(override=True)\n",
|
||||
"api_key = os.getenv('OPENAI_API_KEY')\n",
|
||||
"\n",
|
||||
"if api_key and api_key.startswith('sk-proj-') and len(api_key)>10:\n",
|
||||
" print(\"API key looks good so far\")\n",
|
||||
"else:\n",
|
||||
" print(\"There might be a problem with your API key? Please visit the troubleshooting notebook!\")\n",
|
||||
"\n",
|
||||
"openai = OpenAI()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "3f0d0137-52b0-47a8-81a8-11a90a010798",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"system_prompt = \"You are provided with a technical question. \\\n",
|
||||
"You are answering by providing a quick explanation and giving some examples.\\n\"\n",
|
||||
"\n",
|
||||
"# here is the question; type over this to ask something new\n",
|
||||
"question = \"\"\"\n",
|
||||
"Please explain what this code does and why:\n",
|
||||
"yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n",
|
||||
"\"\"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "60ce7000-a4a5-4cce-a261-e75ef45063b4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Get gpt-4o-mini to answer, with streaming\n",
|
||||
"def get_answer_gpt():\n",
|
||||
" stream = openai.chat.completions.create(\n",
|
||||
" model=MODEL_GPT,\n",
|
||||
" messages=[\n",
|
||||
" {\"role\": \"system\", \"content\": system_prompt},\n",
|
||||
" {\"role\": \"user\", \"content\": question}\n",
|
||||
" ],\n",
|
||||
" stream=True\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" response = \"\"\n",
|
||||
" display_handle = display(Markdown(\"\"), display_id=True)\n",
|
||||
" for chunk in stream:\n",
|
||||
" response += chunk.choices[0].delta.content or ''\n",
|
||||
" response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n",
|
||||
" update_display(Markdown(response), display_id=display_handle.display_id)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8f7c8ea8-4082-4ad0-8751-3301adcf6538",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Get Llama 3.2 to answer\n",
|
||||
"def get_answer_ollama():\n",
|
||||
" stream = ollama.generate(\n",
|
||||
" MODEL_LLAMA,\n",
|
||||
" question,\n",
|
||||
" stream=True\n",
|
||||
" )\n",
|
||||
" \n",
|
||||
" response = \"\"\n",
|
||||
" display_handle = display(Markdown(\"\"), display_id=True)\n",
|
||||
" for chunk in stream:\n",
|
||||
" response += chunk['response'] or ''\n",
|
||||
" response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n",
|
||||
" update_display(Markdown(response), display_id=display_handle.display_id)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4a859eb1-23fa-40dd-ba91-b35084433a00",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"get_answer_gpt()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "1c73f046-da3a-49a5-8a74-4b8a86a9032a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"get_answer_ollama()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "bea20f33-a710-44ab-9a4d-856db05e4201",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.11"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -203,6 +203,36 @@
|
||||
"print(response.choices[0].message.content)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9f9e22da-b891-41f6-9ac9-bd0c0a5f4f44",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Are you confused about why that works?\n",
|
||||
"\n",
|
||||
"It seems strange, right? We just used OpenAI code to call Ollama?? What's going on?!\n",
|
||||
"\n",
|
||||
"Here's the scoop:\n",
|
||||
"\n",
|
||||
"The Python class `OpenAI` is simply code written by OpenAI engineers that makes calls over the internet to an endpoint. \n",
|
||||
"\n",
|
||||
"When you call `openai.chat.completions.create()`, this python code just makes a web request to the following url: \"https://api.openai.com/v1/chat/completions\"\n",
|
||||
"\n",
|
||||
"Code like this is known as a \"client library\" - it's just wrapper code that runs on your machine to make web requests. The actual power of GPT is running on OpenAI's cloud behind this API, not on your computer!\n",
|
||||
"\n",
|
||||
"OpenAI was so popular, that lots of other AI providers provided identical web endpoints, so you could use the same approach.\n",
|
||||
"\n",
|
||||
"So Ollama has an endpoint running on your local box at http://localhost:11434/v1/chat/completions \n",
|
||||
"And in week 2 we'll discover that lots of other providers do this too, including Gemini and DeepSeek.\n",
|
||||
"\n",
|
||||
"And then the team at OpenAI had a great idea: they can extend their client library so you can specify a different 'base url', and use their library to call any compatible API.\n",
|
||||
"\n",
|
||||
"That's it!\n",
|
||||
"\n",
|
||||
"So when you say: `ollama_via_openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')` \n",
|
||||
"Then this will make the same endpoint calls, but to Ollama instead of OpenAI."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "bc7d1de3-e2ac-46ff-a302-3b4ba38c4c90",
|
||||
|
||||
@@ -68,7 +68,7 @@
|
||||
"1. Permissions. Please take a look at this [tutorial](https://chatgpt.com/share/67b0ae58-d1a8-8012-82ca-74762b0408b0) on permissions on Windows\n",
|
||||
"2. Anti-virus, Firewall, VPN. These can interfere with installations and network access; try temporarily disabling them as needed\n",
|
||||
"3. The evil Windows 260 character limit to filenames - here is a full [explanation and fix](https://chatgpt.com/share/67b0afb9-1b60-8012-a9f7-f968a5a910c7)!\n",
|
||||
"4. If you've not worked with Data Science packages on your computer before, you might need to install Microsoft Build Tools. Here are [instructions](https://chatgpt.com/share/67b0b762-327c-8012-b809-b4ec3b9e7be0).\n",
|
||||
"4. If you've not worked with Data Science packages on your computer before, you might need to install Microsoft Build Tools. Here are [instructions](https://chatgpt.com/share/67b0b762-327c-8012-b809-b4ec3b9e7be0). A student also mentioned that [these instructions](https://github.com/bycloudai/InstallVSBuildToolsWindows) might be helpful for people on Windows 11. \n",
|
||||
"\n",
|
||||
"## And for Mac people\n",
|
||||
"\n",
|
||||
@@ -127,7 +127,7 @@
|
||||
" print(f\"Environment Name: {venv_name}\")\n",
|
||||
"\n",
|
||||
"if conda_name != \"llms\" and venv_name != \"llms\" and venv_name != \"venv\":\n",
|
||||
" print(\"Neither Anaconda nor Virtualenv seem to be activated with the expected name 'llms'\")\n",
|
||||
" print(\"Neither Anaconda nor Virtualenv seem to be activated with the expected name 'llms' or 'venv'\")\n",
|
||||
" print(\"Did you run 'jupyter lab' from an activated environment with (llms) showing on the command line?\")\n",
|
||||
" print(\"If in doubt, close down all jupyter lab, and follow Part 5 in the SETUP-PC or SETUP-mac guide.\")"
|
||||
]
|
||||
|
||||
Reference in New Issue
Block a user