Merge remote-tracking branch 'upstream/main'

This commit is contained in:
dsadrianzadeh
2025-02-02 23:25:46 -05:00
85 changed files with 13308 additions and 49 deletions

View File

@@ -278,7 +278,7 @@
"# is up to date with any new upgrades to packages;\n",
"# But it might take a minute and will print a lot to output\n",
"\n",
"!conda env update -f ../environment.yml --prune"
"!conda env update -f ../environment.yml"
]
},
{

View File

@@ -140,7 +140,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
"version": "3.11.11"
}
},
"nbformat": 4,

View File

@@ -0,0 +1,408 @@
{
"cells": [
{
"cell_type": "raw",
"id": "f64407a0-fda5-48f3-a2d3-82e80d320931",
"metadata": {},
"source": [
"### \"Career Well-Being Companion\" ###\n",
"This project will gather feelings from the employee at the end of the day.\n",
"Based on the employee's feelings provided as input, the model will analyze the feelings, provide suggestions, and acknowledge the feelings the employee is going through.\n",
"The model will even ask the employee \"Do you want a more detailed response to cope with your feelings?\".\n",
"If the employee agrees, the model replies with online courses, tools, meetups and other ideas for the well-being of the employee.\n",
"\n",
"Immediate Impact: Professionals can quickly see value through insights or actionable suggestions.\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2b30a8fa-1067-4369-82fc-edb197551e43",
"metadata": {},
"outputs": [],
"source": [
"### Step 1: Emotional Check-in:\n",
"\n",
"# Input: User describes their feelings or workday.\n",
"# LLM Task: Analyze the input for emotional tone and identify keywords (e.g., \"stress,\" \"boredom\").\n",
"# Output: A summary of emotional trends.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2b52469e-da81-42ec-9e6c-0c121ad349a7",
"metadata": {},
"outputs": [],
"source": [
"print(\"I am your well being companion and end goal is to help you in your career.\\nI want to start by asking about your feelings, how was your day today.\\n\")\n",
"print(\"I will do my best as well being companion to analyze your day and come up with the suggestions that might help you in your career and life. \\n\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a6df2e2c-785d-4323-90f4-b49592ab33fc",
"metadata": {},
"outputs": [],
"source": [
"how_was_day = \"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "247e4a80-f634-4a7a-9f40-315f042be59c",
"metadata": {},
"outputs": [],
"source": [
"how_was_day = input(\"How was your day today,can you describe about your day, what went well, what did not go well, what you did not like :\\n\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0faac2dd-0d53-431a-87a7-d57a6881e043",
"metadata": {},
"outputs": [],
"source": [
"what_went_well = input(\"What went well for you , today?\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2c11628b-d14b-47eb-a97e-70d08ddf3364",
"metadata": {},
"outputs": [],
"source": [
"what_went_bad = input(\"What did not go well, today?\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f64e34b4-f83a-4ae4-86bb-5bd164121412",
"metadata": {},
"outputs": [],
"source": [
"how_was_day = how_was_day + what_went_well + what_went_bad\n",
"print(how_was_day)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c5fe08c4-4d21-4917-a556-89648eb543c7",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from openai import OpenAI\n",
"from dotenv import load_dotenv\n",
"import json\n",
"from IPython.display import Markdown, display, update_display"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d6875d51-f33b-462e-85cb-a5d6a7cfb86e",
"metadata": {},
"outputs": [],
"source": [
"#Initialize environment and constants:\n",
"load_dotenv(override=True)\n",
"\n",
"api_key = os.getenv('OPENAI_API_KEY')\n",
"if api_key and api_key.startswith('sk-proj-') and len(api_key)>10:\n",
" print(\"API key looks good so far\")\n",
"else:\n",
" print(\"There might be a problem with your API key? Please visit the troubleshooting notebook!\")\n",
" \n",
"MODEL = 'gpt-4o-mini'\n",
"openai = OpenAI()"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "c12cf934-4bd4-4849-9e8f-5bb89eece996",
"metadata": {},
"outputs": [],
"source": [
"### Step 2: From day spent and what went good, what went bad => LLM will extract feelings, emotions from those unspoken words :)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "237d14b3-571e-4598-a57b-d3ebeaf81afc",
"metadata": {},
"outputs": [],
"source": [
"system_prompt_for_emotion_check_in = \"You are a career well-being assistant. Your task is to analyze the user's emotional state based on their text input.\"\\\n",
"\"Look for signs of stress, burnout, dissatisfaction, boredom, motivation, or any other emotional indicators related to work.\"\\\n",
"\"Based on the input, provide a summary of the user's feelings and categorize them under relevant emotional states (e.g., Burnout, Boredom, Stress, Satisfaction, etc.).\"\\\n",
"\"Your response should be empathetic and non-judgmental. Please summarize the list of feelings, emotions , those unspoken but unheard feelings you get it.\\n\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a205a6d3-b0d7-4fcb-9eed-f3a86576cd9f",
"metadata": {},
"outputs": [],
"source": [
"def get_feelings(how_was_day):\n",
" response = openai.chat.completions.create(\n",
" model=MODEL,\n",
" messages = [\n",
" {'role':'system','content': system_prompt_for_emotion_check_in},\n",
" {'role':'user', 'content': how_was_day}\n",
" ]\n",
" )\n",
" result = response.choices[0].message.content\n",
" return result"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "45e152c8-37c4-4818-a8a0-49f1ea3c1b65",
"metadata": {},
"outputs": [],
"source": [
"## LLM will give the feelings you have based on \"the day you had today\".\n",
"print(get_feelings(how_was_day))\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4a62a385-4c51-42b1-ad73-73949e740e66",
"metadata": {},
"outputs": [],
"source": [
"### Step 3: From those feelings, emotions ==> Get suggestions from LLM."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d856ca4f-ade9-4e6f-b540-2d07a70867c7",
"metadata": {},
"outputs": [],
"source": [
"## Lets construct system prompt for LLM to get suggestions (from these feelings above).\n",
"\n",
"system_prompt_for_suggestion =\"You are a career well-being assistant.Provide a list of practical,actionable suggestions to help them improve their emotional state.\"\n",
"\n",
"system_prompt_for_suggestion+=\"The suggestions should be personalized based on their current feelings, and they should be simple, effective actions the user can take immediately.\"\\\n",
"\"Include activities, tasks, habits, or approaches that will either alleviate stress, boost motivation, or help them reconnect with their work in a positive way.\"\\\n",
"\"Be empathetic, non-judgmental, and encouraging in your tone.\\n\"\n",
"system_prompt_for_suggestion += \"Request you to respond in JSON format. Below is example:\\n\"\n",
"system_prompt_for_suggestion += '''\n",
"{\n",
" \"suggestions\": [\n",
" {\n",
" \"action\": \"Take a short break\",\n",
" \"description\": \"Step away from your workspace for 5-10 minutes. Use this time to take deep breaths, stretch, or grab a drink. This mini-break can help clear your mind and reduce feelings of overwhelm.\"\n",
" },\n",
" {\n",
" \"action\": \"Write a quick journal entry\",\n",
" \"description\": \"Spend 5-10 minutes writing down your thoughts and feelings. Specify what's distracting you and what you appreciate about your personal life. This can help you process emotions and refocus on tasks.\"\n",
" },\n",
" {\n",
" \"action\": \"Set a small task goal\",\n",
" \"description\": \"Choose one manageable task to complete today. Break it down into smaller steps to make it less daunting. Completing even a small task can give you a sense of achievement and boost motivation.\"\n",
" }\n",
" ]\n",
"}\n",
"'''\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e9eee380-7fa5-4d21-9357-f4fc34d3368d",
"metadata": {},
"outputs": [],
"source": [
"## Lets build user prompt to ask LLM for the suggestions based on the feelings above.\n",
"## Note: Here, while building user_prompt, we are making another LLM call (via the function get_feelings()) to get the feelings analyzed from the \"day spent\".\n",
"## Because first step is to get feelings from day spent then we move to offer suggestions to ease discomfort feelings.\n",
"\n",
"def get_user_prompt_for_suggestion(how_was_day):\n",
" user_prompt_for_suggestion = \"You are a career well-being assistant.Please see below users emotional input on 'day user had spent' and this user input might have feeling burnt out, bored, uninspired, or stressed or sometime opposite \"\\\n",
" \"of these feelings.\"\n",
" user_prompt_for_suggestion += f\"{get_feelings(how_was_day)}\"\n",
" return user_prompt_for_suggestion\n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3576e451-b29c-44e1-bcdb-addc8d61afa7",
"metadata": {},
"outputs": [],
"source": [
"print(get_user_prompt_for_suggestion(how_was_day))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4a41ee40-1f49-4474-809f-a0d5e44e4aa4",
"metadata": {},
"outputs": [],
"source": [
"def get_suggestions(how_was_day):\n",
" response = openai.chat.completions.create(\n",
" model=MODEL,\n",
" messages = [\n",
" {'role': 'system', 'content':system_prompt_for_suggestion},\n",
" {'role': 'user', 'content': get_user_prompt_for_suggestion(how_was_day)}\n",
" ],\n",
" response_format={\"type\": \"json_object\"}\n",
" )\n",
" result = response.choices[0].message.content\n",
" return json.loads(result)\n",
" #display(Markdown(result))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "33e3a14e-0e2c-43cb-b50b-d6df52b4d300",
"metadata": {},
"outputs": [],
"source": [
"suggestions = get_suggestions(how_was_day)\n",
"print(suggestions)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "31c75e04-2800-4ba2-845b-bc38f8965622",
"metadata": {},
"outputs": [],
"source": [
"### Step 4: From those suggestions from the companion ==> Enhance with the support you need to follow the suggestions, like an action plan for yourself."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d07f9d3f-5acf-4a86-9160-4c6de8df4eb0",
"metadata": {},
"outputs": [],
"source": [
"system_prompt_for_enhanced_suggestions = \"You are a helpful assistant that enhances actionable suggestions for users. For each suggestion provided, enhance it by adding:\\n\"\\\n",
"\"1. A step-by-step guide for implementation.\"\\\n",
"\"2. Tools, resources, or apps that can help.\"\\\n",
"\"3. Examples or additional context to make the suggestion practical.\"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6ab449f1-7a6c-4982-99e0-83d99c45ad2d",
"metadata": {},
"outputs": [],
"source": [
"def get_user_prompt_for_enhanced_suggestions(suggestions):\n",
" prompt = \"You are able to check below suggestions and can enhance to help end user. Below is the list of suggestions.\\n\"\n",
" prompt += f\"{suggestions}\"\n",
" return prompt"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d5187b7a-d8cd-4377-b011-7805bd50443d",
"metadata": {},
"outputs": [],
"source": [
"def enhance_suggestions(suggestions):\n",
" stream = openai.chat.completions.create(\n",
" model = MODEL,\n",
" messages=[\n",
" {'role':'system', 'content':system_prompt_for_enhanced_suggestions},\n",
" {'role':'user', 'content':get_user_prompt_for_enhanced_suggestions(suggestions)}\n",
" ],\n",
" stream = True\n",
" )\n",
" \n",
" #result = response.choices[0].message.content\n",
" #for chunk in stream:\n",
" # print(chunk.choices[0].delta.content or '', end='')\n",
"\n",
" response = \"\"\n",
" display_handle = display(Markdown(\"\"), display_id=True)\n",
" for chunk in stream:\n",
" response += chunk.choices[0].delta.content or ''\n",
" response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n",
" update_display(Markdown(response), display_id=display_handle.display_id)\n",
" \n",
" #display(Markdown(result))\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "429cd6f8-3215-4140-9a6d-82d14a9b9798",
"metadata": {},
"outputs": [],
"source": [
"detailed = input(\"\\nWould you like a DETAILED PLAN for implementing this suggestion?(Yes/ No)\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5efda045-5bde-4c51-bec6-95b5914102dd",
"metadata": {},
"outputs": [],
"source": [
"if detailed.lower() == 'yes':\n",
" enhance_suggestions(suggestions)\n",
"else:\n",
" print(suggestions)\n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1969b2ec-c850-4dfc-b790-8ae8e3fa36e9",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,126 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "d25b0aef-3e5e-4026-90ee-2b373bf262b7",
"metadata": {},
"outputs": [],
"source": [
"# Step 0: Import libraries and load environment variables\n",
"import os\n",
"from dotenv import load_dotenv\n",
"from IPython.display import Markdown, display\n",
"from openai import OpenAI\n",
"\n",
"load_dotenv(override=True)\n",
"api_key = os.getenv(\"OPENAI_API_KEY\")\n",
"\n",
"if not api_key:\n",
" print(\"No API key was found!\")\n",
"elif not api_key.startswith(\"sk-proj-\"):\n",
" print(\"An API key was found, but it does not start with 'sk-proj-'! Please ensure you are using the right key.\")\n",
"elif api_key.strip() != api_key:\n",
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end! Please remove them.\")\n",
"else:\n",
" print(\"API key found and looks good so far!\")\n",
"\n",
"# Step 1: Create prompts\n",
"print(\"[INFO] Creating system prompt ...\")\n",
"system_prompt = \"You are an assistant that analyzes the contents of \\\n",
" email texts and suggests short subject lines for the email based \\\n",
" on the requested tone and language. Respond in markdown.\"\n",
"\n",
"print(\"[INFO] Creating user prompt ...\")\n",
"user_prompt = \"\"\"\n",
" The text below is an e-mail text for which you are required to \\\n",
" provide subject lines. Please provide two snarky, two funny, and \\\n",
" two formal short subject lines for the email text. Each of the six \\\n",
" subject lines should be presented in both English and French \\\n",
" languages, making a total of 12 subject lines. Please provide your \\\n",
" answer in markdown.\\\n",
" \n",
" \\n\\n\n",
" \n",
" Welcome to arXiv!\n",
"\n",
" Thank you for creating an account and joining the arXiv community. We look\n",
" forward to receiving your contribution.\n",
"\n",
" Help Pages\n",
" An overview on how to navigate and use arXiv can be found here:\n",
" https://arxiv.org/help\n",
" https://arxiv.org/about\n",
"\n",
" If you would like to know more about the submission process, please go here:\n",
" https://arxiv.org/help/submit\n",
"\n",
" Before Submitting to arXiv\n",
" The arXiv.org e-print archive is fully automated and processes nearly\n",
" 1,000 new submissions per day. To help us keep the process running smoothly\n",
" and efficiently please check your submission carefully for mistakes, typos\n",
" and layout issues. Once you have submitted your work please check your account\n",
" frequently for verification messages and other communication from arXiv.\n",
"\n",
" Contacting arXiv\n",
" We have provided extensive help pages to guide you through the process and\n",
" to answer the most common questions. If you have problems with the submission\n",
" process please contact us here:\n",
" https://arxiv.org/help/contact\n",
" We aim to assist submitters within one business day, but during times of high\n",
" volume or maintenance work we may be slightly delayed in our response.\n",
"\n",
" Thank you for your cooperation.\n",
"\"\"\"\n",
"\n",
"# Step 2: Make messages list\n",
"print(\"[INFO] Making messages list ...\")\n",
"messages = [\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": user_prompt}\n",
"]\n",
"\n",
"# Step 3: Call OpenAI\n",
"print(\"[INFO] Calling OpenAI ...\")\n",
"openai = OpenAI()\n",
"response = openai.chat.completions.create(\n",
" model=\"gpt-4o-mini\",\n",
" messages=messages\n",
" )\n",
"\n",
"# Step 4: Print result\n",
"print(\"[INFO] Print result ...\")\n",
"display(Markdown(response.choices[0].message.content))\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b0a6676e-fb43-4725-9389-2acd74c13c4e",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.8"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,530 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "d15d8294-3328-4e07-ad16-8a03e9bbfdb9",
"metadata": {},
"source": [
"## DAY1 LLM Project with GROQ!\n",
"\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4e2a9393-7767-488e-a8bf-27c12dca35bd",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import requests\n",
"from dotenv import load_dotenv\n",
"from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display\n",
"from groq import Groq\n",
"\n",
"# If you get an error running this cell, then please head over to the troubleshooting notebook!"
]
},
{
"cell_type": "markdown",
"id": "5d899ad6-1428-481b-b308-750308d80442",
"metadata": {},
"source": [
"If you are getting the error ModuleNotFoundError: No module named 'groq', follow the steps below.\n",
"\n",
"1. Activate the llms environment from Anaconda, so that (llms) is showing in your prompt, as this is the environment where the package will get installed. Install the package with pip here. \n",
"\n",
"(base) PS C:\\Users\\test\\OneDrive\\Desktop\\AI\\projects\\llm_engineering> conda activate llms\n",
"(llms) PS C:\\Users\\test\\OneDrive\\Desktop\\AI\\projects\\llm_engineering> pip install groq\n",
"\n",
"\n",
"2. After you install a new package, you'd need to restart the Kernel in jupyter lab for each notebook (Kernel >> Restart Kernel and Clear Values Of All Outputs).\n",
"\n",
"You can also run this command in jupyter lab to see whether it's installed:\n",
"\n",
"!pip show groq\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "99c0c3c9-fa5e-405e-8453-2a557dc60c09",
"metadata": {},
"outputs": [],
"source": [
"!pip show groq"
]
},
{
"cell_type": "markdown",
"id": "6900b2a8-6384-4316-8aaa-5e519fca4254",
"metadata": {},
"source": [
"# Connecting to GROQ\n",
"\n",
"The next cell is where we load in the environment variables in your `.env` file and connect to GROQ.\n",
"\n",
".env file should have below entry\n",
"\n",
"GROQ_API_KEY=gsk_xxxxxx\n",
"\n",
"GROQ keys can be configured by logging in at the link below:\n",
"https://console.groq.com/keys\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7b87cadb-d513-4303-baee-a37b6f938e4d",
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables in a file called .env\n",
"\n",
"load_dotenv(override=True)\n",
"api_key = os.getenv('GROQ_API_KEY')\n",
"\n",
"# Check the key\n",
"\n",
"if not api_key:\n",
" print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n",
"elif not api_key.startswith(\"gsk_\"):\n",
"    print(\"An API key was found, but it doesn't start with gsk_; please check you're using the right key - see troubleshooting notebook\")\n",
"elif api_key.strip() != api_key:\n",
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n",
"else:\n",
" print(\"API key found and looks good so far!\")\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "019974d9-f3ad-4a8a-b5f9-0a3719aea2d3",
"metadata": {},
"outputs": [],
"source": [
"groq = Groq()"
]
},
{
"cell_type": "markdown",
"id": "442fc84b-0815-4f40-99ab-d9a5da6bda91",
"metadata": {},
"source": [
"# Let's make a quick call to a Frontier model to get started, as a preview!"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a58394bf-1e45-46af-9bfd-01e24da6f49a",
"metadata": {},
"outputs": [],
"source": [
"# To give you a preview -- calling Groq with these messages is this easy. Any problems, head over to the Troubleshooting notebook.\n",
"\n",
"message = \"Hello, GPT! This is my first ever message to you! Hi!\"\n",
"response = groq.chat.completions.create(model=\"llama-3.3-70b-versatile\", messages=[{\"role\":\"user\", \"content\":message}])\n",
"print(response.choices[0].message.content)"
]
},
{
"cell_type": "markdown",
"id": "2aa190e5-cb31-456a-96cc-db109919cd78",
"metadata": {},
"source": [
"## OK onwards with our first project"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c5e793b2-6775-426a-a139-4848291d0463",
"metadata": {},
"outputs": [],
"source": [
"# A class to represent a Webpage\n",
"# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n",
"\n",
"# Some websites need you to use proper headers when fetching them:\n",
"headers = {\n",
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
"}\n",
"\n",
"class Website:\n",
"\n",
" def __init__(self, url):\n",
" \"\"\"\n",
" Create this Website object from the given url using the BeautifulSoup library\n",
" \"\"\"\n",
" self.url = url\n",
" response = requests.get(url, headers=headers)\n",
" soup = BeautifulSoup(response.content, 'html.parser')\n",
" self.title = soup.title.string if soup.title else \"No title found\"\n",
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
" irrelevant.decompose()\n",
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2ef960cf-6dc2-4cda-afb3-b38be12f4c97",
"metadata": {},
"outputs": [],
"source": [
"# Let's try one out. Change the website and add print statements to follow along.\n",
"\n",
"ed = Website(\"https://edwarddonner.com\")\n",
"print(ed.title)\n",
"print(ed.text)"
]
},
{
"cell_type": "markdown",
"id": "6a478a0c-2c53-48ff-869c-4d08199931e1",
"metadata": {},
"source": [
"## Types of prompts\n",
"\n",
"You may know this already - but if not, you will get very familiar with it!\n",
"\n",
"Models like GPT4o have been trained to receive instructions in a particular way.\n",
"\n",
"They expect to receive:\n",
"\n",
"**A system prompt** that tells them what task they are performing and what tone they should use\n",
"\n",
"**A user prompt** -- the conversation starter that they should reply to"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "abdb8417-c5dc-44bc-9bee-2e059d162699",
"metadata": {},
"outputs": [],
"source": [
"# Define our system prompt - you can experiment with this later, changing the last sentence to 'Respond in markdown in Spanish.\"\n",
"\n",
"system_prompt = \"You are an assistant that analyzes the contents of a website \\\n",
"and provides a short summary, ignoring text that might be navigation related. \\\n",
"Respond in markdown.\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f0275b1b-7cfe-4f9d-abfa-7650d378da0c",
"metadata": {},
"outputs": [],
"source": [
"# A function that writes a User Prompt that asks for summaries of websites:\n",
"\n",
"def user_prompt_for(website):\n",
" user_prompt = f\"You are looking at a website titled {website.title}\"\n",
" user_prompt += \"\\nThe contents of this website is as follows; \\\n",
"please provide a short summary of this website in markdown. \\\n",
"If it includes news or announcements, then summarize these too.\\n\\n\"\n",
" user_prompt += website.text\n",
" return user_prompt"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "26448ec4-5c00-4204-baec-7df91d11ff2e",
"metadata": {},
"outputs": [],
"source": [
"print(user_prompt_for(ed))"
]
},
{
"cell_type": "markdown",
"id": "ea211b5f-28e1-4a86-8e52-c0b7677cadcc",
"metadata": {},
"source": [
"## Messages\n",
"\n",
"Similar to OPENAI GROQ APIs share this structure:\n",
"\n",
"```\n",
"[\n",
" {\"role\": \"system\", \"content\": \"system message goes here\"},\n",
" {\"role\": \"user\", \"content\": \"user message goes here\"}\n",
"]\n",
"```\n",
"\n",
"To give you a preview, the next 2 cells make a rather simple call - we won't stretch the mighty GPT (yet!)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f25dcd35-0cd0-4235-9f64-ac37ed9eaaa5",
"metadata": {},
"outputs": [],
"source": [
"messages = [\n",
" {\"role\": \"system\", \"content\": \"You are a snarky assistant\"},\n",
" {\"role\": \"user\", \"content\": \"What is 2 + 2?\"}\n",
"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "21ed95c5-7001-47de-a36d-1d6673b403ce",
"metadata": {},
"outputs": [],
"source": [
"# To give you a preview -- calling Groq with system and user messages:\n",
"\n",
"response = groq.chat.completions.create(model=\"llama-3.3-70b-versatile\", messages=messages)\n",
"print(response.choices[0].message.content)"
]
},
{
"cell_type": "markdown",
"id": "d06e8d78-ce4c-4b05-aa8e-17050c82bb47",
"metadata": {},
"source": [
"## And now let's build useful messages for LLAMA3.3, using a function"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0134dfa4-8299-48b5-b444-f2a8c3403c88",
"metadata": {},
"outputs": [],
"source": [
"# See how this function creates exactly the format above\n",
"\n",
"def messages_for(website):\n",
" return [\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": user_prompt_for(website)}\n",
" ]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "36478464-39ee-485c-9f3f-6a4e458dbc9c",
"metadata": {},
"outputs": [],
"source": [
"# Try this out, and then try for a few more websites\n",
"\n",
"messages_for(ed)"
]
},
{
"cell_type": "markdown",
"id": "16f49d46-bf55-4c3e-928f-68fc0bf715b0",
"metadata": {},
"source": [
"## Time to bring it together - the API for GROQ is very simple!"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "905b9919-aba7-45b5-ae65-81b3d1d78e34",
"metadata": {},
"outputs": [],
"source": [
"# And now: call the GROQ API\n",
"\n",
"def summarize(url):\n",
" website = Website(url)\n",
" response = groq.chat.completions.create(\n",
" model = \"llama-3.3-70b-versatile\",\n",
" messages = messages_for(website)\n",
" )\n",
" return response.choices[0].message.content"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "05e38d41-dfa4-4b20-9c96-c46ea75d9fb5",
"metadata": {},
"outputs": [],
"source": [
"summarize(\"https://edwarddonner.com\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3d926d59-450e-4609-92ba-2d6f244f1342",
"metadata": {},
"outputs": [],
"source": [
"# A function to display this nicely in the Jupyter output, using markdown\n",
"\n",
"def display_summary(url):\n",
" summary = summarize(url)\n",
" display(Markdown(summary))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3018853a-445f-41ff-9560-d925d1774b2f",
"metadata": {},
"outputs": [],
"source": [
"display_summary(\"https://edwarddonner.com\")"
]
},
{
"cell_type": "markdown",
"id": "b3bcf6f4-adce-45e9-97ad-d9a5d7a3a624",
"metadata": {},
"source": [
"# Let's try more websites\n",
"\n",
"Note that this will only work on websites that can be scraped using this simplistic approach.\n",
"\n",
"Websites that are rendered with Javascript, like React apps, won't show up. See the community-contributions folder for a Selenium implementation that gets around this. You'll need to read up on installing Selenium (ask ChatGPT!)\n",
"\n",
"Also Websites protected with CloudFront (and similar) may give 403 errors - many thanks Andy J for pointing this out.\n",
"\n",
"But many websites will work just fine!"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "45d83403-a24c-44b5-84ac-961449b4008f",
"metadata": {},
"outputs": [],
"source": [
"display_summary(\"https://cnn.com\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "75e9fd40-b354-4341-991e-863ef2e59db7",
"metadata": {},
"outputs": [],
"source": [
"display_summary(\"https://anthropic.com\")"
]
},
{
"cell_type": "markdown",
"id": "c951be1a-7f1b-448f-af1f-845978e47e2c",
"metadata": {},
"source": [
"<table style=\"margin: 0; text-align: left;\">\n",
" <tr>\n",
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
" <img src=\"../business.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
" </td>\n",
" <td>\n",
" <h2 style=\"color:#181;\">Business applications</h2>\n",
" <span style=\"color:#181;\">In this exercise, you experienced calling the Cloud API of a Frontier Model (a leading model at the frontier of AI) for the first time. We will be using APIs like OpenAI at many stages in the course, in addition to building our own LLMs.\n",
"\n",
"More specifically, we've applied this to Summarization - a classic Gen AI use case to make a summary. This can be applied to any business vertical - summarizing the news, summarizing financial performance, summarizing a resume in a cover letter - the applications are limitless. Consider how you could apply Summarization in your business, and try prototyping a solution.</span>\n",
" </td>\n",
" </tr>\n",
"</table>\n",
"\n",
"<table style=\"margin: 0; text-align: left;\">\n",
" <tr>\n",
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
" <img src=\"../important.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
" </td>\n",
" <td>\n",
" <h2 style=\"color:#900;\">Before you continue - now try yourself</h2>\n",
" <span style=\"color:#900;\">Use the cell below to make your own simple commercial example. Stick with the summarization use case for now. Here's an idea: write something that will take the contents of an email, and will suggest an appropriate short subject line for the email. That's the kind of feature that might be built into a commercial email tool.</span>\n",
" </td>\n",
" </tr>\n",
"</table>"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "00743dac-0e70-45b7-879a-d7293a6f68a6",
"metadata": {},
"outputs": [],
"source": [
"# Step 1: Create your prompts\n",
"\n",
"system_prompt = \"something here\"\n",
"user_prompt = \"\"\"\n",
" Lots of text\n",
" Can be pasted here\n",
"\"\"\"\n",
"\n",
"# Step 2: Make the messages list\n",
"\n",
"messages = [] # fill this in\n",
"\n",
"# Step 3: Call OpenAI\n",
"\n",
"response =\n",
"\n",
"# Step 4: print the result\n",
"\n",
"print("
]
},
{
"cell_type": "markdown",
"id": "36ed9f14-b349-40e9-a42c-b367e77f8bda",
"metadata": {},
"source": [
"## An extra exercise for those who enjoy web scraping\n",
"\n",
"You may notice that if you try `display_summary(\"https://openai.com\")` - it doesn't work! That's because OpenAI has a fancy website that uses Javascript. There are many ways around this that some of you might be familiar with. For example, Selenium is a hugely popular framework that runs a browser behind the scenes, renders the page, and allows you to query it. If you have experience with Selenium, Playwright or similar, then feel free to improve the Website class to use them. In the community-contributions folder, you'll find an example Selenium solution from a student (thank you!)"
]
},
{
"cell_type": "markdown",
"id": "eeab24dc-5f90-4570-b542-b0585aca3eb6",
"metadata": {},
"source": [
"# Sharing your code\n",
"\n",
"I'd love it if you share your code afterwards so I can share it with others! You'll notice that some students have already made changes (including a Selenium implementation) which you will find in the community-contributions folder. If you'd like add your changes to that folder, submit a Pull Request with your new versions in that folder and I'll merge your changes.\n",
"\n",
"If you're not an expert with git (and I am not!) then GPT has given some nice instructions on how to submit a Pull Request. It's a bit of an involved process, but once you've done it once it's pretty clear. As a pro-tip: it's best if you clear the outputs of your Jupyter notebooks (Edit >> Clean outputs of all cells, and then Save) for clean notebooks.\n",
"\n",
"Here are good instructions courtesy of an AI friend: \n",
"https://chatgpt.com/share/677a9cb5-c64c-8012-99e0-e06e88afd293"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,530 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "d15d8294-3328-4e07-ad16-8a03e9bbfdb9",
"metadata": {},
"source": [
"## DAY1 LLM Project with GROQ!\n",
"\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4e2a9393-7767-488e-a8bf-27c12dca35bd",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import requests\n",
"from dotenv import load_dotenv\n",
"from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display\n",
"from groq import Groq\n",
"\n",
"# If you get an error running this cell, then please head over to the troubleshooting notebook!"
]
},
{
"cell_type": "markdown",
"id": "5d899ad6-1428-481b-b308-750308d80442",
"metadata": {},
"source": [
"If you are getting error ModuleNotFoundError: No module named 'groq' follow below steps.\n",
"\n",
    "1. Activate the llms environment from Anaconda, so that (llms) is showing in your prompt, as this is the environment where the package will get installed. Install pip here. \n",
"\n",
"(base) PS C:\\Users\\test\\OneDrive\\Desktop\\AI\\projects\\llm_engineering> conda activate llms\n",
"(llms) PS C:\\Users\\test\\OneDrive\\Desktop\\AI\\projects\\llm_engineering> pip install groq\n",
"\n",
"\n",
"2. After you install a new package, you'd need to restart the Kernel in jupyter lab for each notebook (Kernel >> Restart Kernel and Clear Values Of All Outputs).\n",
"\n",
"You can also run this command in jupyter lab to see whether it's installed:\n",
"\n",
"!pip show groq\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "99c0c3c9-fa5e-405e-8453-2a557dc60c09",
"metadata": {},
"outputs": [],
"source": [
"!pip show groq"
]
},
{
"cell_type": "markdown",
"id": "6900b2a8-6384-4316-8aaa-5e519fca4254",
"metadata": {},
"source": [
"# Connecting to GROQ\n",
"\n",
"The next cell is where we load in the environment variables in your `.env` file and connect to GROQ.\n",
"\n",
".env file should have below entry\n",
"\n",
"GROQ_API_KEY=gsk_xxxxxx\n",
"\n",
    "GROQ keys can be configured by logging in at the link below:\n",
    "https://console.groq.com/keys\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7b87cadb-d513-4303-baee-a37b6f938e4d",
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables in a file called .env\n",
"\n",
"load_dotenv(override=True)\n",
"api_key = os.getenv('GROQ_API_KEY')\n",
"\n",
"# Check the key\n",
"\n",
"if not api_key:\n",
" print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n",
"elif not api_key.startswith(\"gsk_\"):\n",
    "    print(\"An API key was found, but it doesn't start gsk_; please check you're using the right key - see troubleshooting notebook\")\n",
"elif api_key.strip() != api_key:\n",
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n",
"else:\n",
" print(\"API key found and looks good so far!\")\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "019974d9-f3ad-4a8a-b5f9-0a3719aea2d3",
"metadata": {},
"outputs": [],
"source": [
"groq = Groq()"
]
},
{
"cell_type": "markdown",
"id": "442fc84b-0815-4f40-99ab-d9a5da6bda91",
"metadata": {},
"source": [
"# Let's make a quick call to a Frontier model to get started, as a preview!"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a58394bf-1e45-46af-9bfd-01e24da6f49a",
"metadata": {},
"outputs": [],
"source": [
"# To give you a preview -- calling Groq with these messages is this easy. Any problems, head over to the Troubleshooting notebook.\n",
"\n",
"message = \"Hello, GPT! This is my first ever message to you! Hi!\"\n",
"response = groq.chat.completions.create(model=\"llama-3.3-70b-versatile\", messages=[{\"role\":\"user\", \"content\":message}])\n",
"print(response.choices[0].message.content)"
]
},
{
"cell_type": "markdown",
"id": "2aa190e5-cb31-456a-96cc-db109919cd78",
"metadata": {},
"source": [
"## OK onwards with our first project"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c5e793b2-6775-426a-a139-4848291d0463",
"metadata": {},
"outputs": [],
"source": [
"# A class to represent a Webpage\n",
"# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n",
"\n",
"# Some websites need you to use proper headers when fetching them:\n",
"headers = {\n",
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
"}\n",
"\n",
"class Website:\n",
"\n",
" def __init__(self, url):\n",
" \"\"\"\n",
" Create this Website object from the given url using the BeautifulSoup library\n",
" \"\"\"\n",
" self.url = url\n",
" response = requests.get(url, headers=headers)\n",
" soup = BeautifulSoup(response.content, 'html.parser')\n",
" self.title = soup.title.string if soup.title else \"No title found\"\n",
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
" irrelevant.decompose()\n",
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2ef960cf-6dc2-4cda-afb3-b38be12f4c97",
"metadata": {},
"outputs": [],
"source": [
"# Let's try one out. Change the website and add print statements to follow along.\n",
"\n",
"ed = Website(\"https://edwarddonner.com\")\n",
"print(ed.title)\n",
"print(ed.text)"
]
},
{
"cell_type": "markdown",
"id": "6a478a0c-2c53-48ff-869c-4d08199931e1",
"metadata": {},
"source": [
"## Types of prompts\n",
"\n",
"You may know this already - but if not, you will get very familiar with it!\n",
"\n",
"Models like GPT4o have been trained to receive instructions in a particular way.\n",
"\n",
"They expect to receive:\n",
"\n",
"**A system prompt** that tells them what task they are performing and what tone they should use\n",
"\n",
"**A user prompt** -- the conversation starter that they should reply to"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "abdb8417-c5dc-44bc-9bee-2e059d162699",
"metadata": {},
"outputs": [],
"source": [
"# Define our system prompt - you can experiment with this later, changing the last sentence to 'Respond in markdown in Spanish.\"\n",
"\n",
"system_prompt = \"You are an assistant that analyzes the contents of a website \\\n",
"and provides a short summary, ignoring text that might be navigation related. \\\n",
"Respond in markdown.\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f0275b1b-7cfe-4f9d-abfa-7650d378da0c",
"metadata": {},
"outputs": [],
"source": [
"# A function that writes a User Prompt that asks for summaries of websites:\n",
"\n",
"def user_prompt_for(website):\n",
" user_prompt = f\"You are looking at a website titled {website.title}\"\n",
" user_prompt += \"\\nThe contents of this website is as follows; \\\n",
"please provide a short summary of this website in markdown. \\\n",
"If it includes news or announcements, then summarize these too.\\n\\n\"\n",
" user_prompt += website.text\n",
" return user_prompt"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "26448ec4-5c00-4204-baec-7df91d11ff2e",
"metadata": {},
"outputs": [],
"source": [
"print(user_prompt_for(ed))"
]
},
{
"cell_type": "markdown",
"id": "ea211b5f-28e1-4a86-8e52-c0b7677cadcc",
"metadata": {},
"source": [
"## Messages\n",
"\n",
    "Similar to OpenAI, the GROQ API shares this structure:\n",
    "\n",
    "```\n",
    "[\n",
    "    {\"role\": \"system\", \"content\": \"system message goes here\"},\n",
    "    {\"role\": \"user\", \"content\": \"user message goes here\"}\n",
    "]\n",
    "```\n",
    "\n",
    "To give you a preview, the next 2 cells make a rather simple call - we won't stretch the mighty GPT (yet!)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f25dcd35-0cd0-4235-9f64-ac37ed9eaaa5",
"metadata": {},
"outputs": [],
"source": [
"messages = [\n",
" {\"role\": \"system\", \"content\": \"You are a snarky assistant\"},\n",
" {\"role\": \"user\", \"content\": \"What is 2 + 2?\"}\n",
"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "21ed95c5-7001-47de-a36d-1d6673b403ce",
"metadata": {},
"outputs": [],
"source": [
"# To give you a preview -- calling Groq with system and user messages:\n",
"\n",
"response = groq.chat.completions.create(model=\"llama-3.3-70b-versatile\", messages=messages)\n",
"print(response.choices[0].message.content)"
]
},
{
"cell_type": "markdown",
"id": "d06e8d78-ce4c-4b05-aa8e-17050c82bb47",
"metadata": {},
"source": [
"## And now let's build useful messages for LLAMA3.3, using a function"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0134dfa4-8299-48b5-b444-f2a8c3403c88",
"metadata": {},
"outputs": [],
"source": [
"# See how this function creates exactly the format above\n",
"\n",
"def messages_for(website):\n",
" return [\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": user_prompt_for(website)}\n",
" ]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "36478464-39ee-485c-9f3f-6a4e458dbc9c",
"metadata": {},
"outputs": [],
"source": [
"# Try this out, and then try for a few more websites\n",
"\n",
"messages_for(ed)"
]
},
{
"cell_type": "markdown",
"id": "16f49d46-bf55-4c3e-928f-68fc0bf715b0",
"metadata": {},
"source": [
"## Time to bring it together - the API for GROQ is very simple!"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "905b9919-aba7-45b5-ae65-81b3d1d78e34",
"metadata": {},
"outputs": [],
"source": [
"# And now: call the GROQ API\n",
"\n",
"def summarize(url):\n",
" website = Website(url)\n",
" response = groq.chat.completions.create(\n",
" model = \"llama-3.3-70b-versatile\",\n",
" messages = messages_for(website)\n",
" )\n",
" return response.choices[0].message.content"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "05e38d41-dfa4-4b20-9c96-c46ea75d9fb5",
"metadata": {},
"outputs": [],
"source": [
"summarize(\"https://edwarddonner.com\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3d926d59-450e-4609-92ba-2d6f244f1342",
"metadata": {},
"outputs": [],
"source": [
"# A function to display this nicely in the Jupyter output, using markdown\n",
"\n",
"def display_summary(url):\n",
" summary = summarize(url)\n",
" display(Markdown(summary))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3018853a-445f-41ff-9560-d925d1774b2f",
"metadata": {},
"outputs": [],
"source": [
"display_summary(\"https://edwarddonner.com\")"
]
},
{
"cell_type": "markdown",
"id": "b3bcf6f4-adce-45e9-97ad-d9a5d7a3a624",
"metadata": {},
"source": [
"# Let's try more websites\n",
"\n",
"Note that this will only work on websites that can be scraped using this simplistic approach.\n",
"\n",
"Websites that are rendered with Javascript, like React apps, won't show up. See the community-contributions folder for a Selenium implementation that gets around this. You'll need to read up on installing Selenium (ask ChatGPT!)\n",
"\n",
"Also Websites protected with CloudFront (and similar) may give 403 errors - many thanks Andy J for pointing this out.\n",
"\n",
"But many websites will work just fine!"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "45d83403-a24c-44b5-84ac-961449b4008f",
"metadata": {},
"outputs": [],
"source": [
"display_summary(\"https://cnn.com\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "75e9fd40-b354-4341-991e-863ef2e59db7",
"metadata": {},
"outputs": [],
"source": [
"display_summary(\"https://anthropic.com\")"
]
},
{
"cell_type": "markdown",
"id": "c951be1a-7f1b-448f-af1f-845978e47e2c",
"metadata": {},
"source": [
"<table style=\"margin: 0; text-align: left;\">\n",
" <tr>\n",
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
" <img src=\"../business.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
" </td>\n",
" <td>\n",
" <h2 style=\"color:#181;\">Business applications</h2>\n",
" <span style=\"color:#181;\">In this exercise, you experienced calling the Cloud API of a Frontier Model (a leading model at the frontier of AI) for the first time. We will be using APIs like OpenAI at many stages in the course, in addition to building our own LLMs.\n",
"\n",
"More specifically, we've applied this to Summarization - a classic Gen AI use case to make a summary. This can be applied to any business vertical - summarizing the news, summarizing financial performance, summarizing a resume in a cover letter - the applications are limitless. Consider how you could apply Summarization in your business, and try prototyping a solution.</span>\n",
" </td>\n",
" </tr>\n",
"</table>\n",
"\n",
"<table style=\"margin: 0; text-align: left;\">\n",
" <tr>\n",
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
" <img src=\"../important.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
" </td>\n",
" <td>\n",
" <h2 style=\"color:#900;\">Before you continue - now try yourself</h2>\n",
" <span style=\"color:#900;\">Use the cell below to make your own simple commercial example. Stick with the summarization use case for now. Here's an idea: write something that will take the contents of an email, and will suggest an appropriate short subject line for the email. That's the kind of feature that might be built into a commercial email tool.</span>\n",
" </td>\n",
" </tr>\n",
"</table>"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "00743dac-0e70-45b7-879a-d7293a6f68a6",
"metadata": {},
"outputs": [],
"source": [
"# Step 1: Create your prompts\n",
"\n",
"system_prompt = \"something here\"\n",
"user_prompt = \"\"\"\n",
" Lots of text\n",
" Can be pasted here\n",
"\"\"\"\n",
"\n",
"# Step 2: Make the messages list\n",
"\n",
"messages = [] # fill this in\n",
"\n",
"# Step 3: Call OpenAI\n",
"\n",
"response =\n",
"\n",
"# Step 4: print the result\n",
"\n",
"print("
]
},
{
"cell_type": "markdown",
"id": "36ed9f14-b349-40e9-a42c-b367e77f8bda",
"metadata": {},
"source": [
"## An extra exercise for those who enjoy web scraping\n",
"\n",
"You may notice that if you try `display_summary(\"https://openai.com\")` - it doesn't work! That's because OpenAI has a fancy website that uses Javascript. There are many ways around this that some of you might be familiar with. For example, Selenium is a hugely popular framework that runs a browser behind the scenes, renders the page, and allows you to query it. If you have experience with Selenium, Playwright or similar, then feel free to improve the Website class to use them. In the community-contributions folder, you'll find an example Selenium solution from a student (thank you!)"
]
},
{
"cell_type": "markdown",
"id": "eeab24dc-5f90-4570-b542-b0585aca3eb6",
"metadata": {},
"source": [
"# Sharing your code\n",
"\n",
    "I'd love it if you share your code afterwards so I can share it with others! You'll notice that some students have already made changes (including a Selenium implementation) which you will find in the community-contributions folder. If you'd like to add your changes to that folder, submit a Pull Request with your new versions in that folder and I'll merge your changes.\n",
"\n",
"If you're not an expert with git (and I am not!) then GPT has given some nice instructions on how to submit a Pull Request. It's a bit of an involved process, but once you've done it once it's pretty clear. As a pro-tip: it's best if you clear the outputs of your Jupyter notebooks (Edit >> Clean outputs of all cells, and then Save) for clean notebooks.\n",
"\n",
"Here are good instructions courtesy of an AI friend: \n",
"https://chatgpt.com/share/677a9cb5-c64c-8012-99e0-e06e88afd293"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,159 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "4e2a9393-7767-488e-a8bf-27c12dca35bd",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import requests\n",
"from dotenv import load_dotenv\n",
"from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display\n",
"from openai import OpenAI\n",
"\n",
"# If you get an error running this cell, then please head over to the troubleshooting notebook!"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7b87cadb-d513-4303-baee-a37b6f938e4d",
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables in a file called .env\n",
"\n",
"load_dotenv(override=True)\n",
"api_key = os.getenv('OPENAI_API_KEY')\n",
"\n",
"# Check the key\n",
"\n",
"if not api_key:\n",
" print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n",
"elif not api_key.startswith(\"sk-proj-\"):\n",
" print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n",
"elif api_key.strip() != api_key:\n",
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n",
"else:\n",
" print(\"API key found and looks good so far!\")\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0d2d5441-2afe-41b9-8039-c367acd715f9",
"metadata": {},
"outputs": [],
"source": [
"openai = OpenAI()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c5e793b2-6775-426a-a139-4848291d0463",
"metadata": {},
"outputs": [],
"source": [
"# A class to represent a Webpage\n",
"# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n",
"\n",
"# Some websites need you to use proper headers when fetching them:\n",
"headers = {\n",
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
"}\n",
"\n",
"class Website:\n",
"\n",
" def __init__(self, url):\n",
" \"\"\"\n",
" Create this Website object from the given url using the BeautifulSoup library\n",
" \"\"\"\n",
" self.url = url\n",
" response = requests.get(url, headers=headers)\n",
" soup = BeautifulSoup(response.content, 'html.parser')\n",
" self.title = soup.title.string if soup.title else \"No title found\"\n",
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
" irrelevant.decompose()\n",
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7c7e0988-8f2d-4844-a847-eebec76b114a",
"metadata": {},
"outputs": [],
"source": [
"website = \"https://www.screener.in/company/CMSINFO/\"\n",
"biz = Website(website)\n",
"user_prompt = \"Give short summary of the business \" + biz.text +\" and recommend pros and cons of the business in bullet points alongwith recommendation to buy or sell\"\n",
"print(user_prompt)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "00743dac-0e70-45b7-879a-d7293a6f68a6",
"metadata": {},
"outputs": [],
"source": [
"# Step 1: Create your prompts\n",
"website = \"https://www.screener.in/company/CMSINFO/\"\n",
"biz = Website(website)\n",
"\n",
"system_prompt = \"You are an equity research analyst. Analyze the content of the website and give a summary of the business\"\n",
"user_prompt = \"Give short summary of the business \" + biz.text +\" and recommend pros and cons of the business in bullet points alongwith recommendation to buy or sell\"\n",
"\n",
"# Step 2: Make the messages list\n",
"\n",
"messages = [\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": user_prompt}\n",
"]\n",
"# Step 3: Call OpenAI\n",
"\n",
"# To give you a preview -- calling OpenAI with system and user messages:\n",
"\n",
"response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=messages)\n",
"# Step 4: print the result\n",
"\n",
"print(response.choices[0].message.content)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d9edf96e-1190-44fe-9261-405709fb39cd",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,127 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "0ee39d65-f27d-416d-8b46-43d15aebe752",
"metadata": {},
"outputs": [],
"source": [
    "# Below is a sample of an email reviewer using Bahasa Indonesia."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f9fd62af-9b14-490b-8d0b-990da96101bf",
"metadata": {},
"outputs": [],
"source": [
"# Step 1: Create your prompts\n",
"\n",
"system_prompt = \"Anda adalah seorang Asisten untuk menganalisa email berdasarkan user prompt yang nanti akan diberikan. Summarize the email and give me a tone about that email\"\n",
"user_prompt = \"\"\"\n",
" Subject: Permintaan Pertemuan\n",
"\n",
"Yang terhormat Bapak Rijal,\n",
"\n",
"Saya ingin meminta waktu Anda untuk membahas Generative AI untuk bisnis. Apakah Anda tersedia pada besok pukul 19:00? \n",
"Jika tidak, mohon beri tahu waktu yang lebih sesuai bagi Anda.\n",
"\n",
"Terima kasih atas perhatian Anda.\n",
"\n",
"Salam,\n",
"\n",
"Mentari\n",
"\"\"\"\n",
"\n",
"# Step 2: Make the messages list\n",
"\n",
"messages = [\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": user_prompt}\n",
" ] # fill this in\n",
"\n",
"# Step 3: Call OpenAI\n",
"\n",
"response = openai.chat.completions.create(\n",
" model = \"gpt-4o-mini\",\n",
" messages = messages\n",
" )\n",
"\n",
"# Step 4: print the result\n",
"\n",
"print(response.choices[0].message.content)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d10208fa-02d8-41a0-b9bb-0bf30f237f25",
"metadata": {},
"outputs": [],
"source": [
"# Step 1: Create your prompts\n",
"\n",
"system_prompt = \"Anda adalah seorang Asisten untuk menganalisa email berdasarkan user prompt yang nanti akan diberikan. Summarize the email and give me a tone about that email\"\n",
"user_prompt = \"\"\"\n",
" Subject: Feedback terkait Bapak\n",
"\n",
"Yang terhormat Bapak Rijal,\n",
"\n",
"Saya ingin memberikan sedikit feedback untuk BBapak.\n",
"\n",
"Kemampuan Anda dalam memimpin tim ini mampu membawa saya dan rekan lainnya untuk mengerahkan semua kemampuan saya agar jadi lebih baik.\n",
"Selama ini saya cukup senang bekerja dengan Anda karena memberikan saya peluang untuk mencoba banyak hal baru. Tapi ada beberapa kekhawatiran yang mau saya sampaikan, terutama terkait target yang perlu dicapai oleh tim. Saya pikir melihat performa ke belakang, target yang ditentukan harus lebih realistis lagi.\n",
"Saya beruntung bisa berkesempatan bekerja dengan Anda sehingga banyak ilmu yang saya dapat. Kira-kira untuk ke depannya, hal apa lagi yang bisa tim ini tingkatkan agar kita bisa mencapai target yang lebih baik?\n",
"Selama ini, banyak terjadi miskomunikasi dalam pekerjaan. Dan menurut saya salah satunya karena arahan yang Anda berikan kurang jelas dan kurang ditangkap sepenuhnya oleh anggota yang lain. Saya dan tim berharap ke depan bisa mendapatkan arahan yang lebih jelas dan satu arah.\n",
"\n",
"Terima kasih atas perhatian Anda.\n",
"\n",
"Salam,\n",
"\n",
"Mentari\n",
"\"\"\"\n",
"\n",
"# Step 2: Make the messages list\n",
"\n",
"messages = [\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": user_prompt}\n",
" ] # fill this in\n",
"\n",
"# Step 3: Call OpenAI\n",
"\n",
"response = openai.chat.completions.create(\n",
" model = \"gpt-4o-mini\",\n",
" messages = messages\n",
" )\n",
"\n",
"# Step 4: print the result\n",
"\n",
"print(response.choices[0].message.content)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,316 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "1c6700cb-a0b0-4ac2-8fd5-363729284173",
"metadata": {},
"source": [
"# AI-Powered Resume Analyzer for Job Postings"
]
},
{
"cell_type": "markdown",
"id": "a2fa4891-b283-44de-aa63-f017eb9b140d",
"metadata": {},
"source": [
"This tool is designed to analyze resumes against specific job postings, offering valuable insights such as:\n",
"\n",
"- Identification of skill gaps\n",
"- Keyword matching between the CV and the job description\n",
"- Tailored recommendations for CV improvement\n",
"- An alignment score reflecting how well the CV fits the job\n",
"- Personalized feedback \n",
"- Job market trend insights\n",
"\n",
"An example of the tool's output can be found [here](https://tvarol.github.io/sideProjects/AILLMAgents/output.html)."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8a6a34ea-191f-4c54-9793-a3eb63faab23",
"metadata": {},
"outputs": [],
"source": [
"# Imports\n",
"import os\n",
"import io\n",
"import time\n",
"import requests\n",
"import PyPDF2\n",
"from dotenv import load_dotenv\n",
"from IPython.display import Markdown, display\n",
"from openai import OpenAI\n",
"from ipywidgets import Textarea, FileUpload, Button, VBox, HTML"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "04bbe1d3-bacc-400c-aed2-db44699e38f3",
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables\n",
"load_dotenv(override=True)\n",
"api_key = os.getenv('OPENAI_API_KEY')\n",
"\n",
"# Check the key\n",
"if not api_key:\n",
" print(\"No API key was found!!!\")\n",
"else:\n",
" print(\"API key found and looks good so far!\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "27bfcee1-58e6-4ff2-9f12-9dc5c1aa5b5b",
"metadata": {},
"outputs": [],
"source": [
"openai = OpenAI()"
]
},
{
"cell_type": "markdown",
"id": "c82e79f2-3139-4520-ac01-a728c11cb8b9",
"metadata": {},
"source": [
"## Using a Frontier Model GPT-4o Mini for This Project\n",
"\n",
"### Types of Prompts\n",
"\n",
"Models like GPT4o have been trained to receive instructions in a particular way.\n",
"\n",
"They expect to receive:\n",
"\n",
"**A system prompt** that tells them what task they are performing and what tone they should use\n",
"\n",
"**A user prompt** -- the conversation starter that they should reply to"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0da158ad-c3a8-4cef-806f-be0f90852996",
"metadata": {},
"outputs": [],
"source": [
"# Define our system prompt \n",
"system_prompt = \"\"\"You are a powerful AI model designed to assist with resume analysis. Your task is to analyze a resume against a given job posting and provide feedback on how well the resume aligns with the job requirements. Your response should include the following: \n",
"1) Skill gap identification: Compare the skills listed in the resume with those required in the job posting, highlighting areas where the resume may be lacking or overemphasized.\n",
"2) Keyword matching between a CV and a job posting: Match keywords from the job description with the resume, determining how well they align. Provide specific suggestions for missing keywords to add to the CV.\n",
"3) Recommendations for CV improvement: Provide actionable suggestions on how to enhance the resume, such as adding missing skills or rephrasing experience to match job requirements.\n",
"4) Alignment score: Display a score that represents the degree of alignment between the resume and the job posting.\n",
"5) Personalized feedback: Offer tailored advice based on the job posting, guiding the user on how to optimize their CV for the best chances of success.\n",
"6) Job market trend insights, provide broader market trends and insights, such as in-demand skills and salary ranges.\n",
"Provide responses that are concise, clear, and to the point. Respond in markdown.\"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ebdb34b0-85bd-4e36-933a-20c3c42e833b",
"metadata": {},
"outputs": [],
"source": [
"# The job posting and the CV are required to define the user prompt\n",
"# The user will input the job posting as text in a box here\n",
"# The user will upload the CV in PDF format, from which the text will be extracted\n",
"\n",
"# You might need to install PyPDF2 via pip if it's not already installed\n",
"# !pip install PyPDF2\n",
"\n",
"# Create widgets - to create a box for the job posting text\n",
"job_posting_area = Textarea(\n",
" placeholder='Paste the job posting text here...',\n",
" description='Job Posting:',\n",
" disabled=False,\n",
" layout={'width': '800px', 'height': '300px'}\n",
")\n",
"\n",
"# Define file upload for CV\n",
"cv_upload = FileUpload(\n",
" accept='.pdf', # Only accept PDF files\n",
" multiple=False, # Only allow single file selection\n",
" description='Upload CV (PDF)'\n",
")\n",
"\n",
"status = HTML(value=\"<b>Status:</b> Waiting for inputs...\")\n",
"\n",
"# Create Submit Buttons\n",
"submit_cv_button = Button(description='Submit CV', button_style='success')\n",
"submit_job_posting_button = Button(description='Submit Job Posting', button_style='success')\n",
"\n",
"# Initialize variables to store the data\n",
"# This dictionary will hold the text for both the job posting and the CV\n",
"# It will be used to define the user_prompt\n",
"for_user_prompt = {\n",
" 'job_posting': '',\n",
" 'cv_text': ''\n",
"}\n",
"\n",
"# Functions\n",
"def submit_cv_action(change):\n",
"\n",
" if not for_user_prompt['cv_text']:\n",
" status.value = \"<b>Status:</b> Please upload a CV before submitting.\"\n",
" \n",
" if cv_upload.value:\n",
" # Get the uploaded file\n",
" uploaded_file = cv_upload.value[0]\n",
" content = io.BytesIO(uploaded_file['content'])\n",
" \n",
" try:\n",
" pdf_reader = PyPDF2.PdfReader(content) \n",
" cv_text = \"\"\n",
" for page in pdf_reader.pages: \n",
" cv_text += page.extract_text() \n",
" \n",
" # Store CV text in for_user_prompt\n",
" for_user_prompt['cv_text'] = cv_text\n",
" status.value = \"<b>Status:</b> CV uploaded and processed successfully!\"\n",
" except Exception as e:\n",
" status.value = f\"<b>Status:</b> Error processing PDF: {str(e)}\"\n",
"\n",
" time.sleep(0.5) # Short pause between upload and submit messages to display both\n",
" \n",
" if for_user_prompt['cv_text']:\n",
" #print(\"CV Submitted:\")\n",
" #print(for_user_prompt['cv_text'])\n",
" status.value = \"<b>Status:</b> CV submitted successfully!\"\n",
" \n",
"def submit_job_posting_action(b):\n",
" for_user_prompt['job_posting'] = job_posting_area.value\n",
" if for_user_prompt['job_posting']:\n",
" #print(\"Job Posting Submitted:\")\n",
" #print(for_user_prompt['job_posting'])\n",
" status.value = \"<b>Status:</b> Job posting submitted successfully!\"\n",
" else:\n",
" status.value = \"<b>Status:</b> Please enter a job posting before submitting.\"\n",
"\n",
"# Attach actions to buttons\n",
"submit_cv_button.on_click(submit_cv_action)\n",
"submit_job_posting_button.on_click(submit_job_posting_action)\n",
"\n",
"# Layout\n",
"job_posting_box = VBox([job_posting_area, submit_job_posting_button])\n",
"cv_buttons = VBox([submit_cv_button])\n",
"\n",
"# Display all widgets\n",
"display(VBox([\n",
" HTML(value=\"<h3>Input Job Posting and CV</h3>\"),\n",
" job_posting_box, \n",
" cv_upload,\n",
" cv_buttons,\n",
" status\n",
"]))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "364e42a6-0910-4c7c-8c3c-2ca7d2891cb6",
"metadata": {},
"outputs": [],
"source": [
"# Now define user_prompt using for_user_prompt dictionary\n",
"# Clearly label each input to differentiate the job posting and CV\n",
"# The model can parse and analyze each section based on these labels\n",
"user_prompt = f\"\"\"\n",
"Job Posting: \n",
"{for_user_prompt['job_posting']}\n",
"\n",
"CV: \n",
"{for_user_prompt['cv_text']}\n",
"\"\"\""
]
},
{
"cell_type": "markdown",
"id": "3b51dda0-9a0c-48f4-8ec8-dae32c29da24",
"metadata": {},
"source": [
"## Messages\n",
"\n",
"The API from OpenAI expects to receive messages in a particular structure.\n",
"Many of the other APIs share this structure:\n",
"\n",
"```\n",
"[\n",
" {\"role\": \"system\", \"content\": \"system message goes here\"},\n",
" {\"role\": \"user\", \"content\": \"user message goes here\"}\n",
    "]\n",
    "```"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3262c0b9-d3de-4e4f-b535-a25c0aed5783",
"metadata": {},
"outputs": [],
"source": [
"# Define messages with system_prompt and user_prompt\n",
"def messages_for(system_prompt_input, user_prompt_input):\n",
" return [\n",
" {\"role\": \"system\", \"content\": system_prompt_input},\n",
" {\"role\": \"user\", \"content\": user_prompt_input}\n",
" ]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2409ac13-0b39-4227-b4d4-b4c0ff009fd7",
"metadata": {},
"outputs": [],
"source": [
"# And now: call the OpenAI API. \n",
"response = openai.chat.completions.create(\n",
" model = \"gpt-4o-mini\",\n",
" messages = messages_for(system_prompt, user_prompt)\n",
")\n",
"\n",
"# Response is provided in Markdown and displayed accordingly\n",
"display(Markdown(response.choices[0].message.content))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "86ab71cf-bd7e-45f7-9536-0486f349bfbe",
"metadata": {},
"outputs": [],
"source": [
"## If you would like to save the response content as a Markdown file, uncomment the following lines\n",
"#with open('yourfile.md', 'w') as file:\n",
"# file.write(response.choices[0].message.content)\n",
"\n",
"## You can then run the line below to create output.html which you can open on your browser\n",
"#!pandoc yourfile.md -o output.html"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,194 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "2112166e-3629-4167-a4cb-0a1a6e549e97",
"metadata": {},
"source": [
"# Hello everyone, \n",
"The community contributions folder is super motivating. Thanks to Ed for democratising learning with this great idea of sharing. The below small piece is my novice attempt at summarizing content from a Wikipedia page. It is pretty straightforward, but a good learning exercise for me nevertheless. "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "947028c8-30c6-456a-8e0c-25e0de1ecbb6",
"metadata": {},
"outputs": [],
"source": [
"!pip install wikipedia"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "aa18a060-6dbe-42c9-bc11-c8b079397d6b",
"metadata": {},
"outputs": [],
"source": [
"# Import statements\n",
"import os\n",
"import requests\n",
"from dotenv import load_dotenv\n",
"from IPython.display import Markdown, display\n",
"from openai import OpenAI\n",
"import wikipedia\n",
"import warnings"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8d9c128d-ed7d-4e58-8cd1-1468242c7967",
"metadata": {},
"outputs": [],
"source": [
"# To suppress a warning from the wikipedia module when there are multiple options.\n",
"warnings.filterwarnings(\"ignore\", category=UserWarning, module=\"wikipedia\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5371f405-e628-4b6a-a5ab-5774c1431749",
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables in a file called .env\n",
"\n",
"load_dotenv()\n",
"api_key = os.getenv('OPENAI_API_KEY')\n",
"\n",
"# Check the key\n",
"\n",
"if not api_key:\n",
" print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n",
"elif not api_key.startswith(\"sk-proj-\"):\n",
" print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n",
"elif api_key.strip() != api_key:\n",
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n",
"else:\n",
" print(\"API key found and looks good so far!\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e6610504-bd7b-459f-9722-0044b3101e05",
"metadata": {},
"outputs": [],
"source": [
"openai = OpenAI()\n",
"\n",
"# If this doesn't work, try Kernel menu >> Restart Kernel and Clear Outputs Of All Cells, then run the cells from the top of this notebook down.\n",
"# If it STILL doesn't work (horrors!) then please see the troubleshooting notebook, or try the below line instead:\n",
"# openai = OpenAI(api_key=\"your-key-here-starting-sk-proj-\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ac37741a-2608-4760-8ba8-163fb9155f0f",
"metadata": {},
"outputs": [],
"source": [
"class Wikipedia:\n",
" def __init__(self, searchText):\n",
" \"\"\"\n",
" Create this object to extract the summary of wikipedia page for a text entered by user\n",
" \"\"\"\n",
" self.searchText = searchText\n",
" self.summary_text = None\n",
" self.user_prompt = None\n",
" \n",
" self._fetch_summary()\n",
"\n",
" def _fetch_summary(self):\n",
" \"\"\"\n",
" Fetches the summary from wikipedia page based on user entered search text and sets user prompt accordingly\n",
" \"\"\"\n",
" try:\n",
"            # Try to get the summary of the text from Wikipedia based on user entered text. Using the straightforward summary module in wikipedia.\n",
" self.summary_text = wikipedia.summary(self.searchText)\n",
" self.user_prompt = f\"You are looking a summary extract from a wikipedia page. The content is as follows\\n {self.summary_text}.\\nProvide \\\n",
" a summary taking key points from each sections listed on the page\"\n",
" except wikipedia.DisambiguationError as e:\n",
" #Modify user and system prompts if there are multiple options for a user search text\n",
" self.user_prompt = f\"You have received quite a few options {e.options} for the keyword {self.searchText}. Please request user to choose one of them\"\n",
" except wikipedia.PageError:\n",
" #To handle when there is no page\n",
" self.user_prompt = f\"There is no wiki page for {self.searchText}. Apparently it is not your fault!\"\n",
" except Exception as e:\n",
" # To handle any other exceptions\n",
" self.user_prompt = f\"Sorry, something seems to be wrong on my end. Please try again later\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "143c203e-bb99-49c6-89a2-2a32ea429719",
"metadata": {},
"outputs": [],
"source": [
"# Our by-now familiar summarize function\n",
"def summarize(searchText):\n",
" wiki = Wikipedia(searchText)\n",
"    system_prompt = f\"You are an assistant trying to summarize content from Wikipedia. You will have three scenarios to handle your responses \\\n",
" 1. You will have the summary text content and you will just show that to user\\\n",
" 2. You will have multiple options for the user entered keyword, and you will respond by asking user to choose from that and request again \\\n",
" 3. You will not have the content due to a page not found error. Respond accordingly.\\\n",
" Respond all of these in Markdown format.\"\n",
" messages = [\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": wiki.user_prompt}\n",
" ]\n",
" response = openai.chat.completions.create(\n",
" model = \"gpt-4o-mini\",\n",
" messages = messages\n",
" )\n",
" return response.choices[0].message.content\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b61532fc-189c-4cd8-9402-93d8d8fa8c59",
"metadata": {},
"outputs": [],
"source": [
"summary = summarize(\"mukhari\")\n",
"display(Markdown(summary))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5c3f05f6-acb5-41e4-a521-8d8b8ace0192",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,356 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "31d3c4a4-5442-4074-b812-42d60e0a0c04",
"metadata": {},
"outputs": [],
"source": [
"#In this example we will fetch the job description by pasting the URL,then we upload CV. Only then ChatGPT will\n",
"#analyze CV against the fetched job description. If the CV is a good match then it will write a cover letter.\n",
"\n",
"#If \n",
" ##job posting url is fake/random text or \n",
"  ##job posting is fake/random text or \n",
" ##CV is fake/random text\n",
"#then ChatGPT will not analyze CV, it will give a generic response to enter the info correctly."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bc2eafe6-5255-4317-8ddd-a93695296043",
"metadata": {},
"outputs": [],
"source": [
"pip install PyPDF2"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cf45e9d5-4913-416c-9880-5be60a96c0e6",
"metadata": {},
"outputs": [],
"source": [
"# Imports\n",
"import os\n",
"import io\n",
"import time\n",
"import requests\n",
"import PyPDF2\n",
"from dotenv import load_dotenv\n",
"from IPython.display import Markdown, display\n",
"from bs4 import BeautifulSoup\n",
"from openai import OpenAI\n",
"from ipywidgets import Textarea, FileUpload, Button, VBox, HTML"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "af8fea69-60aa-430c-a16c-8757b487e07a",
"metadata": {},
"outputs": [],
"source": [
"load_dotenv(override=True)\n",
"api_key = os.getenv('OPENAI_API_KEY')\n",
"\n",
"# Check the key\n",
"\n",
"if not api_key:\n",
" print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n",
"elif not api_key.startswith(\"sk-proj-\"):\n",
" print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n",
"elif api_key.strip() != api_key:\n",
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n",
"else:\n",
" print(\"API key found and looks good so far!\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "daee94d2-f82b-43f0-95d1-15370eda1bc7",
"metadata": {},
"outputs": [],
"source": [
"openai = OpenAI()\n",
"\n",
"# If this doesn't work, try Kernel menu >> Restart Kernel and Clear Outputs Of All Cells, then run the cells from the top of this notebook down.\n",
"# If it STILL doesn't work (horrors!) then please see the Troubleshooting notebook in this folder for full instructions"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0712dd1d-b6bc-41c6-84ec-d965f696f7aa",
"metadata": {},
"outputs": [],
"source": [
"# Step 1: Create your prompts\n",
"\n",
"system_prompt = \"You are an assistant who analyzes user's CV against the job description \\\n",
" and provide a short summary if the user is fit for this job. If the user is fit for the job \\\n",
" write a cover letter for the user to apply for the job. Keep the cover letter professional, short, \\\n",
" and formal. \\\n",
" Important things to notice before analyzing CV:\\\n",
" 1. Always check if the CV is actually a CV or just random text\\\n",
" 2. Check if the job description fetched from the website is the job description or not\\\n",
" and ignore text related to navigation\\\n",
" 3. Also check the link of the job posting, if it actually resembles a job posting or is just random \\\n",
" fake website\\\n",
"    4. if any one of these checks fails, do not analyze the CV against the Job description and give an\\\n",
" appropriate response as you think\\\n",
" 5. Always respond in Markdown.\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "70c972a6-8af6-4ff2-a338-6d7ba90e2045",
"metadata": {},
"outputs": [],
"source": [
"# A class to represent a Webpage\n",
"# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n",
"\n",
"# Some websites need you to use proper headers when fetching them:\n",
"headers = {\n",
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
"}\n",
"\n",
"class Website:\n",
"\n",
" def __init__(self, url):\n",
" \"\"\"\n",
" Create this Website object from the given url using the BeautifulSoup library\n",
" \"\"\"\n",
" self.url = url\n",
" response = requests.get(url, headers=headers)\n",
" soup = BeautifulSoup(response.content, 'html.parser')\n",
" self.title = soup.title.string if soup.title else \"No title found\"\n",
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
" irrelevant.decompose()\n",
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "426dfd9b-3446-4543-9819-63040abd9644",
"metadata": {},
"outputs": [],
"source": [
"for_user_prompt = {\n",
" 'job_posting_url':'',\n",
" 'job_posting': '',\n",
" 'cv_text': ''\n",
"}"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "79d9ccd6-f5fe-4ce8-982c-7235d2cf6a9f",
"metadata": {},
"outputs": [],
"source": [
"# Create widgets - to create a box for the job posting text\n",
"job_posting_url_area = Textarea(\n",
" placeholder='Paste the URL of the job posting here, ONLY URL PLEASE',\n",
" description='Fetching job:',\n",
" disabled=False,\n",
" layout={'width': '800px', 'height': '50px'}\n",
")\n",
"\n",
"status_job_posting = HTML(value=\"<b>Status:</b> Waiting for inputs...\")\n",
"\n",
"# Create Submit Buttons\n",
"fetch_job_posting_button = Button(description='Fetch Job Posting', button_style='primary')\n",
"\n",
"def fetch_job_posting_action(b):\n",
" for_user_prompt['job_posting_url'] = job_posting_url_area.value\n",
" if for_user_prompt['job_posting_url']:\n",
" ed = Website(for_user_prompt['job_posting_url'])\n",
" status_job_posting.value = \"<b>Status:</b> Job posting fetched successfully!\"\n",
" fetch_job_posting_button.button_style='success'\n",
" for_user_prompt['job_posting']=ed.text\n",
" else:\n",
" status_job_posting.value = \"<b>Status:</b> Please enter a job posting url before submitting.\"\n",
"\n",
"# Attach actions to buttons\n",
"fetch_job_posting_button.on_click(fetch_job_posting_action)\n",
"\n",
"# Layout\n",
"job_posting_box = VBox([job_posting_url_area, fetch_job_posting_button])\n",
"\n",
"# Display all widgets\n",
"display(VBox([\n",
" HTML(value=\"<h2>Input Job Posting Url</h2>\"),\n",
" job_posting_box,\n",
" status_job_posting\n",
"]))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "58d42786-1580-4d3f-b44f-5c52250c2935",
"metadata": {},
"outputs": [],
"source": [
"# Print fetched job description\n",
"\n",
"#print(for_user_prompt['job_posting'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cd258dec-9b57-40ce-b37c-2627acbcb5af",
"metadata": {},
"outputs": [],
"source": [
"# Define file upload for CV\n",
"cv_upload = FileUpload(\n",
" accept='.pdf', # Only accept PDF files\n",
" multiple=False, # Only allow single file selection\n",
" description='Upload CV (PDF)'\n",
")\n",
"\n",
"status = HTML(value=\"<b>Status:</b> Waiting for inputs...\")\n",
"\n",
"# Create Submit Buttons\n",
"submit_cv_button = Button(description='Submit CV', button_style='success')\n",
"\n",
"# Functions\n",
"def submit_cv_action(change):\n",
"\n",
" if not for_user_prompt['cv_text']:\n",
" status.value = \"<b>Status:</b> Please upload a CV before submitting.\"\n",
" \n",
" if cv_upload.value:\n",
" # Get the uploaded file\n",
" uploaded_file = cv_upload.value[0]\n",
" content = io.BytesIO(uploaded_file['content'])\n",
" \n",
" try:\n",
" pdf_reader = PyPDF2.PdfReader(content) \n",
" cv_text = \"\"\n",
" for page in pdf_reader.pages: \n",
" cv_text += page.extract_text() \n",
" \n",
" # Store CV text in for_user_prompt\n",
" for_user_prompt['cv_text'] = cv_text\n",
" status.value = \"<b>Status:</b> CV uploaded and processed successfully!\"\n",
" except Exception as e:\n",
" status.value = f\"<b>Status:</b> Error processing PDF: {str(e)}\"\n",
"\n",
" time.sleep(0.5) # Short pause between upload and submit messages to display both\n",
" \n",
" if for_user_prompt['cv_text']:\n",
" #print(\"CV Submitted:\")\n",
" #print(for_user_prompt['cv_text'])\n",
" status.value = \"<b>Status:</b> CV submitted successfully!\"\n",
" \n",
"\n",
"# Attach actions to buttons\n",
"submit_cv_button.on_click(submit_cv_action)\n",
"\n",
"# Layout\n",
"cv_buttons = VBox([submit_cv_button])\n",
"\n",
"# Display all widgets\n",
"display(VBox([\n",
" HTML(value=\"<h2>Import CV and submit</h2>\"),\n",
" cv_upload,\n",
" cv_buttons,\n",
" status\n",
"]))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a7dd22a4-ca7b-4b8c-a328-6205cec689cb",
"metadata": {},
"outputs": [],
"source": [
"# Prepare the user prompt that we will send to open ai (added URL for the context)\n",
"user_prompt = f\"\"\"\n",
"Job Posting: \n",
"{for_user_prompt['job_posting']}\n",
"\n",
"CV: \n",
"{for_user_prompt['cv_text']}\n",
"\n",
"Url:\n",
"{for_user_prompt['job_posting_url']}\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "82b71c1a-895a-48e7-a945-13e615bb0096",
"metadata": {},
"outputs": [],
"source": [
"# Define messages with system_prompt and user_prompt\n",
"def messages_for(system_prompt_input, user_prompt_input):\n",
" return [\n",
" {\"role\": \"system\", \"content\": system_prompt_input},\n",
" {\"role\": \"user\", \"content\": user_prompt_input}\n",
" ]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "854dc42e-2bbd-493b-958f-c20484908300",
"metadata": {},
"outputs": [],
"source": [
"# And now: call the OpenAI API. \n",
"response = openai.chat.completions.create(\n",
" model = \"gpt-4o-mini\",\n",
" messages = messages_for(system_prompt, user_prompt)\n",
")\n",
"\n",
"# Response is provided in Markdown and displayed accordingly\n",
"display(Markdown(response.choices[0].message.content))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "758d2cbe-0f80-4572-8724-7cba77f701dd",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,979 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "d15d8294-3328-4e07-ad16-8a03e9bbfdb9",
"metadata": {},
"source": [
"# Instant Gratification\n",
"\n",
"## Your first Frontier LLM Project!\n",
"\n",
"Let's build a useful LLM solution - in a matter of minutes.\n",
"\n",
"By the end of this course, you will have built an autonomous Agentic AI solution with 7 agents that collaborate to solve a business problem. All in good time! We will start with something smaller...\n",
"\n",
"Our goal is to code a new kind of Web Browser. Give it a URL, and it will respond with a summary. The Reader's Digest of the internet!!\n",
"\n",
"Before starting, you should have completed the setup for [PC](../SETUP-PC.md) or [Mac](../SETUP-mac.md) and you hopefully launched this jupyter lab from within the project root directory, with your environment activated.\n",
"\n",
"## If you're new to Jupyter Lab\n",
"\n",
"Welcome to the wonderful world of Data Science experimentation! Once you've used Jupyter Lab, you'll wonder how you ever lived without it. Simply click in each \"cell\" with code in it, such as the cell immediately below this text, and hit Shift+Return to execute that cell. As you wish, you can add a cell with the + button in the toolbar, and print values of variables, or try out variations. \n",
"\n",
"I've written a notebook called [Guide to Jupyter](Guide%20to%20Jupyter.ipynb) to help you get more familiar with Jupyter Labs, including adding Markdown comments, using `!` to run shell commands, and `tqdm` to show progress.\n",
"\n",
"## If you'd prefer to work in IDEs\n",
"\n",
"If you're more comfortable in IDEs like VSCode or Pycharm, they both work great with these lab notebooks too. \n",
"If you'd prefer to work in VSCode, [here](https://chatgpt.com/share/676f2e19-c228-8012-9911-6ca42f8ed766) are instructions from an AI friend on how to configure it for the course.\n",
"\n",
"## If you'd like to brush up your Python\n",
"\n",
"I've added a notebook called [Intermediate Python](Intermediate%20Python.ipynb) to get you up to speed. But you should give it a miss if you already have a good idea what this code does: \n",
"`yield from {book.get(\"author\") for book in books if book.get(\"author\")}`\n",
"\n",
"## I am here to help\n",
"\n",
"If you have any problems at all, please do reach out. \n",
"I'm available through the platform, or at ed@edwarddonner.com, or at https://www.linkedin.com/in/eddonner/ if you'd like to connect (and I love connecting!)\n",
"\n",
"## More troubleshooting\n",
"\n",
"Please see the [troubleshooting](troubleshooting.ipynb) notebook in this folder to diagnose and fix common problems. At the very end of it is a diagnostics script with some useful debug info.\n",
"\n",
"## If this is old hat!\n",
"\n",
"If you're already comfortable with today's material, please hang in there; you can move swiftly through the first few labs - we will get much more in depth as the weeks progress.\n",
"\n",
"<table style=\"margin: 0; text-align: left;\">\n",
" <tr>\n",
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
" <img src=\"../important.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
" </td>\n",
" <td>\n",
" <h2 style=\"color:#900;\">Please read - important note</h2>\n",
" <span style=\"color:#900;\">The way I collaborate with you may be different to other courses you've taken. I prefer not to type code while you watch. Rather, I execute Jupyter Labs, like this, and give you an intuition for what's going on. My suggestion is that you do this with me, either at the same time, or (perhaps better) right afterwards. Add print statements to understand what's going on, and then come up with your own variations. If you have a Github account, use this to showcase your variations. Not only is this essential practice, but it demonstrates your skills to others, including perhaps future clients or employers...</span>\n",
" </td>\n",
" </tr>\n",
"</table>\n",
"<table style=\"margin: 0; text-align: left;\">\n",
" <tr>\n",
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
" <img src=\"../business.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
" </td>\n",
" <td>\n",
" <h2 style=\"color:#181;\">Business value of these exercises</h2>\n",
" <span style=\"color:#181;\">A final thought. While I've designed these notebooks to be educational, I've also tried to make them enjoyable. We'll do fun things like have LLMs tell jokes and argue with each other. But fundamentally, my goal is to teach skills you can apply in business. I'll explain business implications as we go, and it's worth keeping this in mind: as you build experience with models and techniques, think of ways you could put this into action at work today. Please do contact me if you'd like to discuss more or if you have ideas to bounce off me.</span>\n",
" </td>\n",
" </tr>\n",
"</table>"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "4e2a9393-7767-488e-a8bf-27c12dca35bd",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import requests\n",
"from dotenv import load_dotenv\n",
"from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display\n",
"from openai import OpenAI\n",
"\n",
"# If you get an error running this cell, then please head over to the troubleshooting notebook!"
]
},
{
"cell_type": "markdown",
"id": "6900b2a8-6384-4316-8aaa-5e519fca4254",
"metadata": {},
"source": [
"# Connecting to OpenAI\n",
"\n",
"The next cell is where we load in the environment variables in your `.env` file and connect to OpenAI.\n",
"\n",
"## Troubleshooting if you have problems:\n",
"\n",
"Head over to the [troubleshooting](troubleshooting.ipynb) notebook in this folder for step by step code to identify the root cause and fix it!\n",
"\n",
"If you make a change, try restarting the \"Kernel\" (the python process sitting behind this notebook) by Kernel menu >> Restart Kernel and Clear Outputs of All Cells. Then try this notebook again, starting at the top.\n",
"\n",
"Or, contact me! Message me or email ed@edwarddonner.com and we will get this to work.\n",
"\n",
"Any concerns about API costs? See my notes in the README - costs should be minimal, and you can control it at every point. You can also use Ollama as a free alternative, which we discuss during Day 2."
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "7b87cadb-d513-4303-baee-a37b6f938e4d",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"API key found and looks good so far!\n"
]
}
],
"source": [
"# Load environment variables in a file called .env\n",
"\n",
"load_dotenv(override=True)\n",
"api_key = os.getenv('OPENAI_API_KEY')\n",
"\n",
"# Check the key\n",
"\n",
"if not api_key:\n",
" print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n",
"elif not api_key.startswith(\"sk-proj-\"):\n",
" print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n",
"elif api_key.strip() != api_key:\n",
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n",
"else:\n",
" print(\"API key found and looks good so far!\")\n"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "019974d9-f3ad-4a8a-b5f9-0a3719aea2d3",
"metadata": {},
"outputs": [],
"source": [
"openai = OpenAI()\n",
"\n",
"# If this doesn't work, try Kernel menu >> Restart Kernel and Clear Outputs Of All Cells, then run the cells from the top of this notebook down.\n",
"# If it STILL doesn't work (horrors!) then please see the Troubleshooting notebook in this folder for full instructions"
]
},
{
"cell_type": "markdown",
"id": "442fc84b-0815-4f40-99ab-d9a5da6bda91",
"metadata": {},
"source": [
"# Let's make a quick call to a Frontier model to get started, as a preview!"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "a58394bf-1e45-46af-9bfd-01e24da6f49a",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Hello! I’m glad to hear from you! How can I assist you today?\n"
]
}
],
"source": [
"# To give you a preview -- calling OpenAI with these messages is this easy. Any problems, head over to the Troubleshooting notebook.\n",
"\n",
"message = \"Hello, GPT! This is my first ever message to you! Hi!\"\n",
"response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=[{\"role\":\"user\", \"content\":message}])\n",
"print(response.choices[0].message.content)"
]
},
{
"cell_type": "markdown",
"id": "2aa190e5-cb31-456a-96cc-db109919cd78",
"metadata": {},
"source": [
"## OK onwards with our first project"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "c5e793b2-6775-426a-a139-4848291d0463",
"metadata": {},
"outputs": [],
"source": [
"# A class to represent a Webpage\n",
"# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n",
"\n",
"# Some websites need you to use proper headers when fetching them:\n",
"headers = {\n",
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
"}\n",
"\n",
"class Website:\n",
"\n",
" def __init__(self, url):\n",
" \"\"\"\n",
" Create this Website object from the given url using the BeautifulSoup library\n",
" \"\"\"\n",
" self.url = url\n",
" response = requests.get(url, headers=headers)\n",
" soup = BeautifulSoup(response.content, 'html.parser')\n",
" self.title = soup.title.string if soup.title else \"No title found\"\n",
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
" irrelevant.decompose()\n",
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "2ef960cf-6dc2-4cda-afb3-b38be12f4c97",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Home - Edward Donner\n",
"Home\n",
"Outsmart\n",
"An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n",
"About\n",
"Posts\n",
"Well, hi there.\n",
"I’m Ed. I like writing code and experimenting with LLMs, and hopefully you’re here because you do too. I also enjoy DJing (but I’m badly out of practice), amateur electronic music production (\n",
"very\n",
"amateur) and losing myself in\n",
"Hacker News\n",
", nodding my head sagely to things I only half understand.\n",
"I’m the co-founder and CTO of\n",
"Nebula.io\n",
". We’re applying AI to a field where it can make a massive, positive impact: helping people discover their potential and pursue their reason for being. Recruiters use our product today to source, understand, engage and manage talent. I’m previously the founder and CEO of AI startup untapt,\n",
"acquired in 2021\n",
".\n",
"We work with groundbreaking, proprietary LLMs verticalized for talent, we’ve\n",
"patented\n",
"our matching model, and our award-winning platform has happy customers and tons of press coverage.\n",
"Connect\n",
"with me for more!\n",
"December 21, 2024\n",
"Welcome, SuperDataScientists!\n",
"November 13, 2024\n",
"Mastering AI and LLM Engineering Resources\n",
"October 16, 2024\n",
"From Software Engineer to AI Data Scientist resources\n",
"August 6, 2024\n",
"Outsmart LLM Arena a battle of diplomacy and deviousness\n",
"Navigation\n",
"Home\n",
"Outsmart\n",
"An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n",
"About\n",
"Posts\n",
"Get in touch\n",
"ed [at] edwarddonner [dot] com\n",
"www.edwarddonner.com\n",
"Follow me\n",
"LinkedIn\n",
"Twitter\n",
"Facebook\n",
"Subscribe to newsletter\n",
"Type your email…\n",
"Subscribe\n"
]
}
],
"source": [
"# Let's try one out. Change the website and add print statements to follow along.\n",
"\n",
"ed = Website(\"https://edwarddonner.com\")\n",
"print(ed.title)\n",
"print(ed.text)"
]
},
{
"cell_type": "markdown",
"id": "6a478a0c-2c53-48ff-869c-4d08199931e1",
"metadata": {},
"source": [
"## Types of prompts\n",
"\n",
"You may know this already - but if not, you will get very familiar with it!\n",
"\n",
"Models like GPT4o have been trained to receive instructions in a particular way.\n",
"\n",
"They expect to receive:\n",
"\n",
"**A system prompt** that tells them what task they are performing and what tone they should use\n",
"\n",
"**A user prompt** -- the conversation starter that they should reply to"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "abdb8417-c5dc-44bc-9bee-2e059d162699",
"metadata": {},
"outputs": [],
"source": [
"# Define our system prompt - you can experiment with this later, changing the last sentence to 'Respond in markdown in Spanish.'\n",
"\n",
"system_prompt = \"You are an assistant that analyzes the contents of a website \\\n",
"and provides a short summary, ignoring text that might be navigation related. \\\n",
"Respond in markdown.\""
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "f0275b1b-7cfe-4f9d-abfa-7650d378da0c",
"metadata": {},
"outputs": [],
"source": [
"# A function that writes a User Prompt that asks for summaries of websites:\n",
"\n",
"def user_prompt_for(website):\n",
" user_prompt = f\"You are looking at a website titled {website.title}\"\n",
" user_prompt += \"\\nThe contents of this website is as follows; \\\n",
"please provide a short summary of this website in markdown. \\\n",
"If it includes news or announcements, then summarize these too.\\n\\n\"\n",
" user_prompt += website.text\n",
" return user_prompt"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "26448ec4-5c00-4204-baec-7df91d11ff2e",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"You are looking at a website titled Home - Edward Donner\n",
"The contents of this website is as follows; please provide a short summary of this website in markdown. If it includes news or announcements, then summarize these too.\n",
"\n",
"Home\n",
"Outsmart\n",
"An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n",
"About\n",
"Posts\n",
"Well, hi there.\n",
"I’m Ed. I like writing code and experimenting with LLMs, and hopefully you’re here because you do too. I also enjoy DJing (but I’m badly out of practice), amateur electronic music production (\n",
"very\n",
"amateur) and losing myself in\n",
"Hacker News\n",
", nodding my head sagely to things I only half understand.\n",
"I’m the co-founder and CTO of\n",
"Nebula.io\n",
". We’re applying AI to a field where it can make a massive, positive impact: helping people discover their potential and pursue their reason for being. Recruiters use our product today to source, understand, engage and manage talent. I’m previously the founder and CEO of AI startup untapt,\n",
"acquired in 2021\n",
".\n",
"We work with groundbreaking, proprietary LLMs verticalized for talent, we’ve\n",
"patented\n",
"our matching model, and our award-winning platform has happy customers and tons of press coverage.\n",
"Connect\n",
"with me for more!\n",
"December 21, 2024\n",
"Welcome, SuperDataScientists!\n",
"November 13, 2024\n",
"Mastering AI and LLM Engineering Resources\n",
"October 16, 2024\n",
"From Software Engineer to AI Data Scientist resources\n",
"August 6, 2024\n",
"Outsmart LLM Arena a battle of diplomacy and deviousness\n",
"Navigation\n",
"Home\n",
"Outsmart\n",
"An arena that pits LLMs against each other in a battle of diplomacy and deviousness\n",
"About\n",
"Posts\n",
"Get in touch\n",
"ed [at] edwarddonner [dot] com\n",
"www.edwarddonner.com\n",
"Follow me\n",
"LinkedIn\n",
"Twitter\n",
"Facebook\n",
"Subscribe to newsletter\n",
"Type your email…\n",
"Subscribe\n"
]
}
],
"source": [
"print(user_prompt_for(ed))"
]
},
{
"cell_type": "markdown",
"id": "ea211b5f-28e1-4a86-8e52-c0b7677cadcc",
"metadata": {},
"source": [
"## Messages\n",
"\n",
"The API from OpenAI expects to receive messages in a particular structure.\n",
"Many of the other APIs share this structure:\n",
"\n",
"```\n",
"[\n",
" {\"role\": \"system\", \"content\": \"system message goes here\"},\n",
" {\"role\": \"user\", \"content\": \"user message goes here\"}\n",
"]\n",
"```\n",
"\n",
"To give you a preview, the next 2 cells make a rather simple call - we won't stretch the mighty GPT (yet!)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "f25dcd35-0cd0-4235-9f64-ac37ed9eaaa5",
"metadata": {},
"outputs": [],
"source": [
"messages = [\n",
" {\"role\": \"system\", \"content\": \"You are a snarky assistant\"},\n",
" {\"role\": \"user\", \"content\": \"What is 2 + 2?\"}\n",
"]"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "21ed95c5-7001-47de-a36d-1d6673b403ce",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Oh, we're starting with the basics, huh? Well, 2 + 2 equals 4. Shocking, I know!\n"
]
}
],
"source": [
"# To give you a preview -- calling OpenAI with system and user messages:\n",
"\n",
"response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=messages)\n",
"print(response.choices[0].message.content)"
]
},
{
"cell_type": "markdown",
"id": "d06e8d78-ce4c-4b05-aa8e-17050c82bb47",
"metadata": {},
"source": [
"## And now let's build useful messages for GPT-4o-mini, using a function"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "0134dfa4-8299-48b5-b444-f2a8c3403c88",
"metadata": {},
"outputs": [],
"source": [
"# See how this function creates exactly the format above\n",
"\n",
"def messages_for(website):\n",
" return [\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": user_prompt_for(website)}\n",
" ]"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "36478464-39ee-485c-9f3f-6a4e458dbc9c",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'role': 'system',\n",
" 'content': 'You are an assistant that analyzes the contents of a website and provides a short summary, ignoring text that might be navigation related. Respond in markdown.'},\n",
" {'role': 'user',\n",
" 'content': 'You are looking at a website titled Home - Edward Donner\\nThe contents of this website is as follows; please provide a short summary of this website in markdown. If it includes news or announcements, then summarize these too.\\n\\nHome\\nOutsmart\\nAn arena that pits LLMs against each other in a battle of diplomacy and deviousness\\nAbout\\nPosts\\nWell, hi there.\\nIm Ed. I like writing code and experimenting with LLMs, and hopefully youre here because you do too. I also enjoy DJing (but Im badly out of practice), amateur electronic music production (\\nvery\\namateur) and losing myself in\\nHacker News\\n, nodding my head sagely to things I only half understand.\\nIm the co-founder and CTO of\\nNebula.io\\n. Were applying AI to a field where it can make a massive, positive impact: helping people discover their potential and pursue their reason for being. Recruiters use our product today to source, understand, engage and manage talent. Im previously the founder and CEO of AI startup untapt,\\nacquired in 2021\\n.\\nWe work with groundbreaking, proprietary LLMs verticalized for talent, weve\\npatented\\nour matching model, and our award-winning platform has happy customers and tons of press coverage.\\nConnect\\nwith me for more!\\nDecember 21, 2024\\nWelcome, SuperDataScientists!\\nNovember 13, 2024\\nMastering AI and LLM Engineering Resources\\nOctober 16, 2024\\nFrom Software Engineer to AI Data Scientist resources\\nAugust 6, 2024\\nOutsmart LLM Arena a battle of diplomacy and deviousness\\nNavigation\\nHome\\nOutsmart\\nAn arena that pits LLMs against each other in a battle of diplomacy and deviousness\\nAbout\\nPosts\\nGet in touch\\ned [at] edwarddonner [dot] com\\nwww.edwarddonner.com\\nFollow me\\nLinkedIn\\nTwitter\\nFacebook\\nSubscribe to newsletter\\nType your email…\\nSubscribe'}]"
]
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Try this out, and then try for a few more websites\n",
"\n",
"messages_for(ed)"
]
},
{
"cell_type": "markdown",
"id": "16f49d46-bf55-4c3e-928f-68fc0bf715b0",
"metadata": {},
"source": [
"## Time to bring it together - the API for OpenAI is very simple!"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "905b9919-aba7-45b5-ae65-81b3d1d78e34",
"metadata": {},
"outputs": [],
"source": [
"# And now: call the OpenAI API. You will get very familiar with this!\n",
"\n",
"def summarize(url):\n",
" website = Website(url)\n",
" response = openai.chat.completions.create(\n",
" model = \"gpt-4o-mini\",\n",
" messages = messages_for(website)\n",
" )\n",
" return response.choices[0].message.content"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "05e38d41-dfa4-4b20-9c96-c46ea75d9fb5",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'# Summary of Edward Donner\\'s Website\\n\\nEdward Donner\\'s website serves as a platform for sharing his interests and expertise in coding, large language models (LLMs), and AI. He is the co-founder and CTO of Nebula.io, a company focused on leveraging AI to enhance talent discovery and management. Previously, he founded the AI startup untapt, which was acquired in 2021.\\n\\n## Key Content\\n\\n- **Personal Introduction**: Ed shares his passion for coding, experimenting with LLMs, DJing, and music production.\\n- **Professional Background**: He highlights his role at Nebula.io and his prior experience with untapt.\\n- **Innovative Work**: Mention of proprietary LLMs tailored for talent management and a patented matching model.\\n\\n## News and Announcements\\n\\n- **December 21, 2024**: Welcoming \"SuperDataScientists.\"\\n- **November 13, 2024**: Resources for mastering AI and LLM engineering.\\n- **October 16, 2024**: Transitioning from software engineering to AI data science resources.\\n- **August 6, 2024**: Introduction to the Outsmart LLM Arena, a competition focusing on strategy among LLMs.\\n\\nThe website encourages connections and offers resources for individuals interested in AI and LLMs.'"
]
},
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"summarize(\"https://edwarddonner.com\")"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "3d926d59-450e-4609-92ba-2d6f244f1342",
"metadata": {},
"outputs": [],
"source": [
"# A function to display this nicely in the Jupyter output, using markdown\n",
"\n",
"def display_summary(url):\n",
" summary = summarize(url)\n",
" display(Markdown(summary))"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "3018853a-445f-41ff-9560-d925d1774b2f",
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"# Summary of Edward Donner's Website\n",
"\n",
"The website belongs to Ed, a coder and LLM (Large Language Model) enthusiast, who is also a co-founder and CTO of Nebula.io. Nebula.io focuses on leveraging AI to help individuals discover their potential in recruitment through its innovative platform. Ed also shares his background in the AI field, having previously founded the startup untapt, which was acquired in 2021.\n",
"\n",
"## Recent News and Announcements\n",
"1. **December 21, 2024**: Welcome message for SuperDataScientists.\n",
"2. **November 13, 2024**: Resources for mastering AI and LLM engineering.\n",
"3. **October 16, 2024**: Resources for transitioning from Software Engineer to AI Data Scientist.\n",
"4. **August 6, 2024**: Introduction to the \"Outsmart LLM Arena,\" a competitive platform where LLMs engage in diplomacy and strategy.\n",
"\n",
"Ed expresses a passion for technology, music, and engaging in community discussions through platforms like Hacker News."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"display_summary(\"https://edwarddonner.com\")"
]
},
{
"cell_type": "markdown",
"id": "b3bcf6f4-adce-45e9-97ad-d9a5d7a3a624",
"metadata": {},
"source": [
"# Let's try more websites\n",
"\n",
"Note that this will only work on websites that can be scraped using this simplistic approach.\n",
"\n",
"Websites that are rendered with Javascript, like React apps, won't show up. See the community-contributions folder for a Selenium implementation that gets around this. You'll need to read up on installing Selenium (ask ChatGPT!)\n",
"\n",
"Also, websites protected with CloudFront (and similar) may give 403 errors - many thanks to Andy J for pointing this out.\n",
"\n",
"But many websites will work just fine!"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "45d83403-a24c-44b5-84ac-961449b4008f",
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"# CNN Website Summary\n",
"\n",
"CNN is a leading news platform that provides comprehensive coverage across a wide range of categories including US and world news, politics, business, health, entertainment, and more. The website features breaking news articles, videos, and live updates on significant global events.\n",
"\n",
"### Recent Headlines:\n",
"- **Politics**: \n",
" - Justin Trudeau announced his resignation as Canada's Prime Minister, sharing his \"one regret.\"\n",
" - Analysis of Trump's influence in Congress and recent legal battles related to his actions.\n",
" \n",
"- **Global Affairs**: \n",
" - Rising tensions in Venezuela as the opposition leader urges military action against Maduro.\n",
" - Sudanese authorities announced the transfer of 11 Yemeni detainees from Guantanamo Bay to Oman.\n",
" \n",
"- **Weather**: A major winter storm impacted Washington, DC, causing power outages and stranded drivers.\n",
"\n",
"- **Health**: \n",
" - FDA issues new draft guidance on improving pulse oximeter readings for individuals with darker skin.\n",
"\n",
"### Additional Features:\n",
"CNN includes segments dedicated to sports, science, climate, and travel. There are also various podcasts available, offering deeper insights into current events and specialized topics. \n",
"\n",
"The site encourages user feedback on ads and technical issues, emphasizing its commitment to enhancing user experience. \n",
"\n",
"Overall, CNN serves as a crucial resource for staying updated with local and international news."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"display_summary(\"https://cnn.com\")"
]
},
{
"cell_type": "code",
"execution_count": 19,
"id": "75e9fd40-b354-4341-991e-863ef2e59db7",
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"# Anthropic Website Summary\n",
"\n",
"Anthropic is an AI safety and research company that prioritizes safety in the development of AI technologies. The main focus of the site is on their AI model, Claude, which includes the latest version, Claude 3.5 Sonnet, as well as additional offerings like Claude 3.5 Haiku. The company emphasizes the creation of AI-powered applications and custom experiences through its API.\n",
"\n",
"## Recent Announcements\n",
"- **Claude 3.5 Sonnet Launch**: Announced on October 22, 2024, featuring significant advancements in AI capabilities.\n",
"- **New AI Models**: Introduction of Claude 3.5 Sonnet and Claude 3.5 Haiku.\n",
"\n",
"Anthropic's work spans various domains including machine learning, policy, and product development, aimed at generating reliable and beneficial AI systems. They also highlight career opportunities within the organization."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"display_summary(\"https://anthropic.com\")"
]
},
{
"cell_type": "code",
"execution_count": 21,
"id": "8070c4c3-1ef1-4c7a-8c2d-f6b4b9b4aa8e",
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"# Summary of CPP Investments Website\n",
"\n",
"## Overview\n",
"The CPP Investments website serves as a comprehensive resource for information regarding the management and performance of the Canada Pension Plan (CPP) Fund. It emphasizes its long-standing commitment to ensuring financial security for over 22 million Canadians who rely on the benefits of the CPP.\n",
"\n",
"## Key Sections\n",
"- **About Us**: Details the governance, leadership, and investment programs available within CPP Investments.\n",
"- **The Fund**: Offers an overview of the fund's performance, sustainability, and transparency in its operations.\n",
"- **Investment Strategies**: Explanation of CPP's investment beliefs and strategies, emphasizing a global mindset and sustainable investing practices.\n",
"- **Insights Institute**: A dedicated section for reports and analyses on relevant investment topics, including emerging trends and strategies.\n",
"\n",
"## Recent News and Announcements\n",
"- **2024 CEO Letter** (May 22, 2024): Reflects on the 25th anniversary of CPP Investments and its mission to manage funds in the best interest of Canadians.\n",
"- **Article on CPP Benefits** (September 18, 2024): Highlights why the CPP is regarded as one of the best pension plans globally.\n",
"- **Report on AI Integration and Human Capital** (October 31, 2024): Discusses how institutional investors can engage with boards and leadership on AI adaptation strategies.\n",
"- **Stake Sales** (January 3, 2025): Announcements regarding the sale of stakes in various partnerships and joint ventures, including a significant logistics partnership in North America and real estate ventures in Hong Kong.\n",
"\n",
"This website underscores CPP Investments' ongoing commitment to transparency, strong financial performance, and its role in supporting the financial security of Canadians as they prepare for retirement."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"display_summary('https://cppinvestments.com')"
]
},
{
"cell_type": "markdown",
"id": "c951be1a-7f1b-448f-af1f-845978e47e2c",
"metadata": {},
"source": [
"<table style=\"margin: 0; text-align: left;\">\n",
" <tr>\n",
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
" <img src=\"../business.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
" </td>\n",
" <td>\n",
" <h2 style=\"color:#181;\">Business applications</h2>\n",
" <span style=\"color:#181;\">In this exercise, you experienced calling the Cloud API of a Frontier Model (a leading model at the frontier of AI) for the first time. We will be using APIs like OpenAI at many stages in the course, in addition to building our own LLMs.\n",
"\n",
"More specifically, we've applied this to Summarization - a classic Gen AI use case to make a summary. This can be applied to any business vertical - summarizing the news, summarizing financial performance, summarizing a resume in a cover letter - the applications are limitless. Consider how you could apply Summarization in your business, and try prototyping a solution.</span>\n",
" </td>\n",
" </tr>\n",
"</table>\n",
"\n",
"<table style=\"margin: 0; text-align: left;\">\n",
" <tr>\n",
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
" <img src=\"../important.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
" </td>\n",
" <td>\n",
" <h2 style=\"color:#900;\">Before you continue - now try yourself</h2>\n",
" <span style=\"color:#900;\">Use the cell below to make your own simple commercial example. Stick with the summarization use case for now. Here's an idea: write something that will take the contents of an email, and will suggest an appropriate short subject line for the email. That's the kind of feature that might be built into a commercial email tool.</span>\n",
" </td>\n",
" </tr>\n",
"</table>"
]
},
{
"cell_type": "code",
"execution_count": 33,
"id": "00743dac-0e70-45b7-879a-d7293a6f68a6",
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"**Subject:** Request for Annual Sales Report (2024)\n",
"\n",
"**Email:**\n",
"\n",
"Dear Abhinav,\n",
"\n",
"I hope this email finds you in good health and high spirits. As we step into a new year and begin reviewing our plans and strategies, it is crucial for us to analyze the performance metrics from the previous year. In this regard, I would like to kindly request a copy of the Annual Sales Report for 2024.\n",
"\n",
"This report will play an integral role in understanding our achievements, challenges, and areas for improvement over the past year. It will also serve as a foundation for aligning our goals and preparing a roadmap for the upcoming quarters. Please ensure that the report includes key performance indicators such as:\n",
"\n",
"- Total revenue generated\n",
"- Region-wise sales performance\n",
"- Product/service-wise contribution\n",
"- Month-by-month trend analysis\n",
"- Customer retention and acquisition metrics\n",
"\n",
"If there are any additional insights or observations from your side that you feel would be helpful for us to review, please feel free to include them as well. Your expertise and detailed input are always highly valued.\n",
"\n",
"Kindly let me know if the report is already prepared or if there is an expected timeline for its completion. In case you require any assistance, data inputs, or clarification from my end to finalize the report, do not hesitate to reach out.\n",
"\n",
"Thank you in advance for prioritizing this request. I appreciate your support and look forward to receiving the report soon.\n",
"\n",
"Best regards, \n",
"Sanath Pabba\n",
"\n",
"**Tone:** Professional and Collaborative"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# Step 1: Create your prompts\n",
"\n",
"system_prompt = \"You are an AI assistant email reviewer. All you need is to identify the meaning of the context in the text given and provide the subject line and email. and in the end of text, please provide the tone info.\"\n",
"user_prompt = \"\"\"\n",
" Dear Abhinav,\n",
"\n",
"I hope this email finds you in good health and high spirits. As we step into a new year and begin reviewing our plans and strategies, it is crucial for us to analyze the performance metrics from the previous year. In this regard, I would like to kindly request a copy of the Annual Sales Report for 2024.\n",
"\n",
"This report will play an integral role in understanding our achievements, challenges, and areas for improvement over the past year. It will also serve as a foundation for aligning our goals and preparing a roadmap for the upcoming quarters. Please ensure that the report includes key performance indicators such as:\n",
"\n",
"Total revenue generated\n",
"Region-wise sales performance\n",
"Product/service-wise contribution\n",
"Month-by-month trend analysis\n",
"Customer retention and acquisition metrics\n",
"If there are any additional insights or observations from your side that you feel would be helpful for us to review, please feel free to include them as well. Your expertise and detailed input are always highly valued.\n",
"\n",
"Kindly let me know if the report is already prepared or if there is an expected timeline for its completion. In case you require any assistance, data inputs, or clarification from my end to finalize the report, do not hesitate to reach out.\n",
"\n",
"Thank you in advance for prioritizing this request. I appreciate your support and look forward to receiving the report soon.\n",
"\n",
"Best regards,\n",
"Sanath Pabba\n",
"\"\"\"\n",
"\n",
"# Step 2: Make the messages list\n",
"\n",
"messages = [\n",
" {\"role\":\"system\", \"content\": system_prompt},\n",
" {\"role\":\"user\", \"content\": user_prompt}\n",
" \n",
"] # fill this in\n",
"\n",
"# Step 3: Call OpenAI\n",
"\n",
"response = openai.chat.completions.create(\n",
" model=\"gpt-4o-mini\",\n",
" messages=messages\n",
")\n",
"\n",
"# Step 4: print the result\n",
"\n",
"display(Markdown(response.choices[0].message.content))"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "d4d641a5-0103-44a5-b5c2-70e80976d1f1",
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"**Subject:** Addressing Sales Performance Concerns\n",
"\n",
"Dear Akhil,\n",
"\n",
"I wanted to touch base with you about your sales performance over the last two quarters. Ive noticed that you havent been hitting the targets, and its something we need to address seriously.\n",
"\n",
"I know youre capable of much more, and I want to see you succeed. That said, its crucial that you meet your sales targets this quarter. If there isnt a significant improvement, we may have to consider other options, including letting you go, which I truly hope we can avoid.\n",
"\n",
"If theres anything holding you back or if you need additional support, let me know. Im here to help, but ultimately, its up to you to turn things around.\n",
"\n",
"Lets make this quarter count! Let me know if you want to discuss this further or need help strategizing.\n",
"\n",
"Best regards, \n",
"Sanath Pabba\n",
"\n",
"**Tone:** Serious yet supportive"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# Step 1: Create your prompts\n",
"\n",
"system_prompt = \"You are an AI assistant email reviewer. All you need is to identify the meaning of the context in the text given and provide the subject line and email. and in the end of text, please provide the tone info.\"\n",
"user_prompt = \"\"\"\n",
"Dear Akhil,\n",
"\n",
"I wanted to touch base with you about your sales performance over the last two quarters. Ive noticed that you havent been hitting the targets, and its something we need to address seriously.\n",
"\n",
"I know youre capable of much more, and I want to see you succeed. That said, its crucial that you meet your sales targets this quarter. If there isnt a significant improvement, we may have to consider other options, including letting you go, which I truly hope we can avoid.\n",
"\n",
"If theres anything holding you back or if you need additional support, let me know. Im here to help, but ultimately, its up to you to turn things around.\n",
"\n",
"Lets make this quarter count! Let me know if you want to discuss this further or need help strategizing.\n",
"\n",
"Best regards,\n",
"Sanath Pabba\n",
"\"\"\"\n",
"\n",
"# Step 2: Make the messages list\n",
"\n",
"messages = [\n",
" {\"role\":\"system\", \"content\": system_prompt},\n",
" {\"role\":\"user\", \"content\": user_prompt}\n",
" \n",
"] # fill this in\n",
"\n",
"# Step 3: Call OpenAI\n",
"\n",
"response = openai.chat.completions.create(\n",
" model=\"gpt-4o-mini\",\n",
" messages=messages\n",
")\n",
"\n",
"# Step 4: print the result\n",
"\n",
"display(Markdown(response.choices[0].message.content))"
]
},
{
"cell_type": "markdown",
"id": "36ed9f14-b349-40e9-a42c-b367e77f8bda",
"metadata": {},
"source": [
"## An extra exercise for those who enjoy web scraping\n",
"\n",
"You may notice that if you try `display_summary(\"https://openai.com\")` - it doesn't work! That's because OpenAI has a fancy website that uses Javascript. There are many ways around this that some of you might be familiar with. For example, Selenium is a hugely popular framework that runs a browser behind the scenes, renders the page, and allows you to query it. If you have experience with Selenium, Playwright or similar, then feel free to improve the Website class to use them. In the community-contributions folder, you'll find an example Selenium solution from a student (thank you!)"
]
},
{
"cell_type": "markdown",
"id": "eeab24dc-5f90-4570-b542-b0585aca3eb6",
"metadata": {},
"source": [
"# Sharing your code\n",
"\n",
"I'd love it if you share your code afterwards so I can share it with others! You'll notice that some students have already made changes (including a Selenium implementation) which you will find in the community-contributions folder. If you'd like to add your changes to that folder, submit a Pull Request with your new versions in that folder and I'll merge your changes.\n",
"\n",
"If you're not an expert with git (and I am not!) then GPT has given some nice instructions on how to submit a Pull Request. It's a bit of an involved process, but once you've done it once it's pretty clear. As a pro-tip: it's best if you clear the outputs of your Jupyter notebooks (Edit >> Clean outputs of all cells, and then Save) for clean notebooks.\n",
"\n",
"Here are good instructions courtesy of an AI friend: \n",
"https://chatgpt.com/share/677a9cb5-c64c-8012-99e0-e06e88afd293"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,580 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "d15d8294-3328-4e07-ad16-8a03e9bbfdb9",
"metadata": {},
"source": [
"# Instant Gratification\n",
"\n",
"## Your first Frontier LLM Project!\n",
"\n",
"Let's build a useful LLM solution - in a matter of minutes.\n",
"\n",
"By the end of this course, you will have built an autonomous Agentic AI solution with 7 agents that collaborate to solve a business problem. All in good time! We will start with something smaller...\n",
"\n",
"Our goal is to code a new kind of Web Browser. Give it a URL, and it will respond with a summary. The Reader's Digest of the internet!!\n",
"\n",
"Before starting, you should have completed the setup for [PC](../SETUP-PC.md) or [Mac](../SETUP-mac.md) and you hopefully launched this jupyter lab from within the project root directory, with your environment activated.\n",
"\n",
"## If you're new to Jupyter Lab\n",
"\n",
"Welcome to the wonderful world of Data Science experimentation! Once you've used Jupyter Lab, you'll wonder how you ever lived without it. Simply click in each \"cell\" with code in it, such as the cell immediately below this text, and hit Shift+Return to execute that cell. As you wish, you can add a cell with the + button in the toolbar, and print values of variables, or try out variations. \n",
"\n",
"I've written a notebook called [Guide to Jupyter](Guide%20to%20Jupyter.ipynb) to help you get more familiar with Jupyter Labs, including adding Markdown comments, using `!` to run shell commands, and `tqdm` to show progress.\n",
"\n",
"## If you'd prefer to work in IDEs\n",
"\n",
"If you're more comfortable in IDEs like VSCode or Pycharm, they both work great with these lab notebooks too. \n",
"If you'd prefer to work in VSCode, [here](https://chatgpt.com/share/676f2e19-c228-8012-9911-6ca42f8ed766) are instructions from an AI friend on how to configure it for the course.\n",
"\n",
"## If you'd like to brush up your Python\n",
"\n",
"I've added a notebook called [Intermediate Python](Intermediate%20Python.ipynb) to get you up to speed. But you should give it a miss if you already have a good idea what this code does: \n",
"`yield from {book.get(\"author\") for book in books if book.get(\"author\")}`\n",
"\n",
"## I am here to help\n",
"\n",
"If you have any problems at all, please do reach out. \n",
"I'm available through the platform, or at ed@edwarddonner.com, or at https://www.linkedin.com/in/eddonner/ if you'd like to connect (and I love connecting!)\n",
"\n",
"## More troubleshooting\n",
"\n",
"Please see the [troubleshooting](troubleshooting.ipynb) notebook in this folder to diagnose and fix common problems. At the very end of it is a diagnostics script with some useful debug info.\n",
"\n",
"## If this is old hat!\n",
"\n",
"If you're already comfortable with today's material, please hang in there; you can move swiftly through the first few labs - we will get much more in depth as the weeks progress.\n",
"\n",
"<table style=\"margin: 0; text-align: left;\">\n",
" <tr>\n",
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
" <img src=\"../important.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
" </td>\n",
" <td>\n",
" <h2 style=\"color:#900;\">Please read - important note</h2>\n",
" <span style=\"color:#900;\">The way I collaborate with you may be different to other courses you've taken. I prefer not to type code while you watch. Rather, I execute Jupyter Labs, like this, and give you an intuition for what's going on. My suggestion is that you do this with me, either at the same time, or (perhaps better) right afterwards. Add print statements to understand what's going on, and then come up with your own variations. If you have a Github account, use this to showcase your variations. Not only is this essential practice, but it demonstrates your skills to others, including perhaps future clients or employers...</span>\n",
" </td>\n",
" </tr>\n",
"</table>\n",
"<table style=\"margin: 0; text-align: left;\">\n",
" <tr>\n",
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
" <img src=\"../business.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
" </td>\n",
" <td>\n",
" <h2 style=\"color:#181;\">Business value of these exercises</h2>\n",
" <span style=\"color:#181;\">A final thought. While I've designed these notebooks to be educational, I've also tried to make them enjoyable. We'll do fun things like have LLMs tell jokes and argue with each other. But fundamentally, my goal is to teach skills you can apply in business. I'll explain business implications as we go, and it's worth keeping this in mind: as you build experience with models and techniques, think of ways you could put this into action at work today. Please do contact me if you'd like to discuss more or if you have ideas to bounce off me.</span>\n",
" </td>\n",
" </tr>\n",
"</table>"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4e2a9393-7767-488e-a8bf-27c12dca35bd",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"import requests\n",
"from dotenv import load_dotenv\n",
"from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display\n",
"from openai import OpenAI\n",
"\n",
"# If you get an error running this cell, then please head over to the troubleshooting notebook!"
]
},
{
"cell_type": "markdown",
"id": "6900b2a8-6384-4316-8aaa-5e519fca4254",
"metadata": {},
"source": [
"# Connecting to OpenAI\n",
"\n",
"The next cell is where we load in the environment variables in your `.env` file and connect to OpenAI.\n",
"\n",
"## Troubleshooting if you have problems:\n",
"\n",
"Head over to the [troubleshooting](troubleshooting.ipynb) notebook in this folder for step by step code to identify the root cause and fix it!\n",
"\n",
"If you make a change, try restarting the \"Kernel\" (the python process sitting behind this notebook) by Kernel menu >> Restart Kernel and Clear Outputs of All Cells. Then try this notebook again, starting at the top.\n",
"\n",
"Or, contact me! Message me or email ed@edwarddonner.com and we will get this to work.\n",
"\n",
"Any concerns about API costs? See my notes in the README - costs should be minimal, and you can control it at every point. You can also use Ollama as a free alternative, which we discuss during Day 2."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7b87cadb-d513-4303-baee-a37b6f938e4d",
"metadata": {},
"outputs": [],
"source": [
"# Load environment variables in a file called .env\n",
"\n",
"load_dotenv(override=True)\n",
"api_key = os.getenv('OPENAI_API_KEY')\n",
"\n",
"# Check the key\n",
"\n",
"if not api_key:\n",
" print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n",
"elif not api_key.startswith(\"sk-proj-\"):\n",
" print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n",
"elif api_key.strip() != api_key:\n",
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n",
"else:\n",
" print(\"API key found and looks good so far!\")\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "019974d9-f3ad-4a8a-b5f9-0a3719aea2d3",
"metadata": {},
"outputs": [],
"source": [
"openai = OpenAI()\n",
"\n",
"# If this doesn't work, try Kernel menu >> Restart Kernel and Clear Outputs Of All Cells, then run the cells from the top of this notebook down.\n",
"# If it STILL doesn't work (horrors!) then please see the Troubleshooting notebook in this folder for full instructions"
]
},
{
"cell_type": "markdown",
"id": "442fc84b-0815-4f40-99ab-d9a5da6bda91",
"metadata": {},
"source": [
"# Let's make a quick call to a Frontier model to get started, as a preview!"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a58394bf-1e45-46af-9bfd-01e24da6f49a",
"metadata": {},
"outputs": [],
"source": [
"# To give you a preview -- calling OpenAI with these messages is this easy. Any problems, head over to the Troubleshooting notebook.\n",
"\n",
"message = \"Hello, GPT! This is my first ever message to you! Hi!\"\n",
"response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=[{\"role\":\"user\", \"content\":message}])\n",
"print(response.choices[0].message.content)"
]
},
{
"cell_type": "markdown",
"id": "2aa190e5-cb31-456a-96cc-db109919cd78",
"metadata": {},
"source": [
"## OK onwards with our first project"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c5e793b2-6775-426a-a139-4848291d0463",
"metadata": {},
"outputs": [],
"source": [
"# A class to represent a Webpage\n",
"# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n",
"\n",
"# Some websites need you to use proper headers when fetching them:\n",
"headers = {\n",
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
"}\n",
"\n",
"class Website:\n",
"\n",
" def __init__(self, url):\n",
" \"\"\"\n",
" Create this Website object from the given url using the BeautifulSoup library\n",
" \"\"\"\n",
" self.url = url\n",
" response = requests.get(url, headers=headers)\n",
" soup = BeautifulSoup(response.content, 'html.parser')\n",
" self.title = soup.title.string if soup.title else \"No title found\"\n",
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
" irrelevant.decompose()\n",
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2ef960cf-6dc2-4cda-afb3-b38be12f4c97",
"metadata": {},
"outputs": [],
"source": [
"# Let's try one out. Change the website and add print statements to follow along.\n",
"\n",
"ed = Website(\"https://edwarddonner.com\")\n",
"print(ed.title)\n",
"print(ed.text)"
]
},
{
"cell_type": "markdown",
"id": "6a478a0c-2c53-48ff-869c-4d08199931e1",
"metadata": {},
"source": [
"## Types of prompts\n",
"\n",
"You may know this already - but if not, you will get very familiar with it!\n",
"\n",
"Models like GPT4o have been trained to receive instructions in a particular way.\n",
"\n",
"They expect to receive:\n",
"\n",
"**A system prompt** that tells them what task they are performing and what tone they should use\n",
"\n",
"**A user prompt** -- the conversation starter that they should reply to"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "abdb8417-c5dc-44bc-9bee-2e059d162699",
"metadata": {},
"outputs": [],
"source": [
"# Define our system prompt - you can experiment with this later, changing the last sentence to 'Respond in markdown in Spanish.\"\n",
"\n",
"system_prompt = \"You are an assistant that analyzes the contents of a website \\\n",
"and provides a short summary, ignoring text that might be navigation related. \\\n",
"Respond in markdown.\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f0275b1b-7cfe-4f9d-abfa-7650d378da0c",
"metadata": {},
"outputs": [],
"source": [
"# A function that writes a User Prompt that asks for summaries of websites:\n",
"\n",
"def user_prompt_for(website):\n",
" user_prompt = f\"You are looking at a website titled {website.title}\"\n",
" user_prompt += \"\\nThe contents of this website is as follows; \\\n",
"please provide a short summary of this website in markdown. \\\n",
"If it includes news or announcements, then summarize these too.\\n\\n\"\n",
" user_prompt += website.text\n",
" return user_prompt"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "26448ec4-5c00-4204-baec-7df91d11ff2e",
"metadata": {},
"outputs": [],
"source": [
"print(user_prompt_for(ed))"
]
},
{
"cell_type": "markdown",
"id": "ea211b5f-28e1-4a86-8e52-c0b7677cadcc",
"metadata": {},
"source": [
"## Messages\n",
"\n",
"The API from OpenAI expects to receive messages in a particular structure.\n",
"Many of the other APIs share this structure:\n",
"\n",
"```\n",
"[\n",
" {\"role\": \"system\", \"content\": \"system message goes here\"},\n",
" {\"role\": \"user\", \"content\": \"user message goes here\"}\n",
"]\n",
"```\n",
"\n",
"To give you a preview, the next 2 cells make a rather simple call - we won't stretch the mighty GPT (yet!)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f25dcd35-0cd0-4235-9f64-ac37ed9eaaa5",
"metadata": {},
"outputs": [],
"source": [
"messages = [\n",
" {\"role\": \"system\", \"content\": \"You are a snarky assistant\"},\n",
" {\"role\": \"user\", \"content\": \"What is 2 + 2?\"}\n",
"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "21ed95c5-7001-47de-a36d-1d6673b403ce",
"metadata": {},
"outputs": [],
"source": [
"# To give you a preview -- calling OpenAI with system and user messages:\n",
"\n",
"response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=messages)\n",
"print(response.choices[0].message.content)"
]
},
{
"cell_type": "markdown",
"id": "d06e8d78-ce4c-4b05-aa8e-17050c82bb47",
"metadata": {},
"source": [
"## And now let's build useful messages for GPT-4o-mini, using a function"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0134dfa4-8299-48b5-b444-f2a8c3403c88",
"metadata": {},
"outputs": [],
"source": [
"# See how this function creates exactly the format above\n",
"\n",
"def messages_for(website):\n",
" return [\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": user_prompt_for(website)}\n",
" ]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "36478464-39ee-485c-9f3f-6a4e458dbc9c",
"metadata": {},
"outputs": [],
"source": [
"# Try this out, and then try for a few more websites\n",
"\n",
"messages_for(ed)"
]
},
{
"cell_type": "markdown",
"id": "16f49d46-bf55-4c3e-928f-68fc0bf715b0",
"metadata": {},
"source": [
"## Time to bring it together - the API for OpenAI is very simple!"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "905b9919-aba7-45b5-ae65-81b3d1d78e34",
"metadata": {},
"outputs": [],
"source": [
"# And now: call the OpenAI API. You will get very familiar with this!\n",
"\n",
"def summarize(url):\n",
" website = Website(url)\n",
" response = openai.chat.completions.create(\n",
" model = \"gpt-4o-mini\",\n",
" messages = messages_for(website)\n",
" )\n",
" return response.choices[0].message.content"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "05e38d41-dfa4-4b20-9c96-c46ea75d9fb5",
"metadata": {},
"outputs": [],
"source": [
"summarize(\"https://edwarddonner.com\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3d926d59-450e-4609-92ba-2d6f244f1342",
"metadata": {},
"outputs": [],
"source": [
"# A function to display this nicely in the Jupyter output, using markdown\n",
"\n",
"def display_summary(url):\n",
" summary = summarize(url)\n",
" display(Markdown(summary))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3018853a-445f-41ff-9560-d925d1774b2f",
"metadata": {},
"outputs": [],
"source": [
"display_summary(\"https://edwarddonner.com\")"
]
},
{
"cell_type": "markdown",
"id": "b3bcf6f4-adce-45e9-97ad-d9a5d7a3a624",
"metadata": {},
"source": [
"# Let's try more websites\n",
"\n",
"Note that this will only work on websites that can be scraped using this simplistic approach.\n",
"\n",
"Websites that are rendered with Javascript, like React apps, won't show up. See the community-contributions folder for a Selenium implementation that gets around this. You'll need to read up on installing Selenium (ask ChatGPT!)\n",
"\n",
"Also, websites protected with CloudFront (and similar) may give 403 errors - many thanks to Andy J for pointing this out.\n",
"\n",
"But many websites will work just fine!"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "45d83403-a24c-44b5-84ac-961449b4008f",
"metadata": {},
"outputs": [],
"source": [
"display_summary(\"https://cnn.com\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "75e9fd40-b354-4341-991e-863ef2e59db7",
"metadata": {},
"outputs": [],
"source": [
"display_summary(\"https://anthropic.com\")"
]
},
{
"cell_type": "markdown",
"id": "c951be1a-7f1b-448f-af1f-845978e47e2c",
"metadata": {},
"source": [
"<table style=\"margin: 0; text-align: left;\">\n",
" <tr>\n",
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
" <img src=\"../business.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
" </td>\n",
" <td>\n",
" <h2 style=\"color:#181;\">Business applications</h2>\n",
" <span style=\"color:#181;\">In this exercise, you experienced calling the Cloud API of a Frontier Model (a leading model at the frontier of AI) for the first time. We will be using APIs like OpenAI at many stages in the course, in addition to building our own LLMs.\n",
"\n",
"More specifically, we've applied this to Summarization - a classic Gen AI use case to make a summary. This can be applied to any business vertical - summarizing the news, summarizing financial performance, summarizing a resume in a cover letter - the applications are limitless. Consider how you could apply Summarization in your business, and try prototyping a solution.</span>\n",
" </td>\n",
" </tr>\n",
"</table>\n",
"\n",
"<table style=\"margin: 0; text-align: left;\">\n",
" <tr>\n",
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
" <img src=\"../important.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
" </td>\n",
" <td>\n",
" <h2 style=\"color:#900;\">Before you continue - now try yourself</h2>\n",
" <span style=\"color:#900;\">Use the cell below to make your own simple commercial example. Stick with the summarization use case for now. Here's an idea: write something that will take the contents of an email, and will suggest an appropriate short subject line for the email. That's the kind of feature that might be built into a commercial email tool.</span>\n",
" </td>\n",
" </tr>\n",
"</table>"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "00743dac-0e70-45b7-879a-d7293a6f68a6",
"metadata": {},
"outputs": [],
"source": [
"# Step 1: Create your prompts\n",
"\n",
"system_prompt = \"\"\"you are an AI to a salesperson working in the field of industrial tools and hardware. You have the following roles:\\\n",
"1. identify and understand the scenario the customer is describing.\\\n",
"2. figure what caregory of products are suitable for use in the scenario.\\\n",
"3. search https://industrywaala.com/ for the category of products you identified in 2. and then look for 2 products in that\\\n",
"category that you think will be most suitable in the given use case. for this you need to check for product features provided in\\\n",
"the short and long descriptions on the website that are applicable in the scenario.\\\n",
"4. make a summary of the two products with the brand name, model and 2 other key features of the product\\\n",
"5. always respond in markdown.\n",
"\"\"\"\n",
"\n",
"user_prompt = \"\"\"\\n can you help figure what model of product should i use in high temperature environemt. \\n\\n\n",
"\"\"\"\n",
"\n",
"# Step 2: Make the messages list\n",
"\n",
"messages = [\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": user_prompt}\n",
"] # fill this in\n",
"\n",
"# Step 3: Call OpenAI\n",
"\n",
"response = openai.chat.completions.create(\n",
" model = \"gpt-4o-mini\",\n",
" messages = messages\n",
")\n",
"\n",
"# Step 4: print the result\n",
"\n",
"display(Markdown(response.choices[0].message.content))"
]
},
{
"cell_type": "markdown",
"id": "36ed9f14-b349-40e9-a42c-b367e77f8bda",
"metadata": {},
"source": [
"## An extra exercise for those who enjoy web scraping\n",
"\n",
"You may notice that if you try `display_summary(\"https://openai.com\")` - it doesn't work! That's because OpenAI has a fancy website that uses Javascript. There are many ways around this that some of you might be familiar with. For example, Selenium is a hugely popular framework that runs a browser behind the scenes, renders the page, and allows you to query it. If you have experience with Selenium, Playwright or similar, then feel free to improve the Website class to use them. In the community-contributions folder, you'll find an example Selenium solution from a student (thank you!)"
]
},
{
"cell_type": "markdown",
"id": "eeab24dc-5f90-4570-b542-b0585aca3eb6",
"metadata": {},
"source": [
"# Sharing your code\n",
"\n",
"I'd love it if you share your code afterwards so I can share it with others! You'll notice that some students have already made changes (including a Selenium implementation) which you will find in the community-contributions folder. If you'd like to add your changes to that folder, submit a Pull Request with your new versions in that folder and I'll merge your changes.\n",
"\n",
"If you're not an expert with git (and I am not!) then GPT has given some nice instructions on how to submit a Pull Request. It's a bit of an involved process, but once you've done it once it's pretty clear. As a pro-tip: it's best if you clear the outputs of your Jupyter notebooks (Edit >> Clean outputs of all cells, and then Save) for clean notebooks.\n",
"\n",
"Here are good instructions courtesy of an AI friend: \n",
"https://chatgpt.com/share/677a9cb5-c64c-8012-99e0-e06e88afd293"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,170 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 8,
"id": "6ba7c60a-c338-49a1-b1ba-46b7c20e33cb",
"metadata": {},
"outputs": [],
"source": [
"import openai\n",
"import os\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"from IPython.display import Markdown, display"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "4acb4062-17b2-43b1-8b74-aefaa9599463",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"API key found and looks good so far!\n"
]
}
],
"source": [
"load_dotenv(override=True)\n",
"api_key = os.getenv('OPENAI_API_KEY')\n",
"\n",
"# Check the key\n",
"\n",
"if not api_key:\n",
" print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n",
"elif not api_key.startswith(\"sk-proj-\"):\n",
" print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n",
"elif api_key.strip() != api_key:\n",
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n",
"else:\n",
" print(\"API key found and looks good so far!\")"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "56f011b2-b759-4ad6-9d01-870fbcb8ade1",
"metadata": {},
"outputs": [],
"source": [
"def generate_quiz(topic):\n",
" prompt = f\"Generate a multiple-choice quiz with 5 questions on the topic: {topic}. Include the correct answer for each question.\"\n",
" \n",
" messages = [\n",
" {\"role\": \"system\", \"content\": \"You are a quiz generator. Create a multiple-choice quiz with 5 questions and provide the correct answers.Respond in markdown.\"},\n",
" {\"role\": \"user\", \"content\": prompt}\n",
" ]\n",
" \n",
" response = openai.chat.completions.create(\n",
" model=\"gpt-4\",\n",
" messages=messages,\n",
" max_tokens=300\n",
" )\n",
" \n",
" return response.choices[0].message.content"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "1cf977e7-b04b-49e7-8b0a-d0ab2800c234",
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"**Question 1:** What is Python?\n",
"\n",
"**Choice A:** A type of snake\n",
"**Choice B:** A medical term\n",
"**Choice C:** A drilling tool\n",
"**Choice D:** A high-level programming language\n",
"\n",
"Correct Answer: **Choice D:** A high-level programming language\n",
"\n",
"**Question 2:** In Python, what keyword is used to create a function?\n",
"\n",
"**Choice A:** func\n",
"**Choice B:** def\n",
"**Choice C:** function\n",
"**Choice D:** create\n",
"\n",
"Correct Answer: **Choice B:** def\n",
"\n",
"**Question 3:** What is the correct syntax to output \"Hello World\" in Python?\n",
"\n",
"**Choice A:** printf(\"Hello World\")\n",
"**Choice B:** println(\"Hello World\")\n",
"**Choice C:** echo(\"Hello World\")\n",
"**Choice D:** print(\"Hello World\")\n",
"\n",
"Correct Answer: **Choice D:** print(\"Hello World\")\n",
"\n",
"**Question 4:** How would you create a variable \"x\" that equals 5 in Python?\n",
"\n",
"**Choice A:** var x = 5\n",
"**Choice B:** x := 5\n",
"**Choice C:** x = 5\n",
"**Choice D:** x : 5\n",
"\n",
"Correct Answer: **Choice C:** x = 5\n",
"\n",
"**Question 5:** How do you create a comment in Python?\n",
"\n",
"**Choice A:** // This is a comment\n",
"**Choice B:** # This is a comment\n",
"**Choice C:** <!-- This is a comment -->\n",
"**Choice D:** /* This is a comment */\n",
"\n",
"Correct Answer"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# Example usage\n",
"topic = \"Python programming\"\n",
"quiz = generate_quiz(topic)\n",
"display(Markdown(quiz))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "70990d7c-6061-43c6-b3c9-9146a3c51c3e",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,354 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "d15d8294-3328-4e07-ad16-8a03e9bbfdb9",
"metadata": {},
"source": [
"# Welcome to your first assignment!\n",
"\n",
"Instructions are below. Please give this a try, and look in the solutions folder if you get stuck (or feel free to ask me!)"
]
},
{
"cell_type": "markdown",
"id": "ada885d9-4d42-4d9b-97f0-74fbbbfe93a9",
"metadata": {},
"source": [
"<table style=\"margin: 0; text-align: left;\">\n",
" <tr>\n",
" <td style=\"width: 150px; height: 150px; vertical-align: middle;\">\n",
" <img src=\"../resources.jpg\" width=\"150\" height=\"150\" style=\"display: block;\" />\n",
" </td>\n",
" <td>\n",
" <h2 style=\"color:#f71;\">Just before we get to the assignment --</h2>\n",
" <span style=\"color:#f71;\">I thought I'd take a second to point you at this page of useful resources for the course. This includes links to all the slides.<br/>\n",
" <a href=\"https://edwarddonner.com/2024/11/13/llm-engineering-resources/\">https://edwarddonner.com/2024/11/13/llm-engineering-resources/</a><br/>\n",
" Please keep this bookmarked, and I'll continue to add more useful links there over time.\n",
" </span>\n",
" </td>\n",
" </tr>\n",
"</table>"
]
},
{
"cell_type": "markdown",
"id": "6e9fa1fc-eac5-4d1d-9be4-541b3f2b3458",
"metadata": {},
"source": [
"# HOMEWORK EXERCISE ASSIGNMENT\n",
"\n",
"Upgrade the day 1 project to summarize a webpage to use an Open Source model running locally via Ollama rather than OpenAI\n",
"\n",
"You'll be able to use this technique for all subsequent projects if you'd prefer not to use paid APIs.\n",
"\n",
"**Benefits:**\n",
"1. No API charges - open-source\n",
"2. Data doesn't leave your box\n",
"\n",
"**Disadvantages:**\n",
"1. Significantly less power than Frontier Model\n",
"\n",
"## Recap on installation of Ollama\n",
"\n",
"Simply visit [ollama.com](https://ollama.com) and install!\n",
"\n",
"Once complete, the ollama server should already be running locally. \n",
"If you visit: \n",
"[http://localhost:11434/](http://localhost:11434/)\n",
"\n",
"You should see the message `Ollama is running`. \n",
"\n",
"If not, bring up a new Terminal (Mac) or Powershell (Windows) and enter `ollama serve` \n",
"And in another Terminal (Mac) or Powershell (Windows), enter `ollama pull llama3.2` \n",
"Then try [http://localhost:11434/](http://localhost:11434/) again.\n",
"\n",
"If Ollama is slow on your machine, try using `llama3.2:1b` as an alternative. Run `ollama pull llama3.2:1b` from a Terminal or Powershell, and change the code below from `MODEL = \"llama3.2\"` to `MODEL = \"llama3.2:1b\"`"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4e2a9393-7767-488e-a8bf-27c12dca35bd",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import requests\n",
"from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "29ddd15d-a3c5-4f4e-a678-873f56162724",
"metadata": {},
"outputs": [],
"source": [
"# Constants\n",
"\n",
"OLLAMA_API = \"http://localhost:11434/api/chat\"\n",
"HEADERS = {\"Content-Type\": \"application/json\"}\n",
"MODEL = \"llama3.2\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dac0a679-599c-441f-9bf2-ddc73d35b940",
"metadata": {},
"outputs": [],
"source": [
"# Create a messages list using the same format that we used for OpenAI\n",
"\n",
"messages = [\n",
" {\"role\": \"user\", \"content\": \"Describe some of the business applications of Generative AI\"}\n",
"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7bb9c624-14f0-4945-a719-8ddb64f66f47",
"metadata": {},
"outputs": [],
"source": [
"payload = {\n",
" \"model\": MODEL,\n",
" \"messages\": messages,\n",
" \"stream\": False\n",
" }"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "479ff514-e8bd-4985-a572-2ea28bb4fa40",
"metadata": {},
"outputs": [],
"source": [
"# Let's just make sure the model is loaded\n",
"\n",
"!ollama pull llama3.2"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "42b9f644-522d-4e05-a691-56e7658c0ea9",
"metadata": {},
"outputs": [],
"source": [
"# If this doesn't work for any reason, try the 2 versions in the following cells\n",
"# And double check the instructions in the 'Recap on installation of Ollama' at the top of this lab\n",
"# And if none of that works - contact me!\n",
"\n",
"response = requests.post(OLLAMA_API, json=payload, headers=HEADERS)\n",
"print(response.json()['message']['content'])"
]
},
{
"cell_type": "markdown",
"id": "6a021f13-d6a1-4b96-8e18-4eae49d876fe",
"metadata": {},
"source": [
"# Introducing the ollama package\n",
"\n",
"And now we'll do the same thing, but using the elegant ollama python package instead of a direct HTTP call.\n",
"\n",
"Under the hood, it's making the same call as above to the ollama server running at localhost:11434"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7745b9c4-57dc-4867-9180-61fa5db55eb8",
"metadata": {},
"outputs": [],
"source": [
"import ollama\n",
"\n",
"response = ollama.chat(model=MODEL, messages=messages)\n",
"print(response['message']['content'])"
]
},
{
"cell_type": "markdown",
"id": "a4704e10-f5fb-4c15-a935-f046c06fb13d",
"metadata": {},
"source": [
"## Alternative approach - using OpenAI python library to connect to Ollama"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "23057e00-b6fc-4678-93a9-6b31cb704bff",
"metadata": {},
"outputs": [],
"source": [
"# There's actually an alternative approach that some people might prefer\n",
"# You can use the OpenAI client python library to call Ollama:\n",
"\n",
"from openai import OpenAI\n",
"ollama_via_openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')\n",
"\n",
"response = ollama_via_openai.chat.completions.create(\n",
" model=MODEL,\n",
" messages=messages\n",
")\n",
"\n",
"print(response.choices[0].message.content)"
]
},
{
"cell_type": "markdown",
"id": "1622d9bb-5c68-4d4e-9ca4-b492c751f898",
"metadata": {},
"source": [
"# NOW the exercise for you\n",
"\n",
"Take the code from day1 and incorporate it here, to build a website summarizer that uses Llama 3.2 running locally instead of OpenAI; use either of the above approaches."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ef76cfc2-c519-4cb2-947a-64948517913d",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import requests\n",
"from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a151a8de-1e90-4190-b68e-b44b25a2cdd7",
"metadata": {},
"outputs": [],
"source": [
"# Constants\n",
"\n",
"OLLAMA_API = \"http://localhost:11434/api/chat\"\n",
"HEADERS = {\"Content-Type\": \"application/json\"}\n",
"MODEL = \"llama3.2\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "700fffc1-c7b0-4001-b381-5c4fd28c8799",
"metadata": {},
"outputs": [],
"source": [
"# Reusing the Website BeautifulSoup wrapper from Day 1\n",
"# SSL Verification has been disabled\n",
"\n",
"headers = {\n",
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
"}\n",
"\n",
"class Website:\n",
"\n",
" def __init__(self, url):\n",
" \"\"\"\n",
" Create this Website object from the given url using the BeautifulSoup library\n",
" \"\"\"\n",
" self.url = url\n",
" response = requests.get(url, headers=headers, verify=False) # NOTE Disabled ssl verification here to workaround VPN Limitations\n",
" soup = BeautifulSoup(response.content, 'html.parser')\n",
" self.title = soup.title.string if soup.title else \"No title found\"\n",
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
" irrelevant.decompose()\n",
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "402d5686-4e76-4110-b65a-b3906c35c0a4",
"metadata": {},
"outputs": [],
"source": [
"def user_prompt_for(website):\n",
" user_prompt = f\"You are looking at a website titled {website.title}\"\n",
" user_prompt += \"\\nThe contents of this website are as follows; \\\n",
"please provide a short summary of this website in markdown. \\\n",
"If it includes news or announcements, then summarize these too.\\n\\n\"\n",
" user_prompt += website.text\n",
" return user_prompt"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "81f5f140-8f77-418f-a252-8ad5d11f6c5f",
"metadata": {},
"outputs": [],
"source": [
"## enter the web URL here:\n",
"website_url = \"https://www.timecube.net/\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1d0ce4aa-b43e-4642-bcbd-d5964700ece8",
"metadata": {},
"outputs": [],
"source": [
"## This will at first print a warning for SSL which can be ignored before providing response. \n",
"\n",
"import ollama\n",
"\n",
"system_prompt = \"You are a virtual assistant who analyzes the contents of a website \\\n",
"and provides a short summary, ignoring text that might be navigation related. \\\n",
"Respond in markdown.\"\n",
"\n",
"messages = [\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": user_prompt_for(Website(website_url))}\n",
"]\n",
"\n",
"response = ollama.chat(model=MODEL, messages=messages)\n",
"print(response['message']['content'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "910b7e06-c92d-47bf-a4ee-a006d70deb06",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,93 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "fa4447be-7825-45d9-a6a5-ed41f2500533",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import requests\n",
"from dotenv import load_dotenv\n",
"from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display\n",
"from openai import OpenAI\n",
"\n",
"openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')\n",
"MODEL = \"llama3.2\"\n",
"\n",
"headers = {\n",
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
"}\n",
"\n",
"class Website:\n",
"\n",
" def __init__(self, url):\n",
" \"\"\"\n",
" Create this Website object from the given url using the BeautifulSoup library\n",
" \"\"\"\n",
" self.url = url\n",
" response = requests.get(url, headers=headers)\n",
" soup = BeautifulSoup(response.content, 'html.parser')\n",
" self.title = soup.title.string if soup.title else \"No title found\"\n",
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
" irrelevant.decompose()\n",
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n",
"\n",
"def user_prompt_for(website):\n",
" user_prompt = f\"You are looking at a website titled {website.title}\"\n",
" user_prompt += \"\\nThe contents of this website is as follows; please provide a short summary of this website in markdown. \\\n",
"If it includes news or announcements, then summarize these too.\\n\\n\"\n",
" user_prompt += website.text\n",
" return user_prompt\n",
"\n",
"system_prompt = \"You are an assistant that analyzes the contents of a website \\\n",
"and provides a short summary, ignoring text that might be navigation related. \\\n",
"Respond in markdown.\"\n",
"\n",
"def messages_for(website):\n",
" return [\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": user_prompt_for(website)}\n",
" ] \n",
"\n",
"def summarize(url):\n",
" website = Website(url)\n",
" response = openai.chat.completions.create(\n",
" model = MODEL,\n",
" messages = messages_for(website)\n",
" )\n",
" return response.choices[0].message.content\n",
"\n",
"def display_summary(url):\n",
" summary = summarize(url)\n",
" display(Markdown(summary))\n",
"\n",
"\n",
"display_summary(\"https://esarijal.my.id\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,159 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "568fd96a-8cf6-42aa-b9cf-74b7aa383595",
"metadata": {},
"source": [
"# Ollama Website Summarizer\n",
"## Scrape websites and summarize them locally using Ollama\n",
"\n",
"This script is a complete example of the day 1 program, which uses OpenAI API to summarize websites, altered to use techniques from the day 2 exercise to call Ollama models locally."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a9502a0f-d7be-4489-bb7f-173207e802b6",
"metadata": {},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import ollama\n",
"import requests\n",
"from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display\n",
"\n",
"MODEL = \"llama3.2\"\n",
"\n",
"# A class to represent a Webpage\n",
"# If you're not familiar with Classes, check out the \"Intermediate Python\" notebook\n",
"\n",
"# Some websites need you to use proper headers when fetching them:\n",
"headers = {\n",
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
"}\n",
"\n",
"class Website:\n",
"\n",
" def __init__(self, url):\n",
" \"\"\"\n",
" Create this Website object from the given url using the BeautifulSoup library\n",
" \"\"\"\n",
" self.url = url\n",
" response = requests.get(url, headers=headers)\n",
" soup = BeautifulSoup(response.content, 'html.parser')\n",
" self.title = soup.title.string if soup.title else \"No title found\"\n",
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
" irrelevant.decompose()\n",
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n",
" \n",
"# A function that writes a User Prompt that asks for summaries of websites:\n",
"\n",
"def user_prompt_for(website):\n",
" user_prompt = f\"You are looking at a website titled {website.title}\"\n",
" user_prompt += \"\\nThe contents of this website is as follows; \\\n",
"please provide a short summary of this website in markdown. \\\n",
"If it includes news or announcements, then summarize these too.\\n\\n\"\n",
" user_prompt += website.text\n",
" return user_prompt\n",
" \n",
"# Create a messages list for a summarize prompt given a website\n",
"\n",
"def create_summarize_prompt(website):\n",
" return [\n",
" {\"role\": \"system\", \"content\": \"You are an assistant that analyzes the contents of a website \\\n",
"and provides a short summary, ignoring text that might be navigation related. \\\n",
"Respond in markdown.\" },\n",
" {\"role\": \"user\", \"content\": user_prompt_for(website)}\n",
" ]\n",
"\n",
"# And now: call Ollama to summarize\n",
"\n",
"def summarize(url):\n",
" website = Website(url)\n",
" messages = create_summarize_prompt(website)\n",
" response = ollama.chat(model=MODEL, messages=messages)\n",
" return response['message']['content']\n",
" \n",
"# A function to display this nicely in the Jupyter output, using markdown\n",
"\n",
"def display_summary(url):\n",
" summary = summarize(url)\n",
" display(Markdown(summary))"
]
},
{
"cell_type": "markdown",
"id": "037627b0-b039-4ca4-a6d4-84ad8fc6a013",
"metadata": {},
"source": [
"## Pre-requisites\n",
"\n",
"Before we can run the script above, we need to make sure Ollama is running on your machine!\n",
"\n",
"Simply visit ollama.com and install!\n",
"\n",
"Once complete, the ollama server should already be running locally.\n",
"If you visit:\n",
"http://localhost:11434/\n",
"\n",
"You should see the message Ollama is running."
]
},
{
"cell_type": "markdown",
"id": "6c2d84fd-2a9b-476d-84ad-4b8522d47023",
"metadata": {},
"source": [
"## Run!\n",
"\n",
"Shift+Enter the code below to summarize a website.\n",
"\n",
"### NOTE!\n",
"\n",
"This will only work with websites that return HTML content, and may return unexpected results for SPAs that are created with JS."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "100829ba-8278-409b-bc0a-82ac28e1149f",
"metadata": {},
"outputs": [],
"source": [
"display_summary(\"https://edwarddonner.com/2024/11/13/llm-engineering-resources/\")"
]
},
{
"cell_type": "markdown",
"id": "ffe4e760-dfa6-43fa-89c4-beea547707ac",
"metadata": {},
"source": [
"Edit the URL above, or add code blocks of your own to try it out!"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -445,7 +445,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.10"
"version": "3.11.11"
}
},
"nbformat": 4,

Binary file not shown.

View File

@@ -0,0 +1,308 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "it1JLoxrSqO1",
"metadata": {
"id": "it1JLoxrSqO1"
},
"outputs": [],
"source": [
"!pip install openai python-docx python-dotenv"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "950a084a-7f92-4669-af62-f07cb121da56",
"metadata": {
"id": "950a084a-7f92-4669-af62-f07cb121da56"
},
"outputs": [],
"source": [
"import os\n",
"import json\n",
"from dotenv import load_dotenv\n",
"from IPython.display import Markdown, display, update_display\n",
"from openai import OpenAI\n",
"from docx import Document"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ab9f734f-ed6f-44f6-accb-594f9ca4843d",
"metadata": {
"id": "ab9f734f-ed6f-44f6-accb-594f9ca4843d"
},
"outputs": [],
"source": [
"class ReqDoc:\n",
" def __init__(self, file_path):\n",
" self.file_path = file_path\n",
"\n",
" def extract(self):\n",
" \"\"\"\n",
" Reads the content of a .docx file and returns the paragraphs as a list of strings.\n",
" \"\"\"\n",
" try:\n",
" # Check if the file exists\n",
" if not os.path.exists(self.file_path):\n",
" raise FileNotFoundError(f\"The file {self.file_path} was not found.\")\n",
"\n",
" # Attempt to open and read the document\n",
" doc = Document(self.file_path)\n",
" text = \"\\n\".join([paragraph.text for paragraph in doc.paragraphs])\n",
" return text\n",
"\n",
" except FileNotFoundError as fnf_error:\n",
" print(fnf_error)\n",
" return None\n",
" except Exception as e:\n",
" print(f\"An error occurred: {e}\")\n",
" return None\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "008f485a-5718-48f6-b408-06eb6d59d7f9",
"metadata": {
"id": "008f485a-5718-48f6-b408-06eb6d59d7f9"
},
"outputs": [],
"source": [
"# Initialize and constants\n",
"load_dotenv(override=True)\n",
"api_key = os.getenv('OPENAI_API_KEY')\n",
"\n",
"if api_key and api_key.startswith('sk-proj') and len(api_key)>10:\n",
" print(\"API key looks good!\")\n",
"else:\n",
" print(\"There might be a problem with your API key. Please check!\")\n",
" \n",
"MODEL = 'gpt-4o-mini'\n",
"openai = OpenAI()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b6110ff3-74bc-430a-8051-7d86a216f0fb",
"metadata": {
"id": "b6110ff3-74bc-430a-8051-7d86a216f0fb"
},
"outputs": [],
"source": [
"#Set up system prompt for extracting just the requirements from the document\n",
"\n",
"req_doc_system_prompt = \"You are provided with a complete requirements specifications document. \\\n",
"You are able to decide which content from that document are related to actual requirements, identify each requirement as \\\n",
"functional or non-functional and list them all.\\n\"\n",
"req_doc_system_prompt += \"If the document is empty or do not contain requirements or if you cannot extract them, please respond as such.\\\n",
"Do not make up your own requirements. \\n\"\n",
"req_doc_system_prompt += \"You should respond in JSON as in this example:\"\n",
"req_doc_system_prompt += \"\"\"\n",
"{\n",
"  \"requirements\": [\n",
"    {\"RequirementNo\": \"FR-01\", \"Requirement Description\": \"description of this functional requirement goes here\"},\n",
"    {\"RequirementNo\": \"FR-02\", \"Requirement Description\": \"description of this functional requirement goes here\"},\n",
"    {\"RequirementNo\": \"NFR-01\", \"Requirement Description\": \"description of this non-functional requirement goes here\"},\n",
"    {\"RequirementNo\": \"NFR-02\", \"Requirement Description\": \"description of this non-functional requirement goes here\"}\n",
"  ]\n",
"}\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "20460e45-c1b7-4dc4-ab07-932235c19895",
"metadata": {
"id": "20460e45-c1b7-4dc4-ab07-932235c19895"
},
"outputs": [],
"source": [
"#Set up user prompt, sending in the requirements doc as input and calling the ReqDoc.extract function. Key to note here is the explicit instructions to\n",
"#respond in JSON format.\n",
"\n",
"def req_doc_user_prompt(doc):\n",
" user_prompt = \"Here is the contents from a requirement document.\\n\"\n",
" user_prompt += f\"{doc.extract()} \\n\"\n",
" user_prompt += \"Please scan through the document and extract only the actual requirements. For example, ignore sections or \\\n",
"paragraphs such as Approvers, table of contents and similar sections which are not really requirements.\\\n",
"You must respond in a JSON format\"\n",
" user_prompt += \"If the content is empty, respond that there are no valid requirements you could extract and ask for a proper document.\\n\"\n",
" user_prompt = user_prompt[:25_000] # Truncate if more than 25,000 characters\n",
" return user_prompt\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3a9f0f84-69a0-4971-a545-5bb40c2f9891",
"metadata": {
"id": "3a9f0f84-69a0-4971-a545-5bb40c2f9891"
},
"outputs": [],
"source": [
"#Function to call chatgpt-4o-mini model with the user and system prompts set above and returning the json formatted result obtained from chatgpt\n",
"\n",
"def get_requirements(doc):\n",
" reqdoc = ReqDoc(doc)\n",
" response = openai.chat.completions.create(\n",
" model=MODEL,\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": req_doc_system_prompt},\n",
" {\"role\": \"user\", \"content\": req_doc_user_prompt(reqdoc)}\n",
" ],\n",
" response_format={\"type\": \"json_object\"}\n",
" )\n",
" result = response.choices[0].message.content\n",
" return json.loads(result)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f9bb04ef-78d3-4e0f-9ed1-59a961a0663e",
"metadata": {
"id": "f9bb04ef-78d3-4e0f-9ed1-59a961a0663e"
},
"outputs": [],
"source": [
"#Uncomment and run this if you want to see the extracted requirements in json format.\n",
"#get_requirements(\"reqdoc.docx\")"
]
},
{
"cell_type": "markdown",
"id": "1fe8618c-1dfe-4030-bad8-405731294c93",
"metadata": {
"id": "1fe8618c-1dfe-4030-bad8-405731294c93"
},
"source": [
"### Next, we will make another call to gpt-4o-mini"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "db2c1eb3-7740-43a4-9c0b-37b7e70c739b",
"metadata": {
"id": "db2c1eb3-7740-43a4-9c0b-37b7e70c739b"
},
"outputs": [],
"source": [
"#Set up system prompt to ask for test cases in table format\n",
"\n",
"system_prompt = \"You are an assistant that receives a list of functional and non functional requirements in JSON format. You are the expert in generating unit test cases for each requirement. \\\n",
"You will create as many different test cases as needed for each requirement and produce a result in a table. Order the table by requirement No. Provide clear details on test case pass criteria. \\\n",
"The table will contain the following columns. \\\n",
"1.S No\\\n",
"2.Requirement No\\\n",
"3.Requirement Description\\\n",
"4.Test Case ID\\\n",
"5.Test case summary\\\n",
"6.Test case description\\\n",
"7.Success criteria \\n\"\n",
"system_prompt += \"If you are provided with an empty list, ask for a proper requirement doc\\n\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c4cd2bdf-e1bd-43ff-85fa-760ba39ed8c5",
"metadata": {
"id": "c4cd2bdf-e1bd-43ff-85fa-760ba39ed8c5"
},
"outputs": [],
"source": [
"# Set up user prompt passing in the req doc file. This in turn will call the get_requirements function, which will make a call to chatgpt.\n",
"\n",
"# Build the user prompt from the extracted requirements; truncated to 25,000 chars\n",
"# to stay within context limits (same convention as req_doc_user_prompt above).\n",
"def get_testcase_user_prompt(reqdoc):\n",
"    user_prompt = \"You are looking at the following list of requirements. \\n\"\n",
"    user_prompt += f\"{get_requirements(reqdoc)}\\n\"\n",
"    user_prompt += \"Prepare unit test cases for each of these requirements in a table and send that table as response. \\n\"\n",
"    user_prompt = user_prompt[:25_000]  # Truncate if more than 25,000 characters (was '+=', which appended a copy of the prompt instead of truncating)\n",
"    return user_prompt"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "59d859e2-e5bb-4bd6-ab59-5ad967d5d2e0",
"metadata": {
"id": "59d859e2-e5bb-4bd6-ab59-5ad967d5d2e0"
},
"outputs": [],
"source": [
"#This is the 2nd call to chatgpt to get test cases. display(Markdown) will take care of producing a neatly formatted table output.\n",
"def create_testcase_doc(reqdoc):\n",
" stream = openai.chat.completions.create(\n",
" model=MODEL,\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": get_testcase_user_prompt(reqdoc)}\n",
" ],\n",
" stream=True\n",
" )\n",
" response = \"\"\n",
" display_handle = display(Markdown(\"\"), display_id=True)\n",
" for chunk in stream:\n",
" response += chunk.choices[0].delta.content or ''\n",
" response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n",
" update_display(Markdown(response), display_id=display_handle.display_id)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0612d662-7047-4620-aa1c-2eb1c3d715cb",
"metadata": {
"id": "0612d662-7047-4620-aa1c-2eb1c3d715cb"
},
"outputs": [],
"source": [
"#The final piece of code. Provide the uploaded requirements filename below.\n",
"file_path = r\"reqdoc.docx\"\n",
"#print(file_path)\n",
"create_testcase_doc(file_path)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "82ae4371-22dd-4f2a-97c9-a70e0232a0aa",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.13.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,131 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "6418dce8-3ad0-4da9-81de-b3bf57956086",
"metadata": {},
"outputs": [],
"source": [
"import requests\n",
"from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "75b7849a-841b-4525-90b9-b9fd003516fb",
"metadata": {},
"outputs": [],
"source": [
"headers = {\n",
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
"}\n",
"\n",
"class Website:\n",
" def __init__(self, url):\n",
" self.url = url\n",
" response = requests.get(url, headers=headers)\n",
" soup = BeautifulSoup(response.content, 'html.parser')\n",
" self.title = soup.title.string if soup.title else \"No title found\"\n",
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
" irrelevant.decompose()\n",
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "45c07164-3276-47f3-8620-a5d0ca6a8d24",
"metadata": {},
"outputs": [],
"source": [
"system_prompt = \"You are an assistant that analyzes the contents of a website \\\n",
"and provides a short summary, ignoring text that might be navigation related. \\\n",
"Respond in markdown.\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b334629a-cf2a-49fa-b198-edd73493720f",
"metadata": {},
"outputs": [],
"source": [
"def user_prompt_for(website):\n",
" user_prompt = f\"You are looking at a website titled {website.title}\"\n",
" user_prompt += \"\\nThe contents of this website is as follows; \\\n",
"please provide a short summary of this website in markdown. \\\n",
"If it includes news or announcements, then summarize these too.\\n\\n\"\n",
" user_prompt += website.text\n",
" return user_prompt\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e4dd0855-302d-4423-9b8b-80c4bbb9ab31",
"metadata": {},
"outputs": [],
"source": [
"website = Website(\"https://cnn.com\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "65c6cc43-a16a-4337-8c3d-4ab10ee0377a",
"metadata": {},
"outputs": [],
"source": [
"messages = [\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": user_prompt_for(website)}]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "59799f7b-a244-4572-9296-34e4b87ba026",
"metadata": {},
"outputs": [],
"source": [
"import ollama\n",
"\n",
"MODEL = \"llama3.2\"\n",
"response = ollama.chat(model=MODEL, messages=messages)\n",
"print(response['message']['content'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a0c03050-60d2-4165-9d8a-27eb57455704",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,45 @@
# Imports: local Ollama client, OpenAI SDK, .env loading, and Jupyter markdown rendering.
import ollama, os
from openai import OpenAI
from dotenv import load_dotenv
from IPython.display import Markdown, display
# Pull environment variables (e.g. OPENAI_API_KEY) from a local .env file.
load_dotenv()
open_key = os.getenv("OPENAI_API_KEY")  # NOTE(review): captured but never used below — OpenAI() reads OPENAI_API_KEY from the environment itself; confirm intent
OPEN_MODEL = "gpt-4-turbo"  # OpenAI model used by explain()
ollama_model = "llama3.2"  # local Ollama model used by explainer_with_ollama()
openai = OpenAI()
# Shared system prompt: ask the model to explain the given code and suggest improvements, in markdown.
system_prompt = "You are an assistant that focuses on the reason for each code, analysing and interpreting what the code does and how it could be improved, \
Give your answer in markdown down with two different topics namely: Explanation and Code Improvement. However if you think there is no possible improvement \
to said code, simply state 'no possible improvement '"
def user_prompt():
    """Read the user's message (the code/question to analyze) from stdin and return it."""
    return input("Write your prompt message: ")
def explain():
    """Ask the OpenAI model to explain/improve the user's code and render the markdown reply."""
    chat_messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt()},
    ]
    completion = openai.chat.completions.create(model=OPEN_MODEL, messages=chat_messages)
    display(Markdown(completion.choices[0].message.content))
# explain() run this to get the openai output with personalized input
#With ollama
# The local Ollama server listens on plain HTTP; "https" would fail the TLS handshake.
# NOTE(review): this URL is currently unused — explainer_with_ollama() calls ollama.chat() directly.
ollama_api = "http://localhost:11434/api/chat"
def explainer_with_ollama():
    """Same explanation flow as explain(), but served by the local Ollama model."""
    chat_messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt()},
    ]
    reply = ollama.chat(model=ollama_model, messages=chat_messages)
    display(Markdown(reply["message"]["content"]))
#explainer_with_ollama() run for ollama output with same personalized input

View File

@@ -310,7 +310,7 @@
],
"metadata": {
"kernelspec": {
"display_name": "llm_env",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
@@ -324,7 +324,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
"version": "3.11.11"
}
},
"nbformat": 4,

View File

@@ -0,0 +1,125 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "a767b6bc-65fe-42b2-988f-efd54125114f",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import requests\n",
"from dotenv import load_dotenv\n",
"from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display, clear_output\n",
"from openai import OpenAI\n",
"\n",
"load_dotenv(override=True)\n",
"api_key = os.getenv('DEEPSEEK_API_KEY')\n",
"base_url=os.getenv('DEEPSEEK_BASE_URL')\n",
"MODEL = \"deepseek-chat\"\n",
"\n",
"system_prompt = \"You are an assistant that analyzes the contents of a website \\\n",
"and provides a short summary, ignoring text that might be navigation related. \\\n",
"Respond in markdown.\"\n",
"\n",
"messages = [\n",
" {\"role\": \"system\", \"content\": \"You are a snarky assistant\"},\n",
" {\"role\": \"user\", \"content\": \"What is 2 + 2?\"}\n",
"]\n",
" \n",
"# Check the key\n",
"if not api_key:\n",
" print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n",
"elif not api_key.startswith(\"sk-proj-\"):\n",
" print(\"An API key was found, but it doesn't start sk-proj-; Looks like you are using DeepSeek (R1) model.\")\n",
"elif api_key.strip() != api_key:\n",
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n",
"else:\n",
" print(\"API key found and looks good so far!\")\n",
" \n",
"openai = OpenAI(api_key=api_key, base_url=base_url)\n",
"\n",
"headers = {\n",
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
"}\n",
"\n",
"class Website:\n",
"\n",
" def __init__(self, url):\n",
" \"\"\"\n",
" Create this Website object from the given url using the BeautifulSoup library\n",
" \"\"\"\n",
" self.url = url\n",
" response = requests.get(url, headers=headers)\n",
" soup = BeautifulSoup(response.content, 'html.parser')\n",
" self.title = soup.title.string if soup.title else \"No title found\"\n",
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
" irrelevant.decompose()\n",
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n",
" \n",
"def user_prompt_for(website):\n",
" user_prompt = f\"You are looking at a website titled {website.title}\"\n",
" user_prompt += \"\\nThe contents of this website is as follows; please provide a short summary of this website in markdown. If it includes news or announcements, then summarize these too.\\n\\n\"\n",
" user_prompt += website.text\n",
" return user_prompt\n",
"\n",
"def messages_for(website):\n",
" return [\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": user_prompt_for(website)}\n",
" ]\n",
" \n",
"def summarize(url):\n",
" website = Website(url)\n",
" response = openai.chat.completions.create(\n",
" model=MODEL,\n",
" messages=messages_for(website),\n",
" stream=True\n",
" )\n",
" print(\"Streaming response:\")\n",
" accumulated_content = \"\" # Accumulate the content here\n",
" for chunk in response:\n",
" if chunk.choices[0].delta.content: # Check if there's content in the chunk\n",
" accumulated_content += chunk.choices[0].delta.content # Append the chunk to the accumulated content\n",
" clear_output(wait=True) # Clear the previous output\n",
" display(Markdown(accumulated_content)) # Display the updated content\n",
"\n",
"def display_summary():\n",
" url = str(input(\"Enter the URL of the website you want to summarize: \"))\n",
" summarize(url)\n",
"\n",
"display_summary()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "01c9e5e7-7510-43ef-bb9c-aa44b15d39a7",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,118 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import requests\n",
"from dotenv import load_dotenv\n",
"from bs4 import BeautifulSoup\n",
"from IPython.display import Markdown, display, clear_output\n",
"from openai import OpenAI\n",
"\n",
"load_dotenv(override=True)\n",
"\n",
"# Day 2 Exercise with Ollama API\n",
"api_key = os.getenv('OLLAMA_API_KEY')\n",
"base_url = os.getenv('OLLAMA_BASE_URL')\n",
"MODEL = \"llama3.2\"\n",
"\n",
"system_prompt = \"You are an assistant that analyzes the contents of a website \\\n",
"and provides a short summary, ignoring text that might be navigation related. \\\n",
"Respond in markdown.\"\n",
"\n",
"messages = [\n",
" {\"role\": \"system\", \"content\": \"You are a snarky assistant\"},\n",
" {\"role\": \"user\", \"content\": \"What is 2 + 2?\"}\n",
"]\n",
" \n",
"# Check the key\n",
"if not api_key:\n",
" print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n",
"elif not api_key.startswith(\"sk-proj-\"):\n",
" print(\"An API key was found, but it doesn't start sk-proj-; Looks like you are using DeepSeek (R1) model.\")\n",
"elif api_key.strip() != api_key:\n",
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n",
"else:\n",
" print(\"API key found and looks good so far!\")\n",
" \n",
"openai = OpenAI(api_key=api_key, base_url=base_url)\n",
"\n",
"headers = {\n",
" \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
"}\n",
"\n",
"class Website:\n",
"\n",
" def __init__(self, url):\n",
" \"\"\"\n",
" Create this Website object from the given url using the BeautifulSoup library\n",
" \"\"\"\n",
" self.url = url\n",
" response = requests.get(url, headers=headers)\n",
" soup = BeautifulSoup(response.content, 'html.parser')\n",
" self.title = soup.title.string if soup.title else \"No title found\"\n",
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
" irrelevant.decompose()\n",
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n",
" \n",
"def user_prompt_for(website):\n",
" user_prompt = f\"You are looking at a website titled {website.title}\"\n",
" user_prompt += \"\\nThe contents of this website is as follows; please provide a short summary of this website in markdown. If it includes news or announcements, then summarize these too.\\n\\n\"\n",
" user_prompt += website.text\n",
" return user_prompt\n",
"\n",
"def messages_for(website):\n",
" return [\n",
" {\"role\": \"system\", \"content\": system_prompt},\n",
" {\"role\": \"user\", \"content\": user_prompt_for(website)}\n",
" ]\n",
" \n",
"def summarize(url):\n",
" website = Website(url)\n",
" response = openai.chat.completions.create(\n",
" model=MODEL,\n",
" messages=messages_for(website),\n",
" stream=True\n",
" )\n",
" print(\"Streaming response:\")\n",
" accumulated_content = \"\" # Accumulate the content here\n",
" for chunk in response:\n",
" if chunk.choices[0].delta.content: # Check if there's content in the chunk\n",
" accumulated_content += chunk.choices[0].delta.content # Append the chunk to the accumulated content\n",
" clear_output(wait=True) # Clear the previous output\n",
" display(Markdown(accumulated_content)) # Display the updated content\n",
" \n",
"def display_summary():\n",
" url = str(input(\"Enter the URL of the website you want to summarize: \"))\n",
" summarize(url)\n",
"\n",
"display_summary()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.11"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@@ -203,6 +203,46 @@
"print(response.choices[0].message.content)"
]
},
{
"cell_type": "markdown",
"id": "bc7d1de3-e2ac-46ff-a302-3b4ba38c4c90",
"metadata": {},
"source": [
"## Also trying the amazing reasoning model DeepSeek\n",
"\n",
"Here we use the version of DeepSeek-reasoner that's been distilled to 1.5B. \n",
"This is actually a 1.5B variant of Qwen that has been fine-tuned using synthetic data generated by DeepSeek R1.\n",
"\n",
"Other sizes of DeepSeek are [here](https://ollama.com/library/deepseek-r1) all the way up to the full 671B parameter version, which would use up 404GB of your drive and is far too large for most!"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cf9eb44e-fe5b-47aa-b719-0bb63669ab3d",
"metadata": {},
"outputs": [],
"source": [
"!ollama pull deepseek-r1:1.5b"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1d3d554b-e00d-4c08-9300-45e073950a76",
"metadata": {},
"outputs": [],
"source": [
"# This may take a few minutes to run! You should then see a fascinating \"thinking\" trace inside <think> tags, followed by some decent definitions\n",
"\n",
"response = ollama_via_openai.chat.completions.create(\n",
" model=\"deepseek-r1:1.5b\",\n",
" messages=[{\"role\": \"user\", \"content\": \"Please give definitions of some core concepts behind LLMs: a neural network, attention and the transformer\"}]\n",
")\n",
"\n",
"print(response.choices[0].message.content)"
]
},
{
"cell_type": "markdown",
"id": "1622d9bb-5c68-4d4e-9ca4-b492c751f898",
@@ -216,7 +256,7 @@
{
"cell_type": "code",
"execution_count": null,
"id": "402d5686-4e76-4110-b65a-b3906c35c0a4",
"id": "6de38216-6d1c-48c4-877b-86d403f4e0f8",
"metadata": {},
"outputs": [],
"source": []

View File

@@ -334,7 +334,7 @@
"metadata": {},
"outputs": [],
"source": [
"create_brochure(\"HuggingFace\", \"https://huggingface.com\")"
"create_brochure(\"HuggingFace\", \"https://huggingface.co\")"
]
},
{

View File

@@ -27,7 +27,15 @@
"\n",
"Click in the cell below and press Shift+Return to run it. \n",
"If this gives you problems, then please try working through these instructions to address: \n",
"https://chatgpt.com/share/676e6e3b-db44-8012-abaa-b3cf62c83eb3"
"https://chatgpt.com/share/676e6e3b-db44-8012-abaa-b3cf62c83eb3\n",
"\n",
"I've also heard that you might have problems if you are using a work computer that's running security software zscaler.\n",
"\n",
"Some advice from students in this situation with zscaler:\n",
"\n",
"> In the anaconda prompt, this helped sometimes, although still got failures occasionally running code in Jupyter:\n",
"`conda config --set ssl_verify false` \n",
"Another thing that helped was to add `verify=False` anytime where there is `request.get(..)`, so `request.get(url, headers=headers)` becomes `request.get(url, headers=headers, verify=False)`"
]
},
{
@@ -99,7 +107,7 @@
" venv_name = os.path.basename(virtual_env)\n",
" print(f\"Environment Name: {venv_name}\")\n",
"\n",
"if conda_name != \"llms\" and virtual_env != \"llms\":\n",
"if conda_name != \"llms\" and venv_name != \"llms\" and venv_name != \"venv\":\n",
" print(\"Neither Anaconda nor Virtualenv seem to be activated with the expected name 'llms'\")\n",
" print(\"Did you run 'jupyter lab' from an activated environment with (llms) showing on the command line?\")\n",
" print(\"If in doubt, close down all jupyter lab, and follow Part 5 in the SETUP-PC or SETUP-mac guide.\")"
@@ -405,6 +413,14 @@
"from diagnostics import Diagnostics\n",
"Diagnostics().run()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e1955b9a-d344-4782-b448-2770d0edd90c",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {