From fe8344f62dae4513d3b448393d2eb6a6ddc03963 Mon Sep 17 00:00:00 2001
From: bepeace <72848781+BePeace@users.noreply.github.com>
Date: Wed, 14 May 2025 20:58:48 -0700
Subject: [PATCH] Day 2 work using py for ollama

- using url
- using library
- using openai
- using ollama to summarize website

---
 .../ag-w1d2-ollama-site-summary.py            | 55 +++++++++++++++++++
 .../ag-w1d2-use-local-ollama-library.py       | 32 +++++++++++
 .../ag-w1d2-use-local-ollama-url.py           | 22 ++++++++
 .../ag-w1d2-use-local-ollama-with-openai.py   | 23 ++++++++
 4 files changed, 132 insertions(+)
 create mode 100644 week1/community-contributions/ag-w1d2-ollama-site-summary.py
 create mode 100644 week1/community-contributions/ag-w1d2-use-local-ollama-library.py
 create mode 100644 week1/community-contributions/ag-w1d2-use-local-ollama-url.py
 create mode 100644 week1/community-contributions/ag-w1d2-use-local-ollama-with-openai.py

diff --git a/week1/community-contributions/ag-w1d2-ollama-site-summary.py b/week1/community-contributions/ag-w1d2-ollama-site-summary.py
new file mode 100644
index 0000000..bdc723c
--- /dev/null
+++ b/week1/community-contributions/ag-w1d2-ollama-site-summary.py
@@ -0,0 +1,55 @@
+import ollama
+import requests
+from bs4 import BeautifulSoup
+from IPython.display import Markdown, display
+
+MODEL = "llama3.2"
+
+# Headers and a class representing the website to summarize
+headers = {
+    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
+}
+class Website:
+    def __init__(self, url):
+        self.url = url
+        response = requests.get(url, headers=headers)
+        soup = BeautifulSoup(response.content, 'html.parser')
+        self.title = soup.title.string if soup.title else "No title found"
+        for irrelevant in soup.body(["script", "style", "img", "input"]):
+            irrelevant.decompose()
+        self.text = soup.body.get_text(separator="\n", strip=True)
+
+# Define prompts
+system_prompt = "You are an assistant that analyzes the contents of a website \
+and provides a short summary, ignoring text that might be navigation related. \
+Respond in markdown."
+
+def user_prompt_for(website):
+    user_prompt = f"You are looking at a website titled {website.title}"
+    user_prompt += "\nThe content of this website is as follows; \
+please provide a short summary of this website in markdown. \
+If it includes news or announcements, then summarize these too.\n\n"
+    user_prompt += website.text
+    return user_prompt
+
+# Prepare the messages list for the chat call
+def messages_for(website):
+    return [
+        {"role": "system", "content": system_prompt},
+        {"role": "user", "content": user_prompt_for(website)}
+    ]
+
+# Summarize a given website
+def summarize(url):
+    website = Website(url)
+    response = ollama.chat(model=MODEL, messages=messages_for(website))
+    return response['message']['content']
+
+# Display the summary in markdown format (and plain text for non-notebook runs)
+def display_summary(url):
+    summary = summarize(url)
+    display(Markdown(summary))
+    print(summary)
+
+url = "https://edwarddonner.com"
+display_summary(url)
\ No newline at end of file
diff --git a/week1/community-contributions/ag-w1d2-use-local-ollama-library.py b/week1/community-contributions/ag-w1d2-use-local-ollama-library.py
new file mode 100644
index 0000000..e911c82
--- /dev/null
+++ b/week1/community-contributions/ag-w1d2-use-local-ollama-library.py
@@ -0,0 +1,32 @@
+import ollama
+from IPython.display import Markdown, display
+
+MODEL = "llama3.2"
+
+# Create a messages list (note that the "system" role is not required)
+messages = [
+    {"role": "user", "content": "Describe some of the business applications of Generative AI"}
+]
+
+"""
+# Under the covers, ollama.chat() calls this API with the specified payload:
+
+OLLAMA_API = "http://localhost:11434/api/chat"
+payload = {
+    "model": MODEL,
+    "messages": messages,
+    "stream": False
+}
+response = requests.post(OLLAMA_API, json=payload, headers=HEADERS)
+
+"""
+response = ollama.chat(model=MODEL, messages=messages)
+#print(response['message']['content'])
+answer = response['message']['content']
+
+# Note that markdown will not render in VSCode, only in Jupyter.
+# To view the markdown in VSCode, save the output to a .md file and open it there.
+display(Markdown(answer))
+print(answer)
+
+
diff --git a/week1/community-contributions/ag-w1d2-use-local-ollama-url.py b/week1/community-contributions/ag-w1d2-use-local-ollama-url.py
new file mode 100644
index 0000000..b70dee5
--- /dev/null
+++ b/week1/community-contributions/ag-w1d2-use-local-ollama-url.py
@@ -0,0 +1,22 @@
+import ollama
+import requests
+from IPython.display import Markdown, display
+
+OLLAMA_API = "http://localhost:11434/api/chat"
+HEADERS = {"Content-Type": "application/json"}
+MODEL = "llama3.2"
+
+# Create a messages list (note that the "system" role is not required)
+messages = [
+    {"role": "user", "content": "Describe some of the business applications of Generative AI"}
+]
+
+payload = {
+    "model": MODEL,
+    "messages": messages,
+    "stream": False
+}
+
+response = requests.post(OLLAMA_API, json=payload, headers=HEADERS)
+print(response.json()['message']['content'])
+
diff --git a/week1/community-contributions/ag-w1d2-use-local-ollama-with-openai.py b/week1/community-contributions/ag-w1d2-use-local-ollama-with-openai.py
new file mode 100644
index 0000000..bcac160
--- /dev/null
+++ b/week1/community-contributions/ag-w1d2-use-local-ollama-with-openai.py
@@ -0,0 +1,23 @@
+from openai import OpenAI
+
+MODEL = "llama3.2"
+
+messages = [
+    {"role": "user", "content": "Describe some of the business applications of Generative AI"}
+]
+
+# The Python class OpenAI is simply code written by OpenAI engineers that
+# makes calls over the internet to an endpoint.
+ollama_via_openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')
+
+# When we call chat.completions.create(), this Python code just makes a web
+# request - by default to "https://api.openai.com/v1/chat/completions".
+# Code like this is known as a "client library" - it's just wrapper code that
+# runs on your machine to make web requests. The actual power of GPT is running
+# on OpenAI's cloud behind this API, not on your computer.
+response = ollama_via_openai.chat.completions.create(
+    model=MODEL,
+    messages=messages
+)
+
+print(response.choices[0].message.content)
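-- 
A possible streaming variant (a sketch, not part of the commit above). It
reuses MODEL and messages from the scripts in this patch and assumes the same
local Ollama server with llama3.2 pulled; where the scripts above set
"stream": False, passing the ollama library's stream=True flag yields the
reply incrementally:

import ollama

MODEL = "llama3.2"
messages = [
    {"role": "user", "content": "Describe some of the business applications of Generative AI"}
]

# With stream=True, ollama.chat() returns an iterator of partial responses;
# each chunk carries the next piece of the assistant's message.
for chunk in ollama.chat(model=MODEL, messages=messages, stream=True):
    print(chunk['message']['content'], end='', flush=True)
print()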