Merge pull request #385 from BePeace/main
Day 2 work using Python for Ollama
week1/community-contributions/ag-w1d2-ollama-site-summary (new file, +55)
@@ -0,0 +1,55 @@
import ollama
import requests
from bs4 import BeautifulSoup
from IPython.display import Markdown, display

MODEL = "llama3.2"

# Headers and a class for the website to summarize
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
}

class Website:
    def __init__(self, url):
        self.url = url
        response = requests.get(url, headers=headers)
        soup = BeautifulSoup(response.content, 'html.parser')
        self.title = soup.title.string if soup.title else "No title found"
        for irrelevant in soup.body(["script", "style", "img", "input"]):
            irrelevant.decompose()
        self.text = soup.body.get_text(separator="\n", strip=True)
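
One possible hardening of the fetch above, sketched under the assumption that the same headers dict is in scope (the 10-second timeout is an arbitrary choice):

# Sketch: a more defensive fetch for Website.__init__ (timeout value is an assumption)
response = requests.get(url, headers=headers, timeout=10)
response.raise_for_status()  # raise on 4xx/5xx rather than parsing an error page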

# Define prompts
system_prompt = "You are an assistant that analyzes the contents of a website \
and provides a short summary, ignoring text that might be navigation related. \
Respond in markdown."

def user_prompt_for(website):
    user_prompt = f"You are looking at a website titled {website.title}"
    user_prompt += "\nThe content of this website is as follows; \
please provide a short summary of this website in markdown. \
If it includes news or announcements, then summarize these too.\n\n"
    user_prompt += website.text
    return user_prompt

# Prepare the messages list for the Ollama chat call
def messages_for(website):
    return [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt_for(website)}
    ]

# Summarize a given website
def summarize(url):
    website = Website(url)
    response = ollama.chat(model=MODEL, messages=messages_for(website))
    return response['message']['content']

# Display the summary in markdown format (and as plain text for non-notebook runs)
def display_summary(url):
    summary = summarize(url)
    display(Markdown(summary))
    print(summary)

url = "https://edwarddonner.com"
display_summary(url)
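
For long pages, a streaming variant may feel more responsive; this is a sketch using the ollama package's documented stream=True option, which yields response chunks:

# Sketch: stream the summary as it is generated instead of waiting for the full reply
def stream_summary(url):
    website = Website(url)
    for chunk in ollama.chat(model=MODEL, messages=messages_for(website), stream=True):
        print(chunk['message']['content'], end='', flush=True)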
@@ -0,0 +1,32 @@
import ollama
from IPython.display import Markdown, display

MODEL = "llama3.2"

# Create a messages list (note that the "system" role is not required)
messages = [
    {"role": "user", "content": "Describe some of the business applications of Generative AI"}
]

"""
# Under the covers, ollama.chat calls this API with the specified payload:

OLLAMA_API = "http://localhost:11434/api/chat"
payload = {
    "model": MODEL,
    "messages": messages,
    "stream": False
}
response = requests.post(OLLAMA_API, json=payload, headers=HEADERS)
"""

response = ollama.chat(model=MODEL, messages=messages)
answer = response['message']['content']

# Note that markdown will not render in VSCode, only in Jupyter.
# To view the markdown in VSCode, save the output to a .md file and open it there
# (see the sketch below).
display(Markdown(answer))
print(answer)
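
Following the comment above, a minimal sketch for saving the answer to a .md file (the filename is an arbitrary assumption):

# Sketch: persist the markdown answer so it can be previewed in VSCode
with open("answer.md", "w", encoding="utf-8") as f:
    f.write(answer)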
@@ -0,0 +1,22 @@
import requests

OLLAMA_API = "http://localhost:11434/api/chat"
HEADERS = {"Content-Type": "application/json"}
MODEL = "llama3.2"

# Create a messages list (note that the "system" role is not required)
messages = [
    {"role": "user", "content": "Describe some of the business applications of Generative AI"}
]

payload = {
    "model": MODEL,
    "messages": messages,
    "stream": False
}

response = requests.post(OLLAMA_API, json=payload, headers=HEADERS)
print(response.json()['message']['content'])
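For single-turn prompts, Ollama also exposes a /api/generate endpoint on the same server; a sketch of that documented alternative:

# Sketch: the one-shot generate endpoint, which takes a plain prompt string
gen_payload = {
    "model": MODEL,
    "prompt": "Describe some of the business applications of Generative AI",
    "stream": False
}
gen_response = requests.post("http://localhost:11434/api/generate", json=gen_payload, headers=HEADERS)
print(gen_response.json()['response'])
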
@@ -0,0 +1,23 @@
from openai import OpenAI

MODEL = "llama3.2"

messages = [
    {"role": "user", "content": "Describe some of the business applications of Generative AI"}
]

# The Python class OpenAI is simply code written by OpenAI engineers that
# makes calls over the internet to an endpoint.
ollama_via_openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')

# When we call openai.chat.completions.create(), this Python code just makes
# a web request to "https://api.openai.com/v1/chat/completions".
# Code like this is known as a "client library" - it's just wrapper code that
# runs on your machine to make web requests. The actual power of GPT is running
# on OpenAI's cloud behind this API, not on your computer.
# Here, base_url points the client at the local Ollama server instead.
response = ollama_via_openai.chat.completions.create(
    model=MODEL,
    messages=messages
)

print(response.choices[0].message.content)
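The same OpenAI-compatible client can stream; a sketch using the standard stream=True parameter of chat.completions.create:

# Sketch: stream tokens from Ollama through the OpenAI client interface
stream = ollama_via_openai.chat.completions.create(model=MODEL, messages=messages, stream=True)
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end='', flush=True)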