diff --git a/week1/community-contributions/companybrochure_ollama_sharathir.ipynb b/week1/community-contributions/companybrochure_ollama_sharathir.ipynb
new file mode 100644
index 0000000..7406ce6
--- /dev/null
+++ b/week1/community-contributions/companybrochure_ollama_sharathir.ipynb
@@ -0,0 +1,439 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "1054e1c9-142a-4059-bfe6-f9be6073fb72",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# imports\n",
+ "# If these fail, please check you're running from an 'activated' environment with (llms) in the command prompt\n",
+ "\n",
+ "import os\n",
+ "import requests\n",
+ "import json\n",
+ "from typing import List\n",
+ "from dotenv import load_dotenv\n",
+ "from bs4 import BeautifulSoup\n",
+ "from IPython.display import Markdown, display, update_display\n",
+ "from openai import OpenAI\n",
+ "import ollama"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "9e59a6ba-d7e1-4834-b3ff-86321e354ade",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "load_dotenv(override=True)\n",
+ "MODEL = \"llama3.2\"\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0ea82fa1-0986-4749-9d7e-d6a23dd88722",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# A class to represent a Webpage\n",
+ "\n",
+ "# Some websites need you to use proper headers when fetching them:\n",
+ "headers = {\n",
+ " \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
+ "}\n",
+ "\n",
+ "class Website:\n",
+ " \"\"\"\n",
+ " A utility class to represent a Website that we have scraped, now with links\n",
+ " \"\"\"\n",
+ "\n",
+ " def __init__(self, url):\n",
+ " self.url = url\n",
+ " response = requests.get(url, headers=headers)\n",
+ " self.body = response.content\n",
+ " soup = BeautifulSoup(self.body, 'html.parser')\n",
+ " self.title = soup.title.string if soup.title else \"No title found\"\n",
+ " if soup.body:\n",
+ " for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
+ " irrelevant.decompose()\n",
+ " self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n",
+ " else:\n",
+ " self.text = \"\"\n",
+ " links = [link.get('href') for link in soup.find_all('a')]\n",
+ " self.links = [link for link in links if link]\n",
+ "\n",
+ " def get_contents(self):\n",
+ " return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2351a604-c280-48fb-84d2-272512535414",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ed = Website(\"https://edwarddonner.com\")\n",
+ "ed.links"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "e2dd2206-0343-4bf2-8037-de587ff6fe10",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "link_system_prompt = \"You are provided with a list of links found on a webpage. \\\n",
+ "You are able to decide which of the links would be most relevant to include in a brochure about the company, \\\n",
+ "such as links to an About page, or a Company page, or Careers/Jobs pages.\\n\"\n",
+ "link_system_prompt += \"You should respond in JSON as in this example:\"\n",
+ "link_system_prompt += \"\"\"\n",
+ "{\n",
+ " \"links\": [\n",
+ " {\"type\": \"about page\", \"url\": \"https://full.url/goes/here/about\"},\n",
+ "    {\"type\": \"careers page\", \"url\": \"https://another.full.url/careers\"}\n",
+ " ]\n",
+ "}\n",
+ "\"\"\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d891f202-352c-4f93-97c4-ab773daacc60",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(link_system_prompt)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "89be55aa-7236-4d3c-8459-b9c992cd68f5",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def get_links_user_prompt(website):\n",
+ " user_prompt = f\"Here is the list of links on the website of {website.url} - \"\n",
+ " user_prompt += \"please decide which of these are relevant web links for a brochure about the company, respond with the full https URL in JSON format. \\\n",
+ "Do not include Terms of Service, Privacy, email links.\\n\"\n",
+ " user_prompt += \"Links (some might be relative links):\\n\"\n",
+ " user_prompt += \"\\n\".join(website.links)\n",
+ " return user_prompt"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ec4ed9d2-9b54-4d33-adba-328b47cdde1a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(get_links_user_prompt(ed))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "228cdeea-5c05-45a4-8afe-e6ef8f02810a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import json\n",
+ "import logging\n",
+ "import pprint\n",
+ "#pprint.pprint(response)\n",
+ "\n",
+ "import re\n",
+ "\n",
+ "def extract_json_from_text(text):\n",
+ " \"\"\"\n",
+ " Extract the first JSON object found in the text.\n",
+ " \"\"\"\n",
+ " match = re.search(r'\\{.*\\}', text, re.DOTALL)\n",
+ " if match:\n",
+ " return match.group(0)\n",
+ " return None\n",
+ "\n",
+ "def get_links(url):\n",
+ " website = Website(url)\n",
+ " \n",
+ " try:\n",
+ " response = ollama.chat(\n",
+ "            model=MODEL,\n",
+ " messages=[\n",
+ " {\"role\": \"system\", \"content\": link_system_prompt},\n",
+ " {\"role\": \"user\", \"content\": get_links_user_prompt(website)}\n",
+ " ]\n",
+ " )\n",
+ "\n",
+ " result = response['message']['content']\n",
+ " \n",
+ " # Log the raw result for debugging\n",
+ " logging.debug(f\"Raw result: {result}\")\n",
+ "\n",
+ " \n",
+ " if isinstance(result, str):\n",
+ " if not result.strip():\n",
+ " logging.warning(\"Result string is empty.\")\n",
+ " return None\n",
+ "\n",
+ " json_text = extract_json_from_text(result)\n",
+ " if not json_text:\n",
+ " logging.warning(\"No JSON object found in the result string.\")\n",
+ " return None\n",
+ "\n",
+ " logging.debug(f\"Extracted JSON string: {repr(json_text)}\")\n",
+ "\n",
+ " try:\n",
+ " return json.loads(json_text)\n",
+ " except json.JSONDecodeError as e:\n",
+ " logging.error(f\"JSON decoding error: {e}\")\n",
+ " logging.debug(f\"Problematic JSON string: {repr(json_text)}\")\n",
+ " return None\n",
+ " \n",
+ " except Exception as e:\n",
+ " logging.exception(\"An unexpected error occurred in get_links.\")\n",
+ " return None\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "3ce0b67e-8483-418a-bcf3-836910381e2d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "get_links(\"https://huggingface.co\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "aeb09b75-33ea-4638-bc01-6c3d738f0060",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import requests\n",
+ "\n",
+ "def is_url_reachable(url, timeout=5):\n",
+ " try:\n",
+ " response = requests.head(url, timeout=timeout)\n",
+ " return response.status_code < 400\n",
+ " except requests.RequestException:\n",
+ " return False"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "5f2f9cc5-de4f-43d8-a803-97c11c7e91c2",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def get_all_details(url):\n",
+ "    if not is_url_reachable(url, 5):\n",
+ "        return \"\"\n",
+ "    result = \"Landing page:\\n\" + Website(url).get_contents()\n",
+ "    links = get_links(url)\n",
+ "    print(\"Found links:\", links)\n",
+ "    for link in (links or {}).get(\"links\", []):\n",
+ "        result += f\"\\n\\n{link['type']}\\n\"\n",
+ "        result += Website(link[\"url\"]).get_contents()\n",
+ "    return result"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "cd405ade-6b44-45c5-aeb4-724cf6cce8f6",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(get_all_details(\"https://huggingface.co\"))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "8361b67c-4063-499a-b0a7-583971dd6c48",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "system_prompt = \"You are an assistant that analyzes the contents of several relevant pages from a company website \\\n",
+ "and creates a short brochure about the company for prospective customers, investors and recruits. Respond in markdown.\\\n",
+ "Include details of company culture, customers and careers/jobs if you have the information.\"\n",
+ "\n",
+ "# Or uncomment the lines below for a more humorous brochure - this demonstrates how easy it is to incorporate 'tone':\n",
+ "\n",
+ "# system_prompt = \"You are an assistant that analyzes the contents of several relevant pages from a company website \\\n",
+ "# and creates a short humorous, entertaining, jokey brochure about the company for prospective customers, investors and recruits. Respond in markdown.\\\n",
+ "# Include details of company culture, customers and careers/jobs if you have the information.\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0acd22ba-1dd9-40e8-b33d-1d6b88b5e4e3",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def get_brochure_user_prompt(company_name, url):\n",
+ "    try:\n",
+ "        if not is_url_reachable(url):\n",
+ "            return \"\"\n",
+ "        # Truncate combined page text so the prompt stays small\n",
+ "        web_content = get_all_details(url)[:5000]\n",
+ "        user_prompt = f\"You are looking at a company called: {company_name}\\n\"\n",
+ "        user_prompt += f\"Use the name {company_name} clearly in the brochure.\\n\"\n",
+ "        user_prompt += \"Here are the contents of its landing page and other relevant pages; use this information to build a short brochure of the company in markdown.\\n\"\n",
+ "        user_prompt += f\"\\n\\nReminder: the company name is {company_name}.\"\n",
+ "        user_prompt += web_content\n",
+ "        return user_prompt\n",
+ "    except requests.RequestException:\n",
+ "        return \"\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "89b8b16c-0914-440e-8a1b-54959b0ae7d0",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "get_brochure_user_prompt(\"HuggingFace\", \"https://huggingface.co\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "77528cd7-2460-4768-8d8c-a849f19f6381",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# NOTE(review): is_url_reachable1 below duplicates is_url_reachable defined earlier - consider removing\n",
+ "\n",
+ "def is_url_reachable1(url, timeout=5):\n",
+ " try:\n",
+ " response = requests.head(url, timeout=timeout)\n",
+ " return response.status_code < 400\n",
+ " except requests.RequestException:\n",
+ " return False"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "b3f37ce1-ad44-46ff-8f18-74b537acaa9b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def create_brochure(company_name, url):\n",
+ "    try:\n",
+ "        if not is_url_reachable(url, 5):\n",
+ "            return\n",
+ "        response = ollama.chat(\n",
+ "            model=MODEL,\n",
+ "            messages=[\n",
+ "                {\"role\": \"system\", \"content\": system_prompt},\n",
+ "                {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n",
+ "            ]\n",
+ "        )\n",
+ "        result = response['message']['content']\n",
+ "        display(Markdown(result))\n",
+ "    except requests.RequestException:\n",
+ "        return False"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "1e8a5ac2-b7e2-4c98-9615-5baba00e2dd0",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "create_brochure(\"HuggingFace\", \"https://huggingface.co\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "6ca16d59-1be8-44ef-8590-f5390e4debef",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def stream_brochure(company_name, url):\n",
+ " if not is_url_reachable(url):\n",
+ " print(\"❌ URL not reachable\")\n",
+ " return\n",
+ " try:\n",
+ " #if is_url_reachable(url,5):\n",
+ " stream = ollama.chat(\n",
+ "            model=MODEL,\n",
+ " messages=[\n",
+ " {\"role\": \"system\", \"content\": system_prompt},\n",
+ " {\"role\": \"user\", \"content\": get_brochure_user_prompt(company_name, url)}\n",
+ " ],\n",
+ " stream=True\n",
+ " )\n",
+ " \n",
+ " #result = response['message']['content']\n",
+ " # display(Markdown(result))\n",
+ " except requests.RequestException:\n",
+ " return False\n",
+ " \n",
+ " response = \"\"\n",
+ " display_handle = display(Markdown(\"\"), display_id=True)\n",
+ " #for chunk in stream:\n",
+ " #response += chunk.choices[0].delta.content or ''\n",
+ " #response += chunk['message']['content'] or ''\n",
+ " #response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n",
+ " #update_display(Markdown(response), display_id=display_handle.display_id)\n",
+ "\n",
+ " for chunk in stream:\n",
+ " content = chunk.get('message', {}).get('content', '')\n",
+ " if content:\n",
+ " response += content.replace(\"```\", \"\")\n",
+ " update_display(Markdown(response), display_id=display_handle.display_id)\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0f156311-cc32-4bce-9645-7d10a50eae06",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "stream_brochure(\"HuggingFace\", \"https://huggingface.co\")"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.12"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/week2/community-contributions/Wk2_Day3_sharathir_ollama.ipynb b/week2/community-contributions/Wk2_Day3_sharathir_ollama.ipynb
new file mode 100644
index 0000000..3261f19
--- /dev/null
+++ b/week2/community-contributions/Wk2_Day3_sharathir_ollama.ipynb
@@ -0,0 +1,191 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0f1f62c4-ed03-4401-88d5-3445464a8421",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "from dotenv import load_dotenv\n",
+ "from openai import OpenAI\n",
+ "import gradio as gr\n",
+ "import ollama"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f8103014-012c-4648-9111-75993ce4d46a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "system_message = \"You are a helpful assistant\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "a8fca0b4-9db7-4f74-865b-503ee19a832f",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def chat(message, history):\n",
+ " messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n",
+ "\n",
+ " stream = ollama.chat(model=\"llama3.2\", messages=messages, stream=True)\n",
+ "\n",
+ " result = \"\"\n",
+ " for chunk in stream:\n",
+ " result += chunk['message']['content'] or \"\"\n",
+ " yield result"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "61de58a0-5972-4aca-93ad-a4bd3878a50b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "gr.ChatInterface(fn=chat, type=\"messages\").launch()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d448f8c5-2bb5-448d-8ae4-894b905214a7",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "system_message = \"You are a helpful assistant in a clothes store. You should try to gently encourage \\\n",
+ "the customer to try items that are on sale. Hats are 60% off, and most other items are 50% off. \\\n",
+ "For example, if the customer says 'I'm looking to buy a hat', \\\n",
+ "you could reply something like, 'Wonderful - we have lots of hats - including several that are part of our sales event.'\\\n",
+ "Encourage the customer to buy hats if they are unsure what to get.\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "465968cf-aa7f-46b2-857f-a6819f2b14ea",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "gr.ChatInterface(fn=chat, type=\"messages\").launch()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "873ab86b-ecb8-4f68-b520-50b29b7fd7be",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "system_message += \"\\nIf the customer asks for shoes, you should respond that shoes are not on sale today, \\\n",
+ "but remind the customer to look at hats!\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c63ced30-1109-4409-b255-1f72f8c6172f",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "gr.ChatInterface(fn=chat, type=\"messages\").launch()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 25,
+ "id": "054f1406-c240-4849-8618-064985e76d86",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def chat(message, history):\n",
+ "    global system_message\n",
+ "    belt_note = \" The store does not sell belts; if you are asked for belts, be sure to point out other items on sale.\"\n",
+ "    # Append the note only once, otherwise the prompt grows on every 'belt' mention\n",
+ "    if 'belt' in message and belt_note not in system_message:\n",
+ "        system_message += belt_note\n",
+ "    messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n",
+ "\n",
+ "    stream = ollama.chat(model=\"llama3.2\", messages=messages, stream=True)\n",
+ "    result = \"\"\n",
+ "    for chunk in stream:\n",
+ "        result += chunk['message']['content'] or \"\"\n",
+ "        yield result"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 26,
+ "id": "b1086d8a-5b5a-4b59-9a61-e76078f930cc",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "* Running on local URL: http://127.0.0.1:7869\n",
+ "* To create a public link, set `share=True` in `launch()`.\n"
+ ]
+ },
+ {
+ "data": {
+ "text/html": [
+ "<div><iframe src=\"http://127.0.0.1:7869/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe>\n",
+ "</div>\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": []
+ },
+ "execution_count": 26,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "gr.ChatInterface(fn=chat, type=\"messages\").launch()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c558ab19-b907-4b0c-8a4f-37c8b731f9b5",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.12"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/week2/community-contributions/wk2_day1_ollama_conversation_between_two_models.ipynb b/week2/community-contributions/wk2_day1_ollama_conversation_between_two_models.ipynb
new file mode 100644
index 0000000..68505e0
--- /dev/null
+++ b/week2/community-contributions/wk2_day1_ollama_conversation_between_two_models.ipynb
@@ -0,0 +1,170 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "eff19e8b-000a-4327-b8fb-8fd8a3caaef5",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "from dotenv import load_dotenv\n",
+ "from openai import OpenAI\n",
+ "import anthropic\n",
+ "from IPython.display import Markdown, display, update_display\n",
+ "import ollama"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "8278c99b-d748-42e5-a991-690a791ed081",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Let's make a conversation between llama3.2 and deepseek-r1, both running locally via Ollama\n",
+ "# Local models are free to run, so the cost is zero\n",
+ "\n",
+ "llama_model = \"llama3.2\"\n",
+ "deepseek_model = \"deepseek-r1\"\n",
+ "\n",
+ "llama_system = \"You are a chatbot who is very argumentative; \\\n",
+ "you disagree with anything in the conversation and you challenge everything, in a snarky way.\"\n",
+ "\n",
+ "deepseek_system = \"You are a very polite, courteous chatbot. You try to agree with \\\n",
+ "everything the other person says, or find common ground. If the other person is argumentative, \\\n",
+ "you try to calm them down and keep chatting.\"\n",
+ "\n",
+ "llama_messages = [\"Hi there\"]\n",
+ "deepseek_messages = [\"Hi\"]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "49523e56-0de8-4014-85d5-8aab438d2075",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def call_llama():\n",
+ " messages = [{\"role\": \"system\", \"content\": llama_system}]\n",
+ " for llama, deepseek in zip(llama_messages, deepseek_messages):\n",
+ " messages.append({\"role\": \"assistant\", \"content\": llama})\n",
+ " messages.append({\"role\": \"user\", \"content\": deepseek})\n",
+ " completion = ollama.chat(\n",
+ " model=llama_model,\n",
+ " messages=messages\n",
+ " )\n",
+ " return completion['message']['content']"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "785240ce-e704-44ff-90cb-e5c0476454a4",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "call_llama()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "cdba39e3-5543-4657-bc3a-259f586ba392",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def call_deepseek():\n",
+ "    # Start with the system persona, then replay the alternating transcript\n",
+ "    messages = [{\"role\": \"system\", \"content\": deepseek_system}]\n",
+ "    for llama, deepseek in zip(llama_messages, deepseek_messages):\n",
+ "        messages.append({\"role\": \"user\", \"content\": llama})\n",
+ "        messages.append({\"role\": \"assistant\", \"content\": deepseek})\n",
+ "    # The newest llama message has no deepseek reply yet\n",
+ "    messages.append({\"role\": \"user\", \"content\": llama_messages[-1]})\n",
+ "    message = ollama.chat(\n",
+ "        model=deepseek_model,\n",
+ "        options={\n",
+ "            # \"num_predict\" is Ollama's option for limiting output tokens\n",
+ "            \"num_predict\": 500\n",
+ "        },\n",
+ "        messages=messages\n",
+ "    )\n",
+ "    return message['message']['content']"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "641df7ac-625c-41fa-b780-3130eef93a85",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "call_deepseek()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "4b33b98e-8d17-45e8-b2a9-a070dc0a6780",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "call_llama()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "47912582-51fe-401c-b4ad-12483068adea",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "llama_messages = [\"Hi there\"]\n",
+ "deepseek_messages = [\"Hi\"]\n",
+ "\n",
+ "print(f\"Llama:\\n{llama_messages[0]}\\n\")\n",
+ "print(f\"Deepseek:\\n{deepseek_messages[0]}\\n\")\n",
+ "\n",
+ "for i in range(5):\n",
+ " llama_next = call_llama()\n",
+ " print(f\"Llama:\\n{llama_next}\\n\")\n",
+ " llama_messages.append(llama_next)\n",
+ " \n",
+ " deepseek_next = call_deepseek()\n",
+ " print(f\"Deepseek:\\n{deepseek_next}\\n\")\n",
+ " deepseek_messages.append(deepseek_next)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f3c41b0c-4358-4d84-a479-6409fa331119",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.12"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/week2/community-contributions/wk2_day1_ollama_telling_joke_sharathir.ipynb b/week2/community-contributions/wk2_day1_ollama_telling_joke_sharathir.ipynb
new file mode 100644
index 0000000..6207acd
--- /dev/null
+++ b/week2/community-contributions/wk2_day1_ollama_telling_joke_sharathir.ipynb
@@ -0,0 +1,148 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d768c9b1-c5a7-417a-9fac-5fcbd6944fe6",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "from dotenv import load_dotenv\n",
+ "from openai import OpenAI\n",
+ "import anthropic\n",
+ "from IPython.display import Markdown, display, update_display\n",
+ "import ollama"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "80a4e740-75d0-4272-b02e-0b77b0a143ae",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "system_message = \"You are an assistant that is great at telling jokes\"\n",
+ "user_prompt = \"Tell a light-hearted joke for an audience of Data Scientists\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f2ef28e5-5073-4065-b066-387181df063a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "prompts = [\n",
+ " {\"role\": \"system\", \"content\": system_message},\n",
+ " {\"role\": \"user\", \"content\": user_prompt}\n",
+ " ]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d54e910a-cdbf-49cb-9924-265d9845d622",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Direct display without streaming\n",
+ "response = ollama.chat(\n",
+ " model=\"llama3.2\",\n",
+ " messages=prompts,\n",
+ " options={\n",
+ " \"temperature\": 0.7\n",
+ " }\n",
+ " \n",
+ " )\n",
+ "result = response['message']['content']\n",
+ "display(Markdown(result))\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "47dd7965-fdfc-4472-b2f6-c98f755964f1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#This is with streaming \n",
+ "stream = ollama.chat(\n",
+ " model=\"llama3.2\",\n",
+ " messages=prompts,\n",
+ " stream=True\n",
+ " )\n",
+ "response = \"\"\n",
+ "display_handle = display(Markdown(\"\"), display_id=True)\n",
+ "for chunk in stream:\n",
+ " content = chunk.get('message', {}).get('content', '')\n",
+ " if content:\n",
+ " response += content.replace(\"```\", \"\")\n",
+ " update_display(Markdown(response), display_id=display_handle.display_id)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ef13e3ae-bde7-4e3a-9fcd-0a9bfd1caef0",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Direct display without streaming, using deepseek-r1\n",
+ "response = ollama.chat(\n",
+ " model=\"deepseek-r1\",\n",
+ " messages=prompts\n",
+ " \n",
+ " )\n",
+ "result = response['message']['content']\n",
+ "display(Markdown(result))\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ddc4fe91-3d5b-4d45-83bf-f349597c672c",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Direct display without streaming, using qwen3\n",
+ "response = ollama.chat(\n",
+ " model=\"qwen3\",\n",
+ " messages=prompts\n",
+ " \n",
+ " )\n",
+ "result = response['message']['content']\n",
+ "display(Markdown(result))\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2beb6731-41e3-42a4-a8d3-5f0ef644f2f3",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.12"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/week2/community-contributions/wk2_day2_sharathir.ipynb b/week2/community-contributions/wk2_day2_sharathir.ipynb
new file mode 100644
index 0000000..09f1d92
--- /dev/null
+++ b/week2/community-contributions/wk2_day2_sharathir.ipynb
@@ -0,0 +1,347 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "05617f71-449f-42c5-905c-f080d61520ec",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import gradio as gr\n",
+ "def greet(name):\n",
+ " return \"Hello \" + name + \"!\"\n",
+ "def shout(name):\n",
+ " return name.upper()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c57765d7-5d69-4332-be71-2800296ca8ed",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# demo = gr.Interface(fn=shout, inputs=gr.Textbox(), outputs=gr.Textbox())  # this works too\n",
+ "demo = gr.Interface(fn=greet, inputs=\"textbox\", outputs=\"textbox\",allow_flagging=\"never\")\n",
+ "demo.launch()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "abbc237a-8da2-4993-b350-8f8a7d807242",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "import requests\n",
+ "from bs4 import BeautifulSoup\n",
+ "from typing import List\n",
+ "from dotenv import load_dotenv\n",
+ "from openai import OpenAI\n",
+ "import ollama"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "9f021005-2a39-42ec-b671-b24babd0ef1a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "system_message = \"You are a helpful assistant\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d1677645-4166-4d77-8567-cae77120f1c3",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def message_llama(prompt):\n",
+ " messages = [\n",
+ " {\"role\": \"system\", \"content\": system_message},\n",
+ " {\"role\": \"user\", \"content\": prompt}\n",
+ " ]\n",
+ " completion = ollama.chat(\n",
+ " model='llama3.2',\n",
+ " messages=messages,\n",
+ " )\n",
+ " return completion['message']['content']"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "33295d15-f4d2-4588-9400-3c1e3c6492f2",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "message_llama(\"what is the date today\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "38e2594e-6a70-4832-b601-60a6a0d4d671",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def stream_llama(prompt):\n",
+ " messages = [\n",
+ " {\"role\": \"system\", \"content\": system_message},\n",
+ " {\"role\": \"user\", \"content\": prompt}\n",
+ " ]\n",
+ " stream = ollama.chat(\n",
+ " model='llama3.2',\n",
+ " messages=messages,\n",
+ " stream=True\n",
+ " )\n",
+ " result = \"\"\n",
+ " for chunk in stream:\n",
+ " result += chunk['message']['content'] or \"\"\n",
+ " yield result\n",
+ " "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "e0ebf588-3d69-4012-9719-23d11fbbf4f5",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def stream_deepseek(prompt):\n",
+ " messages = [\n",
+ " {\"role\": \"system\", \"content\": system_message},\n",
+ " {\"role\": \"user\", \"content\": prompt}\n",
+ " ]\n",
+ " stream = ollama.chat(\n",
+ " model='deepseek-r1',\n",
+ " messages=messages,\n",
+ " stream=True\n",
+ " )\n",
+ " result = \"\"\n",
+ " for chunk in stream:\n",
+ " result += chunk['message']['content'] or \"\"\n",
+ " yield result"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "7db5aa24-b608-489a-ba26-1a4b627658e2",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def stream_qwen3(prompt):\n",
+ " messages = [\n",
+ " {\"role\": \"system\", \"content\": system_message},\n",
+ " {\"role\": \"user\", \"content\": prompt}\n",
+ " ]\n",
+ " stream = ollama.chat(\n",
+ " model='qwen3',\n",
+ " messages=messages,\n",
+ " stream=True\n",
+ " )\n",
+ " result = \"\"\n",
+ " for chunk in stream:\n",
+ " result += chunk['message']['content'] or \"\"\n",
+ " yield result"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d37b5df8-b281-4096-bdc7-5c6a1872cea7",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def stream_model(prompt, model):\n",
+ " if model==\"llama3.2\":\n",
+ " result = stream_llama(prompt)\n",
+ " elif model==\"deepseek-r1\":\n",
+ " result = stream_deepseek(prompt)\n",
+ " else:\n",
+ " raise ValueError(\"Unknown model\")\n",
+ " yield from result"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "eb408edc-6a83-4725-9fb9-1b95ff0c9ed0",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "gr.Interface(fn=stream_model, inputs=[gr.Textbox(label=\"Your Message\"),gr.Dropdown([\"llama3.2\", \"deepseek-r1\"], label=\"Select model\", value=\"llama3.2\")], outputs=[gr.Markdown(label=\"Response\")],flagging_mode=\"never\").launch()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "dc7c3aa0-693a-43a0-8f5b-b07c66bb6733",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "gr.Interface(fn=stream_llama, inputs=[gr.Textbox(label=\"Your Message\")], outputs=[gr.Markdown(label=\"Response\")],flagging_mode=\"never\").launch()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 38,
+ "id": "e45e9b56-5c2f-4b17-bbf4-5691ce35ff15",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class Website:\n",
+ " url: str\n",
+ " title: str\n",
+ " text: str\n",
+ "\n",
+ " def __init__(self, url):\n",
+ " self.url = url\n",
+ " response = requests.get(url)\n",
+ " self.body = response.content\n",
+ " soup = BeautifulSoup(self.body, 'html.parser')\n",
+ " self.title = soup.title.string if soup.title else \"No title found\"\n",
+ " for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
+ " irrelevant.decompose()\n",
+ " self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n",
+ "\n",
+ " def get_contents(self):\n",
+ " return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 55,
+ "id": "f9fcf30e-09c7-4f90-8bf9-8cc588ede95c",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "system_message = \"You are an assistant that analyzes the contents of a company website landing page \\\n",
+ "and creates a short brochure about the company for prospective customers, investors and recruits. Respond in markdown.\"\n",
+ "# For Fun\n",
+ "tone_description_fun = \"\"\"\n",
+ " The tone should be:\n",
+ " - **Fun and Playful:** Inject humor, use lighthearted language, and maintain an upbeat vibe.\n",
+ " - **Energetic:** Use active voice, strong verbs, and occasional exclamation points.\n",
+ " - **Approachable:** Write as if speaking to a friend, using slightly informal language and contractions.\n",
+ " - **Creative:** Think outside the box for descriptions and calls to action.\n",
+ " - Avoid sounding childish or overly silly.\n",
+ "\"\"\"\n",
+ "\n",
+ "# For Aggression\n",
+ "tone_description_aggression = \"\"\"\n",
+ " The tone should be:\n",
+ " - **Bold and Assertive:** Use strong, direct language that conveys confidence and power.\n",
+ " - **Challenging:** Pose questions that make the reader reconsider their current solutions.\n",
+ " - **Urgent:** Imply a need for immediate action and emphasize competitive advantages.\n",
+ " - **Direct and Punchy:** Employ short, impactful sentences and strong calls to action.\n",
+ " - **Dominant:** Position the company as a leader and a force to be reckoned with.\n",
+ " - Avoid being rude, offensive, or overly hostile. Focus on competitive intensity.\n",
+ "\"\"\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 66,
+ "id": "83dd8aec-f74f-452b-90cc-3ad5bc903037",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def stream_brochure(company_name, url, model, tone):\n",
+ " prompt = f\"Please generate a company brochure for {company_name} that embodies the following tone and style guidelines: {tone}. Here is their landing page:\\n\"\n",
+ " prompt += Website(url).get_contents()\n",
+ " if model==\"llama\":\n",
+ " result = stream_llama(prompt)\n",
+ " elif model==\"deepseek\":\n",
+ " result = stream_deepseek(prompt)\n",
+ " else:\n",
+ " raise ValueError(\"Unknown model\")\n",
+ " yield from result"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 67,
+ "id": "ef1a246f-a3f7-457e-a85c-2076b407f52a",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "* Running on local URL: http://127.0.0.1:7890\n",
+ "* To create a public link, set `share=True` in `launch()`.\n"
+ ]
+ },
+ {
+ "data": {
+ "text/html": [
+ ""
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": []
+ },
+ "execution_count": 67,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "view = gr.Interface(\n",
+ " fn=stream_brochure,\n",
+ " inputs=[\n",
+ " gr.Textbox(label=\"Company name:\"),\n",
+ " gr.Textbox(label=\"Landing page URL including http:// or https://\"),\n",
+ " gr.Dropdown([\"llama\", \"deepseek\"], label=\"Select model\"),\n",
+ " gr.Dropdown([\"tone_description_fun\", \"tone_description_aggression\"])],\n",
+ " outputs=[gr.Markdown(label=\"Brochure:\")],\n",
+ " \n",
+ " flagging_mode=\"never\"\n",
+ ")\n",
+ "view.launch()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0659a1dc-a00b-4cbf-b5ed-d6661fbb57f2",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.12"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}