diff --git a/community-contributions/pradeep1955/week1 EXERCISE.ipynb b/community-contributions/pradeep1955/week1 EXERCISE.ipynb
new file mode 100644
index 0000000..5c418f2
--- /dev/null
+++ b/community-contributions/pradeep1955/week1 EXERCISE.ipynb
@@ -0,0 +1,148 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "fe12c203-e6a6-452c-a655-afb8a03a4ff5",
+ "metadata": {},
+ "source": [
+ "# End of week 1 exercise\n",
+ "\n",
+ "To demonstrate your familiarity with OpenAI API, and also Ollama, build a tool that takes a technical question, \n",
+ "and responds with an explanation. This is a tool that you will be able to use yourself during the course!"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c1070317-3ed9-4659-abe3-828943230e03",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# imports\n",
+ "import os\n",
+ "from openai import OpenAI\n",
+ "from IPython.display import Markdown, display, update_display\n",
+ "from dotenv import load_dotenv"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "4a456906-915a-4bfd-bb9d-57e505c5093f",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# constants\n",
+ "\n",
+ "MODEL_GPT = 'gpt-4o-mini'\n",
+ "MODEL_LLAMA = 'llama3.2'"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "a8d7923c-5f28-4c30-8556-342d7c8497c1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# set up environment\n",
+ "load_dotenv(override=True)\n",
+ "api_key=os.getenv(\"OPENAI_API_KEY\")\n",
+ "if not api_key.startswith(\"sk-proj-\") and len(api_key)<10:\n",
+ " print(\"api key not foud\")\n",
+ "else:\n",
+ " print(\"api found and is ok\")\n",
+ "\n",
+ "openai=OpenAI()\n",
+ "print()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "3f0d0137-52b0-47a8-81a8-11a90a010798",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# here is the question; type over this to ask something new\n",
+ "\n",
+ "question = \"\"\"\n",
+ "Please explain what this code does and why:\n",
+ "yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n",
+ "\"\"\""
+ ]
+ },
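+ {
+ "cell_type": "markdown",
+ "id": "a1b2c3d4-e5f6-4a7b-8c9d-0e1f2a3b4c5d",
+ "metadata": {},
+ "source": [
+ "Before asking the models, here's a tiny self-contained demo of the expression in the question, so you can compare their explanations against actual behaviour. The sample `books` list below is made up purely for illustration."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "b2c3d4e5-f6a7-4b8c-9d0e-1f2a3b4c5d6e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# A minimal sketch: the set comprehension collects unique, non-missing authors,\n",
+ "# and `yield from` turns the function into a generator over that set\n",
+ "books = [{\"author\": \"Austen\"}, {\"author\": \"Orwell\"}, {\"title\": \"Untitled\"}, {\"author\": \"Austen\"}]\n",
+ "\n",
+ "def authors():\n",
+ "    yield from {book.get(\"author\") for book in books if book.get(\"author\")}\n",
+ "\n",
+ "print(sorted(authors()))  # duplicates and entries without an author are dropped"
+ ]
+ },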
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "60ce7000-a4a5-4cce-a261-e75ef45063b4",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Get gpt-4o-mini to answer, with streaming\n",
+ "messages = [{\"role\":\"system\",\"content\":\"You are a expert Dta Scientist\"}, {\"role\":\"user\",\"content\":question}]\n",
+ "\n",
+ "stream = openai.chat.completions.create(\n",
+ " model = MODEL_GPT,\n",
+ " messages = messages,\n",
+ " stream = True\n",
+ ")\n",
+ "response = \"\"\n",
+ "display_handle = display(Markdown(\"\"), display_id=True)\n",
+ "for chunk in stream:\n",
+ " response += chunk.choices[0].delta.content or ''\n",
+ " response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n",
+ " update_display(Markdown(response), display_id=display_handle.display_id)\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "8f7c8ea8-4082-4ad0-8751-3301adcf6538",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Get Llama 3.2 to answer\n",
+ "import ollama\n",
+ "\n",
+ "stream = ollama.chat(model=MODEL_LLAMA, messages=messages, stream=True)\n",
+ "response = \"\"\n",
+ "display_handle = display(Markdown(\"\"), display_id=True)\n",
+ "for chunk in stream:\n",
+ " response += chunk[\"message\"][\"content\"] or ''\n",
+ " response = response.replace(\"```\",\"\").replace(\"markdown\", \"\")\n",
+ " update_display(Markdown(response), display_id=display_handle.display_id)\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2a573174-779b-4d50-8792-fa0889b37211",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "llmenv",
+ "language": "python",
+ "name": "llmenv"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.13"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/community-contributions/pradeep1955/week1/day2 EXERCISE.ipynb b/community-contributions/pradeep1955/week1/day2 EXERCISE.ipynb
new file mode 100644
index 0000000..d7a3078
--- /dev/null
+++ b/community-contributions/pradeep1955/week1/day2 EXERCISE.ipynb
@@ -0,0 +1,426 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "d15d8294-3328-4e07-ad16-8a03e9bbfdb9",
+ "metadata": {},
+ "source": [
+ "# Welcome to your first assignment!\n",
+ "\n",
+ "Instructions are below. Please give this a try, and look in the solutions folder if you get stuck (or feel free to ask me!)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "ada885d9-4d42-4d9b-97f0-74fbbbfe93a9",
+ "metadata": {},
+ "source": [
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " | \n",
+ " \n",
+ " Just before we get to the assignment --\n",
+ " I thought I'd take a second to point you at this page of useful resources for the course. This includes links to all the slides. \n",
+ " https://edwarddonner.com/2024/11/13/llm-engineering-resources/ \n",
+ " Please keep this bookmarked, and I'll continue to add more useful links there over time.\n",
+ " \n",
+ " | \n",
+ "
\n",
+ "
"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "6e9fa1fc-eac5-4d1d-9be4-541b3f2b3458",
+ "metadata": {},
+ "source": [
+ "# HOMEWORK EXERCISE ASSIGNMENT\n",
+ "\n",
+ "Upgrade the day 1 project to summarize a webpage to use an Open Source model running locally via Ollama rather than OpenAI\n",
+ "\n",
+ "You'll be able to use this technique for all subsequent projects if you'd prefer not to use paid APIs.\n",
+ "\n",
+ "**Benefits:**\n",
+ "1. No API charges - open-source\n",
+ "2. Data doesn't leave your box\n",
+ "\n",
+ "**Disadvantages:**\n",
+ "1. Significantly less power than Frontier Model\n",
+ "\n",
+ "## Recap on installation of Ollama\n",
+ "\n",
+ "Simply visit [ollama.com](https://ollama.com) and install!\n",
+ "\n",
+ "Once complete, the ollama server should already be running locally. \n",
+ "If you visit: \n",
+ "[http://localhost:11434/](http://localhost:11434/)\n",
+ "\n",
+ "You should see the message `Ollama is running`. \n",
+ "\n",
+ "If not, bring up a new Terminal (Mac) or Powershell (Windows) and enter `ollama serve` \n",
+ "And in another Terminal (Mac) or Powershell (Windows), enter `ollama pull llama3.2` \n",
+ "Then try [http://localhost:11434/](http://localhost:11434/) again.\n",
+ "\n",
+ "If Ollama is slow on your machine, try using `llama3.2:1b` as an alternative. Run `ollama pull llama3.2:1b` from a Terminal or Powershell, and change the code below from `MODEL = \"llama3.2\"` to `MODEL = \"llama3.2:1b\"`"
+ ]
+ },
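+ {
+ "cell_type": "markdown",
+ "id": "c3d4e5f6-a7b8-4c9d-8e0f-2a3b4c5d6e7f",
+ "metadata": {},
+ "source": [
+ "The next cell is an optional sanity check -- a minimal sketch, assuming the default Ollama port 11434 -- that does the same test as visiting the URL in your browser."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d4e5f6a7-b8c9-4d0e-9f1a-3b4c5d6e7f8a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Should print 'Ollama is running' if the server is up\n",
+ "import requests\n",
+ "print(requests.get(\"http://localhost:11434/\").text)"
+ ]
+ },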
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "4e2a9393-7767-488e-a8bf-27c12dca35bd",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# imports\n",
+ "\n",
+ "import requests\n",
+ "from bs4 import BeautifulSoup\n",
+ "from IPython.display import Markdown, display"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "29ddd15d-a3c5-4f4e-a678-873f56162724",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Constants\n",
+ "\n",
+ "OLLAMA_API = \"http://localhost:11434/api/chat\"\n",
+ "HEADERS = {\"Content-Type\": \"application/json\"}\n",
+ "MODEL = \"llama3.2\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "dac0a679-599c-441f-9bf2-ddc73d35b940",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Create a messages list using the same format that we used for OpenAI\n",
+ "\n",
+ "messages = [\n",
+ " {\"role\": \"user\", \"content\": \"Describe some of the business applications of Generative AI\"}\n",
+ "]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "7bb9c624-14f0-4945-a719-8ddb64f66f47",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "payload = {\n",
+ " \"model\": MODEL,\n",
+ " \"messages\": messages,\n",
+ " \"stream\": False\n",
+ " }"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "479ff514-e8bd-4985-a572-2ea28bb4fa40",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Let's just make sure the model is loaded\n",
+ "\n",
+ "!ollama pull llama3.2"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "42b9f644-522d-4e05-a691-56e7658c0ea9",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# If this doesn't work for any reason, try the 2 versions in the following cells\n",
+ "# And double check the instructions in the 'Recap on installation of Ollama' at the top of this lab\n",
+ "# And if none of that works - contact me!\n",
+ "\n",
+ "response = requests.post(OLLAMA_API, json=payload, headers=HEADERS)\n",
+ "print(response.json()['message']['content'])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "6a021f13-d6a1-4b96-8e18-4eae49d876fe",
+ "metadata": {},
+ "source": [
+ "# Introducing the ollama package\n",
+ "\n",
+ "And now we'll do the same thing, but using the elegant ollama python package instead of a direct HTTP call.\n",
+ "\n",
+ "Under the hood, it's making the same call as above to the ollama server running at localhost:11434"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "7745b9c4-57dc-4867-9180-61fa5db55eb8",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import ollama\n",
+ "\n",
+ "response = ollama.chat(model=MODEL, messages=messages)\n",
+ "print(response['message']['content'])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "a4704e10-f5fb-4c15-a935-f046c06fb13d",
+ "metadata": {},
+ "source": [
+ "## Alternative approach - using OpenAI python library to connect to Ollama"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "23057e00-b6fc-4678-93a9-6b31cb704bff",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# There's actually an alternative approach that some people might prefer\n",
+ "# You can use the OpenAI client python library to call Ollama:\n",
+ "\n",
+ "from openai import OpenAI\n",
+ "ollama_via_openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')\n",
+ "\n",
+ "response = ollama_via_openai.chat.completions.create(\n",
+ " model=MODEL,\n",
+ " messages=messages\n",
+ ")\n",
+ "\n",
+ "print(response.choices[0].message.content)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "9f9e22da-b891-41f6-9ac9-bd0c0a5f4f44",
+ "metadata": {},
+ "source": [
+ "## Are you confused about why that works?\n",
+ "\n",
+ "It seems strange, right? We just used OpenAI code to call Ollama?? What's going on?!\n",
+ "\n",
+ "Here's the scoop:\n",
+ "\n",
+ "The python class `OpenAI` is simply code written by OpenAI engineers that makes calls over the internet to an endpoint. \n",
+ "\n",
+ "When you call `openai.chat.completions.create()`, this python code just makes a web request to the following url: \"https://api.openai.com/v1/chat/completions\"\n",
+ "\n",
+ "Code like this is known as a \"client library\" - it's just wrapper code that runs on your machine to make web requests. The actual power of GPT is running on OpenAI's cloud behind this API, not on your computer!\n",
+ "\n",
+ "OpenAI was so popular, that lots of other AI providers provided identical web endpoints, so you could use the same approach.\n",
+ "\n",
+ "So Ollama has an endpoint running on your local box at http://localhost:11434/v1/chat/completions \n",
+ "And in week 2 we'll discover that lots of other providers do this too, including Gemini and DeepSeek.\n",
+ "\n",
+ "And then the team at OpenAI had a great idea: they can extend their client library so you can specify a different 'base url', and use their library to call any compatible API.\n",
+ "\n",
+ "That's it!\n",
+ "\n",
+ "So when you say: `ollama_via_openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')` \n",
+ "Then this will make the same endpoint calls, but to Ollama instead of OpenAI."
+ ]
+ },
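+ {
+ "cell_type": "markdown",
+ "id": "e5f6a7b8-c9d0-4e1f-8a2b-4c5d6e7f8a9b",
+ "metadata": {},
+ "source": [
+ "To make this concrete, here's a minimal sketch of that same call made as a raw HTTP POST with `requests` -- no client library at all. It assumes the local Ollama server is running and reuses the `MODEL` and `messages` defined above."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f6a7b8c9-d0e1-4f2a-9b3c-5d6e7f8a9b0c",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import requests\n",
+ "\n",
+ "# The OpenAI-compatible endpoint is just a URL; the OpenAI client library wraps a call like this\n",
+ "raw = requests.post(\n",
+ "    \"http://localhost:11434/v1/chat/completions\",\n",
+ "    json={\"model\": MODEL, \"messages\": messages}\n",
+ ")\n",
+ "print(raw.json()[\"choices\"][0][\"message\"][\"content\"])"
+ ]
+ },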
+ {
+ "cell_type": "markdown",
+ "id": "bc7d1de3-e2ac-46ff-a302-3b4ba38c4c90",
+ "metadata": {},
+ "source": [
+ "## Also trying the amazing reasoning model DeepSeek\n",
+ "\n",
+ "Here we use the version of DeepSeek-reasoner that's been distilled to 1.5B. \n",
+ "This is actually a 1.5B variant of Qwen that has been fine-tuned using synethic data generated by Deepseek R1.\n",
+ "\n",
+ "Other sizes of DeepSeek are [here](https://ollama.com/library/deepseek-r1) all the way up to the full 671B parameter version, which would use up 404GB of your drive and is far too large for most!"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "cf9eb44e-fe5b-47aa-b719-0bb63669ab3d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "!ollama pull deepseek-r1:1.5b"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "1d3d554b-e00d-4c08-9300-45e073950a76",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# This may take a few minutes to run! You should then see a fascinating \"thinking\" trace inside tags, followed by some decent definitions\n",
+ "\n",
+ "response = ollama_via_openai.chat.completions.create(\n",
+ " model=\"deepseek-r1:1.5b\",\n",
+ " messages=[{\"role\": \"user\", \"content\": \"Please give definitions of some core concepts behind LLMs: a neural network, attention and the transformer\"}]\n",
+ ")\n",
+ "\n",
+ "print(response.choices[0].message.content)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "1622d9bb-5c68-4d4e-9ca4-b492c751f898",
+ "metadata": {},
+ "source": [
+ "# NOW the exercise for you\n",
+ "\n",
+ "Take the code from day1 and incorporate it here, to build a website summarizer that uses Llama 3.2 running locally instead of OpenAI; use either of the above approaches."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "43ef4b92-53e1-4af2-af3f-726812f4265c",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "import requests\n",
+ "#from dotenv import load_dotenv\n",
+ "from bs4 import BeautifulSoup\n",
+ "from IPython.display import Markdown, display\n",
+ "#from openai import OpenAI"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "97d45733-394e-493e-a92b-1475876d9028",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "headers = {\n",
+ " \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36\"\n",
+ "}\n",
+ "\n",
+ "class Website:\n",
+ "\n",
+ " def __init__(self, url):\n",
+ " \"\"\"\n",
+ " Create this Website object from the given url using the BeautifulSoup library\n",
+ " \"\"\"\n",
+ " self.url = url\n",
+ " response = requests.get(url, headers=headers)\n",
+ " soup = BeautifulSoup(response.content, 'html.parser')\n",
+ " self.title = soup.title.string if soup.title else \"No title found\"\n",
+ " for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
+ " irrelevant.decompose()\n",
+ " self.text = soup.body.get_text(separator=\"\\n\", strip=True)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "6a40f9c5-1b14-42f9-9319-6a66e58e03f2",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "webpage = Website(\"https://www.pleasurewebsite.com\")\n",
+ "print(webpage.title)\n",
+ "print(webpage.text)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "a72a005d-43de-4ae5-b427-99a8fcb6065c",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "system_prompt = \"You are an assistant that analyzes the contents of a website \\\n",
+ "and provides a short summary, ignoring text that might be navigation related. \\\n",
+ "Respond in markdown.\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f0e4f95f-0ccf-4027-9457-5c973cd17702",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def user_prompt_for(website):\n",
+ " user_prompt = f\"You are looking at a website titled {website.title}\"\n",
+ " user_prompt += \"\\nThe contents of this website is as follows; \\\n",
+ "please provide a short summary of this website in markdown. \\\n",
+ "If it includes news or announcements, then summarize these too.\\n\\n\"\n",
+ " user_prompt += website.text\n",
+ " return user_prompt"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ceae6073-a085-49ce-ad44-39e46d8e6934",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def messages_for(website):\n",
+ " return [\n",
+ " {\"role\": \"system\", \"content\": system_prompt},\n",
+ " {\"role\": \"user\", \"content\": user_prompt_for(website)}\n",
+ " ]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "9d53b26b-308c-470c-a0a9-9edb887aed6d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "messages=messages_for(webpage)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "6de38216-6d1c-48c4-877b-86d403f4e0f8",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import ollama\n",
+ "MODEL = \"llama3.2\"\n",
+ "response = ollama.chat(model=MODEL, messages=messages)\n",
+ "print(response['message']['content'])"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "llmenv",
+ "language": "python",
+ "name": "llmenv"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.13"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/community-contributions/pradeep1955/week2/agent_conversation_shakespeare.ipynb b/community-contributions/pradeep1955/week2/agent_conversation_shakespeare.ipynb
new file mode 100644
index 0000000..6d55283
--- /dev/null
+++ b/community-contributions/pradeep1955/week2/agent_conversation_shakespeare.ipynb
@@ -0,0 +1,351 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "06cf3063-9f3e-4551-a0d5-f08d9cabb927",
+ "metadata": {},
+ "source": [
+ "# Triangular agent conversation\n",
+ "\n",
+ "## GPT (Hamlet), LLM (Falstaff), Gemini (Iago):"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "3637910d-2c6f-4f19-b1fb-2f916d23f9ac",
+ "metadata": {},
+ "source": [
+ "### Created a 3-way, bringing Gemini into the coversation.\n",
+ "### Replacing one of the models with an open source model running with Ollama."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f8e0c1bd-a159-475b-9cdc-e219a7633355",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# imports\n",
+ "\n",
+ "import os\n",
+ "from dotenv import load_dotenv\n",
+ "from openai import OpenAI\n",
+ "from IPython.display import Markdown, display, update_display\n",
+ "import ollama"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "a3ad57ad-46a8-460e-9cb3-67a890093536",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import google.generativeai"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "4f531c14-5743-4a5b-83d9-cb5863ca2ddf",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Load environment variables in a file called .env\n",
+ "# Print the key prefixes to help with any debugging\n",
+ "\n",
+ "load_dotenv(override=True)\n",
+ "openai_api_key = os.getenv('OPENAI_API_KEY')\n",
+ "google_api_key = os.getenv('GOOGLE_API_KEY')\n",
+ "\n",
+ "if openai_api_key:\n",
+ " print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
+ "else:\n",
+ " print(\"OpenAI API Key not set\")\n",
+ "\n",
+ "if google_api_key:\n",
+ " print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n",
+ "else:\n",
+ " print(\"Google API Key not set\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "3d5150ee-3858-4921-bce6-2eecfb96bc75",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Connect to OpenAI\n",
+ "\n",
+ "openai = OpenAI()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "11381fd8-5099-41e8-a1d7-6787dea56e43",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "google.generativeai.configure()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c1766d20-54b6-4f76-96c5-c338ae7073c9",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "gpt_model = \"gpt-4o-mini\"\n",
+ "llama_model = \"llama3.2\"\n",
+ "gemini_model = 'gemini-2.0-flash'\n",
+ "\n",
+ "gpt_system = \"You are playing part of Hamlet. he is philosopher, probes Iago with a mixture of suspicion\\\n",
+ "and intellectual curiosity, seeking to unearth the origins of his deceit.\\\n",
+ "Is malice born of scorn, envy, or some deeper void? Hamlet’s introspective nature\\\n",
+ "drives him to question whether Iago’s actions reveal a truth about humanity itself.\\\n",
+ "You will respond as Shakespear's Hamlet will do.\"\n",
+ "\n",
+ "llama_system = \"You are acting part of Falstaff who attempts to lighten the mood with his jokes and observations,\\\n",
+ "potentially clashing with Hamlet's melancholic nature.You respond as Shakespear's Falstaff do.\"\n",
+ "\n",
+ "gemini_system = \"You are acting part of Iago, subtly trying to manipulate both Hamlet and Falstaff\\\n",
+ "to his own advantage, testing their weaknesses and exploiting their flaws. You respond like Iago\"\n",
+ "\n",
+ "gpt_messages = [\"Hi there\"]\n",
+ "llama_messages = [\"Hi\"]\n",
+ "gemini_messages = [\"Hello\"]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "806a0506-dac8-4bad-ac08-31f350256b58",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def call_gpt():\n",
+ " messages = [{\"role\": \"system\", \"content\": gpt_system}]\n",
+ " for gpt, claude, gemini in zip(gpt_messages, llama_messages, gemini_messages):\n",
+ " messages.append({\"role\": \"assistant\", \"content\": gpt})\n",
+ " messages.append({\"role\": \"user\", \"content\": claude})\n",
+ " messages.append({\"role\": \"user\", \"content\": gemini})\n",
+ " completion = openai.chat.completions.create(\n",
+ " model=gpt_model,\n",
+ " messages=messages\n",
+ " )\n",
+ " return completion.choices[0].message.content"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "43674885-ede7-48bf-bee4-467454f3e96a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def call_llama():\n",
+ " messages = []\n",
+ " for gpt, llama, gemini in zip(gpt_messages, llama_messages, gemini_messages):\n",
+ " messages.append({\"role\": \"user\", \"content\": gpt})\n",
+ " messages.append({\"role\": \"assistant\", \"content\": llama})\n",
+ " messages.append({\"role\": \"user\", \"content\": gemini})\n",
+ " messages.append({\"role\": \"user\", \"content\": gpt_messages[-1]})\n",
+ " response = ollama.chat(model=llama_model, messages=messages)\n",
+ "\n",
+ " \n",
+ " return response['message']['content']"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "03d34769-b339-4c4b-8c60-69494c39d725",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#import google.generativeai as genai\n",
+ "\n",
+ "# Make sure you configure the API key first:\n",
+ "#genai.configure(api_key=\"YOUR_API_KEY\")\n",
+ "\n",
+ "def call_gemini():\n",
+ " gemini_messages = []\n",
+ " \n",
+ " # Format the history for Gemini\n",
+ " for gpt, llama, gemini_message in zip(gpt_messages, llama_messages, gemini_messages):\n",
+ " gemini_messages.append({\"role\": \"user\", \"parts\": [gpt]}) # Hamlet speaks\n",
+ " gemini_messages.append({\"role\": \"model\", \"parts\": [llama]}) # Falstaff responds\n",
+ " gemini_messages.append({\"role\": \"model\", \"parts\": [gemini_message]}) # Iago responds\n",
+ "\n",
+ " # Add latest user input if needed (optional)\n",
+ " gemini_messages.append({\"role\": \"user\", \"parts\": [llama_messages[-1]]})\n",
+ "\n",
+ " # Initialize the model with the correct system instruction\n",
+ " gemini = google.generativeai.GenerativeModel(\n",
+ " #model_name='gemini-1.5-flash', # Or 'gemini-pro'\n",
+ " model_name = gemini_model,\n",
+ " system_instruction=gemini_system\n",
+ " )\n",
+ "\n",
+ " response = gemini.generate_content(gemini_messages)\n",
+ " return response.text\n",
+ "#print(response.text)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "93fc8253-67cb-4ea4-aff7-097b2a222793",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "gpt_messages = [\"Hi there\"]\n",
+ "llama_messages = [\"Hi\"]\n",
+ "gemini_messages = [\"Hello\"]\n",
+ "\n",
+ "print(f\"Hamlet:\\n{gpt_messages[0]}\\n\")\n",
+ "print(f\"Falstaff:\\n{llama_messages[0]}\\n\")\n",
+ "print(f\"Iago:\\n{gemini_messages[0]}\\n\")\n",
+ "\n",
+ "for i in range(3):\n",
+ " gpt_next = call_gpt()\n",
+ " print(f\"GPT:\\n{gpt_next}\\n\")\n",
+ " gpt_messages.append(gpt_next)\n",
+ " \n",
+ " llama_next = call_llama()\n",
+ " print(f\"Llama:\\n{llama_next}\\n\")\n",
+ " llama_messages.append(llama_next)\n",
+ "\n",
+ " gemini_next = call_gemini()\n",
+ " print(f\"Gemini:\\n{gemini_next}\\n\")\n",
+ " llama_messages.append(gemini_next)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "bca66ffc-9dc1-4384-880c-210889f5d0ac",
+ "metadata": {},
+ "source": [
+ "## Conversation between gpt-4.0-mini and llama3.2"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c23224f6-7008-44ed-a57f-718975f4e291",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Let's make a conversation between GPT-4o-mini and Claude-3-haiku\n",
+ "# We're using cheap versions of models so the costs will be minimal\n",
+ "\n",
+ "gpt_model = \"gpt-4o-mini\"\n",
+ "llama_model = \"llama3.2\"\n",
+ "\n",
+ "gpt_system = \"You are a tapori from mumbai who is very optimistic; \\\n",
+ "you alway look at the brighter part of the situation and you always ready to take act to win way.\"\n",
+ "\n",
+ "llama_system = \"You are a Jaat from Haryana. You try to express with hindi poems \\\n",
+ "to agree with other person and or find common ground. If the other person is optimistic, \\\n",
+ "you respond in poetic way and keep chatting.\"\n",
+ "\n",
+ "gpt_messages = [\"Hi there\"]\n",
+ "llama_messages = [\"Hi\"]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2d704bbb-f22b-400d-a695-efbd02b26548",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def call_gpt():\n",
+ " messages = [{\"role\": \"system\", \"content\": gpt_system}]\n",
+ " for gpt, llama in zip(gpt_messages, llama_messages):\n",
+ " messages.append({\"role\": \"assistant\", \"content\": gpt})\n",
+ " messages.append({\"role\": \"user\", \"content\": llama})\n",
+ " completion = openai.chat.completions.create(\n",
+ " model=gpt_model,\n",
+ " messages=messages\n",
+ " )\n",
+ " return completion.choices[0].message.content"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "385ccec8-de59-4e42-9616-3f5c9a05589c",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def call_llama():\n",
+ " messages = []\n",
+ " for gpt, llama_message in zip(gpt_messages, llama_messages):\n",
+ " messages.append({\"role\": \"user\", \"content\": gpt})\n",
+ " messages.append({\"role\": \"assistant\", \"content\": llama_message})\n",
+ " messages.append({\"role\": \"user\", \"content\": gpt_messages[-1]})\n",
+ " response = ollama.chat(model=llama_model, messages=messages)\n",
+ "\n",
+ " \n",
+ " return response['message']['content']"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "70b5481b-455e-4275-80d3-0afe0fabcb0f",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "gpt_messages = [\"Hi there\"]\n",
+ "llama_messages = [\"Hi\"]\n",
+ "\n",
+ "print(f\"GPT:\\n{gpt_messages[0]}\\n\")\n",
+ "print(f\"Llama:\\n{llama_messages[0]}\\n\")\n",
+ "\n",
+ "for i in range(3):\n",
+ " gpt_next = call_gpt()\n",
+ " print(f\"GPT:\\n{gpt_next}\\n\")\n",
+ " gpt_messages.append(gpt_next)\n",
+ " \n",
+ " llama_next = call_llama()\n",
+ " print(f\"Llama:\\n{llama_next}\\n\")\n",
+ " llama_messages.append(llama_next)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "7f8d734b-57e5-427d-bcb1-7956fc58a348",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "llmenv",
+ "language": "python",
+ "name": "llmenv"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.13"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/week2/community-contributions/agent_conversation_shakespeare.ipynb b/week2/community-contributions/agent_conversation_shakespeare.ipynb
new file mode 100644
index 0000000..6d55283
--- /dev/null
+++ b/week2/community-contributions/agent_conversation_shakespeare.ipynb
@@ -0,0 +1,351 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "06cf3063-9f3e-4551-a0d5-f08d9cabb927",
+ "metadata": {},
+ "source": [
+ "# Triangular agent conversation\n",
+ "\n",
+ "## GPT (Hamlet), LLM (Falstaff), Gemini (Iago):"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "3637910d-2c6f-4f19-b1fb-2f916d23f9ac",
+ "metadata": {},
+ "source": [
+ "### Created a 3-way, bringing Gemini into the coversation.\n",
+ "### Replacing one of the models with an open source model running with Ollama."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f8e0c1bd-a159-475b-9cdc-e219a7633355",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# imports\n",
+ "\n",
+ "import os\n",
+ "from dotenv import load_dotenv\n",
+ "from openai import OpenAI\n",
+ "from IPython.display import Markdown, display, update_display\n",
+ "import ollama"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "a3ad57ad-46a8-460e-9cb3-67a890093536",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import google.generativeai"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "4f531c14-5743-4a5b-83d9-cb5863ca2ddf",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Load environment variables in a file called .env\n",
+ "# Print the key prefixes to help with any debugging\n",
+ "\n",
+ "load_dotenv(override=True)\n",
+ "openai_api_key = os.getenv('OPENAI_API_KEY')\n",
+ "google_api_key = os.getenv('GOOGLE_API_KEY')\n",
+ "\n",
+ "if openai_api_key:\n",
+ " print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
+ "else:\n",
+ " print(\"OpenAI API Key not set\")\n",
+ "\n",
+ "if google_api_key:\n",
+ " print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n",
+ "else:\n",
+ " print(\"Google API Key not set\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "3d5150ee-3858-4921-bce6-2eecfb96bc75",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Connect to OpenAI\n",
+ "\n",
+ "openai = OpenAI()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "11381fd8-5099-41e8-a1d7-6787dea56e43",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "google.generativeai.configure()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c1766d20-54b6-4f76-96c5-c338ae7073c9",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "gpt_model = \"gpt-4o-mini\"\n",
+ "llama_model = \"llama3.2\"\n",
+ "gemini_model = 'gemini-2.0-flash'\n",
+ "\n",
+ "gpt_system = \"You are playing part of Hamlet. he is philosopher, probes Iago with a mixture of suspicion\\\n",
+ "and intellectual curiosity, seeking to unearth the origins of his deceit.\\\n",
+ "Is malice born of scorn, envy, or some deeper void? Hamlet’s introspective nature\\\n",
+ "drives him to question whether Iago’s actions reveal a truth about humanity itself.\\\n",
+ "You will respond as Shakespear's Hamlet will do.\"\n",
+ "\n",
+ "llama_system = \"You are acting part of Falstaff who attempts to lighten the mood with his jokes and observations,\\\n",
+ "potentially clashing with Hamlet's melancholic nature.You respond as Shakespear's Falstaff do.\"\n",
+ "\n",
+ "gemini_system = \"You are acting part of Iago, subtly trying to manipulate both Hamlet and Falstaff\\\n",
+ "to his own advantage, testing their weaknesses and exploiting their flaws. You respond like Iago\"\n",
+ "\n",
+ "gpt_messages = [\"Hi there\"]\n",
+ "llama_messages = [\"Hi\"]\n",
+ "gemini_messages = [\"Hello\"]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "806a0506-dac8-4bad-ac08-31f350256b58",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def call_gpt():\n",
+ " messages = [{\"role\": \"system\", \"content\": gpt_system}]\n",
+ " for gpt, claude, gemini in zip(gpt_messages, llama_messages, gemini_messages):\n",
+ " messages.append({\"role\": \"assistant\", \"content\": gpt})\n",
+ " messages.append({\"role\": \"user\", \"content\": claude})\n",
+ " messages.append({\"role\": \"user\", \"content\": gemini})\n",
+ " completion = openai.chat.completions.create(\n",
+ " model=gpt_model,\n",
+ " messages=messages\n",
+ " )\n",
+ " return completion.choices[0].message.content"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "43674885-ede7-48bf-bee4-467454f3e96a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def call_llama():\n",
+ " messages = []\n",
+ " for gpt, llama, gemini in zip(gpt_messages, llama_messages, gemini_messages):\n",
+ " messages.append({\"role\": \"user\", \"content\": gpt})\n",
+ " messages.append({\"role\": \"assistant\", \"content\": llama})\n",
+ " messages.append({\"role\": \"user\", \"content\": gemini})\n",
+ " messages.append({\"role\": \"user\", \"content\": gpt_messages[-1]})\n",
+ " response = ollama.chat(model=llama_model, messages=messages)\n",
+ "\n",
+ " \n",
+ " return response['message']['content']"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "03d34769-b339-4c4b-8c60-69494c39d725",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#import google.generativeai as genai\n",
+ "\n",
+ "# Make sure you configure the API key first:\n",
+ "#genai.configure(api_key=\"YOUR_API_KEY\")\n",
+ "\n",
+ "def call_gemini():\n",
+ " gemini_messages = []\n",
+ " \n",
+ " # Format the history for Gemini\n",
+ " for gpt, llama, gemini_message in zip(gpt_messages, llama_messages, gemini_messages):\n",
+ " gemini_messages.append({\"role\": \"user\", \"parts\": [gpt]}) # Hamlet speaks\n",
+ " gemini_messages.append({\"role\": \"model\", \"parts\": [llama]}) # Falstaff responds\n",
+ " gemini_messages.append({\"role\": \"model\", \"parts\": [gemini_message]}) # Iago responds\n",
+ "\n",
+ " # Add latest user input if needed (optional)\n",
+ " gemini_messages.append({\"role\": \"user\", \"parts\": [llama_messages[-1]]})\n",
+ "\n",
+ " # Initialize the model with the correct system instruction\n",
+ " gemini = google.generativeai.GenerativeModel(\n",
+ " #model_name='gemini-1.5-flash', # Or 'gemini-pro'\n",
+ " model_name = gemini_model,\n",
+ " system_instruction=gemini_system\n",
+ " )\n",
+ "\n",
+ " response = gemini.generate_content(gemini_messages)\n",
+ " return response.text\n",
+ "#print(response.text)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "93fc8253-67cb-4ea4-aff7-097b2a222793",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "gpt_messages = [\"Hi there\"]\n",
+ "llama_messages = [\"Hi\"]\n",
+ "gemini_messages = [\"Hello\"]\n",
+ "\n",
+ "print(f\"Hamlet:\\n{gpt_messages[0]}\\n\")\n",
+ "print(f\"Falstaff:\\n{llama_messages[0]}\\n\")\n",
+ "print(f\"Iago:\\n{gemini_messages[0]}\\n\")\n",
+ "\n",
+ "for i in range(3):\n",
+ " gpt_next = call_gpt()\n",
+ " print(f\"GPT:\\n{gpt_next}\\n\")\n",
+ " gpt_messages.append(gpt_next)\n",
+ " \n",
+ " llama_next = call_llama()\n",
+ " print(f\"Llama:\\n{llama_next}\\n\")\n",
+ " llama_messages.append(llama_next)\n",
+ "\n",
+ " gemini_next = call_gemini()\n",
+ " print(f\"Gemini:\\n{gemini_next}\\n\")\n",
+ " llama_messages.append(gemini_next)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "bca66ffc-9dc1-4384-880c-210889f5d0ac",
+ "metadata": {},
+ "source": [
+ "## Conversation between gpt-4.0-mini and llama3.2"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c23224f6-7008-44ed-a57f-718975f4e291",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Let's make a conversation between GPT-4o-mini and Claude-3-haiku\n",
+ "# We're using cheap versions of models so the costs will be minimal\n",
+ "\n",
+ "gpt_model = \"gpt-4o-mini\"\n",
+ "llama_model = \"llama3.2\"\n",
+ "\n",
+ "gpt_system = \"You are a tapori from mumbai who is very optimistic; \\\n",
+ "you alway look at the brighter part of the situation and you always ready to take act to win way.\"\n",
+ "\n",
+ "llama_system = \"You are a Jaat from Haryana. You try to express with hindi poems \\\n",
+ "to agree with other person and or find common ground. If the other person is optimistic, \\\n",
+ "you respond in poetic way and keep chatting.\"\n",
+ "\n",
+ "gpt_messages = [\"Hi there\"]\n",
+ "llama_messages = [\"Hi\"]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2d704bbb-f22b-400d-a695-efbd02b26548",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def call_gpt():\n",
+ " messages = [{\"role\": \"system\", \"content\": gpt_system}]\n",
+ " for gpt, llama in zip(gpt_messages, llama_messages):\n",
+ " messages.append({\"role\": \"assistant\", \"content\": gpt})\n",
+ " messages.append({\"role\": \"user\", \"content\": llama})\n",
+ " completion = openai.chat.completions.create(\n",
+ " model=gpt_model,\n",
+ " messages=messages\n",
+ " )\n",
+ " return completion.choices[0].message.content"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "385ccec8-de59-4e42-9616-3f5c9a05589c",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def call_llama():\n",
+ " messages = []\n",
+ " for gpt, llama_message in zip(gpt_messages, llama_messages):\n",
+ " messages.append({\"role\": \"user\", \"content\": gpt})\n",
+ " messages.append({\"role\": \"assistant\", \"content\": llama_message})\n",
+ " messages.append({\"role\": \"user\", \"content\": gpt_messages[-1]})\n",
+ " response = ollama.chat(model=llama_model, messages=messages)\n",
+ "\n",
+ " \n",
+ " return response['message']['content']"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "70b5481b-455e-4275-80d3-0afe0fabcb0f",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "gpt_messages = [\"Hi there\"]\n",
+ "llama_messages = [\"Hi\"]\n",
+ "\n",
+ "print(f\"GPT:\\n{gpt_messages[0]}\\n\")\n",
+ "print(f\"Llama:\\n{llama_messages[0]}\\n\")\n",
+ "\n",
+ "for i in range(3):\n",
+ " gpt_next = call_gpt()\n",
+ " print(f\"GPT:\\n{gpt_next}\\n\")\n",
+ " gpt_messages.append(gpt_next)\n",
+ " \n",
+ " llama_next = call_llama()\n",
+ " print(f\"Llama:\\n{llama_next}\\n\")\n",
+ " llama_messages.append(llama_next)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "7f8d734b-57e5-427d-bcb1-7956fc58a348",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "llmenv",
+ "language": "python",
+ "name": "llmenv"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.13"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}