Merge pull request #390 from sharathir/community-contributions-branch

Added my Company Brochure exercise using ollama to community contributions version 1.0
This commit is contained in:
Ed Donner
2025-05-24 10:18:16 -04:00
committed by GitHub
5 changed files with 1295 additions and 0 deletions

View File

@@ -0,0 +1,191 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "0f1f62c4-ed03-4401-88d5-3445464a8421",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import gradio as gr\n",
"import ollama"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f8103014-012c-4648-9111-75993ce4d46a",
"metadata": {},
"outputs": [],
"source": [
"system_message = \"You are a helpful assistant\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a8fca0b4-9db7-4f74-865b-503ee19a832f",
"metadata": {},
"outputs": [],
"source": [
"def chat(message, history):\n",
" messages = [{\"role\": \"system\", \"content\": system_message}] + history + [{\"role\": \"user\", \"content\": message}]\n",
"\n",
" stream = ollama.chat(model=\"llama3.2\", messages=messages, stream=True)\n",
"\n",
" result = \"\"\n",
" for chunk in stream:\n",
" result += chunk['message']['content'] or \"\"\n",
" yield result"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "61de58a0-5972-4aca-93ad-a4bd3878a50b",
"metadata": {},
"outputs": [],
"source": [
"gr.ChatInterface(fn=chat, type=\"messages\").launch()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d448f8c5-2bb5-448d-8ae4-894b905214a7",
"metadata": {},
"outputs": [],
"source": [
"system_message = \"You are a helpful assistant in a clothes store. You should try to gently encourage \\\n",
"the customer to try items that are on sale. Hats are 60% off, and most other items are 50% off. \\\n",
"For example, if the customer says 'I'm looking to buy a hat', \\\n",
"you could reply something like, 'Wonderful - we have lots of hats - including several that are part of our sales event.'\\\n",
"Encourage the customer to buy hats if they are unsure what to get.\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "465968cf-aa7f-46b2-857f-a6819f2b14ea",
"metadata": {},
"outputs": [],
"source": [
"gr.ChatInterface(fn=chat, type=\"messages\").launch()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "873ab86b-ecb8-4f68-b520-50b29b7fd7be",
"metadata": {},
"outputs": [],
"source": [
"system_message += \"\\nIf the customer asks for shoes, you should respond that shoes are not on sale today, \\\n",
"but remind the customer to look at hats!\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c63ced30-1109-4409-b255-1f72f8c6172f",
"metadata": {},
"outputs": [],
"source": [
"gr.ChatInterface(fn=chat, type=\"messages\").launch()"
]
},
{
"cell_type": "code",
"execution_count": 25,
"id": "054f1406-c240-4849-8618-064985e76d86",
"metadata": {},
"outputs": [],
"source": [
"def chat(message, history):\n",
"\n",
"    # Build the system prompt per call instead of mutating the global via\n",
"    # `global system_message` — the old version appended the belt note every\n",
"    # time 'belt' appeared, growing the prompt and leaking into other cells.\n",
"    relevant_system = system_message\n",
"    if 'belt' in message:\n",
"        relevant_system += \" The store does not sell belts; if you are asked for belts, be sure to point out other items on sale.\"\n",
"    messages = [{\"role\": \"system\", \"content\": relevant_system}] + history + [{\"role\": \"user\", \"content\": message}]\n",
"\n",
"    stream = ollama.chat(model=\"llama3.2\", messages=messages, stream=True)\n",
"\n",
"    # Stream partial results so Gradio renders the reply incrementally.\n",
"    result = \"\"\n",
"    for chunk in stream:\n",
"        result += chunk['message']['content'] or \"\"\n",
"        yield result"
]
},
{
"cell_type": "code",
"execution_count": 26,
"id": "b1086d8a-5b5a-4b59-9a61-e76078f930cc",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7869\n",
"* To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7869/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 26,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gr.ChatInterface(fn=chat, type=\"messages\").launch()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c558ab19-b907-4b0c-8a4f-37c8b731f9b5",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.12"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,170 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "eff19e8b-000a-4327-b8fb-8fd8a3caaef5",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import anthropic\n",
"from IPython.display import Markdown, display, update_display\n",
"import ollama"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8278c99b-d748-42e5-a991-690a791ed081",
"metadata": {},
"outputs": [],
"source": [
"# Let's make a conversation between llama3.2 and deepseek-r1\n",
"# Both models run locally via ollama, so there is no API cost\n",
"\n",
"llama_model = \"llama3.2\"\n",
"deepseek_model = \"deepseek-r1\"\n",
"\n",
"llama_system = \"You are a chatbot who is very argumentative; \\\n",
"you disagree with anything in the conversation and you challenge everything, in a snarky way.\"\n",
"\n",
"deepseek_system = \"You are a very polite, courteous chatbot. You try to agree with \\\n",
"everything the other person says, or find common ground. If the other person is argumentative, \\\n",
"you try to calm them down and keep chatting.\"\n",
"\n",
"llama_messages = [\"Hi there\"]\n",
"deepseek_messages = [\"Hi\"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "49523e56-0de8-4014-85d5-8aab438d2075",
"metadata": {},
"outputs": [],
"source": [
"def call_llama():\n",
" messages = [{\"role\": \"system\", \"content\": llama_system}]\n",
" for llama, deepseek in zip(llama_messages, deepseek_messages):\n",
" messages.append({\"role\": \"assistant\", \"content\": llama})\n",
" messages.append({\"role\": \"user\", \"content\": deepseek})\n",
" completion = ollama.chat(\n",
" model=llama_model,\n",
" messages=messages\n",
" )\n",
" return completion['message']['content']"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "785240ce-e704-44ff-90cb-e5c0476454a4",
"metadata": {},
"outputs": [],
"source": [
"call_llama()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cdba39e3-5543-4657-bc3a-259f586ba392",
"metadata": {},
"outputs": [],
"source": [
"def call_deepseek():\n",
"    # The persona must be sent as a \"system\" role message — ollama's\n",
"    # `options` dict only takes model parameters, so the previous\n",
"    # \"system\" entry there was silently ignored. Likewise the token cap\n",
"    # is called `num_predict` in ollama; there is no `max_tokens` option.\n",
"    messages = [{\"role\": \"system\", \"content\": deepseek_system}]\n",
"    for llama, deepseek in zip(llama_messages, deepseek_messages):\n",
"        messages.append({\"role\": \"user\", \"content\": llama})\n",
"        messages.append({\"role\": \"assistant\", \"content\": deepseek})\n",
"    # The llama side always has one more message than deepseek at this point.\n",
"    messages.append({\"role\": \"user\", \"content\": llama_messages[-1]})\n",
"    message = ollama.chat(\n",
"        model=deepseek_model,\n",
"        options={\"num_predict\": 500},\n",
"        messages=messages\n",
"    )\n",
"    return message['message']['content']"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "641df7ac-625c-41fa-b780-3130eef93a85",
"metadata": {},
"outputs": [],
"source": [
"call_deepseek()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4b33b98e-8d17-45e8-b2a9-a070dc0a6780",
"metadata": {},
"outputs": [],
"source": [
"call_llama()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "47912582-51fe-401c-b4ad-12483068adea",
"metadata": {},
"outputs": [],
"source": [
"llama_messages = [\"Hi there\"]\n",
"deepseek_messages = [\"Hi\"]\n",
"\n",
"print(f\"Llama:\\n{llama_messages[0]}\\n\")\n",
"print(f\"Deepseek:\\n{deepseek_messages[0]}\\n\")\n",
"\n",
"for i in range(5):\n",
" llama_next = call_llama()\n",
" print(f\"Llama:\\n{llama_next}\\n\")\n",
" llama_messages.append(llama_next)\n",
" \n",
" deepseek_next = call_deepseek()\n",
" print(f\"Deepseek:\\n{deepseek_next}\\n\")\n",
" deepseek_messages.append(deepseek_next)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f3c41b0c-4358-4d84-a479-6409fa331119",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.12"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,148 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "d768c9b1-c5a7-417a-9fac-5fcbd6944fe6",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import anthropic\n",
"from IPython.display import Markdown, display, update_display\n",
"import ollama"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "80a4e740-75d0-4272-b02e-0b77b0a143ae",
"metadata": {},
"outputs": [],
"source": [
"system_message = \"You are an assistant that is great at telling jokes\"\n",
"user_prompt = \"Tell a light-hearted joke for an audience of Data Scientists\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f2ef28e5-5073-4065-b066-387181df063a",
"metadata": {},
"outputs": [],
"source": [
"prompts = [\n",
" {\"role\": \"system\", \"content\": system_message},\n",
" {\"role\": \"user\", \"content\": user_prompt}\n",
" ]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d54e910a-cdbf-49cb-9924-265d9845d622",
"metadata": {},
"outputs": [],
"source": [
"# Direct display without streaming\n",
"response = ollama.chat(\n",
" model=\"llama3.2\",\n",
" messages=prompts,\n",
" options={\n",
" \"temperature\": 0.7\n",
" }\n",
" \n",
" )\n",
"result = response['message']['content']\n",
"display(Markdown(result))\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "47dd7965-fdfc-4472-b2f6-c98f755964f1",
"metadata": {},
"outputs": [],
"source": [
"#This is with streaming \n",
"stream = ollama.chat(\n",
" model=\"llama3.2\",\n",
" messages=prompts,\n",
" stream=True\n",
" )\n",
"response = \"\"\n",
"display_handle = display(Markdown(\"\"), display_id=True)\n",
"for chunk in stream:\n",
" content = chunk.get('message', {}).get('content', '')\n",
" if content:\n",
" response += content.replace(\"```\", \"\")\n",
" update_display(Markdown(response), display_id=display_handle.display_id)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ef13e3ae-bde7-4e3a-9fcd-0a9bfd1caef0",
"metadata": {},
"outputs": [],
"source": [
"# Direct display without streaming, using deepseek-r1\n",
"response = ollama.chat(\n",
" model=\"deepseek-r1\",\n",
" messages=prompts\n",
" \n",
" )\n",
"result = response['message']['content']\n",
"display(Markdown(result))\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ddc4fe91-3d5b-4d45-83bf-f349597c672c",
"metadata": {},
"outputs": [],
"source": [
"# Direct display without streaming, using qwen3\n",
"response = ollama.chat(\n",
" model=\"qwen3\",\n",
" messages=prompts\n",
" \n",
" )\n",
"result = response['message']['content']\n",
"display(Markdown(result))\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2beb6731-41e3-42a4-a8d3-5f0ef644f2f3",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.12"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,347 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "05617f71-449f-42c5-905c-f080d61520ec",
"metadata": {},
"outputs": [],
"source": [
"import gradio as gr\n",
"def greet(name):\n",
" return \"Hello \" + name + \"!\"\n",
"def shout(name):\n",
" return name.upper()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c57765d7-5d69-4332-be71-2800296ca8ed",
"metadata": {},
"outputs": [],
"source": [
"#demo = gr.Interface(fn=shout, inputs=gr.Textbox(), outputs=gr.Textbox()) //this works too\n",
"# flagging_mode replaces the deprecated allow_flagging argument\n",
"# (keeps this cell consistent with the other Interface cells below)\n",
"demo = gr.Interface(fn=greet, inputs=\"textbox\", outputs=\"textbox\", flagging_mode=\"never\")\n",
"demo.launch()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "abbc237a-8da2-4993-b350-8f8a7d807242",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import requests\n",
"from bs4 import BeautifulSoup\n",
"from typing import List\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"import ollama"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9f021005-2a39-42ec-b671-b24babd0ef1a",
"metadata": {},
"outputs": [],
"source": [
"system_message = \"You are a helpful assistant\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d1677645-4166-4d77-8567-cae77120f1c3",
"metadata": {},
"outputs": [],
"source": [
"def message_llama(prompt):\n",
" messages = [\n",
" {\"role\": \"system\", \"content\": system_message},\n",
" {\"role\": \"user\", \"content\": prompt}\n",
" ]\n",
" completion = ollama.chat(\n",
" model='llama3.2',\n",
" messages=messages,\n",
" )\n",
" return completion['message']['content']"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "33295d15-f4d2-4588-9400-3c1e3c6492f2",
"metadata": {},
"outputs": [],
"source": [
"message_llama(\"what is the date today\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "38e2594e-6a70-4832-b601-60a6a0d4d671",
"metadata": {},
"outputs": [],
"source": [
"def stream_llama(prompt):\n",
" messages = [\n",
" {\"role\": \"system\", \"content\": system_message},\n",
" {\"role\": \"user\", \"content\": prompt}\n",
" ]\n",
" stream = ollama.chat(\n",
" model='llama3.2',\n",
" messages=messages,\n",
" stream=True\n",
" )\n",
" result = \"\"\n",
" for chunk in stream:\n",
" result += chunk['message']['content'] or \"\"\n",
" yield result\n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e0ebf588-3d69-4012-9719-23d11fbbf4f5",
"metadata": {},
"outputs": [],
"source": [
"def stream_deepseek(prompt):\n",
" messages = [\n",
" {\"role\": \"system\", \"content\": system_message},\n",
" {\"role\": \"user\", \"content\": prompt}\n",
" ]\n",
" stream = ollama.chat(\n",
" model='deepseek-r1',\n",
" messages=messages,\n",
" stream=True\n",
" )\n",
" result = \"\"\n",
" for chunk in stream:\n",
" result += chunk['message']['content'] or \"\"\n",
" yield result"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7db5aa24-b608-489a-ba26-1a4b627658e2",
"metadata": {},
"outputs": [],
"source": [
"def stream_qwen3(prompt):\n",
" messages = [\n",
" {\"role\": \"system\", \"content\": system_message},\n",
" {\"role\": \"user\", \"content\": prompt}\n",
" ]\n",
" stream = ollama.chat(\n",
" model='qwen3',\n",
" messages=messages,\n",
" stream=True\n",
" )\n",
" result = \"\"\n",
" for chunk in stream:\n",
" result += chunk['message']['content'] or \"\"\n",
" yield result"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d37b5df8-b281-4096-bdc7-5c6a1872cea7",
"metadata": {},
"outputs": [],
"source": [
"def stream_model(prompt, model):\n",
" if model==\"llama3.2\":\n",
" result = stream_llama(prompt)\n",
" elif model==\"deepseek-r1\":\n",
" result = stream_deepseek(prompt)\n",
" else:\n",
" raise ValueError(\"Unknown model\")\n",
" yield from result"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "eb408edc-6a83-4725-9fb9-1b95ff0c9ed0",
"metadata": {},
"outputs": [],
"source": [
"gr.Interface(fn=stream_model, inputs=[gr.Textbox(label=\"Your Message\"),gr.Dropdown([\"llama3.2\", \"deepseek-r1\"], label=\"Select model\", value=\"llama3.2\")], outputs=[gr.Markdown(label=\"Response\")],flagging_mode=\"never\").launch()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dc7c3aa0-693a-43a0-8f5b-b07c66bb6733",
"metadata": {},
"outputs": [],
"source": [
"gr.Interface(fn=stream_llama, inputs=[gr.Textbox(label=\"Your Message\")], outputs=[gr.Markdown(label=\"Response\")],flagging_mode=\"never\").launch()"
]
},
{
"cell_type": "code",
"execution_count": 38,
"id": "e45e9b56-5c2f-4b17-bbf4-5691ce35ff15",
"metadata": {},
"outputs": [],
"source": [
"class Website:\n",
" url: str\n",
" title: str\n",
" text: str\n",
"\n",
" def __init__(self, url):\n",
" self.url = url\n",
" response = requests.get(url)\n",
" self.body = response.content\n",
" soup = BeautifulSoup(self.body, 'html.parser')\n",
" self.title = soup.title.string if soup.title else \"No title found\"\n",
" for irrelevant in soup.body([\"script\", \"style\", \"img\", \"input\"]):\n",
" irrelevant.decompose()\n",
" self.text = soup.body.get_text(separator=\"\\n\", strip=True)\n",
"\n",
" def get_contents(self):\n",
" return f\"Webpage Title:\\n{self.title}\\nWebpage Contents:\\n{self.text}\\n\\n\""
]
},
{
"cell_type": "code",
"execution_count": 55,
"id": "f9fcf30e-09c7-4f90-8bf9-8cc588ede95c",
"metadata": {},
"outputs": [],
"source": [
"system_message = \"You are an assistant that analyzes the contents of a company website landing page \\\n",
"and creates a short brochure about the company for prospective customers, investors and recruits. Respond in markdown.\"\n",
"# For Fun\n",
"tone_description_fun = \"\"\"\n",
" The tone should be:\n",
" - **Fun and Playful:** Inject humor, use lighthearted language, and maintain an upbeat vibe.\n",
" - **Energetic:** Use active voice, strong verbs, and occasional exclamation points.\n",
" - **Approachable:** Write as if speaking to a friend, using slightly informal language and contractions.\n",
" - **Creative:** Think outside the box for descriptions and calls to action.\n",
" - Avoid sounding childish or overly silly.\n",
"\"\"\"\n",
"\n",
"# For Aggression\n",
"tone_description_aggression = \"\"\"\n",
" The tone should be:\n",
" - **Bold and Assertive:** Use strong, direct language that conveys confidence and power.\n",
" - **Challenging:** Pose questions that make the reader reconsider their current solutions.\n",
" - **Urgent:** Imply a need for immediate action and emphasize competitive advantages.\n",
" - **Direct and Punchy:** Employ short, impactful sentences and strong calls to action.\n",
" - **Dominant:** Position the company as a leader and a force to be reckoned with.\n",
" - Avoid being rude, offensive, or overly hostile. Focus on competitive intensity.\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": 66,
"id": "83dd8aec-f74f-452b-90cc-3ad5bc903037",
"metadata": {},
"outputs": [],
"source": [
"def stream_brochure(company_name, url, model, tone):\n",
" prompt = f\"Please generate a company brochure for {company_name} that embodies the following tone and style guidelines: {tone}. Here is their landing page:\\n\"\n",
" prompt += Website(url).get_contents()\n",
" if model==\"llama\":\n",
" result = stream_llama(prompt)\n",
" elif model==\"deepseek\":\n",
" result = stream_deepseek(prompt)\n",
" else:\n",
" raise ValueError(\"Unknown model\")\n",
" yield from result"
]
},
{
"cell_type": "code",
"execution_count": 67,
"id": "ef1a246f-a3f7-457e-a85c-2076b407f52a",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"* Running on local URL: http://127.0.0.1:7890\n",
"* To create a public link, set `share=True` in `launch()`.\n"
]
},
{
"data": {
"text/html": [
"<div><iframe src=\"http://127.0.0.1:7890/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": []
},
"execution_count": 67,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"view = gr.Interface(\n",
"    fn=stream_brochure,\n",
"    inputs=[\n",
"        gr.Textbox(label=\"Company name:\"),\n",
"        gr.Textbox(label=\"Landing page URL including http:// or https://\"),\n",
"        gr.Dropdown([\"llama\", \"deepseek\"], label=\"Select model\"),\n",
"        # Use (label, value) choices so the tone TEXT reaches the model —\n",
"        # previously the literal string \"tone_description_fun\" was sent\n",
"        # as the tone instead of the description defined above.\n",
"        gr.Dropdown([(\"Fun\", tone_description_fun), (\"Aggressive\", tone_description_aggression)], label=\"Select tone\")],\n",
"    outputs=[gr.Markdown(label=\"Brochure:\")],\n",
"    flagging_mode=\"never\"\n",
")\n",
"view.launch()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0659a1dc-a00b-4cbf-b5ed-d6661fbb57f2",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.12"
}
},
"nbformat": 4,
"nbformat_minor": 5
}