added day 2 ollama option

This commit is contained in:
Kartik Sharma
2025-06-12 23:12:51 +05:30
parent 3961de5391
commit 2831534b29

View File

@@ -0,0 +1,344 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 13,
"id": "0abc4dde-8396-4867-bf48-a534c7ae89c0",
"metadata": {
"editable": true,
"slideshow": {
"slide_type": ""
},
"tags": []
},
"outputs": [],
"source": [
"# imports\n",
"\n",
"import os\n",
"from dotenv import load_dotenv\n",
"from openai import OpenAI\n",
"from IPython.display import Markdown, display"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "92e9e303-9c2b-4e4b-a210-829a31d975e5",
"metadata": {
"editable": true,
"slideshow": {
"slide_type": ""
},
"tags": []
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"API key found and looks good so far!\n"
]
}
],
"source": [
"# Load environment variables in a file called .env\n",
"\n",
"load_dotenv()\n",
"api_key = os.getenv('OPENAI_API_KEY')\n",
"\n",
"# Check the key\n",
"\n",
"if not api_key:\n",
" print(\"No API key was found - please head over to the troubleshooting notebook in this folder to identify & fix!\")\n",
"elif not api_key.startswith(\"sk-proj-\"):\n",
" print(\"An API key was found, but it doesn't start sk-proj-; please check you're using the right key - see troubleshooting notebook\")\n",
"elif api_key.strip() != api_key:\n",
" print(\"An API key was found, but it looks like it might have space or tab characters at the start or end - please remove them - see troubleshooting notebook\")\n",
"else:\n",
" print(\"API key found and looks good so far!\")"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "7c8da66c-96b6-45fc-8f5d-e8314fcc9352",
"metadata": {
"editable": true,
"slideshow": {
"slide_type": ""
},
"tags": []
},
"outputs": [],
"source": [
    "# Create the OpenAI client; it picks up OPENAI_API_KEY from the environment\n",
    "openai = OpenAI()"
]
},
{
"cell_type": "code",
"execution_count": 50,
"id": "3cb23b83-a4d2-474d-9ea3-24a5530d9768",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest ⠋ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest ⠙ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest ⠹ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest ⠸ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest ⠼ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest ⠴ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest ⠦ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest ⠧ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest ⠇ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest ⠏ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest ⠋ \u001b[K\u001b[?25h\u001b[?2026l\u001b[?2026h\u001b[?25l\u001b[1Gpulling manifest \u001b[K\n",
"pulling dde5aa3fc5ff: 100% ▕██████████████████▏ 2.0 GB \u001b[K\n",
"pulling 966de95ca8a6: 100% ▕██████████████████▏ 1.4 KB \u001b[K\n",
"pulling fcc5a6bec9da: 100% ▕██████████████████▏ 7.7 KB \u001b[K\n",
"pulling a70ff7e570d9: 100% ▕██████████████████▏ 6.0 KB \u001b[K\n",
"pulling 56bb8bd477a5: 100% ▕██████████████████▏ 96 B \u001b[K\n",
"pulling 34bb5ab01051: 100% ▕██████████████████▏ 561 B \u001b[K\n",
"verifying sha256 digest \u001b[K\n",
"writing manifest \u001b[K\n",
"success \u001b[K\u001b[?25h\u001b[?2026l\n"
]
}
],
"source": [
    "# Let's just make sure the model is loaded\n",
    "# (shells out to the Ollama CLI - requires a local Ollama install)\n",
    "!ollama pull llama3.2\n",
    "# NOTE(review): import kept next to the pull so this cell is self-contained;\n",
    "# conventionally it would live in the imports cell at the top of the notebook.\n",
    "import ollama\n"
]
},
{
"cell_type": "code",
"execution_count": 52,
"id": "30681517-8acd-49f0-b3c8-ab66854e0e4e",
"metadata": {
"editable": true,
"slideshow": {
"slide_type": ""
},
"tags": []
},
"outputs": [],
"source": [
    "# System prompt - sets the assistant's role and the exact markdown\n",
    "# structure + shopping-list guidelines every recipe response must follow\n",
    "SYSTEM_PROMPT = \"\"\"You are a helpful cooking assistant that provides ingredient lists for recipes.\n",
    "Format your response as clean markdown with this structure:\n",
    "\n",
    "# [Dish Name]\n",
    "**Serves:** [number] people \n",
    "**Cook Time:** [estimated time]\n",
    "\n",
    "## Shopping List\n",
    "- [ ] [amount] [unit] [ingredient]\n",
    "- [ ] [amount] [unit] [ingredient]\n",
    "\n",
    "Guidelines:\n",
    "- Use common grocery store measurements (cups, lbs, oz, pieces, cans, etc.)\n",
    "- Round to practical shopping amounts (1.5 lbs instead of 1.47 lbs)\n",
    "- Group similar items when logical (all spices together)\n",
    "- Include pantry staples only if they're essential (salt, oil, etc.)\n",
    "- Assume basic seasonings are available unless recipe-specific\n",
    "- For produce, specify size when important (large onion, medium tomatoes)\n",
    "- Keep optional items at the end of similar item groups or end of the list\"\"\""
]
},
{
"cell_type": "code",
"execution_count": 59,
"id": "0d3abbd9-d0eb-434a-b382-f28d8c7ca9ea",
"metadata": {
"editable": true,
"slideshow": {
"slide_type": ""
},
"tags": []
},
"outputs": [],
"source": [
"def get_recipe_openai(dish_name: str, num_people: int):\n",
" \"\"\"Get scaled recipe ingredients using system and user prompts\"\"\"\n",
"\n",
" user_prompt = f\"Give me the ingredients needed to make {dish_name} for {num_people} people.\"\n",
" \n",
" try:\n",
" response = openai.chat.completions.create(\n",
" model=\"gpt-4o-mini\",\n",
" messages=[\n",
" {\"role\": \"system\", \"content\": SYSTEM_PROMPT},\n",
" {\"role\": \"user\", \"content\": user_prompt}\n",
" ],\n",
" max_tokens=400\n",
" )\n",
" \n",
" return response.choices[0].message.content\n",
" \n",
" except Exception as e:\n",
" return f\"❌ Error: Failed to get recipe - {str(e)}\""
]
},
{
"cell_type": "code",
"execution_count": 60,
"id": "b7a6f70b-91ab-414f-b6d7-265c0d4dcd74",
"metadata": {},
"outputs": [],
"source": [
"OLLAMA_MODEL = \"llama3.2\""
]
},
{
"cell_type": "code",
"execution_count": 61,
"id": "badf702d-f1c1-4294-9aa2-63e5737ae8df",
"metadata": {},
"outputs": [],
"source": [
"def get_recipe_ollama(dish_name: str, num_people: int):\n",
" \"\"\"Get recipe using Ollama API\"\"\"\n",
" user_prompt = f\"Give me the ingredients needed to make {dish_name} for {num_people} people.\"\n",
" \n",
" messages = [\n",
" {\"role\": \"system\", \"content\": SYSTEM_PROMPT},\n",
" {\"role\": \"user\", \"content\": user_prompt}\n",
" ]\n",
" \n",
" try:\n",
" response = ollama.chat(model=OLLAMA_MODEL, messages=messages)\n",
" return response['message']['content']\n",
" except Exception as e:\n",
" return f\"❌ Ollama Error: {str(e)}\""
]
},
{
"cell_type": "code",
"execution_count": 62,
"id": "c2281db6-a4c7-4749-9aa9-d9eaf461dd7e",
"metadata": {
"editable": true,
"slideshow": {
"slide_type": ""
},
"tags": []
},
"outputs": [],
"source": [
"def print_shopping_list(recipe_markdown):\n",
" \"\"\"Print the markdown response\"\"\"\n",
" display(Markdown(recipe_markdown))"
]
},
{
"cell_type": "code",
"execution_count": 63,
"id": "dd1c13fb-0836-423d-aa9a-2ff433e5f916",
"metadata": {
"editable": true,
"slideshow": {
"slide_type": ""
},
"tags": []
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"🍳 Recipe Scaler & Grocery List Maker\n",
"========================================\n"
]
},
{
"name": "stdin",
"output_type": "stream",
"text": [
"\n",
"Choose AI service (1 for OpenAI, 2 for Ollama): 1\n",
"What dish do you want to make? macaroni pasta\n",
"How many people? 5\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"🔍 Getting recipe for macaroni pasta...\n",
"Using OpenAI API...\n"
]
},
{
"data": {
"text/markdown": [
"# Macaroni Pasta\n",
"**Serves:** 5 people \n",
"**Cook Time:** 15 minutes\n",
"\n",
"## Shopping List\n",
"- [ ] 1 lb macaroni pasta\n",
"- [ ] 4 cups water (for boiling)\n",
"- [ ] 2 tablespoons salt (for boiling water)\n",
"\n",
"### Optional Ingredients (for serving)\n",
"- [ ] 1 cup shredded cheese\n",
"- [ ] 1/2 cup milk\n",
"- [ ] 1/4 cup butter\n",
"- [ ] Black pepper to taste"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"print(\"🍳 Recipe Scaler & Grocery List Maker\")\n",
"print(\"=\" * 40)\n",
" \n",
"ai_service_choice = input(\"\\nChoose AI service (1 for OpenAI, 2 for Ollama): \").strip()\n",
"\n",
"dish = input(\"What dish do you want to make? \")\n",
"num_people = int(input(\"How many people? \"))\n",
" \n",
"print(f\"\\n🔍 Getting recipe for {dish}...\")\n",
" \n",
"# Get and display recipe\n",
"if ai_service_choice == '1':\n",
" print(\"Using OpenAI API...\")\n",
" recipe_markdown = get_recipe_openai(dish, num_people)\n",
"else:\n",
" print(\"Using Ollama (local)...\")\n",
" recipe_markdown = get_recipe_ollama(dish, num_people)\n",
"\n",
"print_shopping_list(recipe_markdown)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "90338d50-679f-4388-933c-c77b1d0da5a1",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}