"""Adaptive Wellness Companion.

Pairs conversation, tailored wellness micro-plans (via tool calling),
DALL-E imagery, and TTS audio into a single Gradio interface.
Extracted from community-contributions/abdoul/week_two_exercise.ipynb.
"""

import base64
import json
import os
from io import BytesIO
from pathlib import Path  # kept from the notebook's import cell; not used below
from tempfile import NamedTemporaryFile

import gradio as gr
from dotenv import load_dotenv
from openai import OpenAI
from PIL import Image

# --- Client configuration --------------------------------------------------
# Fail fast with a clear message rather than with an opaque auth error later.
load_dotenv()
if not os.getenv("OPENAI_API_KEY"):
    raise RuntimeError("Set OPENAI_API_KEY before running the wellness companion.")

client = OpenAI()

# --- Model constants and system persona ------------------------------------
MODEL = "gpt-4o-mini"
IMAGE_MODEL = "dall-e-3"
VOICE_MODEL = "gpt-4o-mini-tts"

system_message = (
    "You are an upbeat adaptive wellness coach. "
    "Blend evidence-backed guidance with empathy, tailor plans "
    "to the user's mood, energy, and stress, and explain reasoning concisely."
)

# Tool schema advertised to the model; mirrors get_wellness_plan()'s signature.
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_wellness_plan",
            "description": "Build a wellness micro-plan keyed to the user's current state.",
            "parameters": {
                "type": "object",
                "properties": {
                    "mood": {"type": "string", "description": "How the user currently feels."},
                    "energy": {"type": "string", "description": "Low, medium, or high energy."},
                    "stress": {"type": "string", "description": "Stress intensity words like calm or overwhelmed."},
                    "focus_goal": {"type": "string", "description": "What the user needs help focusing on right now."},
                },
                "required": ["mood", "energy", "stress", "focus_goal"],
            },
        },
    }
]


def get_wellness_plan(mood: str, energy: str, stress: str, focus_goal: str) -> str:
    """Build a wellness micro-plan keyed to the user's current state.

    Returns a JSON string so the result can be handed straight back to the
    model as a tool-response message.
    """
    energy = energy.lower()
    stress = stress.lower()

    # Neutral defaults; the branches below specialise them per state.
    palette = "calming watercolor"
    movement = "gentle mobility flow"
    breathing = "box breathing (4-4-4-4)"
    journaling = "List three small wins and one supportive next step."

    if "high" in energy:
        movement = "energizing interval walk with posture resets"
        breathing = "alternate nostril breathing to balance focus"
    elif "low" in energy:
        movement = "floor-based decompression stretches"

    # Substring match so "overwhelmed"/"anxious" and friends all hit.
    if "over" in stress or "anx" in stress:
        palette = "soothing pastel sanctuary"
        breathing = "4-7-8 breathing to downshift the nervous system"
    elif "calm" in stress:
        palette = "sunlit studio with optimistic accents"

    # Fall back to a generic goal when the model passes an empty string.
    focus_goal = focus_goal.strip() or "refocus"

    plan = {
        "headline": "Adaptive wellness reset",
        "visual_theme": f"{palette} inspired by {mood}",
        "movement": movement,
        "breathing": breathing,
        "reflection": f"Prompt: {journaling}",
        "focus_affirmation": f"Affirmation: You have the capacity to handle {focus_goal} with grace.",
    }
    return json.dumps(plan)


# Dispatch table used when the model issues tool calls.
tool_registry = {"get_wellness_plan": get_wellness_plan}


def talker(message: str) -> str | None:
    """Synthesize *message* to speech; return a temp MP3 path, or None on failure.

    The temp file uses delete=False because Gradio reads the file by path
    after this function returns; the OS temp-dir cleanup reclaims it later.
    """
    if not message:
        return None
    try:
        with client.audio.speech.with_streaming_response.create(
            model=VOICE_MODEL,
            voice="alloy",
            input=message,
        ) as response:
            temp_file = NamedTemporaryFile(suffix=".mp3", delete=False)
            temp_path = temp_file.name
            temp_file.close()
            response.stream_to_file(temp_path)
            return temp_path
    except Exception as exc:
        # Best-effort feature: the app still works without audio.
        print(f"[warn] audio synthesis unavailable: {exc}")
        return None


def artist(theme: str) -> Image.Image | None:
    """Generate a DALL-E poster for *theme*; return a PIL image, or None on failure."""
    if not theme:
        return None
    try:
        prompt = (
            f"Immersive poster celebrating a wellness ritual, {theme}, "
            "with hopeful lighting and inclusive representation."
        )
        response = client.images.generate(
            model=IMAGE_MODEL,
            prompt=prompt,
            size="1024x1024",
            response_format="b64_json",  # base64 payload instead of a URL
        )
        image_base64 = response.data[0].b64_json
        image_data = base64.b64decode(image_base64)
        return Image.open(BytesIO(image_data))
    except Exception as exc:
        # Best-effort feature: the app still works without imagery.
        print(f"[warn] image generation unavailable: {exc}")
        return None


def handle_tool_calls_and_theme(message) -> tuple[list[dict], str | None]:
    """Execute the model's tool calls; return (tool messages, first visual theme).

    FIX: the Chat Completions API requires a tool message for EVERY
    tool_call_id in the assistant message. The original skipped unknown tool
    names entirely, which made the follow-up completion request fail; unknown
    names now get an explicit error payload instead.
    """
    responses = []
    theme = None
    for tool_call in message.tool_calls or []:
        name = tool_call.function.name
        if name not in tool_registry:
            responses.append(
                {
                    "role": "tool",
                    "tool_call_id": tool_call.id,
                    "content": json.dumps({"error": f"Unknown tool: {name}"}),
                }
            )
            continue
        arguments = json.loads(tool_call.function.arguments)
        result = tool_registry[name](**arguments)
        responses.append(
            {"role": "tool", "tool_call_id": tool_call.id, "content": result}
        )
        # Remember the first visual theme a plan produces; it seeds artist().
        payload = json.loads(result)
        theme = theme or payload.get("visual_theme")
    return responses, theme


def chat(history: list[dict]) -> tuple[list[dict], str | None, Image.Image | None]:
    """Run one assistant turn over *history* (messages-format dicts).

    Loops while the model keeps requesting tools, then returns the updated
    history plus optional TTS audio path and generated image.
    """
    conversation = [{"role": item["role"], "content": item["content"]} for item in history]
    messages = [{"role": "system", "content": system_message}] + conversation
    response = client.chat.completions.create(model=MODEL, messages=messages, tools=tools)
    theme = None

    while response.choices[0].finish_reason == "tool_calls":
        tool_message = response.choices[0].message
        tool_responses, candidate_theme = handle_tool_calls_and_theme(tool_message)
        if candidate_theme:
            theme = candidate_theme
        # The assistant message carrying the tool calls must precede the
        # tool responses in the conversation we send back.
        messages.append(tool_message)
        messages.extend(tool_responses)
        response = client.chat.completions.create(model=MODEL, messages=messages, tools=tools)

    reply = response.choices[0].message.content
    updated_history = history + [{"role": "assistant", "content": reply}]
    audio_path = talker(reply)
    image = artist(theme)  # FIX: removed leftover debug print(image)
    return updated_history, audio_path, image


def put_message_in_chatbot(message: str, history: list[dict]) -> tuple[str, list[dict]]:
    """Append the user's message to history and clear the input textbox."""
    return "", history + [{"role": "user", "content": message}]


# --- Gradio Blocks UI -------------------------------------------------------
with gr.Blocks(title="Adaptive Wellness Companion") as wellness_ui:
    gr.Markdown("### Tell me how you are doing and I'll craft a micro-plan.")
    with gr.Row():
        chatbot = gr.Chatbot(height=420, type="messages", label="Conversation")
        image_output = gr.Image(height=420, label="Visual Inspiration")
    audio_output = gr.Audio(label="Coach Audio", autoplay=True)
    mood_input = gr.Textbox(label="Share your update", placeholder="e.g. Feeling drained after meetings")

    # Two-stage submit: echo the user's message immediately, then run the turn.
    mood_input.submit(
        fn=put_message_in_chatbot,
        inputs=[mood_input, chatbot],
        outputs=[mood_input, chatbot],
    ).then(
        fn=chat,
        inputs=chatbot,
        outputs=[chatbot, audio_output, image_output],
    )

wellness_ui.queue()

# FIX: guard the launch so importing this module (e.g. from a test or another
# app) does not start a server; notebook users call launch() in the final cell.
if __name__ == "__main__":
    wellness_ui.launch(inline=True, share=False, prevent_thread_lock=True)