        <p class="protip-text">💡 Pro tip: Ask specific questions like "How do I create a button component?" for the best results!</p>
        </div>
"""
)
@@ -362,14 +372,15 @@ with gr.Blocks(title="Figma Onboarding Assistant", theme=gr.themes.Soft(),css=cu
# Example questions
- gr.Markdown(
+ gr.HTML(
"""
-        <div>
-            <h3>🚀 Quick Start Questions</h3>
-            <p>Click any question below to get started instantly!</p>
-        </div>
+        <div class="quickstart-container">
+            <h3 class="quickstart-title">🚀 Quick Start Questions</h3>
+            <p class="quickstart-subtitle">Click any question below to get started instantly!</p>
+        </div>
"""
)
+
with gr.Row():
example_btns = [
gr.Button(
@@ -396,7 +407,6 @@ with gr.Blocks(title="Figma Onboarding Assistant", theme=gr.themes.Soft(),css=cu
# Your components with simple styling
chatbot = gr.Chatbot(
- # value=[],
type="messages",
height=400,
placeholder="Ask me anything about Figma! For example: 'How do I create a component?' or 'What are frames in Figma?'",
@@ -409,20 +419,6 @@ with gr.Blocks(title="Figma Onboarding Assistant", theme=gr.themes.Soft(),css=cu
elem_classes=["styled-audio"]
)
-
-
-
-
-
-
-
-
-
-
-
- last_response = gr.State("")
-
-
last_response = gr.State("")
current_model = gr.State("OpenAI (GPT-3.5)")
@@ -485,9 +481,4 @@ with gr.Blocks(title="Figma Onboarding Assistant", theme=gr.themes.Soft(),css=cu
)
# Launch the app
-# if __name__ == "__main__":
- demo.launch(
- share=True,
- # server_name="0.0.0.0",
- # server_port=7860
- )
\ No newline at end of file
+demo.launch(share=True)
\ No newline at end of file
diff --git a/week2/community-contributions/day_5_onboarding_assistance.ipynb b/week2/community-contributions/day_5_onboarding_assistance.ipynb
new file mode 100644
index 0000000..943d69c
--- /dev/null
+++ b/week2/community-contributions/day_5_onboarding_assistance.ipynb
@@ -0,0 +1,526 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "dc49e5ae",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from openai import OpenAI\n",
+ "from dotenv import load_dotenv\n",
+ "import os\n",
+ "load_dotenv()\n",
+ "import gradio as gr\n",
+    "from io import BytesIO\n",
+ "import google.generativeai\n",
+ "import anthropic\n",
+ "\n",
+ "client = OpenAI(api_key=os.getenv(\"OPENAI_API_KEY\"))\n",
+ "\n",
+ "# Configure Gemini\n",
+ "google.generativeai.configure(api_key=os.getenv(\"GOOGLE_API_KEY\"))\n",
+ "\n",
+ "# Configure Claude\n",
+ "claude = anthropic.Anthropic(api_key=os.getenv(\"ANTHROPIC_API_KEY\"))\n",
+ "openAI_model = \"gpt-3.5-turbo\"\n",
+ "gemini_model = \"gemini-2.0-flash\"\n",
+ "claude_model = \"claude-sonnet-4-20250514\"\n",
+ "openai_audio_model = \"tts-1\"\n",
+ "\n",
+ "# Figma onboarding knowledge base\n",
+ "FIGMA_KNOWLEDGE = \"\"\"\n",
+ "You are a helpful Figma onboarding assistant. You help new users learn Figma's core features and workflows.\n",
+ "\n",
+ "Key Figma concepts to help users with:\n",
+ "- Interface overview (toolbar, layers panel, properties panel)\n",
+ "- Creating and editing frames\n",
+ "- Working with shapes, text, and components\n",
+ "- Using the pen tool for custom shapes\n",
+ "- Auto Layout for responsive designs\n",
+ "- Components and variants\n",
+ "- Prototyping and interactions\n",
+ "- Collaboration features\n",
+ "- Design systems and libraries\n",
+ "- Exporting assets\n",
+ "- Keyboard shortcuts\n",
+ "\n",
+ "Always provide clear, step-by-step instructions and mention relevant keyboard shortcuts when applicable.\n",
+ "\"\"\"\n",
+ "\n",
+    "prompts = {\n",
+    "    \"Charlie\": FIGMA_KNOWLEDGE\n",
+    "}\n",
+ "\n",
+ "def truncate_for_tts(text, max_length=4000):\n",
+ " \"\"\"Truncate text for TTS while preserving complete sentences\"\"\"\n",
+ " if len(text) <= max_length:\n",
+ " return text\n",
+ " \n",
+ " # Try to truncate at sentence boundaries\n",
+ " sentences = text.split('. ')\n",
+ " truncated = \"\"\n",
+ " \n",
+ " for sentence in sentences:\n",
+ " if len(truncated + sentence + '. ') <= max_length:\n",
+ " truncated += sentence + '. '\n",
+ " else:\n",
+ " break\n",
+ " \n",
+ " # If we couldn't fit any complete sentences, just truncate hard\n",
+ " if not truncated.strip():\n",
+ " truncated = text[:max_length-10] + \"...\"\n",
+ " \n",
+ " return truncated.strip()\n",
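+    "\n",
+    "# Illustrative behavior of the sentence-boundary truncation (values are hypothetical):\n",
+    "# truncate_for_tts(\"one. two. three.\", max_length=10) -> \"one. two.\"\n",
+    "# The sentence that would overflow the limit is dropped at the '. ' boundary.\n",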
+ "\n",
+ "def talker_openai(message):\n",
+ " \"\"\"Generate audio from text using OpenAI TTS\"\"\"\n",
+ " try:\n",
+ " # Truncate message for TTS\n",
+ " truncated_message = truncate_for_tts(message)\n",
+ " \n",
+ " response = client.audio.speech.create(\n",
+    "            model=openai_audio_model,\n",
+ " voice=\"onyx\",\n",
+ " input=truncated_message\n",
+ " )\n",
+ "\n",
+ " audio_stream = BytesIO(response.content)\n",
+ " output_filename = \"output_audio_openai.mp3\"\n",
+ " with open(output_filename, \"wb\") as f:\n",
+ " f.write(audio_stream.read())\n",
+ "\n",
+ " return output_filename\n",
+ " except Exception as e:\n",
+ " print(f\"Error generating audio with OpenAI: {str(e)}\")\n",
+ " return None\n",
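+    "\n",
+    "# Note: the MP3 bytes are written to a local file because gr.Audio can play a filepath;\n",
+    "# each call overwrites output_audio_openai.mp3 with the latest reply.\n",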
+ "\n",
+ "def talker(message, model_choice):\n",
+    "    \"\"\"Generate audio from text; model_choice is accepted for future routing, but TTS currently always uses OpenAI\"\"\"\n",
+ " return talker_openai(message)\n",
+ "\n",
+ "def get_figma_help_openai(user_question, chat_history):\n",
+ " \"\"\"Get Figma onboarding assistance using OpenAI\"\"\"\n",
+ " try:\n",
+ " messages = [\n",
+ " {\"role\": \"system\", \"content\": FIGMA_KNOWLEDGE}\n",
+ " ]\n",
+ " \n",
+    "        # Chat history is already in messages format; copy the user/assistant turns\n",
+ " for msg in chat_history:\n",
+ " if msg[\"role\"] == \"user\":\n",
+ " messages.append({\"role\": \"user\", \"content\": msg[\"content\"]})\n",
+ " elif msg[\"role\"] == \"assistant\":\n",
+ " messages.append({\"role\": \"assistant\", \"content\": msg[\"content\"]})\n",
+ " \n",
+ " messages.append({\"role\": \"user\", \"content\": user_question})\n",
+ " \n",
+ " response = client.chat.completions.create(\n",
+ " model=openAI_model,\n",
+ " messages=messages,\n",
+ " max_tokens=500,\n",
+ " temperature=0.7\n",
+ " )\n",
+ " return response.choices[0].message.content\n",
+ " \n",
+ " except Exception as e:\n",
+ " return f\"Sorry, I encountered an error with OpenAI: {str(e)}\"\n",
+ "\n",
+ "def get_figma_help_gemini(user_question, chat_history):\n",
+ " \"\"\"Get Figma onboarding assistance using Gemini\"\"\"\n",
+ " try:\n",
+ " gemini = google.generativeai.GenerativeModel(\n",
+ " model_name=gemini_model,\n",
+ " system_instruction=FIGMA_KNOWLEDGE,\n",
+ " )\n",
+ " \n",
+ " # Build conversation context from messages format\n",
+ " conversation_context = \"\"\n",
+ " for msg in chat_history:\n",
+ " if msg[\"role\"] == \"user\":\n",
+ " conversation_context += f\"User: {msg['content']}\\n\"\n",
+ " elif msg[\"role\"] == \"assistant\":\n",
+ " conversation_context += f\"Assistant: {msg['content']}\\n\\n\"\n",
+ " \n",
+ " message = conversation_context + f\"User: {user_question}\"\n",
+ " response = gemini.generate_content(message)\n",
+ " reply = response.text\n",
+ " return reply\n",
+ " \n",
+ " except Exception as e:\n",
+ " return f\"Sorry, I encountered an error with Gemini: {str(e)}\"\n",
+ "\n",
+ "def get_figma_help_claude(user_question, chat_history):\n",
+ " \"\"\"Get Figma onboarding assistance using Claude\"\"\"\n",
+ " try:\n",
+ " # Convert messages format to Claude format\n",
+ " claude_messages = []\n",
+ " for msg in chat_history:\n",
+ " if msg[\"role\"] == \"user\":\n",
+ " claude_messages.append({\"role\": \"user\", \"content\": msg[\"content\"]})\n",
+ " elif msg[\"role\"] == \"assistant\":\n",
+ " claude_messages.append({\"role\": \"assistant\", \"content\": msg[\"content\"]})\n",
+ " \n",
+ " # Add the current question\n",
+ " claude_messages.append({\"role\": \"user\", \"content\": user_question})\n",
+ " \n",
+ " response = claude.messages.create(\n",
+ " model=claude_model,\n",
+ " max_tokens=500,\n",
+ " temperature=0.7,\n",
+    "            system=prompts[\"Charlie\"],\n",
+ " messages=claude_messages,\n",
+ " )\n",
+ " reply = response.content[0].text\n",
+ " return reply\n",
+ " \n",
+ " except Exception as e:\n",
+ " return f\"Sorry, I encountered an error with Claude: {str(e)}\"\n",
+ "\n",
+ "def clear_chat():\n",
+ " \"\"\"Clear the chat history\"\"\"\n",
+ " return [], \"\", None\n",
+ "\n",
+ "def get_figma_help(user_question, chat_history, model_choice):\n",
+ " \"\"\"Get Figma onboarding assistance using selected model\"\"\"\n",
+ " if model_choice == \"OpenAI (GPT-3.5)\":\n",
+ " return get_figma_help_openai(user_question, chat_history)\n",
+ " elif model_choice == \"Google Gemini (2.0 Flash)\":\n",
+ " return get_figma_help_gemini(user_question, chat_history)\n",
+ " elif model_choice == \"Claude (Sonnet 4)\":\n",
+ " return get_figma_help_claude(user_question, chat_history)\n",
+ " else:\n",
+ " return \"Please select a valid model.\"\n",
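+    "\n",
+    "# The dispatch strings must match the dropdown labels exactly, e.g.\n",
+    "# get_figma_help(\"How do I group layers?\", [], \"Claude (Sonnet 4)\") routes to Claude.\n",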
+ "\n",
+ "custom_css = \"\"\"\n",
+ "/* Chat area styling */\n",
+ ".styled-chat {\n",
+ " border-radius: 15px !important;\n",
+ " box-shadow: 0 4px 12px var(--shadow-color) !important;\n",
+ " border: 1px solid var(--border-color) !important;\n",
+ " padding: 10px;\n",
+ "}\n",
+ "\n",
+ "/* Audio player styling */\n",
+ ".styled-audio {\n",
+ " border-radius: 15px !important;\n",
+ " box-shadow: 0 4px 12px var(--shadow-color) !important;\n",
+ " border: 10px solid var(--block-background-fill) !important;\n",
+ " padding: 10px;\n",
+ " background-color: var(--background-fill-secondary) !important;\n",
+ "}\n",
+ "\n",
+ "/* Header styling */\n",
+ ".header-container {\n",
+ " text-align: center;\n",
+ " padding: 20px;\n",
+ " background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);\n",
+ " border-radius: 15px;\n",
+ " margin-bottom: 20px;\n",
+ "}\n",
+ "\n",
+ ".header-title {\n",
+ " color: white;\n",
+ " margin: 0;\n",
+ " font-size: 2.5em;\n",
+ "}\n",
+ "\n",
+ ".header-subtitle {\n",
+ " color: #f0f0f0;\n",
+ " margin: 10px 0 0 0;\n",
+ " font-size: 1.2em;\n",
+ "}\n",
+ "\n",
+ "/* Features section styling */\n",
+ ".features-container {\n",
+ " background: #f8f9fa;\n",
+ " padding: 20px;\n",
+ " border-radius: 10px;\n",
+ " border-left: 4px solid #667eea;\n",
+ "}\n",
+ "\n",
+ ".features-title {\n",
+ " color: #333;\n",
+ " margin-top: 0;\n",
+ "}\n",
+ "\n",
+ ".features-grid {\n",
+ " display: grid;\n",
+ " grid-template-columns: 1fr 1fr;\n",
+ " gap: 15px;\n",
+ " margin-top: 15px;\n",
+ "}\n",
+ "\n",
+ ".feature-item {\n",
+ " color: #333;\n",
+ " margin: 10px 0;\n",
+ "}\n",
+ "\n",
+ ".feature-title {\n",
+ " color: #667eea;\n",
+ "}\n",
+ "\n",
+ ".feature-description {\n",
+ " color: #666;\n",
+ "}\n",
+ "\n",
+ "/* Pro tip styling */\n",
+ ".protip-container {\n",
+ " text-align: center;\n",
+ " margin-top: 20px;\n",
+ " padding: 15px;\n",
+ " background: #e8f4f8;\n",
+ " border-radius: 8px;\n",
+ "}\n",
+ "\n",
+ ".protip-text {\n",
+ " margin: 0;\n",
+ " color: #2c5aa0 !important;\n",
+ " font-weight: 500;\n",
+ "}\n",
+ "\n",
+ "/* Quick start questions styling */\n",
+ ".quickstart-container {\n",
+ " background: linear-gradient(135deg, #4facfe 0%, #00f2fe 100%);\n",
+ " padding: 15px 20px;\n",
+ " border-radius: 10px;\n",
+ " margin: 20px 0;\n",
+ "}\n",
+ "\n",
+ ".quickstart-title {\n",
+ " color: white !important;\n",
+ " margin: 0;\n",
+ " font-size: 1.3em;\n",
+ " text-align: center;\n",
+ "}\n",
+ "\n",
+ ".quickstart-subtitle {\n",
+ " color: #f0f8ff !important;\n",
+ " margin: 5px 0 0 0;\n",
+ " text-align: center;\n",
+ " font-size: 0.9em;\n",
+ "}\n",
+ "\"\"\"\n",
+ "\n",
+ "# Create Gradio interface\n",
+ "with gr.Blocks(title=\"Figma Onboarding Assistant\", theme=gr.themes.Soft(), css=custom_css) as demo:\n",
+ " gr.HTML(\n",
+ " \"\"\"\n",
+    "    <div class=\"header-container\">\n",
+    "        <h1 class=\"header-title\">🎨 Figma Onboarding Assistant</h1>\n",
+    "    </div>\n",
+    "\n",
+    "    <div class=\"features-container\">\n",
+    "        <h3 class=\"features-title\">✨ What I can help you with:</h3>\n",
+    "        <div class=\"features-grid\">\n",
+    "            <div>\n",
+    "                <p class=\"feature-item\"><strong class=\"feature-title\">🚀 Getting Started</strong><br>\n",
+    "                <span class=\"feature-description\">Interface overview, basic navigation</span></p>\n",
+    "                <p class=\"feature-item\"><strong class=\"feature-title\">🛠️ Tools & Features</strong><br>\n",
+    "                <span class=\"feature-description\">Pen tool, shapes, text, layers</span></p>\n",
+    "                <p class=\"feature-item\"><strong class=\"feature-title\">📐 Auto Layout</strong><br>\n",
+    "                <span class=\"feature-description\">Responsive design techniques</span></p>\n",
+    "                <p class=\"feature-item\"><strong class=\"feature-title\">🔗 Prototyping</strong><br>\n",
+    "                <span class=\"feature-description\">Interactions and animations</span></p>\n",
+    "            </div>\n",
+    "            <div>\n",
+    "                <p class=\"feature-item\"><strong class=\"feature-title\">🧩 Components</strong><br>\n",
+    "                <span class=\"feature-description\">Creating reusable elements</span></p>\n",
+    "                <p class=\"feature-item\"><strong class=\"feature-title\">👥 Collaboration</strong><br>\n",
+    "                <span class=\"feature-description\">Sharing and team workflows</span></p>\n",
+    "                <p class=\"feature-item\"><strong class=\"feature-title\">📚 Design Systems</strong><br>\n",
+    "                <span class=\"feature-description\">Libraries and style guides</span></p>\n",
+    "                <p class=\"feature-item\"><strong class=\"feature-title\">⚡ Shortcuts</strong><br>\n",
+    "                <span class=\"feature-description\">Productivity tips and tricks</span></p>\n",
+    "            </div>\n",
+    "        </div>\n",
+    "    </div>\n",
+    "\n",
+    "    <div class=\"protip-container\">\n",
+    "        <p class=\"protip-text\">💡 Pro tip: Ask specific questions like \"How do I create a button component?\" for the best results!</p>\n",
+    "    </div>\n",
+ " \"\"\"\n",
+ " )\n",
+ " \n",
+ " # Model selection dropdown\n",
+ " model_dropdown = gr.Dropdown(\n",
+ " choices=[\"OpenAI (GPT-3.5)\", \"Google Gemini (2.0 Flash)\", \"Claude (Sonnet 4)\"],\n",
+ " value=\"OpenAI (GPT-3.5)\",\n",
+ " label=\"Select AI Model\",\n",
+ " info=\"Choose which AI model to use for responses\"\n",
+ " )\n",
+ " \n",
+ " with gr.Row():\n",
+ " msg = gr.Textbox(\n",
+ " placeholder=\"Type your Figma question here...\",\n",
+ " container=False,\n",
+ " scale=4\n",
+ " )\n",
+ " submit_btn = gr.Button(\"Ask\", scale=1, variant=\"primary\")\n",
+ " clear_btn = gr.Button(\"Clear Chat\", scale=1)\n",
+ " audio_btn = gr.Button(\"🔊 Play Audio\", scale=1, variant=\"secondary\")\n",
+ " clear_audio_btn = gr.Button(\"🔇 Clear Audio\", scale=1, variant=\"secondary\")\n",
+ " \n",
+ "\n",
+ " # Example questions\n",
+ " gr.HTML(\n",
+ " \"\"\"\n",
+    "        <div class=\"quickstart-container\">\n",
+    "            <h3 class=\"quickstart-title\">🚀 Quick Start Questions</h3>\n",
+    "            <p class=\"quickstart-subtitle\">Click any question below to get started instantly!</p>\n",
+    "        </div>\n",
+ " \"\"\"\n",
+ " )\n",
+ " \n",
+ " with gr.Row():\n",
+ " example_btns = [\n",
+ " gr.Button(\n",
+ " \"How do I create my first frame?\", \n",
+ " size=\"sm\",\n",
+ " variant=\"secondary\"\n",
+ " ),\n",
+ " gr.Button(\n",
+ " \"What's the difference between components and instances?\", \n",
+ " size=\"sm\",\n",
+ " variant=\"secondary\"\n",
+ " ),\n",
+ " gr.Button(\n",
+ " \"How do I use Auto Layout?\", \n",
+ " size=\"sm\",\n",
+ " variant=\"secondary\"\n",
+ " ),\n",
+ " gr.Button(\n",
+ " \"How do I create a prototype?\", \n",
+ " size=\"sm\",\n",
+ " variant=\"secondary\"\n",
+ " )\n",
+ " ]\n",
+ "\n",
+    "    # Chat display and audio player with custom styling\n",
+ " chatbot = gr.Chatbot(\n",
+ " type=\"messages\",\n",
+ " height=400,\n",
+ " placeholder=\"Ask me anything about Figma! For example: 'How do I create a component?' or 'What are frames in Figma?'\",\n",
+ " elem_classes=[\"styled-chat\"]\n",
+ " )\n",
+ "\n",
+ " audio_output = gr.Audio(\n",
+ " label=\"Audio Response\",\n",
+ " visible=True,\n",
+ " elem_classes=[\"styled-audio\"]\n",
+ " )\n",
+ "\n",
+ " last_response = gr.State(\"\")\n",
+ " current_model = gr.State(\"OpenAI (GPT-3.5)\")\n",
+ " \n",
+ " def respond(message, chat_history, model_choice):\n",
+ " if not message.strip():\n",
+ " return \"\", chat_history, \"\", model_choice\n",
+ " \n",
+ " bot_message = get_figma_help(message, chat_history, model_choice)\n",
+ " new_history = chat_history + [\n",
+ " {\"role\": \"user\", \"content\": message},\n",
+ " {\"role\": \"assistant\", \"content\": bot_message}]\n",
+ " return \"\", new_history, bot_message, model_choice\n",
+ " \n",
+ " def play_audio(last_message, model_choice):\n",
+ " if last_message:\n",
+ " audio_file = talker(last_message, model_choice)\n",
+ " if audio_file:\n",
+ " return audio_file\n",
+ " return None\n",
+ " \n",
+ " def clear_audio():\n",
+ " \"\"\"Clear the audio output\"\"\"\n",
+ " return None\n",
+ " \n",
+ " def use_example(example_text):\n",
+ " return example_text\n",
+ " \n",
+ " # Set up interactions\n",
+ " submit_btn.click(\n",
+ " respond, \n",
+ " inputs=[msg, chatbot, model_dropdown], \n",
+ " outputs=[msg, chatbot, last_response, current_model]\n",
+ " )\n",
+ " msg.submit(\n",
+ " respond, \n",
+ " inputs=[msg, chatbot, model_dropdown], \n",
+ " outputs=[msg, chatbot, last_response, current_model]\n",
+ " )\n",
+ " clear_btn.click(clear_chat, outputs=[chatbot, msg, last_response])\n",
+ " \n",
+    "    # Audio button functionality (the selected model is passed through; TTS itself runs on OpenAI)\n",
+ " audio_btn.click(\n",
+ " play_audio,\n",
+ " inputs=[last_response, current_model],\n",
+ " outputs=[audio_output]\n",
+ " )\n",
+ " \n",
+ " # Clear audio button functionality\n",
+ " clear_audio_btn.click(\n",
+ " clear_audio,\n",
+ " outputs=[audio_output]\n",
+ " )\n",
+ " \n",
+ " # Example button clicks\n",
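+    "    # (Gradio passes a clicked Button's label string as its input value,\n",
+    "    # so each click copies the question text into the textbox.)\n",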
+ " for i, btn in enumerate(example_btns):\n",
+ " btn.click(\n",
+ " use_example,\n",
+ " inputs=[btn],\n",
+ " outputs=[msg]\n",
+ " )\n",
+ "\n",
+ "# Launch the app\n",
+ "demo.launch(share=True)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "90b29a7d-aec8-49d2-83c7-3e3ab96c47e1",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "venv",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.13"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}