diff --git a/week2/community-contributions/Figma_assistance/.github/workflows/update_space.yml b/week2/community-contributions/Figma_assistance/.github/workflows/update_space.yml
new file mode 100644
index 0000000..7e328a7
--- /dev/null
+++ b/week2/community-contributions/Figma_assistance/.github/workflows/update_space.yml
@@ -0,0 +1,28 @@
+name: Run Python script
+
+on:
+ push:
+ branches:
+ - figma_assistance
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v2
+
+ - name: Set up Python
+ uses: actions/setup-python@v2
+ with:
+ python-version: '3.9'
+
+ - name: Install Gradio
+ run: python -m pip install gradio
+
+ - name: Log in to Hugging Face
+ run: python -c 'import huggingface_hub; huggingface_hub.login(token="${{ secrets.hf_token }}")'
+
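+      # `gradio deploy` publishes the app to Hugging Face Spaces using the Space settings stored in README.md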
+ - name: Deploy to Spaces
+ run: gradio deploy
diff --git a/week2/community-contributions/Figma_assistance/README.md b/week2/community-contributions/Figma_assistance/README.md
new file mode 100644
index 0000000..26a783b
--- /dev/null
+++ b/week2/community-contributions/Figma_assistance/README.md
@@ -0,0 +1,6 @@
+---
+title: Figma_assistance
+app_file: day_5_figma_assistance.py
+sdk: gradio
+sdk_version: 5.38.2
+---
diff --git a/week2/community-contributions/Figma_assistance/day_5_figma_assistance.py b/week2/community-contributions/Figma_assistance/day_5_figma_assistance.py
new file mode 100644
index 0000000..32cf8b8
--- /dev/null
+++ b/week2/community-contributions/Figma_assistance/day_5_figma_assistance.py
@@ -0,0 +1,493 @@
+from openai import OpenAI
+from dotenv import load_dotenv
+import os
+load_dotenv()
+import gradio as gr
+import base64
+from io import BytesIO
+from PIL import Image
+from IPython.display import Audio, display
+import google.generativeai
+import anthropic
+
+client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+
+# Configure Gemini
+google.generativeai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
+
+# Configure Claude
+claude = anthropic.Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))
+openAI_model = "gpt-3.5-turbo"
+gemini_model = "gemini-2.0-flash"
+claude_model = "claude-sonnet-4-20250514"
+openai_audio_model = "tts-1"
+
+# Figma onboarding knowledge base
+FIGMA_KNOWLEDGE = """
+You are a helpful Figma onboarding assistant. You help new users learn Figma's core features and workflows.
+
+Key Figma concepts to help users with:
+- Interface overview (toolbar, layers panel, properties panel)
+- Creating and editing frames
+- Working with shapes, text, and components
+- Using the pen tool for custom shapes
+- Auto Layout for responsive designs
+- Components and variants
+- Prototyping and interactions
+- Collaboration features
+- Design systems and libraries
+- Exporting assets
+- Keyboard shortcuts
+
+Always provide clear, step-by-step instructions and mention relevant keyboard shortcuts when applicable.
+"""
+
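+# System prompts keyed by assistant persona; "Charlie" is passed to Claude as the system prompt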
+prompts = {
+ "Charlie": FIGMA_KNOWLEDGE
+}
+
+def truncate_for_tts(text, max_length=4000):
+ """Truncate text for TTS while preserving complete sentences"""
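+    # OpenAI's text-to-speech endpoint accepts at most 4096 characters, so 4000 leaves some headroom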
+ if len(text) <= max_length:
+ return text
+
+ # Try to truncate at sentence boundaries
+ sentences = text.split('. ')
+ truncated = ""
+
+ for sentence in sentences:
+ if len(truncated + sentence + '. ') <= max_length:
+ truncated += sentence + '. '
+ else:
+ break
+
+ # If we couldn't fit any complete sentences, just truncate hard
+ if not truncated.strip():
+ truncated = text[:max_length-10] + "..."
+
+ return truncated.strip()
+
+def talker_openai(message):
+ """Generate audio from text using OpenAI TTS"""
+ try:
+ # Truncate message for TTS
+ truncated_message = truncate_for_tts(message)
+
+ response = client.audio.speech.create(
+            model=openai_audio_model,
+ voice="onyx",
+ input=truncated_message
+ )
+
+ audio_stream = BytesIO(response.content)
+ output_filename = "output_audio_openai.mp3"
+ with open(output_filename, "wb") as f:
+ f.write(audio_stream.read())
+
+ return output_filename
+ except Exception as e:
+ print(f"Error generating audio with OpenAI: {str(e)}")
+ return None
+
+# def talker_gemini(message):
+# """Generate audio from text using Gemini TTS"""
+# try:
+# # Try the newer Gemini 2.0 TTS API
+# model = google.generativeai.GenerativeModel(gemini_model)
+
+# # Truncate message for TTS
+# truncated_message = truncate_for_tts(message)
+
+# # Generate audio using Gemini with simplified config
+# response = model.generate_content(
+# truncated_message,
+# generation_config={
+# "response_modalities": ["AUDIO"]
+# }
+# )
+
+# # Check if response has audio data
+# if hasattr(response, 'audio_data') and response.audio_data:
+# output_filename = "output_audio_gemini.wav"
+# with open(output_filename, "wb") as f:
+# f.write(response.audio_data)
+# return output_filename
+# else:
+# print("Gemini response does not contain audio data")
+# raise Exception("No audio data in Gemini response")
+
+# except Exception as e:
+# print(f"Error generating audio with Gemini: {str(e)}")
+# print("Gemini TTS not available, using OpenAI TTS with different voice")
+# # Use OpenAI TTS but with a different voice to distinguish
+# try:
+# # Truncate message for TTS
+# truncated_message = truncate_for_tts(message)
+
+# response = client.audio.speech.create(
+# model="tts-1",
+# voice="alloy", # Different voice to indicate it's for Gemini responses
+# input=truncated_message
+# )
+# audio_stream = BytesIO(response.content)
+# output_filename = "output_audio_gemini_fallback.mp3"
+# with open(output_filename, "wb") as f:
+# f.write(audio_stream.read())
+# return output_filename
+# except Exception as fallback_error:
+# print(f"Fallback TTS also failed: {str(fallback_error)}")
+# return None
+
+# def talker_claude(message):
+# """Generate audio from text using Claude TTS (fallback to OpenAI)"""
+# try:
+# # Truncate message for TTS
+# truncated_message = truncate_for_tts(message)
+
+# # Claude doesn't have native TTS, so we'll use OpenAI TTS
+# # but with a different filename to distinguish
+# response = client.audio.speech.create(
+# model="tts-1",
+# voice="nova", # Different voice for Claude responses
+# input=truncated_message
+# )
+
+# audio_stream = BytesIO(response.content)
+# output_filename = "output_audio_claude.mp3"
+# with open(output_filename, "wb") as f:
+# f.write(audio_stream.read())
+
+# return output_filename
+# except Exception as e:
+# print(f"Error generating audio for Claude: {str(e)}")
+# return None
+
+def talker(message, model_choice):
+ """Generate audio from text using selected model"""
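+    # The Gemini/Claude TTS paths below are disabled, so every model choice currently uses OpenAI TTS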
+ # if model_choice == "Google Gemini (2.0 Flash)":
+ # return talker_gemini(message)
+ # elif model_choice == "Claude (Sonnet 4)":
+ # return talker_claude(message)
+ # else:
+ return talker_openai(message)
+
+def get_figma_help_openai(user_question, chat_history):
+ """Get Figma onboarding assistance using OpenAI"""
+ try:
+ messages = [
+ {"role": "system", "content": FIGMA_KNOWLEDGE}
+ ]
+
+        # Convert the Gradio messages-format chat history to the OpenAI chat format
+ for msg in chat_history:
+ if msg["role"] == "user":
+ messages.append({"role": "user", "content": msg["content"]})
+ elif msg["role"] == "assistant":
+ messages.append({"role": "assistant", "content": msg["content"]})
+
+ messages.append({"role": "user", "content": user_question})
+
+ response = client.chat.completions.create(
+ model=openAI_model,
+ messages=messages,
+ max_tokens=500,
+ temperature=0.7
+ )
+ return response.choices[0].message.content
+
+ except Exception as e:
+ return f"Sorry, I encountered an error with OpenAI: {str(e)}"
+
+def get_figma_help_gemini(user_question, chat_history):
+ """Get Figma onboarding assistance using Gemini"""
+ try:
+ gemini = google.generativeai.GenerativeModel(
+ model_name=gemini_model,
+ system_instruction=FIGMA_KNOWLEDGE,
+ )
+
+ # Build conversation context from messages format
+ conversation_context = ""
+ for msg in chat_history:
+ if msg["role"] == "user":
+ conversation_context += f"User: {msg['content']}\n"
+ elif msg["role"] == "assistant":
+ conversation_context += f"Assistant: {msg['content']}\n\n"
+
+ message = conversation_context + f"User: {user_question}"
+ response = gemini.generate_content(message)
+ reply = response.text
+ return reply
+
+ except Exception as e:
+ return f"Sorry, I encountered an error with Gemini: {str(e)}"
+
+def get_figma_help_claude(user_question, chat_history):
+ """Get Figma onboarding assistance using Claude"""
+ try:
+ # Convert messages format to Claude format
+ claude_messages = []
+ for msg in chat_history:
+ if msg["role"] == "user":
+ claude_messages.append({"role": "user", "content": msg["content"]})
+ elif msg["role"] == "assistant":
+ claude_messages.append({"role": "assistant", "content": msg["content"]})
+
+ # Add the current question
+ claude_messages.append({"role": "user", "content": user_question})
+
+ response = claude.messages.create(
+ model=claude_model,
+ max_tokens=500,
+ temperature=0.7,
+            system=prompts["Charlie"],
+ messages=claude_messages,
+ )
+ reply = response.content[0].text
+ return reply
+
+ except Exception as e:
+ return f"Sorry, I encountered an error with Claude: {str(e)}"
+
+def respond(message, chat_history, model_choice):
+ if not message.strip():
+ return "", chat_history, "", model_choice
+
+ bot_message = get_figma_help(message, chat_history, model_choice)
+
+ # Add user message and bot response in messages format
+ new_history = chat_history + [
+ {"role": "user", "content": message},
+ {"role": "assistant", "content": bot_message}
+ ]
+
+ return "", new_history, bot_message, model_choice
+
+def clear_chat():
+ """Clear the chat history"""
+ return [], "", None
+
+def get_figma_help(user_question, chat_history, model_choice):
+ """Get Figma onboarding assistance using selected model"""
+ if model_choice == "OpenAI (GPT-3.5)":
+ return get_figma_help_openai(user_question, chat_history)
+ elif model_choice == "Google Gemini (2.0 Flash)":
+ return get_figma_help_gemini(user_question, chat_history)
+ elif model_choice == "Claude (Sonnet 4)":
+ return get_figma_help_claude(user_question, chat_history)
+ else:
+ return "Please select a valid model."
+
+
+custom_css = """
+
+/* Chat area styling */
+.styled-chat {
+ border-radius: 15px !important;
+ box-shadow: 0 4px 12px var(--shadow-color) !important;
+ border: 1px solid var(--border-color) !important;
+ padding: 10px;
+    /* background-color: #fff; */
+}
+
+/* Audio player styling */
+.styled-audio {
+ border-radius: 15px !important;
+ box-shadow: 0 4px 12px var(--shadow-color) !important;
+ border: 10px solid var(--block-background-fill) !important;
+ padding: 10px;
+ background-color: var(--background-fill-secondary) !important;
+}
+"""
+
+# Create Gradio interface
+with gr.Blocks(title="Figma Onboarding Assistant", theme=gr.themes.Soft(), css=custom_css) as demo:
+ gr.Markdown(
+ """
+        # 🎨 Figma Onboarding Assistant
+        *Your AI-powered Figma learning companion*
+
+        ### ✨ What I can help you with:
+
+        - 🚀 **Getting Started**: Interface overview, basic navigation
+        - 🛠️ **Tools & Features**: Pen tool, shapes, text, layers
+        - 📐 **Auto Layout**: Responsive design techniques
+        - 🔗 **Prototyping**: Interactions and animations
+        - 🧩 **Components**: Creating reusable elements
+        - 👥 **Collaboration**: Sharing and team workflows
+        - 📚 **Design Systems**: Libraries and style guides
+        - ⚡ **Shortcuts**: Productivity tips and tricks
+
+        💡 **Pro tip**: Ask specific questions like "How do I create a button component?" for the best results!
+ """
+ )
+
+ # Model selection dropdown
+ model_dropdown = gr.Dropdown(
+ choices=["OpenAI (GPT-3.5)", "Google Gemini (2.0 Flash)", "Claude (Sonnet 4)"],
+ value="OpenAI (GPT-3.5)",
+ label="Select AI Model",
+ info="Choose which AI model to use for responses"
+ )
+
+ with gr.Row():
+ msg = gr.Textbox(
+ placeholder="Type your Figma question here...",
+ container=False,
+ scale=4
+ )
+ submit_btn = gr.Button("Ask", scale=1, variant="primary")
+ clear_btn = gr.Button("Clear Chat", scale=1)
+ audio_btn = gr.Button("🔊 Play Audio", scale=1, variant="secondary")
+ clear_audio_btn = gr.Button("🔇 Clear Audio", scale=1, variant="secondary")
+
+
+ # Example questions
+ gr.Markdown(
+ """
+        ### 🚀 Quick Start Questions
+        Click any question below to get started instantly!
+ """
+ )
+ with gr.Row():
+ example_btns = [
+ gr.Button(
+ "How do I create my first frame?",
+ size="sm",
+ variant="secondary"
+ ),
+ gr.Button(
+ "What's the difference between components and instances?",
+ size="sm",
+ variant="secondary"
+ ),
+ gr.Button(
+ "How do I use Auto Layout?",
+ size="sm",
+ variant="secondary"
+ ),
+ gr.Button(
+ "How do I create a prototype?",
+ size="sm",
+ variant="secondary"
+ )
+ ]
+
+    # Chat display and audio player with custom styling
+ chatbot = gr.Chatbot(
+ # value=[],
+ type="messages",
+ height=400,
+ placeholder="Ask me anything about Figma! For example: 'How do I create a component?' or 'What are frames in Figma?'",
+ elem_classes=["styled-chat"]
+ )
+
+ audio_output = gr.Audio(
+ label="Audio Response",
+ visible=True,
+ elem_classes=["styled-audio"]
+ )
+
+    last_response = gr.State("")
+    current_model = gr.State("OpenAI (GPT-3.5)")
+
+ def play_audio(last_message, model_choice):
+ if last_message:
+ audio_file = talker(last_message, model_choice)
+ if audio_file:
+ return audio_file
+ return None
+
+ def clear_audio():
+ """Clear the audio output"""
+ return None
+
+ def use_example(example_text):
+ return example_text
+
+ # Set up interactions
+ submit_btn.click(
+ respond,
+ inputs=[msg, chatbot, model_dropdown],
+ outputs=[msg, chatbot, last_response, current_model]
+ )
+ msg.submit(
+ respond,
+ inputs=[msg, chatbot, model_dropdown],
+ outputs=[msg, chatbot, last_response, current_model]
+ )
+ clear_btn.click(clear_chat, outputs=[chatbot, msg, last_response])
+
+ # Audio button functionality - now uses selected model
+ audio_btn.click(
+ play_audio,
+ inputs=[last_response, current_model],
+ outputs=[audio_output]
+ )
+
+ # Clear audio button functionality
+ clear_audio_btn.click(
+ clear_audio,
+ outputs=[audio_output]
+ )
+
+ # Example button clicks
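+    # Using each Button as an input passes its label text to use_example, which fills the textbox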
+    for btn in example_btns:
+ btn.click(
+ use_example,
+ inputs=[btn],
+ outputs=[msg]
+ )
+
+# Launch the app
+if __name__ == "__main__":
+ demo.launch(
+ share=True,
+ # server_name="0.0.0.0",
+ # server_port=7860
+ )
\ No newline at end of file
diff --git a/week2/community-contributions/Figma_assistance/requirements.txt b/week2/community-contributions/Figma_assistance/requirements.txt
new file mode 100644
index 0000000..c090b08
--- /dev/null
+++ b/week2/community-contributions/Figma_assistance/requirements.txt
@@ -0,0 +1,7 @@
+openai
+python-dotenv
+gradio
+pillow
+google-generativeai
+anthropic
+ipython
diff --git a/week2/community-contributions/day_5_figma_assistance.ipynb b/week2/community-contributions/day_5_figma_assistance.ipynb
new file mode 100644
index 0000000..bb30876
--- /dev/null
+++ b/week2/community-contributions/day_5_figma_assistance.ipynb
@@ -0,0 +1,535 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "dc49e5ae",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from openai import OpenAI\n",
+ "from dotenv import load_dotenv\n",
+ "import os\n",
+ "load_dotenv()\n",
+ "import gradio as gr\n",
+ "import base64\n",
+ "from io import BytesIO\n",
+ "from PIL import Image\n",
+ "from IPython.display import Audio, display\n",
+ "import google.generativeai\n",
+ "import anthropic\n",
+ "\n",
+ "client = OpenAI(api_key=os.getenv(\"OPENAI_API_KEY\"))\n",
+ "\n",
+ "# Configure Gemini\n",
+ "google.generativeai.configure(api_key=os.getenv(\"GOOGLE_API_KEY\"))\n",
+ "\n",
+ "# Configure Claude\n",
+ "claude = anthropic.Anthropic(api_key=os.getenv(\"ANTHROPIC_API_KEY\"))\n",
+ "openAI_model = \"gpt-3.5-turbo\"\n",
+ "gemini_model = \"gemini-2.0-flash\"\n",
+ "claude_model = \"claude-sonnet-4-20250514\"\n",
+ "openai_audio_model = \"tts-1\"\n",
+ "\n",
+ "# Figma onboarding knowledge base\n",
+ "FIGMA_KNOWLEDGE = \"\"\"\n",
+ "You are a helpful Figma onboarding assistant. You help new users learn Figma's core features and workflows.\n",
+ "\n",
+ "Key Figma concepts to help users with:\n",
+ "- Interface overview (toolbar, layers panel, properties panel)\n",
+ "- Creating and editing frames\n",
+ "- Working with shapes, text, and components\n",
+ "- Using the pen tool for custom shapes\n",
+ "- Auto Layout for responsive designs\n",
+ "- Components and variants\n",
+ "- Prototyping and interactions\n",
+ "- Collaboration features\n",
+ "- Design systems and libraries\n",
+ "- Exporting assets\n",
+ "- Keyboard shortcuts\n",
+ "\n",
+ "Always provide clear, step-by-step instructions and mention relevant keyboard shortcuts when applicable.\n",
+ "\"\"\"\n",
+ "\n",
+    "prompts = {\n",
+ " \"Charlie\": FIGMA_KNOWLEDGE\n",
+ "}\n",
+ "\n",
+ "def truncate_for_tts(text, max_length=4000):\n",
+ " \"\"\"Truncate text for TTS while preserving complete sentences\"\"\"\n",
+ " if len(text) <= max_length:\n",
+ " return text\n",
+ " \n",
+ " # Try to truncate at sentence boundaries\n",
+ " sentences = text.split('. ')\n",
+ " truncated = \"\"\n",
+ " \n",
+ " for sentence in sentences:\n",
+ " if len(truncated + sentence + '. ') <= max_length:\n",
+ " truncated += sentence + '. '\n",
+ " else:\n",
+ " break\n",
+ " \n",
+ " # If we couldn't fit any complete sentences, just truncate hard\n",
+ " if not truncated.strip():\n",
+ " truncated = text[:max_length-10] + \"...\"\n",
+ " \n",
+ " return truncated.strip()\n",
+ "\n",
+ "def talker_openai(message):\n",
+ " \"\"\"Generate audio from text using OpenAI TTS\"\"\"\n",
+ " try:\n",
+ " # Truncate message for TTS\n",
+ " truncated_message = truncate_for_tts(message)\n",
+ " \n",
+ " response = client.audio.speech.create(\n",
+    "            model=openai_audio_model,\n",
+ " voice=\"onyx\",\n",
+ " input=truncated_message\n",
+ " )\n",
+ "\n",
+ " audio_stream = BytesIO(response.content)\n",
+ " output_filename = \"output_audio_openai.mp3\"\n",
+ " with open(output_filename, \"wb\") as f:\n",
+ " f.write(audio_stream.read())\n",
+ "\n",
+ " return output_filename\n",
+ " except Exception as e:\n",
+ " print(f\"Error generating audio with OpenAI: {str(e)}\")\n",
+ " return None\n",
+ "\n",
+ "# def talker_gemini(message):\n",
+ "# \"\"\"Generate audio from text using Gemini TTS\"\"\"\n",
+ "# try:\n",
+ "# # Try the newer Gemini 2.0 TTS API\n",
+ "# model = google.generativeai.GenerativeModel(gemini_model)\n",
+ " \n",
+ "# # Truncate message for TTS\n",
+ "# truncated_message = truncate_for_tts(message)\n",
+ " \n",
+ "# # Generate audio using Gemini with simplified config\n",
+ "# response = model.generate_content(\n",
+ "# truncated_message,\n",
+ "# generation_config={\n",
+ "# \"response_modalities\": [\"AUDIO\"]\n",
+ "# }\n",
+ "# )\n",
+ " \n",
+ "# # Check if response has audio data\n",
+ "# if hasattr(response, 'audio_data') and response.audio_data:\n",
+ "# output_filename = \"output_audio_gemini.wav\"\n",
+ "# with open(output_filename, \"wb\") as f:\n",
+ "# f.write(response.audio_data)\n",
+ "# return output_filename\n",
+ "# else:\n",
+ "# print(\"Gemini response does not contain audio data\")\n",
+ "# raise Exception(\"No audio data in Gemini response\")\n",
+ " \n",
+ "# except Exception as e:\n",
+ "# print(f\"Error generating audio with Gemini: {str(e)}\")\n",
+ "# print(\"Gemini TTS not available, using OpenAI TTS with different voice\")\n",
+ "# # Use OpenAI TTS but with a different voice to distinguish\n",
+ "# try:\n",
+ "# # Truncate message for TTS\n",
+ "# truncated_message = truncate_for_tts(message)\n",
+ " \n",
+ "# response = client.audio.speech.create(\n",
+ "# model=\"tts-1\",\n",
+ "# voice=\"alloy\", # Different voice to indicate it's for Gemini responses\n",
+ "# input=truncated_message\n",
+ "# )\n",
+ "# audio_stream = BytesIO(response.content)\n",
+ "# output_filename = \"output_audio_gemini_fallback.mp3\"\n",
+ "# with open(output_filename, \"wb\") as f:\n",
+ "# f.write(audio_stream.read())\n",
+ "# return output_filename\n",
+ "# except Exception as fallback_error:\n",
+ "# print(f\"Fallback TTS also failed: {str(fallback_error)}\")\n",
+ "# return None\n",
+ "\n",
+ "# def talker_claude(message):\n",
+ "# \"\"\"Generate audio from text using Claude TTS (fallback to OpenAI)\"\"\"\n",
+ "# try:\n",
+ "# # Truncate message for TTS\n",
+ "# truncated_message = truncate_for_tts(message)\n",
+ " \n",
+ "# # Claude doesn't have native TTS, so we'll use OpenAI TTS\n",
+ "# # but with a different filename to distinguish\n",
+ "# response = client.audio.speech.create(\n",
+ "# model=\"tts-1\",\n",
+ "# voice=\"nova\", # Different voice for Claude responses\n",
+ "# input=truncated_message\n",
+ "# )\n",
+ "\n",
+ "# audio_stream = BytesIO(response.content)\n",
+ "# output_filename = \"output_audio_claude.mp3\"\n",
+ "# with open(output_filename, \"wb\") as f:\n",
+ "# f.write(audio_stream.read())\n",
+ "\n",
+ "# return output_filename\n",
+ "# except Exception as e:\n",
+ "# print(f\"Error generating audio for Claude: {str(e)}\")\n",
+ "# return None\n",
+ "\n",
+ "def talker(message, model_choice):\n",
+ " \"\"\"Generate audio from text using selected model\"\"\"\n",
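+    "    # The Gemini/Claude TTS paths below are disabled, so every model choice currently uses OpenAI TTS\n",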
+ " # if model_choice == \"Google Gemini (2.0 Flash)\":\n",
+ " # return talker_gemini(message)\n",
+ " # elif model_choice == \"Claude (Sonnet 4)\":\n",
+ " # return talker_claude(message)\n",
+ " # else:\n",
+ " return talker_openai(message)\n",
+ "\n",
+ "def get_figma_help_openai(user_question, chat_history):\n",
+ " \"\"\"Get Figma onboarding assistance using OpenAI\"\"\"\n",
+ " try:\n",
+ " messages = [\n",
+ " {\"role\": \"system\", \"content\": FIGMA_KNOWLEDGE}\n",
+ " ]\n",
+ " \n",
+    "        # Convert the Gradio messages-format chat history to the OpenAI chat format\n",
+ " for msg in chat_history:\n",
+ " if msg[\"role\"] == \"user\":\n",
+ " messages.append({\"role\": \"user\", \"content\": msg[\"content\"]})\n",
+ " elif msg[\"role\"] == \"assistant\":\n",
+ " messages.append({\"role\": \"assistant\", \"content\": msg[\"content\"]})\n",
+ " \n",
+ " messages.append({\"role\": \"user\", \"content\": user_question})\n",
+ " \n",
+ " response = client.chat.completions.create(\n",
+ " model=openAI_model,\n",
+ " messages=messages,\n",
+ " max_tokens=500,\n",
+ " temperature=0.7\n",
+ " )\n",
+ " return response.choices[0].message.content\n",
+ " \n",
+ " except Exception as e:\n",
+ " return f\"Sorry, I encountered an error with OpenAI: {str(e)}\"\n",
+ "\n",
+ "def get_figma_help_gemini(user_question, chat_history):\n",
+ " \"\"\"Get Figma onboarding assistance using Gemini\"\"\"\n",
+ " try:\n",
+ " gemini = google.generativeai.GenerativeModel(\n",
+ " model_name=gemini_model,\n",
+ " system_instruction=FIGMA_KNOWLEDGE,\n",
+ " )\n",
+ " \n",
+ " # Build conversation context from messages format\n",
+ " conversation_context = \"\"\n",
+ " for msg in chat_history:\n",
+ " if msg[\"role\"] == \"user\":\n",
+ " conversation_context += f\"User: {msg['content']}\\n\"\n",
+ " elif msg[\"role\"] == \"assistant\":\n",
+ " conversation_context += f\"Assistant: {msg['content']}\\n\\n\"\n",
+ " \n",
+ " message = conversation_context + f\"User: {user_question}\"\n",
+ " response = gemini.generate_content(message)\n",
+ " reply = response.text\n",
+ " return reply\n",
+ " \n",
+ " except Exception as e:\n",
+ " return f\"Sorry, I encountered an error with Gemini: {str(e)}\"\n",
+ "\n",
+ "def get_figma_help_claude(user_question, chat_history):\n",
+ " \"\"\"Get Figma onboarding assistance using Claude\"\"\"\n",
+ " try:\n",
+ " # Convert messages format to Claude format\n",
+ " claude_messages = []\n",
+ " for msg in chat_history:\n",
+ " if msg[\"role\"] == \"user\":\n",
+ " claude_messages.append({\"role\": \"user\", \"content\": msg[\"content\"]})\n",
+ " elif msg[\"role\"] == \"assistant\":\n",
+ " claude_messages.append({\"role\": \"assistant\", \"content\": msg[\"content\"]})\n",
+ " \n",
+ " # Add the current question\n",
+ " claude_messages.append({\"role\": \"user\", \"content\": user_question})\n",
+ " \n",
+ " response = claude.messages.create(\n",
+ " model=claude_model,\n",
+ " max_tokens=500,\n",
+ " temperature=0.7,\n",
+    "            system=prompts[\"Charlie\"],\n",
+ " messages=claude_messages,\n",
+ " )\n",
+ " reply = response.content[0].text\n",
+ " return reply\n",
+ " \n",
+ " except Exception as e:\n",
+ " return f\"Sorry, I encountered an error with Claude: {str(e)}\"\n",
+ "\n",
+ "def respond(message, chat_history, model_choice):\n",
+ " if not message.strip():\n",
+ " return \"\", chat_history, \"\", model_choice\n",
+ " \n",
+ " bot_message = get_figma_help(message, chat_history, model_choice)\n",
+ " \n",
+ " # Add user message and bot response in messages format\n",
+ " new_history = chat_history + [\n",
+ " {\"role\": \"user\", \"content\": message},\n",
+ " {\"role\": \"assistant\", \"content\": bot_message}\n",
+ " ]\n",
+ " \n",
+ " return \"\", new_history, bot_message, model_choice\n",
+ "\n",
+ "def clear_chat():\n",
+ " \"\"\"Clear the chat history\"\"\"\n",
+ " return [], \"\", None\n",
+ "\n",
+ "def get_figma_help(user_question, chat_history, model_choice):\n",
+ " \"\"\"Get Figma onboarding assistance using selected model\"\"\"\n",
+ " if model_choice == \"OpenAI (GPT-3.5)\":\n",
+ " return get_figma_help_openai(user_question, chat_history)\n",
+ " elif model_choice == \"Google Gemini (2.0 Flash)\":\n",
+ " return get_figma_help_gemini(user_question, chat_history)\n",
+ " elif model_choice == \"Claude (Sonnet 4)\":\n",
+ " return get_figma_help_claude(user_question, chat_history)\n",
+ " else:\n",
+ " return \"Please select a valid model.\"\n",
+ "\n",
+ "\n",
+ "custom_css = \"\"\"\n",
+ "\n",
+ "/* Chat area styling */\n",
+ ".styled-chat {\n",
+ " border-radius: 15px !important;\n",
+ " box-shadow: 0 4px 12px var(--shadow-color) !important;\n",
+ " border: 1px solid var(--border-color) !important;\n",
+ " padding: 10px;\n",
+    "    /* background-color: #fff; */\n",
+ "}\n",
+ "\n",
+ "/* Audio player styling */\n",
+ ".styled-audio {\n",
+ " border-radius: 15px !important;\n",
+ " box-shadow: 0 4px 12px var(--shadow-color) !important;\n",
+ " border: 10px solid var(--block-background-fill) !important;\n",
+ " padding: 10px;\n",
+ " background-color: var(--background-fill-secondary) !important;\n",
+ "}\n",
+ "\"\"\"\n",
+ "\n",
+ "# Create Gradio interface\n",
+    "with gr.Blocks(title=\"Figma Onboarding Assistant\", theme=gr.themes.Soft(), css=custom_css) as demo:\n",
+ " gr.Markdown(\n",
+ " \"\"\"\n",
+    "        # 🎨 Figma Onboarding Assistant\n",
+    "        *Your AI-powered Figma learning companion*\n",
+    "\n",
+    "        ### ✨ What I can help you with:\n",
+    "\n",
+    "        - 🚀 **Getting Started**: Interface overview, basic navigation\n",
+    "        - 🛠️ **Tools & Features**: Pen tool, shapes, text, layers\n",
+    "        - 📐 **Auto Layout**: Responsive design techniques\n",
+    "        - 🔗 **Prototyping**: Interactions and animations\n",
+    "        - 🧩 **Components**: Creating reusable elements\n",
+    "        - 👥 **Collaboration**: Sharing and team workflows\n",
+    "        - 📚 **Design Systems**: Libraries and style guides\n",
+    "        - ⚡ **Shortcuts**: Productivity tips and tricks\n",
+    "\n",
+    "        💡 **Pro tip**: Ask specific questions like \"How do I create a button component?\" for the best results!\n",
+ " \"\"\"\n",
+ " )\n",
+ " \n",
+ " # Model selection dropdown\n",
+ " model_dropdown = gr.Dropdown(\n",
+ " choices=[\"OpenAI (GPT-3.5)\", \"Google Gemini (2.0 Flash)\", \"Claude (Sonnet 4)\"],\n",
+ " value=\"OpenAI (GPT-3.5)\",\n",
+ " label=\"Select AI Model\",\n",
+ " info=\"Choose which AI model to use for responses\"\n",
+ " )\n",
+ " \n",
+ " with gr.Row():\n",
+ " msg = gr.Textbox(\n",
+ " placeholder=\"Type your Figma question here...\",\n",
+ " container=False,\n",
+ " scale=4\n",
+ " )\n",
+ " submit_btn = gr.Button(\"Ask\", scale=1, variant=\"primary\")\n",
+ " clear_btn = gr.Button(\"Clear Chat\", scale=1)\n",
+ " audio_btn = gr.Button(\"🔊 Play Audio\", scale=1, variant=\"secondary\")\n",
+ " clear_audio_btn = gr.Button(\"🔇 Clear Audio\", scale=1, variant=\"secondary\")\n",
+ " \n",
+ "\n",
+ " # Example questions\n",
+ " gr.Markdown(\n",
+ " \"\"\"\n",
+    "        ### 🚀 Quick Start Questions\n",
+    "        Click any question below to get started instantly!\n",
+ " \"\"\"\n",
+ " )\n",
+ " with gr.Row():\n",
+ " example_btns = [\n",
+ " gr.Button(\n",
+ " \"How do I create my first frame?\", \n",
+ " size=\"sm\",\n",
+ " variant=\"secondary\"\n",
+ " ),\n",
+ " gr.Button(\n",
+ " \"What's the difference between components and instances?\", \n",
+ " size=\"sm\",\n",
+ " variant=\"secondary\"\n",
+ " ),\n",
+ " gr.Button(\n",
+ " \"How do I use Auto Layout?\", \n",
+ " size=\"sm\",\n",
+ " variant=\"secondary\"\n",
+ " ),\n",
+ " gr.Button(\n",
+ " \"How do I create a prototype?\", \n",
+ " size=\"sm\",\n",
+ " variant=\"secondary\"\n",
+ " )\n",
+ " ]\n",
+ "\n",
+    "    # Chat display and audio player with custom styling\n",
+ " chatbot = gr.Chatbot(\n",
+ " # value=[],\n",
+ " type=\"messages\",\n",
+ " height=400,\n",
+ " placeholder=\"Ask me anything about Figma! For example: 'How do I create a component?' or 'What are frames in Figma?'\",\n",
+ " elem_classes=[\"styled-chat\"]\n",
+ " )\n",
+ "\n",
+ " audio_output = gr.Audio(\n",
+ " label=\"Audio Response\",\n",
+ " visible=True,\n",
+ " elem_classes=[\"styled-audio\"]\n",
+ " )\n",
+ "\n",
+    "    last_response = gr.State(\"\")\n",
+    "    current_model = gr.State(\"OpenAI (GPT-3.5)\")\n",
+ " \n",
+ " def play_audio(last_message, model_choice):\n",
+ " if last_message:\n",
+ " audio_file = talker(last_message, model_choice)\n",
+ " if audio_file:\n",
+ " return audio_file\n",
+ " return None\n",
+ " \n",
+ " def clear_audio():\n",
+ " \"\"\"Clear the audio output\"\"\"\n",
+ " return None\n",
+ " \n",
+ " def use_example(example_text):\n",
+ " return example_text\n",
+ " \n",
+ " # Set up interactions\n",
+ " submit_btn.click(\n",
+ " respond, \n",
+ " inputs=[msg, chatbot, model_dropdown], \n",
+ " outputs=[msg, chatbot, last_response, current_model]\n",
+ " )\n",
+ " msg.submit(\n",
+ " respond, \n",
+ " inputs=[msg, chatbot, model_dropdown], \n",
+ " outputs=[msg, chatbot, last_response, current_model]\n",
+ " )\n",
+ " clear_btn.click(clear_chat, outputs=[chatbot, msg, last_response])\n",
+ " \n",
+ " # Audio button functionality - now uses selected model\n",
+ " audio_btn.click(\n",
+ " play_audio,\n",
+ " inputs=[last_response, current_model],\n",
+ " outputs=[audio_output]\n",
+ " )\n",
+ " \n",
+ " # Clear audio button functionality\n",
+ " clear_audio_btn.click(\n",
+ " clear_audio,\n",
+ " outputs=[audio_output]\n",
+ " )\n",
+ " \n",
+ " # Example button clicks\n",
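+    "    # Using each Button as an input passes its label text to use_example, which fills the textbox\n",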
+    "    for btn in example_btns:\n",
+ " btn.click(\n",
+ " use_example,\n",
+ " inputs=[btn],\n",
+ " outputs=[msg]\n",
+ " )\n",
+ "\n",
+ "# Launch the app\n",
+    "if __name__ == \"__main__\":\n",
+ " demo.launch(\n",
+ " share=True,\n",
+ " # server_name=\"0.0.0.0\",\n",
+ " # server_port=7860\n",
+ " )"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "90b29a7d-aec8-49d2-83c7-3e3ab96c47e1",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.13"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}