Merge pull request #648 from rohit141914/figma_assistance

Created a Figma onboarding assistant, created a Hugging Face Space for it, and added a GitHub Action that automatically updates the Space on 'git push'
Ed Donner
2025-09-08 22:16:41 +01:00
committed by GitHub
5 changed files with 1051 additions and 0 deletions


@@ -0,0 +1,28 @@
name: Deploy to Hugging Face Spaces
on:
  push:
    branches:
      - figma_assistance
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.9'
      - name: Install Gradio
        run: python -m pip install gradio
      - name: Log in to Hugging Face
        run: python -c 'import huggingface_hub; huggingface_hub.login(token="${{ secrets.hf_token }}")'
      - name: Deploy to Spaces
        run: gradio deploy
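
For reference, the separate login step works because huggingface_hub.login() persists the token locally, so the later "gradio deploy" process can pick it back up. A minimal local sanity check of the same mechanism (assuming the token is exported in an HF_TOKEN environment variable, a name chosen here for illustration):

import os
from huggingface_hub import login, whoami

# login() writes the token to the local Hugging Face token cache, which is
# what lets a separate process such as "gradio deploy" authenticate later.
login(token=os.environ["HF_TOKEN"])  # HF_TOKEN is an assumed variable name
print(whoami()["name"])              # sanity check: prints the account name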


@@ -0,0 +1,6 @@
---
title: Figma_assistance
app_file: day_5_figma_assistance.py
sdk: gradio
sdk_version: 5.38.2
---


@@ -0,0 +1,484 @@
from openai import OpenAI
from dotenv import load_dotenv
import os
load_dotenv()
import gradio as gr
import base64
from io import BytesIO
from PIL import Image
from IPython.display import Audio, display
import google.generativeai
import anthropic

client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

# Configure Gemini
google.generativeai.configure(api_key=os.getenv("GOOGLE_API_KEY"))

# Configure Claude
claude = anthropic.Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))
openAI_model = "gpt-3.5-turbo"
gemini_model = "gemini-2.0-flash"
claude_model = "claude-sonnet-4-20250514"
openai_audio_model = "tts-1"

# Figma onboarding knowledge base
FIGMA_KNOWLEDGE = """
You are a helpful Figma onboarding assistant. You help new users learn Figma's core features and workflows.

Key Figma concepts to help users with:
- Interface overview (toolbar, layers panel, properties panel)
- Creating and editing frames
- Working with shapes, text, and components
- Using the pen tool for custom shapes
- Auto Layout for responsive designs
- Components and variants
- Prototyping and interactions
- Collaboration features
- Design systems and libraries
- Exporting assets
- Keyboard shortcuts

Always provide clear, step-by-step instructions and mention relevant keyboard shortcuts when applicable.
"""
prompts = {
    "Charlie": FIGMA_KNOWLEDGE
}
def truncate_for_tts(text, max_length=4000):
    """Truncate text for TTS while preserving complete sentences"""
    if len(text) <= max_length:
        return text

    # Try to truncate at sentence boundaries
    sentences = text.split('. ')
    truncated = ""

    for sentence in sentences:
        if len(truncated + sentence + '. ') <= max_length:
            truncated += sentence + '. '
        else:
            break

    # If we couldn't fit any complete sentences, just truncate hard
    if not truncated.strip():
        truncated = text[:max_length-10] + "..."

    return truncated.strip()
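
A quick illustration of the sentence-boundary behavior (string and limit chosen arbitrarily):

sample = "First sentence. Second sentence. Third sentence."
print(truncate_for_tts(sample, max_length=35))
# prints: First sentence. Second sentence.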
def talker_openai(message):
    """Generate audio from text using OpenAI TTS"""
    try:
        # Truncate message for TTS
        truncated_message = truncate_for_tts(message)

        response = client.audio.speech.create(
            model="tts-1",
            voice="onyx",
            input=truncated_message
        )

        audio_stream = BytesIO(response.content)
        output_filename = "output_audio_openai.mp3"
        with open(output_filename, "wb") as f:
            f.write(audio_stream.read())

        return output_filename
    except Exception as e:
        print(f"Error generating audio with OpenAI: {str(e)}")
        return None

def talker(message, model_choice):
    """Generate audio from text; model_choice is currently ignored and OpenAI TTS is always used"""
    return talker_openai(message)
def get_figma_help_openai(user_question, chat_history):
    """Get Figma onboarding assistance using OpenAI"""
    try:
        messages = [
            {"role": "system", "content": FIGMA_KNOWLEDGE}
        ]

        # The messages-format history already matches OpenAI's schema; copy user/assistant turns
        for msg in chat_history:
            if msg["role"] == "user":
                messages.append({"role": "user", "content": msg["content"]})
            elif msg["role"] == "assistant":
                messages.append({"role": "assistant", "content": msg["content"]})

        messages.append({"role": "user", "content": user_question})

        response = client.chat.completions.create(
            model=openAI_model,
            messages=messages,
            max_tokens=500,
            temperature=0.7
        )
        return response.choices[0].message.content

    except Exception as e:
        return f"Sorry, I encountered an error with OpenAI: {str(e)}"
def get_figma_help_gemini(user_question, chat_history):
    """Get Figma onboarding assistance using Gemini"""
    try:
        gemini = google.generativeai.GenerativeModel(
            model_name=gemini_model,
            system_instruction=FIGMA_KNOWLEDGE,
        )

        # Build conversation context from messages format
        conversation_context = ""
        for msg in chat_history:
            if msg["role"] == "user":
                conversation_context += f"User: {msg['content']}\n"
            elif msg["role"] == "assistant":
                conversation_context += f"Assistant: {msg['content']}\n\n"

        message = conversation_context + f"User: {user_question}"
        response = gemini.generate_content(message)
        reply = response.text
        return reply

    except Exception as e:
        return f"Sorry, I encountered an error with Gemini: {str(e)}"
def get_figma_help_claude(user_question, chat_history):
    """Get Figma onboarding assistance using Claude"""
    try:
        # Convert messages format to Claude format
        claude_messages = []
        for msg in chat_history:
            if msg["role"] == "user":
                claude_messages.append({"role": "user", "content": msg["content"]})
            elif msg["role"] == "assistant":
                claude_messages.append({"role": "assistant", "content": msg["content"]})

        # Add the current question
        claude_messages.append({"role": "user", "content": user_question})

        response = claude.messages.create(
            model=claude_model,
            max_tokens=500,
            temperature=0.7,
            system=prompts["Charlie"],
            messages=claude_messages,
        )
        reply = response.content[0].text
        return reply

    except Exception as e:
        return f"Sorry, I encountered an error with Claude: {str(e)}"
# Note: respond() is redefined inside the gr.Blocks context below; that
# inner definition is the one bound to the UI events.
def respond(message, chat_history, model_choice):
    if not message.strip():
        return "", chat_history, "", model_choice

    bot_message = get_figma_help(message, chat_history, model_choice)

    # Add user message and bot response in messages format
    new_history = chat_history + [
        {"role": "user", "content": message},
        {"role": "assistant", "content": bot_message}
    ]

    return "", new_history, bot_message, model_choice

def clear_chat():
    """Clear the chat history"""
    return [], "", None

def get_figma_help(user_question, chat_history, model_choice):
    """Get Figma onboarding assistance using selected model"""
    if model_choice == "OpenAI (GPT-3.5)":
        return get_figma_help_openai(user_question, chat_history)
    elif model_choice == "Google Gemini (2.0 Flash)":
        return get_figma_help_gemini(user_question, chat_history)
    elif model_choice == "Claude (Sonnet 4)":
        return get_figma_help_claude(user_question, chat_history)
    else:
        return "Please select a valid model."
custom_css = """
/* Chat area styling */
.styled-chat {
    border-radius: 15px !important;
    box-shadow: 0 4px 12px var(--shadow-color) !important;
    border: 1px solid var(--border-color) !important;
    padding: 10px;
}

/* Audio player styling */
.styled-audio {
    border-radius: 15px !important;
    box-shadow: 0 4px 12px var(--shadow-color) !important;
    border: 10px solid var(--block-background-fill) !important;
    padding: 10px;
    background-color: var(--background-fill-secondary) !important;
}

/* Header styling */
.header-container {
    text-align: center;
    padding: 20px;
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    border-radius: 15px;
    margin-bottom: 20px;
}

.header-title {
    color: white;
    margin: 0;
    font-size: 2.5em;
}

.header-subtitle {
    color: #f0f0f0;
    margin: 10px 0 0 0;
    font-size: 1.2em;
}

/* Features section styling */
.features-container {
    background: #f8f9fa;
    padding: 20px;
    border-radius: 10px;
    border-left: 4px solid #667eea;
}

.features-title {
    color: #333;
    margin-top: 0;
}

.features-grid {
    display: grid;
    grid-template-columns: 1fr 1fr;
    gap: 15px;
    margin-top: 15px;
}

.feature-item {
    color: #333;
    margin: 10px 0;
}

.feature-title {
    color: #667eea;
}

.feature-description {
    color: #666;
}

/* Pro tip styling */
.protip-container {
    text-align: center;
    margin-top: 20px;
    padding: 15px;
    background: #e8f4f8;
    border-radius: 8px;
}

.protip-text {
    margin: 0;
    color: #2c5aa0 !important;
    font-weight: 500;
}

/* Quick start questions styling */
.quickstart-container {
    background: linear-gradient(135deg, #4facfe 0%, #00f2fe 100%);
    padding: 15px 20px;
    border-radius: 10px;
    margin: 20px 0;
}

.quickstart-title {
    color: white !important;
    margin: 0;
    font-size: 1.3em;
    text-align: center;
}

.quickstart-subtitle {
    color: #f0f8ff !important;
    margin: 5px 0 0 0;
    text-align: center;
    font-size: 0.9em;
}
"""
# Create Gradio interface
with gr.Blocks(title="Figma Onboarding Assistant", theme=gr.themes.Soft(), css=custom_css) as demo:
    gr.HTML(
        """
        <div class="header-container">
            <h1 class="header-title">🎨 Figma Onboarding Assistant</h1>
            <p class="header-subtitle">Your AI-powered Figma learning companion</p>
        </div>

        <div class="features-container">
            <h3 class="features-title">✨ What I can help you with:</h3>
            <div class="features-grid">
                <div>
                    <p class="feature-item"><strong class="feature-title">🚀 Getting Started</strong><br/>
                    <span class="feature-description">Interface overview, basic navigation</span></p>
                    <p class="feature-item"><strong class="feature-title">🛠️ Tools & Features</strong><br/>
                    <span class="feature-description">Pen tool, shapes, text, layers</span></p>
                    <p class="feature-item"><strong class="feature-title">📐 Auto Layout</strong><br/>
                    <span class="feature-description">Responsive design techniques</span></p>
                    <p class="feature-item"><strong class="feature-title">🔗 Prototyping</strong><br/>
                    <span class="feature-description">Interactions and animations</span></p>
                </div>
                <div>
                    <p class="feature-item"><strong class="feature-title">🧩 Components</strong><br/>
                    <span class="feature-description">Creating reusable elements</span></p>
                    <p class="feature-item"><strong class="feature-title">👥 Collaboration</strong><br/>
                    <span class="feature-description">Sharing and team workflows</span></p>
                    <p class="feature-item"><strong class="feature-title">📚 Design Systems</strong><br/>
                    <span class="feature-description">Libraries and style guides</span></p>
                    <p class="feature-item"><strong class="feature-title">⚡ Shortcuts</strong><br/>
                    <span class="feature-description">Productivity tips and tricks</span></p>
                </div>
            </div>
        </div>

        <div class="protip-container">
            <p class="protip-text">💡 Pro tip: Ask specific questions like "How do I create a button component?" for the best results!</p>
        </div>
        """
    )

    # Model selection dropdown
    model_dropdown = gr.Dropdown(
        choices=["OpenAI (GPT-3.5)", "Google Gemini (2.0 Flash)", "Claude (Sonnet 4)"],
        value="OpenAI (GPT-3.5)",
        label="Select AI Model",
        info="Choose which AI model to use for responses"
    )

    with gr.Row():
        msg = gr.Textbox(
            placeholder="Type your Figma question here...",
            container=False,
            scale=4
        )
        submit_btn = gr.Button("Ask", scale=1, variant="primary")
        clear_btn = gr.Button("Clear Chat", scale=1)
        audio_btn = gr.Button("🔊 Play Audio", scale=1, variant="secondary")
        clear_audio_btn = gr.Button("🔇 Clear Audio", scale=1, variant="secondary")

    # Example questions
    gr.HTML(
        """
        <div class="quickstart-container">
            <h3 class="quickstart-title">🚀 Quick Start Questions</h3>
            <p class="quickstart-subtitle">Click any question below to get started instantly!</p>
        </div>
        """
    )

    with gr.Row():
        example_btns = [
            gr.Button(
                "How do I create my first frame?",
                size="sm",
                variant="secondary"
            ),
            gr.Button(
                "What's the difference between components and instances?",
                size="sm",
                variant="secondary"
            ),
            gr.Button(
                "How do I use Auto Layout?",
                size="sm",
                variant="secondary"
            ),
            gr.Button(
                "How do I create a prototype?",
                size="sm",
                variant="secondary"
            )
        ]

    # Chat and audio components
    chatbot = gr.Chatbot(
        type="messages",
        height=400,
        placeholder="Ask me anything about Figma! For example: 'How do I create a component?' or 'What are frames in Figma?'",
        elem_classes=["styled-chat"]
    )

    audio_output = gr.Audio(
        label="Audio Response",
        visible=True,
        elem_classes=["styled-audio"]
    )

    last_response = gr.State("")
    current_model = gr.State("OpenAI (GPT-3.5)")

    def respond(message, chat_history, model_choice):
        if not message.strip():
            return "", chat_history, "", model_choice

        bot_message = get_figma_help(message, chat_history, model_choice)
        new_history = chat_history + [
            {"role": "user", "content": message},
            {"role": "assistant", "content": bot_message}]
        return "", new_history, bot_message, model_choice

    def play_audio(last_message, model_choice):
        if last_message:
            audio_file = talker(last_message, model_choice)
            if audio_file:
                return audio_file
        return None

    def clear_audio():
        """Clear the audio output"""
        return None

    def use_example(example_text):
        return example_text

    # Set up interactions
    submit_btn.click(
        respond,
        inputs=[msg, chatbot, model_dropdown],
        outputs=[msg, chatbot, last_response, current_model]
    )
    msg.submit(
        respond,
        inputs=[msg, chatbot, model_dropdown],
        outputs=[msg, chatbot, last_response, current_model]
    )
    clear_btn.click(clear_chat, outputs=[chatbot, msg, last_response])

    # Audio button: generate speech for the last response (currently always OpenAI TTS)
    audio_btn.click(
        play_audio,
        inputs=[last_response, current_model],
        outputs=[audio_output]
    )

    # Clear audio button functionality
    clear_audio_btn.click(
        clear_audio,
        outputs=[audio_output]
    )

    # Example button clicks: clicking a button copies its label into the textbox
    for btn in example_btns:
        btn.click(
            use_example,
            inputs=[btn],
            outputs=[msg]
        )

# Launch the app
demo.launch(share=True)
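
One note on the launch call: share=True opens a temporary public tunnel, which matters mainly for local testing; on the Space itself the app is served directly, so a plain launch would also do:

demo.launch()  # sufficient when running on Hugging Face Spaces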


@@ -0,0 +1,7 @@
openai
python-dotenv
gradio
pillow
google-generativeai
anthropic
ipython


@@ -0,0 +1,526 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "dc49e5ae",
"metadata": {},
"outputs": [],
"source": [
"from openai import OpenAI\n",
"from dotenv import load_dotenv\n",
"import os\n",
"load_dotenv()\n",
"import gradio as gr\n",
"import base64\n",
"from io import BytesIO\n",
"from PIL import Image\n",
"from IPython.display import Audio, display\n",
"import google.generativeai\n",
"import anthropic\n",
"\n",
"client = OpenAI(api_key=os.getenv(\"OPENAI_API_KEY\"))\n",
"\n",
"# Configure Gemini\n",
"google.generativeai.configure(api_key=os.getenv(\"GOOGLE_API_KEY\"))\n",
"\n",
"# Configure Claude\n",
"claude = anthropic.Anthropic(api_key=os.getenv(\"ANTHROPIC_API_KEY\"))\n",
"openAI_model = \"gpt-3.5-turbo\"\n",
"gemini_model = \"gemini-2.0-flash\"\n",
"claude_model = \"claude-sonnet-4-20250514\"\n",
"openai_audio_model = \"tts-1\"\n",
"\n",
"# Figma onboarding knowledge base\n",
"FIGMA_KNOWLEDGE = \"\"\"\n",
"You are a helpful Figma onboarding assistant. You help new users learn Figma's core features and workflows.\n",
"\n",
"Key Figma concepts to help users with:\n",
"- Interface overview (toolbar, layers panel, properties panel)\n",
"- Creating and editing frames\n",
"- Working with shapes, text, and components\n",
"- Using the pen tool for custom shapes\n",
"- Auto Layout for responsive designs\n",
"- Components and variants\n",
"- Prototyping and interactions\n",
"- Collaboration features\n",
"- Design systems and libraries\n",
"- Exporting assets\n",
"- Keyboard shortcuts\n",
"\n",
"Always provide clear, step-by-step instructions and mention relevant keyboard shortcuts when applicable.\n",
"\"\"\"\n",
"\n",
"promts = {\n",
" \"Charlie\": FIGMA_KNOWLEDGE\n",
"}\n",
"\n",
"def truncate_for_tts(text, max_length=4000):\n",
" \"\"\"Truncate text for TTS while preserving complete sentences\"\"\"\n",
" if len(text) <= max_length:\n",
" return text\n",
" \n",
" # Try to truncate at sentence boundaries\n",
" sentences = text.split('. ')\n",
" truncated = \"\"\n",
" \n",
" for sentence in sentences:\n",
" if len(truncated + sentence + '. ') <= max_length:\n",
" truncated += sentence + '. '\n",
" else:\n",
" break\n",
" \n",
" # If we couldn't fit any complete sentences, just truncate hard\n",
" if not truncated.strip():\n",
" truncated = text[:max_length-10] + \"...\"\n",
" \n",
" return truncated.strip()\n",
"\n",
"def talker_openai(message):\n",
" \"\"\"Generate audio from text using OpenAI TTS\"\"\"\n",
" try:\n",
" # Truncate message for TTS\n",
" truncated_message = truncate_for_tts(message)\n",
" \n",
" response = client.audio.speech.create(\n",
" model=\"tts-1\",\n",
" voice=\"onyx\",\n",
" input=truncated_message\n",
" )\n",
"\n",
" audio_stream = BytesIO(response.content)\n",
" output_filename = \"output_audio_openai.mp3\"\n",
" with open(output_filename, \"wb\") as f:\n",
" f.write(audio_stream.read())\n",
"\n",
" return output_filename\n",
" except Exception as e:\n",
" print(f\"Error generating audio with OpenAI: {str(e)}\")\n",
" return None\n",
"\n",
"def talker(message, model_choice):\n",
" \"\"\"Generate audio from text using selected model\"\"\"\n",
" return talker_openai(message)\n",
"\n",
"def get_figma_help_openai(user_question, chat_history):\n",
" \"\"\"Get Figma onboarding assistance using OpenAI\"\"\"\n",
" try:\n",
" messages = [\n",
" {\"role\": \"system\", \"content\": FIGMA_KNOWLEDGE}\n",
" ]\n",
" \n",
" # Convert messages format chat history to OpenAI format\n",
" for msg in chat_history:\n",
" if msg[\"role\"] == \"user\":\n",
" messages.append({\"role\": \"user\", \"content\": msg[\"content\"]})\n",
" elif msg[\"role\"] == \"assistant\":\n",
" messages.append({\"role\": \"assistant\", \"content\": msg[\"content\"]})\n",
" \n",
" messages.append({\"role\": \"user\", \"content\": user_question})\n",
" \n",
" response = client.chat.completions.create(\n",
" model=openAI_model,\n",
" messages=messages,\n",
" max_tokens=500,\n",
" temperature=0.7\n",
" )\n",
" return response.choices[0].message.content\n",
" \n",
" except Exception as e:\n",
" return f\"Sorry, I encountered an error with OpenAI: {str(e)}\"\n",
"\n",
"def get_figma_help_gemini(user_question, chat_history):\n",
" \"\"\"Get Figma onboarding assistance using Gemini\"\"\"\n",
" try:\n",
" gemini = google.generativeai.GenerativeModel(\n",
" model_name=gemini_model,\n",
" system_instruction=FIGMA_KNOWLEDGE,\n",
" )\n",
" \n",
" # Build conversation context from messages format\n",
" conversation_context = \"\"\n",
" for msg in chat_history:\n",
" if msg[\"role\"] == \"user\":\n",
" conversation_context += f\"User: {msg['content']}\\n\"\n",
" elif msg[\"role\"] == \"assistant\":\n",
" conversation_context += f\"Assistant: {msg['content']}\\n\\n\"\n",
" \n",
" message = conversation_context + f\"User: {user_question}\"\n",
" response = gemini.generate_content(message)\n",
" reply = response.text\n",
" return reply\n",
" \n",
" except Exception as e:\n",
" return f\"Sorry, I encountered an error with Gemini: {str(e)}\"\n",
"\n",
"def get_figma_help_claude(user_question, chat_history):\n",
" \"\"\"Get Figma onboarding assistance using Claude\"\"\"\n",
" try:\n",
" # Convert messages format to Claude format\n",
" claude_messages = []\n",
" for msg in chat_history:\n",
" if msg[\"role\"] == \"user\":\n",
" claude_messages.append({\"role\": \"user\", \"content\": msg[\"content\"]})\n",
" elif msg[\"role\"] == \"assistant\":\n",
" claude_messages.append({\"role\": \"assistant\", \"content\": msg[\"content\"]})\n",
" \n",
" # Add the current question\n",
" claude_messages.append({\"role\": \"user\", \"content\": user_question})\n",
" \n",
" response = claude.messages.create(\n",
" model=claude_model,\n",
" max_tokens=500,\n",
" temperature=0.7,\n",
" system=promts[\"Charlie\"],\n",
" messages=claude_messages,\n",
" )\n",
" reply = response.content[0].text\n",
" return reply\n",
" \n",
" except Exception as e:\n",
" return f\"Sorry, I encountered an error with Claude: {str(e)}\"\n",
"\n",
"def respond(message, chat_history, model_choice):\n",
" if not message.strip():\n",
" return \"\", chat_history, \"\", model_choice\n",
" \n",
" bot_message = get_figma_help(message, chat_history, model_choice)\n",
" \n",
" # Add user message and bot response in messages format\n",
" new_history = chat_history + [\n",
" {\"role\": \"user\", \"content\": message},\n",
" {\"role\": \"assistant\", \"content\": bot_message}\n",
" ]\n",
" \n",
" return \"\", new_history, bot_message, model_choice\n",
"\n",
"def clear_chat():\n",
" \"\"\"Clear the chat history\"\"\"\n",
" return [], \"\", None\n",
"\n",
"def get_figma_help(user_question, chat_history, model_choice):\n",
" \"\"\"Get Figma onboarding assistance using selected model\"\"\"\n",
" if model_choice == \"OpenAI (GPT-3.5)\":\n",
" return get_figma_help_openai(user_question, chat_history)\n",
" elif model_choice == \"Google Gemini (2.0 Flash)\":\n",
" return get_figma_help_gemini(user_question, chat_history)\n",
" elif model_choice == \"Claude (Sonnet 4)\":\n",
" return get_figma_help_claude(user_question, chat_history)\n",
" else:\n",
" return \"Please select a valid model.\"\n",
"\n",
"custom_css = \"\"\"\n",
"/* Chat area styling */\n",
".styled-chat {\n",
" border-radius: 15px !important;\n",
" box-shadow: 0 4px 12px var(--shadow-color) !important;\n",
" border: 1px solid var(--border-color) !important;\n",
" padding: 10px;\n",
"}\n",
"\n",
"/* Audio player styling */\n",
".styled-audio {\n",
" border-radius: 15px !important;\n",
" box-shadow: 0 4px 12px var(--shadow-color) !important;\n",
" border: 10px solid var(--block-background-fill) !important;\n",
" padding: 10px;\n",
" background-color: var(--background-fill-secondary) !important;\n",
"}\n",
"\n",
"/* Header styling */\n",
".header-container {\n",
" text-align: center;\n",
" padding: 20px;\n",
" background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);\n",
" border-radius: 15px;\n",
" margin-bottom: 20px;\n",
"}\n",
"\n",
".header-title {\n",
" color: white;\n",
" margin: 0;\n",
" font-size: 2.5em;\n",
"}\n",
"\n",
".header-subtitle {\n",
" color: #f0f0f0;\n",
" margin: 10px 0 0 0;\n",
" font-size: 1.2em;\n",
"}\n",
"\n",
"/* Features section styling */\n",
".features-container {\n",
" background: #f8f9fa;\n",
" padding: 20px;\n",
" border-radius: 10px;\n",
" border-left: 4px solid #667eea;\n",
"}\n",
"\n",
".features-title {\n",
" color: #333;\n",
" margin-top: 0;\n",
"}\n",
"\n",
".features-grid {\n",
" display: grid;\n",
" grid-template-columns: 1fr 1fr;\n",
" gap: 15px;\n",
" margin-top: 15px;\n",
"}\n",
"\n",
".feature-item {\n",
" color: #333;\n",
" margin: 10px 0;\n",
"}\n",
"\n",
".feature-title {\n",
" color: #667eea;\n",
"}\n",
"\n",
".feature-description {\n",
" color: #666;\n",
"}\n",
"\n",
"/* Pro tip styling */\n",
".protip-container {\n",
" text-align: center;\n",
" margin-top: 20px;\n",
" padding: 15px;\n",
" background: #e8f4f8;\n",
" border-radius: 8px;\n",
"}\n",
"\n",
".protip-text {\n",
" margin: 0;\n",
" color: #2c5aa0 !important;\n",
" font-weight: 500;\n",
"}\n",
"\n",
"/* Quick start questions styling */\n",
".quickstart-container {\n",
" background: linear-gradient(135deg, #4facfe 0%, #00f2fe 100%);\n",
" padding: 15px 20px;\n",
" border-radius: 10px;\n",
" margin: 20px 0;\n",
"}\n",
"\n",
".quickstart-title {\n",
" color: white !important;\n",
" margin: 0;\n",
" font-size: 1.3em;\n",
" text-align: center;\n",
"}\n",
"\n",
".quickstart-subtitle {\n",
" color: #f0f8ff !important;\n",
" margin: 5px 0 0 0;\n",
" text-align: center;\n",
" font-size: 0.9em;\n",
"}\n",
"\"\"\"\n",
"\n",
"# Create Gradio interface\n",
"with gr.Blocks(title=\"Figma Onboarding Assistant\", theme=gr.themes.Soft(), css=custom_css) as demo:\n",
" gr.HTML(\n",
" \"\"\"\n",
" <div class=\"header-container\">\n",
" <h1 class=\"header-title\">🎨 Figma Onboarding Assistant</h1>\n",
" <p class=\"header-subtitle\">Your AI-powered Figma learning companion</p>\n",
" </div>\n",
" \n",
" <div class=\"features-container\">\n",
" <h3 class=\"features-title\">✨ What I can help you with:</h3>\n",
" <div class=\"features-grid\">\n",
" <div>\n",
" <p class=\"feature-item\"><strong class=\"feature-title\">🚀 Getting Started</strong><br/>\n",
" <span class=\"feature-description\">Interface overview, basic navigation</span></p>\n",
" <p class=\"feature-item\"><strong class=\"feature-title\">🛠️ Tools & Features</strong><br/>\n",
" <span class=\"feature-description\">Pen tool, shapes, text, layers</span></p>\n",
" <p class=\"feature-item\"><strong class=\"feature-title\">📐 Auto Layout</strong><br/>\n",
" <span class=\"feature-description\">Responsive design techniques</span></p>\n",
" <p class=\"feature-item\"><strong class=\"feature-title\">🔗 Prototyping</strong><br/>\n",
" <span class=\"feature-description\">Interactions and animations</span></p>\n",
" </div>\n",
" <div>\n",
" <p class=\"feature-item\"><strong class=\"feature-title\">🧩 Components</strong><br/>\n",
" <span class=\"feature-description\">Creating reusable elements</span></p>\n",
" <p class=\"feature-item\"><strong class=\"feature-title\">👥 Collaboration</strong><br/>\n",
" <span class=\"feature-description\">Sharing and team workflows</span></p>\n",
" <p class=\"feature-item\"><strong class=\"feature-title\">📚 Design Systems</strong><br/>\n",
" <span class=\"feature-description\">Libraries and style guides</span></p>\n",
" <p class=\"feature-item\"><strong class=\"feature-title\">⚡ Shortcuts</strong><br/>\n",
" <span class=\"feature-description\">Productivity tips and tricks</span></p>\n",
" </div>\n",
" </div>\n",
" </div>\n",
" \n",
" <div class=\"protip-container\">\n",
" <p class=\"protip-text\">💡 Pro tip: Ask specific questions like \"How do I create a button component?\" for the best results!</p>\n",
" </div>\n",
" \"\"\"\n",
" )\n",
" \n",
" # Model selection dropdown\n",
" model_dropdown = gr.Dropdown(\n",
" choices=[\"OpenAI (GPT-3.5)\", \"Google Gemini (2.0 Flash)\", \"Claude (Sonnet 4)\"],\n",
" value=\"OpenAI (GPT-3.5)\",\n",
" label=\"Select AI Model\",\n",
" info=\"Choose which AI model to use for responses\"\n",
" )\n",
" \n",
" with gr.Row():\n",
" msg = gr.Textbox(\n",
" placeholder=\"Type your Figma question here...\",\n",
" container=False,\n",
" scale=4\n",
" )\n",
" submit_btn = gr.Button(\"Ask\", scale=1, variant=\"primary\")\n",
" clear_btn = gr.Button(\"Clear Chat\", scale=1)\n",
" audio_btn = gr.Button(\"🔊 Play Audio\", scale=1, variant=\"secondary\")\n",
" clear_audio_btn = gr.Button(\"🔇 Clear Audio\", scale=1, variant=\"secondary\")\n",
" \n",
"\n",
" # Example questions\n",
" gr.HTML(\n",
" \"\"\"\n",
" <div class=\"quickstart-container\">\n",
" <h3 class=\"quickstart-title\">🚀 Quick Start Questions</h3>\n",
" <p class=\"quickstart-subtitle\">Click any question below to get started instantly!</p>\n",
" </div>\n",
" \"\"\"\n",
" )\n",
" \n",
" with gr.Row():\n",
" example_btns = [\n",
" gr.Button(\n",
" \"How do I create my first frame?\", \n",
" size=\"sm\",\n",
" variant=\"secondary\"\n",
" ),\n",
" gr.Button(\n",
" \"What's the difference between components and instances?\", \n",
" size=\"sm\",\n",
" variant=\"secondary\"\n",
" ),\n",
" gr.Button(\n",
" \"How do I use Auto Layout?\", \n",
" size=\"sm\",\n",
" variant=\"secondary\"\n",
" ),\n",
" gr.Button(\n",
" \"How do I create a prototype?\", \n",
" size=\"sm\",\n",
" variant=\"secondary\"\n",
" )\n",
" ]\n",
"\n",
" # Your components with simple styling\n",
" chatbot = gr.Chatbot(\n",
" type=\"messages\",\n",
" height=400,\n",
" placeholder=\"Ask me anything about Figma! For example: 'How do I create a component?' or 'What are frames in Figma?'\",\n",
" elem_classes=[\"styled-chat\"]\n",
" )\n",
"\n",
" audio_output = gr.Audio(\n",
" label=\"Audio Response\",\n",
" visible=True,\n",
" elem_classes=[\"styled-audio\"]\n",
" )\n",
"\n",
" last_response = gr.State(\"\")\n",
" current_model = gr.State(\"OpenAI (GPT-3.5)\")\n",
" \n",
" def respond(message, chat_history, model_choice):\n",
" if not message.strip():\n",
" return \"\", chat_history, \"\", model_choice\n",
" \n",
" bot_message = get_figma_help(message, chat_history, model_choice)\n",
" new_history = chat_history + [\n",
" {\"role\": \"user\", \"content\": message},\n",
" {\"role\": \"assistant\", \"content\": bot_message}]\n",
" return \"\", new_history, bot_message, model_choice\n",
" \n",
" def play_audio(last_message, model_choice):\n",
" if last_message:\n",
" audio_file = talker(last_message, model_choice)\n",
" if audio_file:\n",
" return audio_file\n",
" return None\n",
" \n",
" def clear_audio():\n",
" \"\"\"Clear the audio output\"\"\"\n",
" return None\n",
" \n",
" def use_example(example_text):\n",
" return example_text\n",
" \n",
" # Set up interactions\n",
" submit_btn.click(\n",
" respond, \n",
" inputs=[msg, chatbot, model_dropdown], \n",
" outputs=[msg, chatbot, last_response, current_model]\n",
" )\n",
" msg.submit(\n",
" respond, \n",
" inputs=[msg, chatbot, model_dropdown], \n",
" outputs=[msg, chatbot, last_response, current_model]\n",
" )\n",
" clear_btn.click(clear_chat, outputs=[chatbot, msg, last_response])\n",
" \n",
" # Audio button functionality - now uses selected model\n",
" audio_btn.click(\n",
" play_audio,\n",
" inputs=[last_response, current_model],\n",
" outputs=[audio_output]\n",
" )\n",
" \n",
" # Clear audio button functionality\n",
" clear_audio_btn.click(\n",
" clear_audio,\n",
" outputs=[audio_output]\n",
" )\n",
" \n",
" # Example button clicks\n",
" for i, btn in enumerate(example_btns):\n",
" btn.click(\n",
" use_example,\n",
" inputs=[btn],\n",
" outputs=[msg]\n",
" )\n",
"\n",
"# Launch the app\n",
"demo.launch(share=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "90b29a7d-aec8-49d2-83c7-3e3ab96c47e1",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}