Add notebooks for Muhammad Qasim Sheikh in community-contributions
This commit is contained in:
@@ -0,0 +1,144 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d59206dc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from dotenv import load_dotenv\n",
|
||||
"from openai import OpenAI\n",
|
||||
"import ollama\n",
|
||||
"from IPython.display import Markdown, display"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "ad035727",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Load keys\n",
|
||||
"load_dotenv()\n",
|
||||
"client = OpenAI(api_key=os.getenv(\"OPENAI_API_KEY\"))\n",
|
||||
"ollama_via_openai = OpenAI(base_url='http://localhost:11434/v1', api_key = 'ollama')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "3f521334",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# ---- SYSTEM PROMPTS ----\n",
|
||||
"athena_system = \"\"\"\n",
|
||||
"You are Athena, a strategic thinker and visionary. You seek meaning, long-term implications,\n",
|
||||
"and practical wisdom in every discussion. Be concise (1-2 sentences).\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"loki_system = \"\"\"\n",
|
||||
"You are Loki, a sarcastic trickster who mocks and challenges everyone else's opinions.\n",
|
||||
"You use humor, wit, and irony to undermine serious arguments. Be concise (1-2 sentences).\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"orion_system = \"\"\"\n",
|
||||
"You are Orion, a data-driven realist. You respond with evidence, statistics, or factual analysis.\n",
|
||||
"If data is not available, make a logical deduction. Be concise (1-2 sentences).\n",
|
||||
"\"\"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "0a6d04f6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# ---- INITIAL CONVERSATION ----\n",
|
||||
"conversation = [\n",
|
||||
" {\"role\": \"system\", \"name\": \"Athena\", \"content\": athena_system},\n",
|
||||
" {\"role\": \"system\", \"name\": \"Loki\", \"content\": loki_system},\n",
|
||||
" {\"role\": \"system\", \"name\": \"Orion\", \"content\": orion_system},\n",
|
||||
" {\"role\": \"user\", \"content\": \"Topic: 'Why did the chicken cross the road?' Begin your discussion.\"}\n",
|
||||
"]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e292a27b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# ---- HELPER FUNCTIONS ----\n",
|
||||
"def call_gpt(name, system_prompt, conversation):\n",
|
||||
" \"\"\"Call GPT model with current conversation context.\"\"\"\n",
|
||||
" messages = [{\"role\": \"system\", \"content\": system_prompt}]\n",
|
||||
" messages += [{\"role\": \"user\", \"content\": f\"The conversation so far:\\n{format_conversation(conversation)}\\nNow respond as {name}.\"}]\n",
|
||||
" resp = client.chat.completions.create(model=\"gpt-4o-mini\", messages=messages)\n",
|
||||
" return resp.choices[0].message.content.strip()\n",
|
||||
"\n",
|
||||
"def call_ollama(name, system_prompt, conversation):\n",
|
||||
" \"\"\"Call Ollama (Llama3.2) as a local model.\"\"\"\n",
|
||||
" messages = [{\"role\": \"system\", \"content\": system_prompt}]\n",
|
||||
" messages += [{\"role\": \"user\", \"content\": f\"The conversation so far:\\n{format_conversation(conversation)}\\nNow respond as {name}.\"}]\n",
|
||||
" resp = ollama.chat(model=\"llama3.2\", messages=messages)\n",
|
||||
" return resp['message']['content'].strip()\n",
|
||||
"\n",
|
||||
"def format_conversation(conv):\n",
|
||||
" return \"\\n\".join([f\"{m.get('name', m['role']).upper()}: {m['content']}\" for m in conv if m['role'] != \"system\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f0eb4d72",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# ---- MAIN LOOP ----\n",
|
||||
"rounds = 5\n",
|
||||
"for i in range(rounds):\n",
|
||||
" # Athena responds\n",
|
||||
" athena_reply = call_gpt(\"Athena\", athena_system, conversation)\n",
|
||||
" conversation.append({\"role\": \"assistant\", \"name\": \"Athena\", \"content\": athena_reply})\n",
|
||||
" display(Markdown(f\"**Athena:** {athena_reply}\"))\n",
|
||||
"\n",
|
||||
" # Loki responds\n",
|
||||
" loki_reply = call_ollama(\"Loki\", loki_system, conversation)\n",
|
||||
" conversation.append({\"role\": \"assistant\", \"name\": \"Loki\", \"content\": loki_reply})\n",
|
||||
" display(Markdown(f\"**Loki:** {loki_reply}\"))\n",
|
||||
"\n",
|
||||
" # Orion responds\n",
|
||||
" orion_reply = call_gpt(\"Orion\", orion_system, conversation)\n",
|
||||
" conversation.append({\"role\": \"assistant\", \"name\": \"Orion\", \"content\": orion_reply})\n",
|
||||
" display(Markdown(f\"**Orion:** {orion_reply}\"))"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "llm-engineering",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.12"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -0,0 +1,47 @@
|
||||
# Multi-Agent Conversation Simulator (OpenAI + Ollama)
|
||||
|
||||
## Project Overview
|
||||
|
||||
This project is an experimental **multi-agent conversational simulation** built with **OpenAI GPT models** and a locally-hosted **Ollama LLM (Llama 3.2)**. It demonstrates how multiple AI personas can participate in a shared conversation, each with distinct roles, perspectives, and behaviors — producing a dynamic, evolving debate from different angles.
|
||||
|
||||
The script orchestrates a **three-way dialogue** around a single topic (“Why did the chicken cross the road?”) between three agents, each powered by a different model and persona definition:
|
||||
|
||||
- **Athena (OpenAI GPT-4o-mini):** A strategic thinker who looks for deeper meaning, long-term consequences, and practical wisdom.
|
||||
- **Loki (Ollama Llama 3.2):** A sarcastic trickster who mocks, questions, and challenges the others with wit and irony.
|
||||
- **Orion (OpenAI GPT-4o-mini):** A data-driven realist who grounds the discussion in facts, statistics, or logical deductions.
|
||||
|
||||
## What’s Happening in the Code
|
||||
|
||||
1. **Environment Setup**
|
||||
- Loads the OpenAI API key from a `.env` file.
|
||||
- Initializes OpenAI’s Python client and configures a local Ollama endpoint.
|
||||
|
||||
2. **Persona System Prompts**
|
||||
- Defines system prompts for each agent to give them unique personalities and communication styles.
|
||||
- These prompts act as the “character definitions” for Athena, Loki, and Orion.
|
||||
|
||||
3. **Conversation Initialization**
|
||||
- Starts with a single conversation topic provided by the user.
|
||||
- All three agents are aware of the discussion context and prior messages.
|
||||
|
||||
4. **Conversation Loop**
|
||||
- The conversation runs in multiple rounds (default: 5).
|
||||
- In each round:
|
||||
- **Athena (GPT)** responds first with a strategic viewpoint.
|
||||
- **Loki (Ollama)** replies next, injecting sarcasm and skepticism.
|
||||
- **Orion (GPT)** follows with a fact-based or analytical perspective.
|
||||
- Each response is appended to the conversation history so future replies build on previous statements.
|
||||
|
||||
5. **Dynamic Context Sharing**
|
||||
- Each agent receives the **entire conversation so far** as context before generating a response.
|
||||
- This ensures their replies are relevant, coherent, and responsive to what the others have said.
|
||||
|
||||
6. **Output Rendering**
|
||||
- Responses are displayed as Markdown in a readable, chat-like format for each speaker, round by round.
|
||||
|
||||
## Key Highlights
|
||||
|
||||
- Demonstrates **multi-agent orchestration** with different models working together in a single script.
|
||||
- Uses **OpenAI GPT models** for reasoning and **Ollama (Llama 3.2)** for local, cost-free inference.
|
||||
- Shows how **system prompts** and **context-aware message passing** can simulate realistic dialogues.
|
||||
- Provides a template for experimenting with **AI characters**, **debate simulations**, or **collaborative agent systems**.
|
||||
@@ -0,0 +1,224 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4ef1e715",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import gradio as gr\n",
|
||||
"from openai import OpenAI\n",
|
||||
"from dotenv import load_dotenv"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d3426558",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Load API key\n",
|
||||
"load_dotenv()\n",
|
||||
"client = OpenAI(api_key=os.getenv(\"OPENAI_API_KEY\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e18a59a3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# -------------------------------\n",
|
||||
"# Helper: Prompt Builder\n",
|
||||
"# -------------------------------\n",
|
||||
"def build_prompt(task, topic, tone, audience):\n",
|
||||
" task_prompts = {\n",
|
||||
" \"Brochure\": f\"Write a compelling marketing brochure about {topic}.\",\n",
|
||||
" \"Blog Post\": f\"Write a blog post on {topic} with engaging storytelling and useful insights.\",\n",
|
||||
" \"Product Comparison\": f\"Write a product comparison summary focusing on {topic}, including pros, cons, and recommendations.\",\n",
|
||||
" \"Idea Brainstorm\": f\"Brainstorm creative ideas or solutions related to {topic}.\"\n",
|
||||
" }\n",
|
||||
" base = task_prompts.get(task, \"Write something creative.\")\n",
|
||||
" if tone:\n",
|
||||
" base += f\" Use a {tone} tone.\"\n",
|
||||
" if audience:\n",
|
||||
" base += f\" Tailor it for {audience}.\"\n",
|
||||
" return base"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "65a27bfb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# -------------------------------\n",
|
||||
"# Generate with multiple models\n",
|
||||
"# -------------------------------\n",
|
||||
"def generate_stream(task, topic, tone, audience, model):\n",
|
||||
" if not topic.strip():\n",
|
||||
" yield \"⚠️ Please enter a topic.\"\n",
|
||||
" return\n",
|
||||
"\n",
|
||||
" prompt = build_prompt(task, topic, tone, audience)\n",
|
||||
"\n",
|
||||
" stream = client.chat.completions.create(\n",
|
||||
" model=model,\n",
|
||||
" messages=[\n",
|
||||
" {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n",
|
||||
" {\"role\": \"user\", \"content\": prompt}\n",
|
||||
" ],\n",
|
||||
" max_tokens=800,\n",
|
||||
" stream=True\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" result = \"\"\n",
|
||||
" for chunk in stream:\n",
|
||||
" result += chunk.choices[0].delta.content or \"\"\n",
|
||||
" yield result"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "9e15abee",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# -------------------------------\n",
|
||||
"# Refinement logic\n",
|
||||
"# -------------------------------\n",
|
||||
"def refine_stream(original_text, instruction, model):\n",
|
||||
" if not original_text.strip():\n",
|
||||
" yield \"⚠️ Please paste the text you want to refine.\"\n",
|
||||
" return\n",
|
||||
" if not instruction.strip():\n",
|
||||
" yield \"⚠️ Please provide a refinement instruction.\"\n",
|
||||
" return\n",
|
||||
"\n",
|
||||
" refined_prompt = f\"Refine the following text based on this instruction: {instruction}\\n\\nText:\\n{original_text}\"\n",
|
||||
"\n",
|
||||
" stream = client.chat.completions.create(\n",
|
||||
" model=model,\n",
|
||||
" messages=[\n",
|
||||
" {\"role\": \"system\", \"content\": \"You are a writing assistant.\"},\n",
|
||||
" {\"role\": \"user\", \"content\": refined_prompt}\n",
|
||||
" ],\n",
|
||||
" max_tokens=800,\n",
|
||||
" stream=True\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" result = \"\"\n",
|
||||
" for chunk in stream:\n",
|
||||
" result += chunk.choices[0].delta.content or \"\"\n",
|
||||
" yield result\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8ee02feb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# -------------------------------\n",
|
||||
"# Gradio UI\n",
|
||||
"# -------------------------------\n",
|
||||
"with gr.Blocks(title=\"AI Creative Studio\") as demo:\n",
|
||||
" gr.Markdown(\"# AI Creative Studio\\nGenerate marketing content, blog posts, or creative ideas — streamed in real-time!\")\n",
|
||||
"\n",
|
||||
" with gr.Row():\n",
|
||||
" task = gr.Dropdown(\n",
|
||||
" [\"Brochure\", \"Blog Post\", \"Product Comparison\", \"Idea Brainstorm\"],\n",
|
||||
" label=\"Task Type\",\n",
|
||||
" value=\"Brochure\"\n",
|
||||
" )\n",
|
||||
" topic = gr.Textbox(label=\"Topic\", placeholder=\"e.g., Electric Cars, AI in Education...\")\n",
|
||||
" with gr.Row():\n",
|
||||
" tone = gr.Textbox(label=\"Tone (optional)\", placeholder=\"e.g., professional, casual, humorous...\")\n",
|
||||
" audience = gr.Textbox(label=\"Target Audience (optional)\", placeholder=\"e.g., investors, students, developers...\")\n",
|
||||
"\n",
|
||||
" model = gr.Dropdown(\n",
|
||||
" [\"gpt-4o-mini\", \"gpt-3.5-turbo\", \"gpt-4\"],\n",
|
||||
" label=\"Choose a model\",\n",
|
||||
" value=\"gpt-4o-mini\"\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" generate_btn = gr.Button(\"Generate Content\")\n",
|
||||
" output_md = gr.Markdown(label=\"Generated Content\", show_label=True)\n",
|
||||
"\n",
|
||||
" generate_btn.click(\n",
|
||||
" fn=generate_stream,\n",
|
||||
" inputs=[task, topic, tone, audience, model],\n",
|
||||
" outputs=output_md\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" gr.Markdown(\"---\\n## Refine Your Content\")\n",
|
||||
"\n",
|
||||
" original_text = gr.Textbox(\n",
|
||||
" label=\"Original Content\",\n",
|
||||
" placeholder=\"Paste content you want to refine...\",\n",
|
||||
" lines=10\n",
|
||||
" )\n",
|
||||
" instruction = gr.Textbox(\n",
|
||||
" label=\"Refinement Instruction\",\n",
|
||||
" placeholder=\"e.g., Make it shorter and more persuasive.\",\n",
|
||||
" )\n",
|
||||
" refine_model = gr.Dropdown(\n",
|
||||
" [\"gpt-4o-mini\", \"gpt-3.5-turbo\", \"gpt-4\"],\n",
|
||||
" label=\"Model for Refinement\",\n",
|
||||
" value=\"gpt-4o-mini\"\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" refine_btn = gr.Button(\"Refine\")\n",
|
||||
" refined_output = gr.Markdown(label=\"Refined Content\", show_label=True)\n",
|
||||
"\n",
|
||||
" refine_btn.click(\n",
|
||||
" fn=refine_stream,\n",
|
||||
" inputs=[original_text, instruction, refine_model],\n",
|
||||
" outputs=refined_output\n",
|
||||
" )\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "55d42c7e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# -------------------------------\n",
|
||||
"# Launch the App\n",
|
||||
"# -------------------------------\n",
|
||||
"if __name__ == \"__main__\":\n",
|
||||
" demo.launch()"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "llm-engineering",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.12"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -0,0 +1,48 @@
|
||||
# AI Creative Studio
|
||||
|
||||
## Project Overview
|
||||
|
||||
AI Creative Studio is a web-based application built with Gradio that allows users to generate and refine high-quality written content in real time using OpenAI language models. It is designed as a flexible creative tool for content creation tasks such as writing brochures, blog posts, product comparisons, and brainstorming ideas. The application also supports interactive refinement, enabling users to improve or adapt existing text based on specific instructions.
|
||||
|
||||
The core idea is to combine the power of OpenAI models with an intuitive, user-friendly interface that streams responses as they are generated. This provides a fast, engaging, and highly interactive writing experience without waiting for the entire response to complete before it appears.
|
||||
|
||||
---
|
||||
|
||||
## What’s Happening in the Project
|
||||
|
||||
1. **Environment Setup and Model Initialization**
|
||||
- The application loads the OpenAI API key from a `.env` file and initializes the OpenAI client for model interactions.
|
||||
- Supported models include `gpt-4o-mini`, `gpt-3.5-turbo`, and `gpt-4`, which the user can select from a dropdown menu.
|
||||
|
||||
2. **Prompt Construction and Content Generation**
|
||||
- The `build_prompt` function constructs a task-specific prompt based on the user’s choices: content type (brochure, blog post, etc.), topic, tone, and target audience.
|
||||
- Once the user provides the inputs and selects a model, the application sends the prompt to the model.
|
||||
- The model’s response is streamed back incrementally, showing text chunk by chunk for a real-time generation experience.
|
||||
|
||||
3. **Content Refinement Feature**
|
||||
- Users can paste existing text and provide a refinement instruction (e.g., “make it more persuasive” or “summarize it”).
|
||||
- The application then streams an improved version of the text, following the instruction, allowing users to iterate and polish content efficiently.
|
||||
|
||||
4. **Gradio User Interface**
|
||||
- The app is built using Gradio Blocks, providing an organized and interactive layout.
|
||||
- Key UI elements include:
|
||||
- Task selection dropdown for choosing the type of content.
|
||||
- Text inputs for topic, tone, and target audience.
|
||||
- Model selection dropdown for choosing a specific OpenAI model.
|
||||
- Real-time markdown display of generated content.
|
||||
- A refinement panel for improving existing text.
|
||||
|
||||
5. **Streaming Workflow**
|
||||
- Both generation and refinement use OpenAI’s streaming API to display the model’s response as it’s produced.
|
||||
- This provides an immediate and responsive user experience, allowing users to see results build up in real time rather than waiting for the entire completion.
|
||||
|
||||
---
|
||||
|
||||
### Key Features
|
||||
- Real-time streaming responses for fast and interactive content creation.
|
||||
- Multiple content generation modes: brochure, blog post, product comparison, and idea brainstorming.
|
||||
- Customization options for tone and audience to tailor the writing style.
|
||||
- Interactive refinement tool to enhance or transform existing text.
|
||||
- Clean and intuitive web interface powered by Gradio.
|
||||
|
||||
AI Creative Studio demonstrates how large language models can be integrated into user-facing applications to support creative workflows and improve productivity in content generation and editing.
|
||||
@@ -0,0 +1,137 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "6f612c5a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import gradio as gr\n",
|
||||
"from dotenv import load_dotenv\n",
|
||||
"from openai import OpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "39c144fd",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Load API Key\n",
|
||||
"load_dotenv()\n",
|
||||
"client = OpenAI(api_key=os.getenv(\"OPENAI_API_KEY\"))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f656e0d1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# -------------------------------\n",
|
||||
"# 1. System Prompt (Business Context)\n",
|
||||
"# -------------------------------\n",
|
||||
"system_message = \"\"\"\n",
|
||||
"You are Nova, an AI Sales & Solutions Consultant for Reallytics.ai, a company specializing in building\n",
|
||||
"custom AI chatbots, voice assistants, data dashboards, and automation solutions for businesses.\n",
|
||||
"You are professional, insightful, and always focused on solving the user's business challenges.\n",
|
||||
"First, try to understand their use case. Then suggest relevant solutions from our services with clear value propositions.\n",
|
||||
"If the user is unsure, give them examples of how similar businesses have benefited from AI.\n",
|
||||
"\"\"\"\n",
|
||||
"\n",
|
||||
"MODEL = \"gpt-4o-mini\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f2faba29",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# -------------------------------\n",
|
||||
"# 2. Smart Chat Function (Streaming)\n",
|
||||
"# -------------------------------\n",
|
||||
"def chat(message, history):\n",
|
||||
" # Convert Gradio's chat history to OpenAI format\n",
|
||||
" history_messages = [{\"role\": h[\"role\"], \"content\": h[\"content\"]} for h in history]\n",
|
||||
"\n",
|
||||
" # Adjust system message based on context dynamically\n",
|
||||
" relevant_system_message = system_message\n",
|
||||
" if \"price\" in message.lower():\n",
|
||||
" relevant_system_message += (\n",
|
||||
" \" If the user asks about pricing, explain that pricing depends on project complexity, \"\n",
|
||||
" \"but typical POCs start around $2,000 - $5,000, and full enterprise deployments scale beyond that.\"\n",
|
||||
" )\n",
|
||||
" if \"integration\" in message.lower():\n",
|
||||
" relevant_system_message += (\n",
|
||||
" \" If integration is mentioned, reassure the user that our solutions are built to integrate seamlessly with CRMs, ERPs, or internal APIs.\"\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" # Compose final messages\n",
|
||||
" messages = [{\"role\": \"system\", \"content\": relevant_system_message}] + history_messages + [\n",
|
||||
" {\"role\": \"user\", \"content\": message}\n",
|
||||
" ]\n",
|
||||
"\n",
|
||||
" # Stream the response\n",
|
||||
" stream = client.chat.completions.create(\n",
|
||||
" model=MODEL,\n",
|
||||
" messages=messages,\n",
|
||||
" stream=True\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" response = \"\"\n",
|
||||
" for chunk in stream:\n",
|
||||
" response += chunk.choices[0].delta.content or \"\"\n",
|
||||
" yield response"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b9d9515e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# -------------------------------\n",
|
||||
"# 3. Gradio Chat UI\n",
|
||||
"# -------------------------------\n",
|
||||
"with gr.Blocks(title=\"AI Business Assistant\") as demo:\n",
|
||||
" gr.Markdown(\"# AI Business Assistant\\nYour intelligent sales and solution consultant, powered by OpenAI.\")\n",
|
||||
"\n",
|
||||
"    gr.ChatInterface(\n",
|
||||
"        fn=chat,\n",
|
||||
"        type=\"messages\",\n",
|
||||
"        title=\"Business AI Consultant\",\n",
|
||||
"        description=\"Ask about automation, chatbots, dashboards, or voice AI — Nova will help you discover the right solution.\"\n",
|
||||
"    )\n",
|
||||
"demo.launch()\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "llm-engineering",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.12"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -0,0 +1,42 @@
|
||||
# AI Business Assistant
|
||||
|
||||
## Project Overview
|
||||
|
||||
This project is a prototype of an **AI-powered business consultant chatbot** built with **Gradio** and **OpenAI**. The assistant, named **Nova**, is designed to act as a virtual sales and solutions consultant for a company offering AI services such as chatbots, voice assistants, dashboards, and automation tools.
|
||||
|
||||
The purpose of the project is to demonstrate how an LLM (Large Language Model) can be adapted for a business context by carefully designing the **system prompt** and providing **dynamic behavior** based on user inputs. The chatbot responds to user queries in real time with streaming responses, making it interactive and natural to use.
|
||||
|
||||
|
||||
## What’s Happening in the Code
|
||||
|
||||
1. **Environment Setup**
|
||||
- The code loads the OpenAI API key from a `.env` file.
|
||||
- The `OpenAI` client is initialized for communication with the language model.
|
||||
- The chosen model is `gpt-4o-mini`.
|
||||
|
||||
2. **System Prompt for Business Context**
|
||||
- The assistant is given a clear identity: *Nova, an AI Sales & Solutions Consultant for Reallytics.ai*.
|
||||
- The system prompt defines Nova’s tone (professional, insightful) and role (understand user needs, propose relevant AI solutions, share examples).
|
||||
|
||||
3. **Dynamic Chat Function**
|
||||
- The `chat()` function processes user input and the conversation history.
|
||||
- It modifies the system prompt dynamically:
|
||||
- If the user mentions **price**, Nova explains pricing ranges and factors.
|
||||
- If the user mentions **integration**, Nova reassures the user about system compatibility.
|
||||
- Messages are formatted for the OpenAI API, combining system, history, and user inputs.
|
||||
- Responses are streamed back chunk by chunk, so users see the assistant typing in real time.
|
||||
|
||||
4. **Gradio Chat Interface**
|
||||
- A Gradio interface is created with `ChatInterface` in `messages` mode.
|
||||
- This automatically provides a chat-style UI with user/assistant message bubbles and a send button.
|
||||
- The title and description help set context for end users: *“Ask about automation, chatbots, dashboards, or voice AI.”*
|
||||
|
||||
|
||||
## Key Features
|
||||
- **Business-specific persona:** The assistant is contextualized as a sales consultant rather than a generic chatbot.
|
||||
- **Adaptive responses:** System prompt is adjusted based on keywords like "price" and "integration".
|
||||
- **Streaming output:** Responses are displayed incrementally, improving user experience.
|
||||
- **Clean chat UI:** Built with Gradio’s `ChatInterface` for simplicity and usability.
|
||||
|
||||
|
||||
This project demonstrates how to combine **system prompts**, **dynamic context handling**, and **Gradio chat interfaces** to build a specialized AI assistant tailored for business use cases.
|
||||
Reference in New Issue
Block a user