Merge pull request #84 from l-jacques/ljacques-w2d2
Exercise week2 day 2: offer multi llms dropdown and reusable AISystem.py with stream option
This commit is contained in:
81
week2/community-contributions/AISystem.py
Normal file
81
week2/community-contributions/AISystem.py
Normal file
@@ -0,0 +1,81 @@
|
||||
|
||||
from enum import Enum, auto
|
||||
from openai import OpenAI
|
||||
import anthropic
|
||||
|
||||
def formatPrompt(role, content):
    """Build a single chat message in the OpenAI messages format.

    :param role: message role, e.g. "system", "user" or "assistant"
    :param content: the message text
    :return: dict with "role" and "content" keys
    """
    message = {"role": role, "content": content}
    return message
|
||||
|
||||
class AI(Enum):
    """Identifiers for the supported LLM backends.

    The string values double as the labels shown in the UI dropdown.
    """

    OPEN_AI = "OPEN_AI"
    CLAUDE = "CLAUDE"
    GEMINI = "GEMINI"
    OLLAMA = "OLLAMA"
|
||||
|
||||
class AISystem:
    """Uniform wrapper over heterogeneous LLM clients.

    Keeps a running message history and dispatches to either the Anthropic
    messages API (for AI.CLAUDE) or an OpenAI-compatible chat.completions
    API (every other AI member, including Gemini and Ollama reached through
    OpenAI-compatible endpoints).
    """

    def __init__(self, processor, system_string="", model="", type=AI.OPEN_AI):
        """
        Initialize the AISystem with a system prompt and an empty history.

        :param processor: API client — an anthropic.Anthropic instance for
            AI.CLAUDE, otherwise an OpenAI-compatible client
        :param system_string: system prompt; for Claude a plain string, for
            OpenAI-style backends a {"role", "content"} message dict
        :param model: model name forwarded to the backend
        :param type: AI enum member selecting the calling convention
            (name shadows the builtin `type`; kept for backward compatibility)
        """
        self.processor = processor
        self.system = system_string
        self.model = model
        self.messages = []  # conversation history, excluding the system prompt
        self.type = type

    def call(self, message):
        """Append `message` to the history and return the full response text.

        :param message: a {"role", "content"} message dict
        :return: the assistant's complete reply as a string
        """
        self.messages.append(message)

        if self.type == AI.CLAUDE:
            response = self.processor.messages.create(
                model=self.model,
                system=self.system,
                messages=self.messages,
                max_tokens=500
            )
            return response.content[0].text
        else:
            # Build a fresh list instead of inserting into self.messages:
            # the original `toSend = self.messages; toSend.insert(0, ...)`
            # aliased the history and accumulated one extra system message
            # per call.
            to_send = [self.system] + self.messages
            completion = self.processor.chat.completions.create(
                model=self.model,
                messages=to_send
            )
            return completion.choices[0].message.content

    def stream(self, message, usingGradio=False):
        """Append `message` to the history and yield the reply incrementally.

        :param message: a {"role", "content"} message dict
        :param usingGradio: when True, yield the cumulative text so far
            (Gradio re-renders the whole output each yield); when False,
            yield each raw delta chunk
        """
        self.messages.append(message)

        if self.type == AI.CLAUDE:
            result = self.processor.messages.stream(
                model=self.model,
                system=self.system,
                messages=self.messages,
                temperature=0.7,
                max_tokens=500
            )
            accumulated = ""
            with result as stream:
                for text in stream.text_stream:
                    if usingGradio:
                        accumulated += text or ""
                        yield accumulated
                    else:
                        yield text
        else:
            # Same aliasing fix as in call(): copy rather than mutate
            # self.messages when prepending the system prompt.
            to_send = [self.system] + self.messages
            stream = self.processor.chat.completions.create(
                model=self.model,
                messages=to_send,
                stream=True
            )
            accumulated = ""
            for chunk in stream:
                delta = chunk.choices[0].delta.content
                if usingGradio:
                    # Gradio needs the total cumulative result, not deltas.
                    accumulated += delta or ""
                    yield accumulated
                else:
                    yield delta
|
||||
@@ -0,0 +1,202 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a473d607-073d-4963-bdc4-aba654523681",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Day 2 Exercise\n",
|
||||
    "Building upon the day 1 exercise to offer multiple models via a dropdown.\n",
|
||||
    "Externalized the common methods into an AISystem.py file to be reused down the line"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f761729f-3bd5-4dd7-9e63-cbe6b4368a66",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load env, check for api keys and load up the connections"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "fedb3d94-d096-43fd-8a76-9fdbc2d0d78e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"OpenAI API Key exists and begins sk-proj-\n",
|
||||
"Anthropic API Key exists and begins sk-ant-\n",
|
||||
"Google API Key exists and begins AIzaSyC-\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"from enum import Enum, auto\n",
|
||||
"from dotenv import load_dotenv\n",
|
||||
"from openai import OpenAI\n",
|
||||
"import anthropic\n",
|
||||
"from AISystem import formatPrompt, AI, AISystem\n",
|
||||
"import gradio as gr # oh yeah!\n",
|
||||
"\n",
|
||||
"# Load environment variables in a file called .env\n",
|
||||
"# Print the key prefixes to help with any debugging\n",
|
||||
"\n",
|
||||
"load_dotenv()\n",
|
||||
"openai_api_key = os.getenv('OPENAI_API_KEY')\n",
|
||||
"anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')\n",
|
||||
"google_api_key = os.getenv('GOOGLE_API_KEY')\n",
|
||||
"\n",
|
||||
"if openai_api_key:\n",
|
||||
" print(f\"OpenAI API Key exists and begins {openai_api_key[:8]}\")\n",
|
||||
"else:\n",
|
||||
" print(\"OpenAI API Key not set\")\n",
|
||||
" \n",
|
||||
"if anthropic_api_key:\n",
|
||||
" print(f\"Anthropic API Key exists and begins {anthropic_api_key[:7]}\")\n",
|
||||
"else:\n",
|
||||
" print(\"Anthropic API Key not set\")\n",
|
||||
"\n",
|
||||
"if google_api_key:\n",
|
||||
" print(f\"Google API Key exists and begins {google_api_key[:8]}\")\n",
|
||||
"else:\n",
|
||||
" print(\"Google API Key not set\")\n",
|
||||
"\n",
|
||||
"openai = OpenAI()\n",
|
||||
"\n",
|
||||
"claude = anthropic.Anthropic()\n",
|
||||
"\n",
|
||||
"gemini_via_openai_client = OpenAI(\n",
|
||||
" api_key=google_api_key, \n",
|
||||
" base_url=\"https://generativelanguage.googleapis.com/v1beta/openai/\"\n",
|
||||
")\n",
|
||||
"ollama_via_openai = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')\n",
|
||||
"openai_model = \"gpt-4o-mini\"\n",
|
||||
"claude_model = \"claude-3-haiku-20240307\"\n",
|
||||
"gemini_model = \"gemini-1.5-flash\"\n",
|
||||
"ollama_model = \"llama3.2\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "17f7987b-2bdf-434a-8fce-6c367f148dde",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create the systems for each llms"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "f92eef29-325e-418c-a444-879d83d5fbc9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"geminiSys = AISystem(gemini_via_openai_client,\n",
|
||||
" formatPrompt(\"system\",\"You are a chatbot. you always try to make conversation and get more in depth\"), \n",
|
||||
" gemini_model,\n",
|
||||
" AI.GEMINI)\n",
|
||||
"\n",
|
||||
"openAiSys = AISystem(openai,\n",
|
||||
" formatPrompt(\"system\",\"You are a chatbot. you always try to make conversation and get more in depth\"), \n",
|
||||
" openai_model,\n",
|
||||
" AI.OPEN_AI)\n",
|
||||
"\n",
|
||||
"claudeSys = AISystem(claude,\n",
|
||||
" \"You are a chatbot. you always try to make conversation and get more in depth\", \n",
|
||||
" claude_model,\n",
|
||||
" AI.CLAUDE)\n",
|
||||
"\n",
|
||||
"ollamaSys = AISystem(ollama_via_openai,\n",
|
||||
" formatPrompt(\"system\",\"You are a chatbot. you always try to make conversation and get more in depth\"), \n",
|
||||
" ollama_model,\n",
|
||||
" AI.OLLAMA)\n",
|
||||
"sys_dict = { AI.GEMINI: geminiSys, AI.OPEN_AI: openAiSys, AI.CLAUDE: claudeSys, AI.OLLAMA: ollamaSys}\n",
|
||||
"\n",
|
||||
"def stream_model(prompt, model):\n",
|
||||
" aiSystem = sys_dict.get(AI[model.upper()])\n",
|
||||
" yield from aiSystem.stream(formatPrompt(\"user\",prompt), True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f8ecd283-92b2-454d-b1ae-8016d41e3026",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create the gradio interface linking with the AI enum for the dropdown"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "9db8ed67-280a-400d-8543-4ab95863ce51",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"* Running on local URL: http://127.0.0.1:7873\n",
|
||||
"\n",
|
||||
"To create a public link, set `share=True` in `launch()`.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/html": [
|
||||
"<div><iframe src=\"http://127.0.0.1:7873/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
|
||||
],
|
||||
"text/plain": [
|
||||
"<IPython.core.display.HTML object>"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": []
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"\n",
|
||||
"view = gr.Interface(\n",
|
||||
" fn=stream_model,\n",
|
||||
" inputs=[gr.Textbox(label=\"Your prompt:\", lines=6) , gr.Dropdown(choices=[ai.value for ai in AI], label=\"Select model\")],\n",
|
||||
" outputs=[gr.Markdown(label=\"Response:\")],\n",
|
||||
" flagging_mode=\"never\"\n",
|
||||
")\n",
|
||||
"view.launch()"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.11"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
Reference in New Issue
Block a user