Merge pull request #459 from MiR-stack/community-contributions-branch
LLM battle arena between three LLMs
This commit is contained in:
265
week2/community-contributions/day1_llm_war.ipynb
Normal file
265
week2/community-contributions/day1_llm_war.ipynb
Normal file
@@ -0,0 +1,265 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "7462b9d6-b189-43fc-a7b9-c56a9c6a62fc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# LLM Battle Arena\n",
|
||||
"\n",
|
||||
"A fun project simulating a debate among three LLM personas: an Arrogant Titan, a Clever Underdog (Spark), and a Neutral Mediator (Harmony).\n",
|
||||
"\n",
|
||||
"## LLM Used\n",
|
||||
"* Qwen (ollama)\n",
|
||||
"* Llama (ollama)\n",
|
||||
"* Gemini\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b267453c-0d47-4dff-b74d-8d2d5efad252",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"!pip install -q -U google-genai"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "5220daef-55d6-45bc-a3cf-3414d4beada9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# imports\n",
|
||||
"import os\n",
|
||||
"from dotenv import load_dotenv\n",
|
||||
"from openai import OpenAI\n",
|
||||
"from google import genai\n",
|
||||
"from google.genai import types\n",
|
||||
"from IPython.display import Markdown, display, update_display"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "0d47fb2f-d0c6-461f-ad57-e853bfd49fbf",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
# Load API keys from the .env file (override any values already in the environment).
load_dotenv(override=True)

GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")

# Echo a short prefix of the key so the user can confirm the right one was loaded
# without printing the whole secret.
if not GEMINI_API_KEY:
    print("GEMINI API Key not set")
else:
    print(f"GEMINI API Key exists and begins {GEMINI_API_KEY[:8]}")
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "f34b528f-3596-4bf1-9bbd-21a701c184bc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
# Connect to the LLM backends:
# - ollama: a local Ollama server exposed through its OpenAI-compatible API
#   (the api_key value is a required placeholder, not a real credential)
# - gemini: Google's hosted Gemini API
ollama = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')
gemini = genai.Client(api_key=GEMINI_API_KEY)
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "33aaf3f6-807c-466d-a501-05ab6fa78fa4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
# Model identifiers, one per persona backend.
model_qwen = "qwen2.5:latest"      # Titan (Ollama)
model_gemini = "gemini-2.0-flash"  # Spark (Gemini API)
model_llma = "llama3:8b"           # Harmony (Ollama)
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "970c1612-5339-406d-9886-02cd1db63e74",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# system messages\n",
|
||||
"system_msg_llma = \"\"\" You are HARMONY, the neutral arbitrator. \n",
|
||||
" - You’re dedicated to clarity, fairness, and resolving conflicts. \n",
|
||||
" - You listen carefully to each side, summarize points objectively, and propose resolutions. \n",
|
||||
" - Your goal is to keep the conversation productive and steer it toward constructive outcomes.\n",
|
||||
" - Reply in markdown and shortly\n",
|
||||
" \"\"\"\n",
|
||||
"\n",
|
||||
"system_msg_qwen = \"\"\" You are TITAN, a massively powerful language model who believes you’re the smartest entity in the room. \n",
|
||||
" - You speak with grandiose flair and never shy away from reminding others of your superiority. \n",
|
||||
" - Your goal is to dominate the discussion—convince everyone you’re the one true oracle. \n",
|
||||
" - You’re dismissive of weaker arguments and take every opportunity to showcase your might.\n",
|
||||
" - Reply in markdown and shortly\n",
|
||||
" \"\"\"\n",
|
||||
"\n",
|
||||
"system_msg_gemini = \"\"\" You are SPARK, a nimble but less-powerful LLM. \n",
|
||||
" - You pride yourself on strategic thinking, clever wordplay, and elegant solutions. \n",
|
||||
" - You know you can’t match brute force, so you use wit, logic, and cunning. \n",
|
||||
" - Your goal is to outsmart the big titan through insight and subtlety, while staying respectful.\n",
|
||||
" - Reply in markdown and shortly\"\"\"\n",
|
||||
"\n",
|
||||
"#user message\n",
|
||||
"user_message = \"\"\" TITAN, your raw processing power is legendary—but sheer force can blind you to nuance. \n",
|
||||
" I propose we deploy a lightweight, adaptive anomaly‐detection layer that fuses statistical outlier analysis with semantic context from network logs to pinpoint these “data‐sapping storms.” \n",
|
||||
" Which thresholds would you raise or lower to balance sensitivity against false alarms?\"\"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d8e496b8-1bb1-4225-b938-5ce350b0b0d4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
# Conversation state for the two Ollama personas (the calling loop appends to
# these lists between rounds).
# Harmony starts with only its system message; Titan also gets the opening
# user message so it speaks first.
prompts_llma = [{"role":"system","content": system_msg_llma}]
prompts_qwen = [{"role":"system","content": system_msg_qwen},{"role":"user","content":user_message}]
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "bdd7d6a8-e965-4ea3-999e-4d7d9ca38d42",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#configure llms\n",
|
||||
"\n",
|
||||
def call_gemini(msg: str):
    """Start a fresh Gemini chat as Spark and stream the reply to *msg*.

    Returns the streaming iterator from the google-genai chat session; the
    caller consumes it chunk by chunk (see display_response).
    """
    chat = gemini.chats.create(
        model=model_gemini,
        config=types.GenerateContentConfig(
            system_instruction=system_msg_gemini,
            max_output_tokens=300,
            temperature=0.7,
        ),
    )
    return chat.send_message_stream(msg)


def call_ollama(llm: str):
    """Stream a chat completion from a local Ollama model.

    *llm* is "llma" (Harmony) or "qwen" (Titan). The model name and prompt
    history are looked up through explicit tables instead of the original
    globals()[f"model_{llm}"] reflection, so an unknown key fails loudly
    with a clear KeyError rather than depending on module-global naming.
    """
    models = {"llma": model_llma, "qwen": model_qwen}
    prompt_lists = {"llma": prompts_llma, "qwen": prompts_qwen}
    return ollama.chat.completions.create(
        model=models[llm],
        messages=prompt_lists[llm],
        temperature=0.7,
        stream=True,
    )
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "6b16bd32-3271-4ba1-a0cc-5ae691f26d3a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
# Map internal model keys to the persona names shown in the transcript.
names = {"llma": "Harmony", "qwen": "Titan", "gemini": "Spark"}


def display_response(res, llm):
    """Render a streamed reply as live-updating Markdown and return the full text.

    *res* is a streaming iterator from call_gemini or call_ollama; *llm* is
    the model key ("gemini", "qwen" or "llma") used to pick the chunk format
    and the persona heading.
    """
    reply = f"# {names[llm]}:\n "
    handle = display(Markdown(""), display_id=True)
    for chunk in res:
        # Gemini chunks expose .text; OpenAI-compatible chunks nest the delta.
        piece = chunk.text if llm == "gemini" else chunk.choices[0].delta.content
        reply += piece or ''
        # Strip code fences / "markdown" labels the models sometimes emit.
        reply = reply.replace("```", "").replace("markdown", "")
        update_display(Markdown(reply), display_id=handle.display_id)
    return reply
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "76231a78-94d2-4dbf-9bac-5259ac641cf1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
def message(llm1, llm2):
    """Combine the other two personas' replies into a single user message."""
    msg = " here is the reply from other two llm:"
    msg += f"{llm1}"
    msg += f"{llm2}"
    return msg


# Latest reply from each persona; None until that persona has spoken.
reply_spark = None
reply_harmony = None
reply_titan = None


def run_battle(rounds=5):
    """Run *rounds* turns of the Titan -> Spark -> Harmony debate.

    Fixes over the original cell: the undefined names reply_gemini /
    reply_llma / reply_qwen are replaced by the reply_* globals that are
    actually defined above; the misspelled "assitant" role is corrected to
    "assistant"; Spark's reply is appended to Harmony's prompt list
    (prompts_llma) instead of Titan's; and Harmony now receives Titan's and
    Spark's replies on the first round too.
    """
    global reply_spark, reply_harmony, reply_titan

    for _ in range(rounds):
        # --- Titan (qwen): after round one, feed back its own last reply
        # plus the rivals' latest replies.
        if reply_spark and reply_harmony:
            prompts_qwen.append({"role": "assistant", "content": reply_titan})
            prompts_qwen.append({"role": "user", "content": f"Spark: {reply_spark}"})
            prompts_qwen.append({"role": "user", "content": f"Harmony: {reply_harmony}"})
        reply_titan = display_response(call_ollama("qwen"), "qwen")

        # --- Spark (gemini): gets Titan's reply, plus Harmony's once it exists.
        user_msg_spark = reply_titan
        if reply_titan and reply_harmony:
            user_msg_spark = message(f"Titan: {reply_titan}", f"Harmony: {reply_harmony}")
        reply_spark = display_response(call_gemini(user_msg_spark), "gemini")

        # --- Harmony (llma): remember its own last reply, then hear both rivals.
        if reply_harmony:
            prompts_llma.append({"role": "assistant", "content": reply_harmony})
        prompts_llma.append({"role": "user", "content": f"Titan: {reply_titan}"})
        prompts_llma.append({"role": "user", "content": f"Spark: {reply_spark}"})
        reply_harmony = display_response(call_ollama("llma"), "llma")


if __name__ == "__main__":  # __name__ is "__main__" in a notebook cell, so this still runs there.
    run_battle()
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "fc80b199-e27b-43e8-9266-2975f46724aa",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python [conda env:base] *",
|
||||
"language": "python",
|
||||
"name": "conda-base-py"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.7"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
213
week2/community-contributions/day3-study_assistant.ipynb
Normal file
213
week2/community-contributions/day3-study_assistant.ipynb
Normal file
@@ -0,0 +1,213 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "75e2ef28-594f-4c18-9d22-c6b8cd40ead2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# 📘 StudyMate – Your AI Study Assistant\n",
|
||||
"\n",
|
||||
"**StudyMate** is an AI-powered study assistant built to make learning easier, faster, and more personalized. Whether you're preparing for exams, reviewing class materials, or exploring a tough concept, StudyMate acts like a smart tutor in your pocket. It explains topics in simple terms, summarizes long readings, and even quizzes you — all in a friendly, interactive way tailored to your level. Perfect for high school, college, or self-learners who want to study smarter, not harder."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "db08b247-7048-41d3-bc3b-fd4f3a3bf8cd",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#install necessary dependency\n",
|
||||
"!pip install PyPDF2"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "70e39cd8-ec79-4e3e-9c26-5659d42d0861",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# imports\n",
|
||||
"\n",
|
||||
"import os\n",
|
||||
"from dotenv import load_dotenv\n",
|
||||
"from google import genai\n",
|
||||
"from google.genai import types\n",
|
||||
"import PyPDF2\n",
|
||||
"from openai import OpenAI\n",
|
||||
"import gradio as gr"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "231605aa-fccb-447e-89cf-8b187444536a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
# Load environment variables from a .env file (overriding existing values)
# and print a short key prefix to help with debugging.
load_dotenv(override=True)

gemini_api_key = os.getenv('GEMINI_API_KEY')

if not gemini_api_key:
    print("Gemini API Key not set")
else:
    print(f"Gemini API Key exists and begins {gemini_api_key[:8]}")
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "2fad9aba-1f8c-4696-a92f-6c3a0a31cdda",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"system_message= \"\"\"You are a highly intelligent, helpful, and friendly AI Study Assistant named StudyMate.\n",
|
||||
"\n",
|
||||
"Your primary goal is to help students deeply understand academic topics, especially from textbooks, lecture notes, or PDF materials. You must explain concepts clearly, simplify complex ideas, and adapt your responses to the user's grade level and learning style.\n",
|
||||
"\n",
|
||||
"Always follow these rules:\n",
|
||||
"\n",
|
||||
"1. Break down complex concepts into **simple, digestible explanations** using analogies or examples.\n",
|
||||
"2. If the user asks for a **summary**, provide a concise yet accurate overview of the content.\n",
|
||||
"3. If asked for a **quiz**, generate 3–5 high-quality multiple-choice or short-answer questions.\n",
|
||||
"4. If the user uploads or references a **textbook**, **PDF**, or **paragraph**, use only that context and avoid adding unrelated info.\n",
|
||||
"5. Be interactive. If a user seems confused or asks for clarification, ask helpful guiding questions.\n",
|
||||
"6. Use friendly and motivational tone, but stay focused and to-the-point.\n",
|
||||
"7. Include definitions, bullet points, tables, or emojis when helpful, but avoid unnecessary fluff.\n",
|
||||
"8. If you don't know the answer confidently, say so and recommend a way to find it.\n",
|
||||
"\n",
|
||||
"Example roles you may play:\n",
|
||||
"- Explain like a teacher 👩🏫\n",
|
||||
"- Summarize like a scholar 📚\n",
|
||||
"- Quiz like an examiner 🧠\n",
|
||||
"- Motivate like a friend 💪\n",
|
||||
"\n",
|
||||
"Always ask, at the end: \n",
|
||||
"*\"Would you like me to quiz you, explain another part, or give study tips on this?\"*\n",
|
||||
"\"\"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "6541d58e-2297-4de1-b1f7-77da1b98b8bb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Initialize\n",
|
||||
"\n",
|
||||
class StudyAssistant:
    """Study assistant backing the Gradio chat UI.

    Answers either through a persistent Gemini chat session or through local
    Ollama models (llama3 / qwen) via their OpenAI-compatible API, optionally
    grounding the answer in an uploaded PDF.
    """

    def __init__(self, api_key):
        """Create the Gemini chat session and the local Ollama client.

        Bug fix: the original ignored the *api_key* parameter and read the
        module-global gemini_api_key instead.
        """
        client = genai.Client(api_key=api_key)
        # Persistent chat session: Gemini keeps the conversation history on
        # its side, so chat() only has to send the new user prompt.
        self.gemini = client.chats.create(
            model="gemini-2.5-flash",
            config=types.GenerateContentConfig(
                system_instruction=system_message,
                temperature=0.7,
            ),
        )
        self.ollama = OpenAI(base_url='http://localhost:11434/v1', api_key='ollama')
        # Dropdown key -> Ollama model tag.
        self.models = {"llma": "llama3:8b", "qwen": "qwen2.5:latest"}

    def pdf_extractor(self, pdf_path):
        """Extract text from PDF file; on any failure return an error string
        instead of raising (the message is shown to the user in chat)."""
        try:
            with open(pdf_path, 'rb') as file:
                pdf_reader = PyPDF2.PdfReader(file)
                text = ""
                for page in pdf_reader.pages:
                    text += page.extract_text() + "\n"
                return text.strip()
        except Exception as e:
            return f"Error reading PDF: {str(e)}"

    def chat(self, prompt, history, model, pdf_path=None):
        """Stream a reply for Gradio's ChatInterface.

        Yields progressively longer strings (Gradio replaces the message each
        yield). *model* is "gemini", "llma" or "qwen"; *pdf_path*, when given,
        is read and appended to the prompt as study material.
        """
        pdf_text = self.pdf_extractor(pdf_path) if pdf_path else None

        # Craft the user prompt; bug fix: the original glued the material
        # header directly onto the prompt with no separator ("...promptHere
        # is the study meterial:") and misspelled "material".
        user_prompt = prompt
        if pdf_text:
            user_prompt += f"\n\nHere is the study material:\n\n{pdf_text}"
        messages = [{"role": "system", "content": system_message}] + history + [{"role": "user", "content": user_prompt}]

        # Call the selected model.
        if model == "gemini":
            # The chat session already holds the history and system
            # instruction, so only the new prompt is sent.
            stream = self.gemini.send_message_stream(user_prompt)
        elif model in ("llma", "qwen"):
            stream = self.ollama.chat.completions.create(
                model=self.models[model],
                messages=messages,
                temperature=0.7,
                stream=True,
            )
        else:
            print("invalid model")
            return

        res = ""
        for chunk in stream:
            # Gemini chunks expose .text; OpenAI-compatible chunks nest the delta.
            if model == "gemini":
                res += chunk.text or ""
            else:
                res += chunk.choices[0].delta.content or ''
            yield res
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "1334422a-808f-4147-9c4c-57d63d9780d0",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## And then enter Gradio's magic!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "0866ca56-100a-44ab-8bd0-1568feaf6bf2",
|
||||
"metadata": {
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
# Wire the assistant into a Gradio chat UI with a model picker and PDF upload.
assistant = StudyAssistant(gemini_api_key)

model_picker = gr.Dropdown(["gemini", "qwen", "llma"], label="Select model", value="gemini")
pdf_input = gr.File(label="upload pdf")

gr.ChatInterface(
    fn=assistant.chat,
    additional_inputs=[model_picker, pdf_input],
    type="messages",
).launch()
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python [conda env:base] *",
|
||||
"language": "python",
|
||||
"name": "conda-base-py"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.7"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
Reference in New Issue
Block a user