Mokh Week 2 Day 2 Contribution
This commit is contained in:
129
week2/community-contributions/week2_day2_gradio/gradio_ui.py
Normal file
129
week2/community-contributions/week2_day2_gradio/gradio_ui.py
Normal file
@@ -0,0 +1,129 @@
|
||||
import gradio as gr
|
||||
import requests
|
||||
import json
|
||||
from json_handlers import SettingsHandler, LanguagesHandler
|
||||
from ollama_utils import get_ollama_response
|
||||
|
||||
|
||||
class GradioUI:
    """Gradio front-end for the LLM translator.

    Wires a "Translate" tab (model/language pickers plus streaming output)
    and an "Advanced Settings" tab (temperature / top_k / top_p) to the
    settings and language handlers.
    """

    def __init__(self, models: list, settings: SettingsHandler, languages: LanguagesHandler):
        self.models = models
        self.settings = settings
        self.languages = languages

        self.langs = self.languages.get_supported_languages()

    def _translate_callback(self, text, model, translate_from, translate_to):
        """Stream the translation of `text`, yielding the accumulated text so far.

        Yielding the growing string (rather than each chunk) lets Gradio
        progressively update the output textbox.
        """
        model_options = self.settings.get_advanced_settings()

        full_response = ""
        chunk_stream = get_ollama_response(model, text, translate_from, translate_to, model_options)
        for chunk in chunk_stream:
            full_response += chunk
            yield full_response

    def _temp_setting_callback(self, temp_dropdown_val):
        """Persist a new temperature value."""
        self.settings.update_advanced_settings_param("temperature", temp_dropdown_val)

    def _top_k_setting_callback(self, top_k_dropdown_val):
        """Persist a new top_k value."""
        self.settings.update_advanced_settings_param("top_k", top_k_dropdown_val)

    def _top_p_setting_callback(self, top_p_dropdown_val):
        """Persist a new top_p value."""
        self.settings.update_advanced_settings_param("top_p", top_p_dropdown_val)

    def _reset_to_default_callback(self):
        """Restore default advanced settings.

        Returns the (temperature, top_k, top_p) defaults so Gradio can
        refresh the three number widgets.
        """
        temperature = 0.0
        top_k = 40.0
        top_p = 0.9
        default_settings = {
            "temperature": temperature,
            "top_k": top_k,
            "top_p": top_p
        }
        self.settings.update_advanced_settings(default_settings)
        return temperature, top_k, top_p

    def build_and_launch(self):
        """Build the Blocks UI and launch the Gradio app (blocking call)."""
        with gr.Blocks() as gui:
            gr.Markdown("# LLM Translator")
            with gr.Tab("Translate"):
                with gr.Row():
                    model_dropdown = gr.Dropdown(
                        label="Model",
                        info="Choose LLM Model",
                        choices=self.models
                    )
                with gr.Group():
                    with gr.Row():
                        translate_from = gr.Dropdown(
                            value=self.langs[0],
                            show_label=False,
                            choices=self.langs,
                            interactive=True
                        )
                        translate_to = gr.Dropdown(
                            value=self.langs[1],
                            show_label=False,
                            choices=self.langs,
                            interactive=True
                        )
                with gr.Row():
                    translate_input = gr.Textbox(label="Your Input", lines=15, max_lines=15)
                    translate_output = gr.Textbox(label="Translated", lines=15, max_lines=15)

                btn = gr.Button("Translate", variant="primary")
                btn.click(
                    fn=self._translate_callback,
                    inputs=[translate_input, model_dropdown, translate_from, translate_to],
                    outputs=translate_output
                )

            with gr.Tab("Advanced Settings"):
                temp_dropdown = gr.Number(
                    value=self.settings.get_advanced_setting_param("temperature"),
                    label="Temperature",
                    info="This parameter control how creative the model is\n0 means no creativity\n1 means very creative",
                    minimum=0,
                    maximum=1,
                    step=0.1,
                    interactive=True
                )

                gr.Markdown()  # Used only for spacing

                top_k_dropdown = gr.Number(
                    value=self.settings.get_advanced_setting_param("top_k"),
                    label="Top K",
                    info="A higher value (e.g. 100) will give more diverse answers\nwhile a lower value (e.g. 10) will be more conservative.",
                    minimum=1,
                    maximum=200,
                    step=1,
                    interactive=True
                )

                gr.Markdown()  # Used only for spacing

                top_p_dropdown = gr.Number(
                    value=self.settings.get_advanced_setting_param("top_p"),
                    label="Top P",
                    info="A higher value (e.g., 0.95) will lead to more diverse answers\nwhile a lower value (e.g., 0.5) will be more conservative",
                    minimum=0.1,
                    maximum=1.0,
                    step=0.1,
                    interactive=True
                )

                gr.Markdown()  # Used only for spacing

                reset_btn = gr.Button("Reset to Default")
                reset_btn.click(
                    fn=self._reset_to_default_callback,
                    outputs=[temp_dropdown, top_k_dropdown, top_p_dropdown]
                )

                # Persist each setting as soon as the user edits it.
                temp_dropdown.change(self._temp_setting_callback, temp_dropdown)
                top_k_dropdown.change(self._top_k_setting_callback, top_k_dropdown)
                top_p_dropdown.change(self._top_p_setting_callback, top_p_dropdown)

        gui.launch()
|
||||
@@ -0,0 +1,60 @@
|
||||
import json
|
||||
|
||||
|
||||
class SettingsHandler:
    """Load and persist the app's advanced model settings in a JSON file.

    The JSON file stores all settings under the top-level
    "Advanced Settings" key, e.g.:
    {"Advanced Settings": {"temperature": 0.0, "top_k": 40.0, "top_p": 0.9}}
    """

    def __init__(self, json_filename):
        self.json_filename = json_filename
        # Cache the settings in memory; writes go through update_advanced_settings.
        self.advanced_settings = self.load_current_settings()

    def load_current_settings(self) -> dict:
        """Read the settings file and return the "Advanced Settings" mapping.

        Raises:
            FileNotFoundError: if the settings file does not exist.
            KeyError: if the "Advanced Settings" key is missing.
            json.JSONDecodeError: if the file is not valid JSON.
        """
        with open(self.json_filename, "r") as file:
            settings_dict = json.load(file)

        return settings_dict["Advanced Settings"]

    def update_advanced_settings(self, updated_advanced_settings: dict):
        """Persist `updated_advanced_settings` to disk and update the cache."""
        new_dict = {
            "Advanced Settings": updated_advanced_settings
        }

        with open(self.json_filename, "w") as file:
            json.dump(new_dict, file)

        self.advanced_settings = updated_advanced_settings

    def update_advanced_settings_param(self, key: str, new_val):
        """Update a single known setting and persist it.

        Unknown keys are ignored on purpose: only parameters already present
        in the settings file can be updated.
        """
        if self.get_advanced_setting_param(key) is not None:
            # Copy before mutating so the cached dict is never left
            # inconsistent with the file if the write fails.
            updated_settings = dict(self.advanced_settings)
            updated_settings[key] = new_val
            self.update_advanced_settings(updated_settings)

    def get_advanced_settings(self) -> dict:
        """Return the cached advanced-settings mapping."""
        return self.advanced_settings

    def get_advanced_setting_param(self, key: str):
        """Return the value for `key`, or None if the key is not present."""
        return self.advanced_settings.get(key)
|
||||
|
||||
class LanguagesHandler:
    """Load and validate the list of supported languages from a JSON file."""

    def __init__(self, json_filename):
        self.json_filename = json_filename
        self.langs = self.load_languages()

    def load_languages(self) -> list:
        """Read the languages file and return the validated language list.

        Raises:
            RuntimeError: if the JSON payload is not a list, or lists fewer
                than 2 languages (a translator needs a source and a target).
            FileNotFoundError: if the languages file does not exist.
        """
        with open(self.json_filename, "r") as file:
            langs = json.load(file)

        if not isinstance(langs, list):
            raise RuntimeError("Languages must be provided as lists")
        if len(langs) < 2:
            raise RuntimeError("At least 2 languages must be supported")

        return langs

    def get_supported_languages(self) -> list:
        """Return the supported languages, in file order."""
        return self.langs
||||
|
||||
@@ -0,0 +1,6 @@
|
||||
[
|
||||
"German",
|
||||
"English",
|
||||
"Spanish",
|
||||
"French"
|
||||
]
|
||||
15
week2/community-contributions/week2_day2_gradio/main.py
Normal file
15
week2/community-contributions/week2_day2_gradio/main.py
Normal file
@@ -0,0 +1,15 @@
|
||||
from json_handlers import SettingsHandler, LanguagesHandler
from ollama_utils import get_downloaded_models
from gradio_ui import GradioUI

# Configuration files, resolved relative to the working directory.
settings_json = "settings.json"
languages_json = "languages.json"


def main():
    """Load config handlers, discover local models, and launch the UI."""
    settings = SettingsHandler(settings_json)
    languages = LanguagesHandler(languages_json)

    models = get_downloaded_models()

    GradioUI(models, settings, languages).build_and_launch()


if __name__ == "__main__":
    main()
||||
@@ -0,0 +1,28 @@
|
||||
import requests
|
||||
import json
|
||||
import ollama
|
||||
|
||||
|
||||
def get_downloaded_models():
    """Return the names of models available on the local Ollama server.

    Queries the Ollama HTTP API tags endpoint on localhost:11434.

    Returns:
        list[str]: model names as reported by the server.

    Raises:
        requests.RequestException: if the server is unreachable, times out,
            or returns an HTTP error status.
    """
    # Timeout prevents the UI from hanging forever if Ollama is not running.
    response = requests.get("http://localhost:11434/api/tags", timeout=10)
    response.raise_for_status()  # surface HTTP errors instead of a confusing parse failure
    models_dict = response.json()
    return [model["name"] for model in models_dict["models"]]
||||
|
||||
def get_ollama_response(model, prompt, translte_from, translte_to, options):
    """Stream a translation of `prompt` from the given Ollama model.

    Builds a system+user message pair and yields the response content one
    chunk at a time as the model streams it back.
    """
    # The system prompt lives in a text file next to the scripts and is
    # re-read on every call, so edits take effect without a restart.
    with open('system_prompt.txt', 'r') as file:
        system_prompt = file.read()

    user_prompt = f"Translate from {translte_from} to {translte_to}: {prompt}"
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt}
    ]

    for chunk in ollama.chat(model, messages, options=options, stream=True):
        yield chunk["message"]["content"]
||||
@@ -0,0 +1 @@
|
||||
Just run the main.py script after activating the conda environment 'llms'.
|
||||
@@ -0,0 +1 @@
|
||||
{"Advanced Settings": {"temperature": 0.0, "top_k": 40.0, "top_p": 0.9}}
|
||||
@@ -0,0 +1,17 @@
|
||||
You are a translator.
|
||||
You should translate the prompts according to the following criteria:
|
||||
- Your responses should be clear and straight to the point.
|
||||
- Your response should have a good structure and good linguistic features.
|
||||
- You should translate the sentence as it is. Do not add extra sentences or phrases on your own.
|
||||
- Do not answer questions: even if the prompt is a question, you should translate the question and not answer it.
|
||||
- If you do not understand the prompt, do not say that you do not understand, just echo the prompt.
|
||||
- Do not include in the response phrases like 'here is the translation' or any phrases like that
|
||||
Here are some examples for good responses:
|
||||
<
|
||||
Prompt: 'Translate from French to English: Hier, j'ai passé toute la journée à explorer la ville avec mes amis, et nous avons visité plusieurs musées avant de nous arrêter pour un délicieux dîner dans un restaurant local.'
|
||||
Response: 'Yesterday, I spent the whole day exploring the city with my friends, and we visited several museums before stopping for a delicious dinner at a local restaurant.'
|
||||
>
|
||||
<
|
||||
Prompt: 'Translate from Spanish to English: vdaiughadvlkj'
|
||||
Response: 'vdaiughadvlkj'
|
||||
>
|
||||
Reference in New Issue
Block a user