Merge pull request #261 from kellewic/community-contributions-kellewic
Changed to use modal.Volume instead of modal.build()
@@ -1,15 +1,22 @@
 import modal
-from modal import App, Volume, Image
+from pathlib import PurePosixPath
 
 # Setup - define our infrastructure with code!
 
 app = modal.App("pricer-service")
-image = Image.debian_slim().pip_install("huggingface", "torch", "transformers", "bitsandbytes", "accelerate", "peft")
-secrets = [modal.Secret.from_name("hf-secret")]
+secrets = [modal.Secret.from_name("huggingface-secret")]
 
-# Constants
+image = modal.Image.debian_slim().pip_install(
+    "huggingface", "torch", "transformers", "bitsandbytes",
+    "accelerate", "peft", "huggingface_hub[hf_transfer]"
+).env({"HF_HUB_ENABLE_HF_TRANSFER": "1"})
+
+# This is where we cache model files to avoid redownloading each time a container is started
+hf_cache_vol = modal.Volume.from_name("hf-cache", create_if_missing=True)
 
 GPU = "T4"
+# Keep N containers active to avoid cold starts
+MIN_CONTAINERS = 0
 BASE_MODEL = "meta-llama/Meta-Llama-3.1-8B"
 PROJECT_NAME = "pricer"
 HF_USER = "ed-donner" # your HF name here! Or use mine if you just want to reproduce my results.
@@ -17,30 +24,28 @@ RUN_NAME = "2024-09-13_13.04.39"
 PROJECT_RUN_NAME = f"{PROJECT_NAME}-{RUN_NAME}"
 REVISION = "e8d637df551603dc86cd7a1598a8f44af4d7ae36"
 FINETUNED_MODEL = f"{HF_USER}/{PROJECT_RUN_NAME}"
-MODEL_DIR = "hf-cache/"
-BASE_DIR = MODEL_DIR + BASE_MODEL
-FINETUNED_DIR = MODEL_DIR + FINETUNED_MODEL
+
+# Mount for cache location
+MODEL_DIR = PurePosixPath("/models")
+BASE_DIR = MODEL_DIR / BASE_MODEL
+FINETUNED_DIR = MODEL_DIR / FINETUNED_MODEL
 
 QUESTION = "How much does this cost to the nearest dollar?"
 PREFIX = "Price is $"
 
-@app.cls(image=image, secrets=secrets, gpu=GPU, timeout=1800)
+@app.cls(image=image, secrets=secrets, gpu=GPU, timeout=1800, min_containers=MIN_CONTAINERS, volumes={MODEL_DIR: hf_cache_vol})
 class Pricer:
-    @modal.build()
-    def download_model_to_folder(self):
-        from huggingface_hub import snapshot_download
-        import os
-        os.makedirs(MODEL_DIR, exist_ok=True)
-        snapshot_download(BASE_MODEL, local_dir=BASE_DIR)
-        snapshot_download(FINETUNED_MODEL, revision=REVISION, local_dir=FINETUNED_DIR)
-
     @modal.enter()
     def setup(self):
-        import os
         import torch
-        from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, set_seed
+        from huggingface_hub import snapshot_download
+        from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
        from peft import PeftModel
 
+        # Download and cache model files to the volume
+        snapshot_download(BASE_MODEL, local_dir=BASE_DIR)
+        snapshot_download(FINETUNED_MODEL, revision=REVISION, local_dir=FINETUNED_DIR)
+
         # Quant Config
         quant_config = BitsAndBytesConfig(
             load_in_4bit=True,
@@ -50,7 +55,6 @@ class Pricer:
         )
 
         # Load model and tokenizer
-
         self.tokenizer = AutoTokenizer.from_pretrained(BASE_DIR)
         self.tokenizer.pad_token = self.tokenizer.eos_token
         self.tokenizer.padding_side = "right"
@@ -65,11 +69,8 @@ class Pricer:
 
     @modal.method()
     def price(self, description: str) -> float:
-        import os
-        import re
-        import torch
-        from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, set_seed
-        from peft import PeftModel
+        import re, torch
+        from transformers import set_seed
 
         set_seed(42)
         prompt = f"{QUESTION}\n\n{description}\n\n{PREFIX}"
@@ -83,7 +84,3 @@ class Pricer:
         match = re.search(r"[-+]?\d*\.\d+|\d+", contents)
         return float(match.group()) if match else 0
 
-    @modal.method()
-    def wake_up(self) -> str:
-        return "ok"
-
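
A note on the pattern for anyone reading along: Modal has deprecated the @modal.build() step in favor of attaching a persistent modal.Volume and downloading model files when a container starts, which is exactly what this change does. Below is a minimal, self-contained sketch of the same caching pattern; the app name, volume name, and mount path are illustrative, not taken from this PR.

import modal

app = modal.App("volume-cache-demo")  # illustrative name

# Persistent named volume; created on the first run if it doesn't exist yet
weights_vol = modal.Volume.from_name("demo-weights", create_if_missing=True)

image = modal.Image.debian_slim().pip_install("huggingface_hub")

@app.function(image=image, volumes={"/models": weights_vol})
def warm_cache(repo_id: str):
    from huggingface_hub import snapshot_download
    # Files written here land on the volume, so later containers skip the download
    snapshot_download(repo_id, local_dir=f"/models/{repo_id}")
    weights_vol.commit()  # persist writes so other containers can see them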
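
To try the updated service end to end, here is a sketch of the client side, assuming the current Modal lookup API (modal.Cls.from_name) and the app deployed under the name "pricer-service"; the product description is made up:

import modal

# Deploy first with: modal deploy <path to the service file>
Pricer = modal.Cls.from_name("pricer-service", "Pricer")
pricer = Pricer()

# A cold start downloads weights to the volume; warm calls reuse the cache
estimate = pricer.price.remote("Shure MV7 dynamic podcast microphone")
print(f"Estimated price: ${estimate:.2f}")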