# app.py
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, LlamaForCausalLM
from transformers.generation import GenerationConfig

# Fix the random seed so sampled generations are reproducible.
torch.manual_seed(1234)

# hf_hub_download returns the local path of the downloaded file;
# from_pretrained accepts that JSON path for its `config` argument.
config_path = hf_hub_download(repo_id="FoundationVision/groma-7b-finetune", filename="config.json")

# Leftover from the Qwen-VL-Chat example (its "injection attack prevention"
# note applies to Qwen's tokenizer, not to this model):
# tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-VL-Chat", trust_remote_code=True)

model = LlamaForCausalLM.from_pretrained("FoundationVision/groma-7b-finetune", config=config_path)
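
# A minimal usage sketch, not part of the original script. It assumes the
# checkpoint repo ships LLaMA-compatible tokenizer files, which is not
# confirmed here; substitute a base LLaMA tokenizer if it does not. Groma
# itself is a multimodal grounding model, so this text-only call only
# exercises the LLaMA backbone loaded above.
tokenizer = AutoTokenizer.from_pretrained("FoundationVision/groma-7b-finetune")

prompt = "Describe the main object in the image."  # hypothetical prompt
inputs = tokenizer(prompt, return_tensors="pt")

# Sampling is enabled so that torch.manual_seed(1234) above actually
# influences the output; greedy decoding would ignore the seed.
gen_config = GenerationConfig(max_new_tokens=64, do_sample=True, temperature=0.7)
output_ids = model.generate(**inputs, generation_config=gen_config)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))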