File size: 579 Bytes
5cef89c
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
"""Load the FoundationVision groma-7b-finetune checkpoint with transformers.

Downloads the model's config.json from the Hugging Face Hub, seeds torch for
reproducibility, and instantiates the model as a LlamaForCausalLM.
Requires network access to huggingface.co.
"""
from huggingface_hub import hf_hub_download

# `config` is a local filesystem path to the downloaded config.json.
# `from_pretrained` accepts a path string for its `config` argument, so this
# works as-is; it also spares transformers a second config fetch below.
config = hf_hub_download(repo_id="FoundationVision/groma-7b-finetune", filename="config.json")

from transformers import AutoModelForCausalLM, AutoTokenizer, LlamaForCausalLM, LlamaTokenizer
from transformers.generation import GenerationConfig
import torch

# Fixed seed so any subsequent sampling/generation is reproducible.
torch.manual_seed(1234)

# Groma is a Llama-architecture model, so LlamaForCausalLM is the right class.
# NOTE(review): no tokenizer is loaded here — callers doing generation will
# need e.g. LlamaTokenizer.from_pretrained on the same repo; confirm upstream.
model = LlamaForCausalLM.from_pretrained("FoundationVision/groma-7b-finetune", config=config)