Hugging Face Spaces — runtime error

Commit: "Update models/videochat.py"
Changed file: models/videochat.py (+4 −2)
@@ -109,7 +109,7 @@ class VideoChat(Blip2Base):
         print('Loading Q-Former Done')

         print('Loading LLAMA')
-        self.llama_tokenizer = LlamaTokenizer.from_pretrained(llama_model_path, use_fast=False)
+        self.llama_tokenizer = LlamaTokenizer.from_pretrained(llama_model_path, use_fast=False, use_auth_token=os.environ["HF_TOKEN"])
         self.llama_tokenizer.pad_token = self.llama_tokenizer.eos_token

         if self.low_resource:
@@ -117,12 +117,14 @@ class VideoChat(Blip2Base):
                 llama_model_path,
                 torch_dtype=torch.float16,
                 load_in_8bit=True,
-                device_map="auto"
+                device_map="auto",
+                use_auth_token=os.environ["HF_TOKEN"],
             )
         else:
             self.llama_model = LlamaForCausalLM.from_pretrained(
                 llama_model_path,
                 torch_dtype=torch.float16,
+                use_auth_token=os.environ["HF_TOKEN"],
             )

         print("freeze LLAMA")