Spaces:
Runtime error
Runtime error
Update models/videochat.py
Browse files- models/videochat.py +12 -1
models/videochat.py
CHANGED
@@ -1,6 +1,7 @@
|
|
1 |
import os
|
2 |
import random
|
3 |
-
import
|
|
|
4 |
|
5 |
import torch
|
6 |
from torch.cuda.amp import autocast as autocast
|
@@ -11,6 +12,8 @@ from .modeling_llama import LlamaForCausalLM
|
|
11 |
from transformers import LlamaTokenizer, LlamaConfig
|
12 |
|
13 |
|
|
|
|
|
14 |
class VideoChat(Blip2Base):
|
15 |
"""
|
16 |
VideoChat model.
|
@@ -113,6 +116,14 @@ class VideoChat(Blip2Base):
|
|
113 |
self.llama_tokenizer = LlamaTokenizer.from_pretrained(llama_model_path, use_fast=False, use_auth_token=os.environ["HF_TOKEN"])
|
114 |
self.llama_tokenizer.pad_token = self.llama_tokenizer.eos_token
|
115 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
116 |
if self.low_resource:
|
117 |
self.llama_model = LlamaForCausalLM.from_pretrained(
|
118 |
llama_model_path,
|
|
|
1 |
import os
|
2 |
import random
|
3 |
+
import
|
4 |
+
logging
|
5 |
|
6 |
import torch
|
7 |
from torch.cuda.amp import autocast as autocast
|
|
|
12 |
from transformers import LlamaTokenizer, LlamaConfig
|
13 |
|
14 |
|
15 |
+
|
16 |
+
|
17 |
class VideoChat(Blip2Base):
|
18 |
"""
|
19 |
VideoChat model.
|
|
|
116 |
self.llama_tokenizer = LlamaTokenizer.from_pretrained(llama_model_path, use_fast=False, use_auth_token=os.environ["HF_TOKEN"])
|
117 |
self.llama_tokenizer.pad_token = self.llama_tokenizer.eos_token
|
118 |
|
119 |
+
import psutil
|
120 |
+
import os
|
121 |
+
print(u'当前进程的内存使用：%.4f GB' % (psutil.Process(os.getpid()).memory_info().rss / 1024 / 1024 / 1024) )
|
122 |
+
info = psutil.virtual_memory()
|
123 |
+
print( u'电脑总内存：%.4f GB' % (info.total / 1024 / 1024 / 1024) )
|
124 |
+
print(u'当前使用的总内存占比：',info.percent)
|
125 |
+
print(u'cpu个数：',psutil.cpu_count())
|
126 |
+
|
127 |
if self.low_resource:
|
128 |
self.llama_model = LlamaForCausalLM.from_pretrained(
|
129 |
llama_model_path,
|