fix: device placement.
app.py CHANGED

@@ -21,10 +21,11 @@ from torchvision.transforms import (
 from transformers import VideoMAEFeatureExtractor, VideoMAEForVideoClassification
 
 MODEL_CKPT = "sayakpaul/videomae-base-finetuned-kinetics-finetuned-ucf101-subset"
-MODEL = VideoMAEForVideoClassification.from_pretrained(MODEL_CKPT)
-PROCESSOR = VideoMAEFeatureExtractor.from_pretrained(MODEL_CKPT)
 DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
+MODEL = VideoMAEForVideoClassification.from_pretrained(MODEL_CKPT).to(DEVICE)
+PROCESSOR = VideoMAEFeatureExtractor.from_pretrained(MODEL_CKPT)
+
 RESIZE_TO = PROCESSOR.size
 NUM_FRAMES_TO_SAMPLE = MODEL.config.num_frames
 IMAGE_STATS = {"image_mean": [0.485, 0.456, 0.406], "image_std": [0.229, 0.224, 0.225]}
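For context, here is a minimal sketch of how the relocated globals would be used at inference time. The `run_inference` helper and its `frames` argument are hypothetical (the rest of app.py is not shown in this diff); the point is that the feature-extractor output must be moved onto the same DEVICE as the model, otherwise PyTorch raises the device-mismatch RuntimeError this commit addresses.

import torch
from transformers import VideoMAEFeatureExtractor, VideoMAEForVideoClassification

MODEL_CKPT = "sayakpaul/videomae-base-finetuned-kinetics-finetuned-ucf101-subset"
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# As in the patched app.py: load the model once and place it on DEVICE up front.
MODEL = VideoMAEForVideoClassification.from_pretrained(MODEL_CKPT).to(DEVICE)
PROCESSOR = VideoMAEFeatureExtractor.from_pretrained(MODEL_CKPT)


def run_inference(frames):
    # `frames`: a list of sampled video frames (e.g. NumPy arrays in HWC format).
    inputs = PROCESSOR(frames, return_tensors="pt")
    # Move every input tensor onto the same device as the model weights;
    # leaving them on the CPU while the model sits on the GPU is exactly
    # the device-placement error the commit fixes.
    inputs = {k: v.to(DEVICE) for k, v in inputs.items()}
    with torch.no_grad():
        logits = MODEL(**inputs).logits
    predicted_idx = int(logits.argmax(-1))
    return MODEL.config.id2label[predicted_idx]

Calling `.to(DEVICE)` once on the model at load time and on each batch of inputs per request keeps both sides of the forward pass on the same device, whether the Space runs on CPU or GPU.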