fix: size accepted by the model because of the recent change in preprocessors
app.py CHANGED

@@ -26,7 +26,7 @@ DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 MODEL = VideoMAEForVideoClassification.from_pretrained(MODEL_CKPT).to(DEVICE)
 PROCESSOR = VideoMAEFeatureExtractor.from_pretrained(MODEL_CKPT)
 
-RESIZE_TO = PROCESSOR.size
+RESIZE_TO = PROCESSOR.size["shortest_edge"]
 NUM_FRAMES_TO_SAMPLE = MODEL.config.num_frames
 IMAGE_STATS = {"image_mean": [0.485, 0.456, 0.406], "image_std": [0.229, 0.224, 0.225]}
 VAL_TRANSFORMS = Compose(
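Context for the change: in recent transformers releases the preprocessors expose `size` as a dict (for example {"shortest_edge": 224}) rather than a bare integer, so the old `PROCESSOR.size` assignment stopped yielding a usable resize value. The snippet below is a minimal, version-tolerant sketch of reading that value; the checkpoint name is a placeholder and the "height" fallback is an assumption, not something taken from this Space.

# Sketch: handle both the old (int) and new (dict) forms of PROCESSOR.size.
from transformers import VideoMAEFeatureExtractor

MODEL_CKPT = "MCG-NJU/videomae-base"  # placeholder checkpoint for illustration

PROCESSOR = VideoMAEFeatureExtractor.from_pretrained(MODEL_CKPT)

size = PROCESSOR.size
if isinstance(size, dict):
    # Newer preprocessors return a dict; prefer "shortest_edge" and fall
    # back to "height" if it is absent (assumed fallback, not from the Space).
    RESIZE_TO = size.get("shortest_edge", size.get("height"))
else:
    # Older releases returned a plain integer.
    RESIZE_TO = size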