englert committed
Commit 581010c · 1 parent: 2e633e0
update app.py #5
app.py CHANGED
@@ -22,67 +22,6 @@ model.load_state_dict(torch.load("model.pt"))
 model.eval()
 avg_pool = torch.nn.AdaptiveAvgPool2d((1, 1))
 
-import urllib.request
-urllib.request.urlretrieve("https://media.githubusercontent.com/media/commaai/speedchallenge/master/data/test.mp4", 'video_name.mp4')
-input_file = 'video_name.mp4'
-
-downsample_size = int(100)
-
-base_directory = os.getcwd()
-selected_directory = os.path.join(base_directory, "selected_images")
-if os.path.isdir(selected_directory):
-    shutil.rmtree(selected_directory)
-os.mkdir(selected_directory)
-
-zip_path = os.path.join(selected_directory, input_file.split('/')[-1][:-4] + ".zip")
-
-mean = np.asarray([0.3156024, 0.33569682, 0.34337464], dtype=np.float32)
-std = np.asarray([0.16568947, 0.17827448, 0.18925823], dtype=np.float32)
-
-img_vecs = []
-with torch.no_grad():
-    for fp_i, file_path in enumerate([input_file]):
-        for i, in_img in enumerate(video_reader(file_path,
-                                                targetFPS=9,
-                                                targetWidth=100,
-                                                to_rgb=True)):
-            in_img = (in_img.astype(np.float32) / 255.)
-            in_img = (in_img - mean) / std
-            in_img = np.expand_dims(in_img, 0)
-            in_img = np.transpose(in_img, (0, 3, 1, 2))
-            in_img = torch.from_numpy(in_img).float()
-            encoded = avg_pool(model(in_img))[0, :, 0, 0].cpu().numpy()
-            img_vecs += [encoded]
-img_vecs = np.asarray(img_vecs)
-print("images encoded")
-rv_indices, _ = furthest_neighbours(
-    img_vecs,
-    downsample_size,
-    seed=0)
-indices = np.zeros((img_vecs.shape[0],))
-indices[np.asarray(rv_indices)] = 1
-print("images selected")
-
-global_ctr = 0
-for fp_i, file_path in enumerate([input_file]):
-    for i, img in enumerate(video_reader(file_path,
-                                         targetFPS=9,
-                                         targetWidth=None,
-                                         to_rgb=False)):
-        if indices[global_ctr] == 1:
-            cv2.imwrite(join(selected_directory, str(global_ctr) + ".jpg"), img)
-        global_ctr += 1
-print("selected images extracted")
-
-all_selected_imgs_path = [join(selected_directory, f) for f in os.listdir(selected_directory) if
-                          isfile(join(selected_directory, f))]
-
-zipf = zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED)
-for i, f in enumerate(all_selected_imgs_path):
-    zipf.write(f, basename(f))
-zipf.close()
-print("selected images zipped")
-
 
 def predict(input_file, downsample_size):
     downsample_size = int(downsample_size)
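Note for readers skimming the removed block: each decoded frame is scaled to [0, 1], normalized per channel, transposed from HWC to NCHW, pushed through the model, and the resulting feature map is collapsed into one fixed-length vector by the adaptive average pool. A minimal self-contained sketch of that encoding step; the Conv2d stands in for the real model.pt, the dummy frame stands in for a decoded video frame, and only the mean/std constants are taken from the diff:

import numpy as np
import torch

# Stand-in backbone: anything mapping (N, 3, H, W) to an (N, C, H', W') feature map.
backbone = torch.nn.Conv2d(3, 16, kernel_size=3, padding=1)
backbone.eval()
avg_pool = torch.nn.AdaptiveAvgPool2d((1, 1))  # (N, C, H', W') -> (N, C, 1, 1)

# Per-channel statistics copied from the diff.
mean = np.asarray([0.3156024, 0.33569682, 0.34337464], dtype=np.float32)
std = np.asarray([0.16568947, 0.17827448, 0.18925823], dtype=np.float32)

frame = np.random.randint(0, 256, size=(64, 100, 3), dtype=np.uint8)  # dummy RGB frame

with torch.no_grad():
    x = frame.astype(np.float32) / 255.0     # uint8 -> [0, 1]
    x = (x - mean) / std                     # per-channel normalization
    x = np.transpose(x[None], (0, 3, 1, 2))  # add batch dim, HWC -> NCHW
    feat = avg_pool(backbone(torch.from_numpy(x)))
    vec = feat[0, :, 0, 0].cpu().numpy()     # one fixed-length vector per frame

print(vec.shape)  # (16,)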
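The `furthest_neighbours` helper itself is not shown in this commit. Judging only from its call site (feature vectors in, `downsample_size` indices out, with a fixed seed), it plausibly does greedy farthest-point sampling to keep a maximally diverse subset of frames. A generic numpy sketch of that idea, purely illustrative and not the repo's actual implementation (the name `farthest_point_sample` is mine):

import numpy as np

def farthest_point_sample(vecs, k, seed=0):
    # Greedily pick k row indices so that each new pick is the vector
    # farthest (in minimum Euclidean distance) from everything chosen so far.
    rng = np.random.default_rng(seed)
    chosen = [int(rng.integers(len(vecs)))]  # random initial pick
    min_dist = np.linalg.norm(vecs - vecs[chosen[0]], axis=1)
    while len(chosen) < k:
        nxt = int(np.argmax(min_dist))       # farthest remaining point
        chosen.append(nxt)
        min_dist = np.minimum(min_dist, np.linalg.norm(vecs - vecs[nxt], axis=1))
    return chosen

img_vecs = np.random.rand(500, 16).astype(np.float32)
picked = farthest_point_sample(img_vecs, 100)
print(len(set(picked)))  # 100 distinct, well-spread frame indices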
@@ -149,6 +88,8 @@ demo = gr.Interface(
     fn=predict,
     inputs=[gr.components.Video(label="Upload Video File"),
             gr.components.Number(label="Downsample size")],
-    outputs=gr.components.File(label="Zip")
+    outputs=gr.components.File(label="Zip"),
+    live=True,
+)
 
-demo.launch(debug=True)
+demo.launch(debug=True, share=True)
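Two behavioral changes ride along in this hunk. `live=True` tells Gradio to re-run the prediction function automatically whenever an input changes, instead of waiting for a Submit click; for a pipeline that decodes, encodes, and zips a whole video, every input edit now triggers a full run, which is worth double-checking. A minimal runnable example of the same Interface pattern (the `echo` function and Textbox components are placeholders, not part of this app):

import gradio as gr

def echo(text):
    return text

demo = gr.Interface(
    fn=echo,
    inputs=gr.components.Textbox(label="Input"),
    outputs=gr.components.Textbox(label="Output"),
    live=True,  # re-run echo on every input change; no Submit button is shown
)
demo.launch(debug=True)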
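The other change, `share=True`, asks Gradio to expose the app through a temporary public gradio.live tunnel in addition to the local server; on Hugging Face Spaces the app is already served publicly, and recent Gradio versions typically just warn that sharing is unnecessary there, so the flag mainly matters when app.py is run locally. Since the Interface returns a zip file, the deployed endpoint can also be exercised programmatically. A rough sketch with the official gradio_client package, under assumptions: the Space id "englert/video-downsample" is hypothetical (the real one is not visible on this page), and newer client versions want file inputs wrapped in handle_file:

from gradio_client import Client, handle_file

# Hypothetical Space id -- substitute the real one.
client = Client("englert/video-downsample")
result = client.predict(
    handle_file("test.mp4"),  # the video input
    100,                      # the downsample size
)
print(result)  # local path of the returned zip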