Og2 committed on
Commit
74b1c97
·
verified ·
1 Parent(s): 64a6aa4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -25
app.py CHANGED
@@ -84,31 +84,6 @@ def crop_center(frame):
84
  cropped = keras.ops.squeeze(cropped)
85
  return cropped
86
 
87
-
88
- # Following method is modified from this tutorial:
89
- # https://www.tensorflow.org/hub/tutorials/action_recognition_with_tf_hub
90
- def load_video(path, max_frames=0, offload_to_cpu=False):
91
- cap = cv2.VideoCapture(path)
92
- frames = []
93
- try:
94
- while True:
95
- ret, frame = cap.read()
96
- if not ret:
97
- break
98
- frame = frame[:, :, [2, 1, 0]]
99
- frame = crop_center(frame)
100
- if offload_to_cpu and keras.backend.backend() == "torch":
101
- frame = frame.to("cpu")
102
- frames.append(frame)
103
-
104
- if len(frames) == max_frames:
105
- break
106
- finally:
107
- cap.release()
108
- if offload_to_cpu and keras.backend.backend() == "torch":
109
- return np.array([frame.to("cpu").numpy() for frame in frames])
110
- return np.array(frames)
111
-
112
  def build_feature_extractor():
113
  feature_extractor = DenseNet121(
114
  weights="imagenet",
@@ -188,7 +163,29 @@ with open(labels_file_path, "r") as file:
188
  #print("Tableau recréé à partir du fichier :")
189
  #print(class_labels)
190
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
191
 
 
 
 
 
 
 
 
 
192
  # test on video from val dataset
193
  def prepare_single_video(frames):
194
  frame_features = np.zeros(shape=(1, MAX_SEQ_LENGTH, NUM_FEATURES), dtype="float32")
 
84
  cropped = keras.ops.squeeze(cropped)
85
  return cropped
86
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
87
  def build_feature_extractor():
88
  feature_extractor = DenseNet121(
89
  weights="imagenet",
 
163
  #print("Tableau recréé à partir du fichier :")
164
  #print(class_labels)
165
 
166
# read video
# Following method is modified from this tutorial:
# https://www.tensorflow.org/hub/tutorials/action_recognition_with_tf_hub
def load_video(path, max_frames=0, offload_to_cpu=False):
    """Read a video into an array of center-cropped RGB frames.

    Args:
        path: Path to a video file readable by OpenCV.
        max_frames: Stop after this many frames; 0 (the default) reads
            the entire video.
        offload_to_cpu: When the Keras backend is torch, move each frame
            tensor to the CPU as soon as it is produced (presumably to
            bound accelerator memory on long videos — confirm with caller).

    Returns:
        np.ndarray stacking the processed frames. Each frame is
        channel-reordered from OpenCV's BGR to RGB and passed through
        crop_center().
    """
    cap = cv2.VideoCapture(path)
    frames = []
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            # OpenCV decodes frames as BGR; reorder channels to RGB.
            frame = frame[:, :, [2, 1, 0]]
            frame = crop_center(frame)
            if offload_to_cpu and keras.backend.backend() == "torch":
                frame = frame.to("cpu")
            frames.append(frame)

            if len(frames) == max_frames:
                break
    finally:
        # Always release the capture handle, even on decode errors.
        cap.release()
    if offload_to_cpu and keras.backend.backend() == "torch":
        # Frames were already moved to CPU inside the loop (same guard),
        # so the original per-frame .to("cpu") here was redundant —
        # convert straight to NumPy.
        return np.array([frame.numpy() for frame in frames])
    return np.array(frames)
189
  # test on video from val dataset
190
  def prepare_single_video(frames):
191
  frame_features = np.zeros(shape=(1, MAX_SEQ_LENGTH, NUM_FEATURES), dtype="float32")