mfarre (HF staff) committed
Commit f38285f · 1 Parent(s): 4bc123c
Files changed (1)
  1. app.py +155 -163
app.py CHANGED
@@ -32,47 +32,6 @@ def format_duration(seconds: int) -> str:
     return f"{minutes}:{secs:02d}"
 
 
-# @spaces.GPU
-# def process_video(
-#     video_path: str,
-#     progress = gr.Progress()
-# ) -> Tuple[str, str, str, str]:
-#     try:
-#         # duration = get_video_duration_seconds(video_path)
-#         # if duration > 1200: # 20 minutes
-#         #     return None, None, None, "Video must be shorter than 20 minutes"
-
-#         progress(0.1, desc="Loading model...")
-#         model, processor = load_model()
-#         detector = BatchedVideoHighlightDetector(model, processor, batch_size=8)
-
-#         progress(0.2, desc="Analyzing video content...")
-#         video_description = detector.analyze_video_content(video_path)
-
-#         progress(0.3, desc="Determining highlight types...")
-#         highlight_types = detector.determine_highlights(video_description)
-
-#         progress(0.4, desc="Detecting and extracting highlights...")
-#         with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as tmp_file:
-#             output_path = tmp_file.name
-
-#         detector.create_highlight_video(video_path, output_path)
-
-#         # progress(0.9, desc="Adding watermark...")
-#         # output_path = temp_output.replace('.mp4', '_watermark.mp4')
-#         # add_watermark(temp_output, output_path)
-
-#         os.unlink(output_path)
-#         progress(1.0, desc="Complete!")
-
-#         video_description = video_description[:500] + "..." if len(video_description) > 500 else video_description
-#         highlight_types = highlight_types[:500] + "..." if len(highlight_types) > 500 else highlight_types
-
-#         return output_path, video_description, highlight_types, None
-
-#     except Exception as e:
-#         return None, None, None, f"Error processing video: {str(e)}"
-
 def create_ui(examples_path: str):
     examples_data = load_examples(examples_path)
 
@@ -131,137 +90,149 @@ def create_ui(examples_path: str):
         with analysis_accordion:
             video_description = gr.Markdown("", elem_id="video_desc")
             highlight_types = gr.Markdown("", elem_id="highlight_types")
-    # # Main interface section
-    # gr.Markdown("## Try It Yourself!")
-    # with gr.Row():
-    #     # Left column: Upload and Process
-    #     with gr.Column(scale=1):
-    #         input_video = gr.Video(
-    #             label="Upload your video (max 20 minutes)",
-    #             interactive=True
-    #         )
-    #         process_btn = gr.Button("Process Video", variant="primary")
-
-    #     # Right column: Progress and Analysis
-    #     with gr.Column(scale=1):
-
-    #         # Output video (initially hidden)
-    #         output_video = gr.Video(
-    #             label="Highlight Video",
-    #             visible=False,
-    #             interactive=False,
-    #         )
-
-    #         status = gr.Markdown()
-
-    #         with gr.Accordion("Model chain of thought details", open=True, visible=True) as analysis_accordion:
-    #             video_description = gr.Markdown("", elem_id="video_desc")
-    #             highlight_types = gr.Markdown("", elem_id="highlight_types")
-
 
     @spaces.GPU
-    def on_process(video):
+    def process_video(video):
         if not video:
-            return {
-                status: "Please upload a video",
-                video_description: "",
-                highlight_types: "",
-                output_video: gr.update(visible=False),
-                analysis_accordion: gr.update(visible=False)
-            }
+            return [
+                "Please upload a video",
+                "",
+                "",
+                None,
+                False
+            ]
 
         try:
             duration = get_video_duration_seconds(video)
             if duration > 1200: # 20 minutes
-                return {
-                    status: "Video must be shorter than 20 minutes",
-                    video_description: "",
-                    highlight_types: "",
-                    output_video: gr.update(visible=False),
-                    analysis_accordion: gr.update(visible=False)
-                }
-
-            # Make accordion visible as soon as processing starts
-            yield {
-                status: "Loading model...",
-                video_description: "",
-                highlight_types: "",
-                output_video: gr.update(visible=False),
-                analysis_accordion: gr.update(visible=True)
-            }
-
+                return [
+                    "Video must be shorter than 20 minutes",
+                    "",
+                    "",
+                    None,
+                    False
+                ]
+
+            # Load model
             model, processor = load_model()
             detector = BatchedVideoHighlightDetector(model, processor, batch_size=8)
 
-            yield {
-                status: "Analyzing video content...",
-                video_description: "",
-                highlight_types: "",
-                output_video: gr.update(visible=False),
-                analysis_accordion: gr.update(visible=True)
-            }
-
+            # Analyze content
            video_desc = detector.analyze_video_content(video)
             formatted_desc = f"#Summary: {video_desc[:500] + '...' if len(video_desc) > 500 else video_desc}"
 
-            # Update description as soon as it's available
-            yield {
-                status: "Determining highlight types...",
-                video_description: formatted_desc,
-                highlight_types: "",
-                output_video: gr.update(visible=False),
-                analysis_accordion: gr.update(visible=True)
-            }
-
+            # Determine highlights
             highlights = detector.determine_highlights(video_desc)
             formatted_highlights = f"#Highlights to search for: {highlights[:500] + '...' if len(highlights) > 500 else highlights}"
-
-            # Update highlights as soon as they're available
-            yield {
-                status: "Detecting and extracting highlights...",
-                video_description: formatted_desc,
-                highlight_types: formatted_highlights,
-                output_video: gr.update(visible=False),
-                analysis_accordion: gr.update(visible=True)
-            }
 
+            # Create highlight video
             with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as tmp_file:
                 temp_output = tmp_file.name
                 detector.create_highlight_video(video, temp_output)
 
-            return {
-                status: "Processing complete!",
-                video_description: formatted_desc,
-                highlight_types: formatted_highlights,
-                output_video: gr.update(value=temp_output, visible=True),
-                analysis_accordion: gr.update(visible=True)
-            }
+            return [
+                "Processing complete!",
+                formatted_desc,
+                formatted_highlights,
+                temp_output,
+                True
+            ]
 
         except Exception as e:
-            return {
-                status: f"Error processing video: {str(e)}",
-                video_description: "",
-                highlight_types: "",
-                output_video: gr.update(visible=False),
-                analysis_accordion: gr.update(visible=False)
-            }
+            return [
+                f"Error processing video: {str(e)}",
+                "",
+                "",
+                None,
+                False
+            ]
+
+    def process_with_updates(video):
+        # Initial state
+        yield [
+            "Loading model...",
+            "",
+            "",
+            None,
+            True # Show accordion
+        ]
+
+        # Analyzing video
+        yield [
+            "Analyzing video content...",
+            "",
+            "",
+            None,
+            True
+        ]
+
+        # Get final results
+        results = process_video(video)
+
+        # If we're still processing, show an intermediate state
+        if results[0] != "Processing complete!":
+            yield [
+                "Detecting and extracting highlights...",
+                results[1], # description
+                results[2], # highlights
+                None,
+                True
+            ]
+
+        # Return final state
+        yield results
 
     process_btn.click(
-        on_process,
+        process_with_updates,
         inputs=[input_video],
-        outputs=[status, video_description, highlight_types, output_video, analysis_accordion]
+        outputs=[
+            status,
+            video_description,
+            highlight_types,
+            output_video,
+            analysis_accordion
+        ]
     )
 
     return app
 
+    # gr.Markdown("## Try It Yourself!")
+    # with gr.Row():
+    #     with gr.Column(scale=1):
+    #         input_video = gr.Video(
+    #             label="Upload your video (max 20 minutes)",
+    #             interactive=True
+    #         )
+    #         process_btn = gr.Button("Process Video", variant="primary")
+
+    #     with gr.Column(scale=1):
+    #         output_video = gr.Video(
+    #             label="Highlight Video",
+    #             visible=False,
+    #             interactive=False,
+    #         )
+
+    #         status = gr.Markdown()
+
+    #         analysis_accordion = gr.Accordion(
+    #             "Model chain of thought details",
+    #             open=True,
+    #             visible=False
+    #         )
+
+    #         with analysis_accordion:
+    #             video_description = gr.Markdown("", elem_id="video_desc")
+    #             highlight_types = gr.Markdown("", elem_id="highlight_types")
+
     # @spaces.GPU
-    # def on_process(video, progress=gr.Progress()):
+    # def on_process(video):
     #     if not video:
     #         return {
     #             status: "Please upload a video",
     #             video_description: "",
     #             highlight_types: "",
-    #             output_video: gr.update(visible=False)
+    #             output_video: gr.update(visible=False),
+    #             analysis_accordion: gr.update(visible=False)
     #         }
 
     #     try:
@@ -271,45 +242,64 @@ def create_ui(examples_path: str):
     #                 status: "Video must be shorter than 20 minutes",
     #                 video_description: "",
     #                 highlight_types: "",
-    #                 output_video: gr.update(visible=False)
+    #                 output_video: gr.update(visible=False),
+    #                 analysis_accordion: gr.update(visible=False)
     #             }
 
-    #         progress(0.1, desc="Loading model...")
-    #         status.value = "Loading model..."
+    #         # Make accordion visible as soon as processing starts
+    #         yield {
+    #             status: "Loading model...",
+    #             video_description: "",
+    #             highlight_types: "",
+    #             output_video: gr.update(visible=False),
+    #             analysis_accordion: gr.update(visible=True)
+    #         }
+
     #         model, processor = load_model()
     #         detector = BatchedVideoHighlightDetector(model, processor, batch_size=8)
 
-    #         progress(0.2, desc="Analyzing video content...")
-    #         status.value = "Analyzing video content..."
+    #         yield {
+    #             status: "Analyzing video content...",
+    #             video_description: "",
+    #             highlight_types: "",
+    #             output_video: gr.update(visible=False),
+    #             analysis_accordion: gr.update(visible=True)
+    #         }
+
     #         video_desc = detector.analyze_video_content(video)
-    #         # Update description in real-time
-    #         video_description.value = f"#Summary: {video_desc[:500] + '...' if len(video_desc) > 500 else video_desc}"
+    #         formatted_desc = f"#Summary: {video_desc[:500] + '...' if len(video_desc) > 500 else video_desc}"
+
+    #         # Update description as soon as it's available
+    #         yield {
+    #             status: "Determining highlight types...",
+    #             video_description: formatted_desc,
+    #             highlight_types: "",
+    #             output_video: gr.update(visible=False),
+    #             analysis_accordion: gr.update(visible=True)
+    #         }
 
-    #         progress(0.3, desc="Determining highlight types...")
-    #         status.value = "Determining highlight types..."
     #         highlights = detector.determine_highlights(video_desc)
-    #         # Update highlights in real-time
-    #         highlight_types.value = f"#Highlights to search for: {highlights[:500] + '...' if len(highlights) > 500 else highlights}"
+    #         formatted_highlights = f"#Highlights to search for: {highlights[:500] + '...' if len(highlights) > 500 else highlights}"
+
+    #         # Update highlights as soon as they're available
+    #         yield {
+    #             status: "Detecting and extracting highlights...",
+    #             video_description: formatted_desc,
+    #             highlight_types: formatted_highlights,
+    #             output_video: gr.update(visible=False),
+    #             analysis_accordion: gr.update(visible=True)
+    #         }
 
-    #         progress(0.4, desc="Detecting and extracting highlights...")
-    #         status.value = "Detecting and extracting highlights..."
     #         with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as tmp_file:
     #             temp_output = tmp_file.name
     #             detector.create_highlight_video(video, temp_output)
 
-    #         # progress(0.9, desc="Adding watermark...")
-    #         # status.value = "Adding watermark..."
-    #         # output_path = temp_output.replace('.mp4', '_watermark.mp4')
-    #         # add_watermark(temp_output, output_path)
-
-    #         # os.unlink(temp_output)
-    #         progress(1.0, desc="Complete!")
-
     #         return {
     #             status: "Processing complete!",
-    #             video_description: video_description.value,
-    #             highlight_types: highlight_types.value,
-    #             output_video: gr.update(value=temp_output, visible=True)
+    #             video_description: formatted_desc,
+    #             highlight_types: formatted_highlights,
+    #             output_video: gr.update(value=temp_output, visible=True),
+    #             analysis_accordion: gr.update(visible=True)
     #         }
 
     #     except Exception as e:
@@ -317,17 +307,19 @@ def create_ui(examples_path: str):
     #             status: f"Error processing video: {str(e)}",
     #             video_description: "",
     #             highlight_types: "",
-    #             output_video: gr.update(visible=False)
+    #             output_video: gr.update(visible=False),
+    #             analysis_accordion: gr.update(visible=False)
     #         }
 
     # process_btn.click(
    #     on_process,
     #     inputs=[input_video],
-    #     outputs=[status, video_description, highlight_types, output_video]
+    #     outputs=[status, video_description, highlight_types, output_video, analysis_accordion]
     # )
 
     # return app
 
+
 if __name__ == "__main__":
     # Initialize CUDA
     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 
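The handler wired to process_btn.click above is a Python generator: each yield pushes an intermediate set of output values to the components listed in outputs before the final result is ready. A minimal, self-contained sketch of that pattern, assuming only the gradio package; the slow_task helper and the component names here are illustrative stand-ins, not this Space's own code:

import time
import gradio as gr

def slow_task() -> str:
    # Stand-in for the expensive model calls (content analysis, highlight extraction).
    time.sleep(2)
    return "highlights ready"

def handler(video_path):
    # Each yield updates all declared outputs; Gradio streams them to the UI in order.
    yield "Loading model...", ""
    yield "Analyzing video content...", ""
    result = slow_task()
    yield "Processing complete!", result

with gr.Blocks() as demo:
    inp = gr.Video(label="Upload your video")
    status = gr.Markdown()
    result_box = gr.Markdown()
    gr.Button("Process Video").click(handler, inputs=[inp], outputs=[status, result_box])

if __name__ == "__main__":
    demo.launch()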