ghostsInTheMachine committed on
Commit
173c7e2
1 Parent(s): 73b0806

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -6
app.py CHANGED
@@ -23,7 +23,7 @@ task_name = 'depth'
23
  pipe_g, pipe_d = load_models(task_name, device)
24
 
25
  # Preprocess the video to adjust resolution and frame rate
26
- def preprocess_video(video_path, target_fps=24, max_resolution=(512, 512)):
27
  """Preprocess the video to resize and adjust its frame rate."""
28
  video = mp.VideoFileClip(video_path)
29
 
@@ -38,7 +38,7 @@ def preprocess_video(video_path, target_fps=24, max_resolution=(512, 512)):
38
  return video
39
 
40
  # Process a batch of frames through the depth model
41
- def process_frames_batch(frames_batch, seed=0, target_size=(512, 512)):
42
  """Process a batch of frames and return depth maps."""
43
  try:
44
  torch.cuda.empty_cache() # Clear GPU cache
@@ -64,7 +64,7 @@ def process_video(video_path, fps=0, seed=0, batch_size=4):
64
  start_time = time.time()
65
 
66
  # Preprocess the video
67
- video = preprocess_video(video_path, target_fps=fps, max_resolution=(512, 512))
68
 
69
  # Use original video FPS if not specified
70
  if fps == 0:
@@ -84,14 +84,15 @@ def process_video(video_path, fps=0, seed=0, batch_size=4):
84
  # Process frames in batches
85
  for i in range(0, total_frames, batch_size):
86
  current_batch_size = batch_size
87
- while current_batch_size > 0:
 
88
  try:
89
  frames_batch = frames[i:i+current_batch_size]
90
  depth_maps = process_frames_batch(frames_batch, seed)
91
- break
92
  except RuntimeError as e:
93
  if 'out of memory' in str(e):
94
- current_batch_size = current_batch_size // 2
95
  logger.warning(f"Reducing batch size to {current_batch_size} due to out of memory error.")
96
  torch.cuda.empty_cache()
97
  else:
@@ -147,6 +148,10 @@ def process_video(video_path, fps=0, seed=0, batch_size=4):
147
  logger.error(f"Error: {e}")
148
  yield None, None, None, f"Error processing video: {e}"
149
 
 
 
 
 
150
  # Wrapper function with error handling
151
  def process_wrapper(video, fps=0, seed=0, batch_size=4):
152
  if video is None:
 
23
  pipe_g, pipe_d = load_models(task_name, device)
24
 
25
  # Preprocess the video to adjust resolution and frame rate
26
+ def preprocess_video(video_path, target_fps=24, max_resolution=(256, 256)):
27
  """Preprocess the video to resize and adjust its frame rate."""
28
  video = mp.VideoFileClip(video_path)
29
 
 
38
  return video
39
 
40
  # Process a batch of frames through the depth model
41
+ def process_frames_batch(frames_batch, seed=0, target_size=(256, 256)):
42
  """Process a batch of frames and return depth maps."""
43
  try:
44
  torch.cuda.empty_cache() # Clear GPU cache
 
64
  start_time = time.time()
65
 
66
  # Preprocess the video
67
+ video = preprocess_video(video_path, target_fps=fps, max_resolution=(256, 256))
68
 
69
  # Use original video FPS if not specified
70
  if fps == 0:
 
84
  # Process frames in batches
85
  for i in range(0, total_frames, batch_size):
86
  current_batch_size = batch_size
87
+ success = False
88
+ while current_batch_size > 0 and not success:
89
  try:
90
  frames_batch = frames[i:i+current_batch_size]
91
  depth_maps = process_frames_batch(frames_batch, seed)
92
+ success = True
93
  except RuntimeError as e:
94
  if 'out of memory' in str(e):
95
+ current_batch_size = max(1, current_batch_size // 2)
96
  logger.warning(f"Reducing batch size to {current_batch_size} due to out of memory error.")
97
  torch.cuda.empty_cache()
98
  else:
 
148
  logger.error(f"Error: {e}")
149
  yield None, None, None, f"Error processing video: {e}"
150
 
151
+ finally:
152
+ # Clean up temporary directory
153
+ pass # Remove if you decide to delete temp_dir
154
+
155
  # Wrapper function with error handling
156
  def process_wrapper(video, fps=0, seed=0, batch_size=4):
157
  if video is None: