pablovela5620 committed
Commit 6d9153c · verified · 1 Parent(s): fe06ee6

Upload gradio_app.py with huggingface_hub

Files changed (1): gradio_app.py +69 -38
gradio_app.py CHANGED
@@ -33,7 +33,6 @@ import numpy as np
 import PIL
 import torch
 from pathlib import Path
-import threading
 from queue import SimpleQueue
 import trimesh
 import subprocess
@@ -96,8 +95,31 @@ def svd_render_threaded(
     log_queue.put(frames)
 
 
+def svd_render(
+    image_o: PIL.Image.Image,
+    masks: Float64[torch.Tensor, "b 72 128"],
+    cond_image: PIL.Image.Image,
+    lambda_ts: Float64[torch.Tensor, "n b"],
+    num_denoise_iters: Literal[2, 25, 50, 100],
+    weight_clamp: float,
+    log_queue: SimpleQueue | None = None,
+):
+    frames: list[PIL.Image.Image] = SVD_PIPE(
+        [image_o],
+        log_queue=None,
+        temp_cond=cond_image,
+        mask=masks,
+        lambda_ts=lambda_ts,
+        weight_clamp=weight_clamp,
+        num_frames=25,
+        decode_chunk_size=8,
+        num_inference_steps=num_denoise_iters,
+    ).frames[0]
+    return frames
+
+
 if IN_SPACES:
-    svd_render_threaded = spaces.GPU(svd_render_threaded)
+    svd_render = spaces.GPU(svd_render)
     image_to_depth = spaces.GPU(image_to_depth)
 
 
@@ -207,44 +229,53 @@ def gradio_warped_image(
     progress(0.15, desc="Starting diffusion")
 
     # to allow logging from a separate thread
-    log_queue: SimpleQueue = SimpleQueue()
-    handle = threading.Thread(
-        target=svd_render_threaded,
-        kwargs={
-            "image_o": rgb_resized,
-            "masks": masks,
-            "cond_image": cond_image,
-            "lambda_ts": lambda_ts,
-            "num_denoise_iters": num_denoise_iters,
-            "weight_clamp": 0.2,
-            "log_queue": None,
-        },
+    # log_queue: SimpleQueue = SimpleQueue()
+    # handle = threading.Thread(
+    #     target=svd_render_threaded,
+    #     kwargs={
+    #         "image_o": rgb_resized,
+    #         "masks": masks,
+    #         "cond_image": cond_image,
+    #         "lambda_ts": lambda_ts,
+    #         "num_denoise_iters": num_denoise_iters,
+    #         "weight_clamp": 0.2,
+    #         "log_queue": None,
+    #     },
+    # )
+
+    # handle.start()
+    # i = 0
+    # while True:
+    #     msg = log_queue.get()
+    #     match msg:
+    #         case frames if all(isinstance(frame, PIL.Image.Image) for frame in frames):
+    #             break
+    #         case entity_path, entity, times:
+    #             i += 1
+    #             rr.reset_time()
+    #             for timeline, time in times:
+    #                 if isinstance(time, int):
+    #                     rr.set_time_sequence(timeline, time)
+    #                 else:
+    #                     rr.set_time_seconds(timeline, time)
+    #             static = False
+    #             if entity_path == "diffusion_step":
+    #                 static = True
+    #             rr.log(entity_path, entity, static=static)
+    #             yield stream.read(), None, [], f"{i} out of {num_denoise_iters}"
+    #         case _:
+    #             assert False
+    # handle.join()
+    frames = svd_render(
+        image_o=rgb_resized,
+        masks=masks,
+        cond_image=cond_image,
+        lambda_ts=lambda_ts,
+        num_denoise_iters=num_denoise_iters,
+        weight_clamp=0.2,
+        log_queue=None,
     )
 
-    handle.start()
-    i = 0
-    while True:
-        msg = log_queue.get()
-        match msg:
-            case frames if all(isinstance(frame, PIL.Image.Image) for frame in frames):
-                break
-            case entity_path, entity, times:
-                i += 1
-                rr.reset_time()
-                for timeline, time in times:
-                    if isinstance(time, int):
-                        rr.set_time_sequence(timeline, time)
-                    else:
-                        rr.set_time_seconds(timeline, time)
-                static = False
-                if entity_path == "diffusion_step":
-                    static = True
-                rr.log(entity_path, entity, static=static)
-                yield stream.read(), None, [], f"{i} out of {num_denoise_iters}"
-            case _:
-                assert False
-    handle.join()
-
     # all frames but the first one
     frame: np.ndarray
     for frame_id, (frame, cam_pararms) in enumerate(zip(frames, camera_list)):
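
Net effect of the diff: rendering now runs synchronously inside a single function that is conditionally wrapped with ZeroGPU, instead of on a worker thread. A minimal sketch of that conditional spaces.GPU wrapping pattern, with a hypothetical heavy_fn standing in for svd_render and the SPACE_ID environment check as an assumption about one way IN_SPACES could be derived:

import os

IN_SPACES = os.environ.get("SPACE_ID") is not None  # assumption: one way to detect Spaces

def heavy_fn(n: int) -> list[int]:
    # hypothetical stand-in for svd_render: synchronous, GPU-bound work
    return list(range(n))

if IN_SPACES:
    import spaces

    # spaces.GPU attaches a GPU only for the duration of each call to the
    # wrapped function; rebinding the name keeps every call site unchanged.
    heavy_fn = spaces.GPU(heavy_fn)

result = heavy_fn(25)

Rebinding at runtime rather than decorating at definition time keeps the module importable on machines without the spaces package.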
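What the change gives up: the retired thread streamed per-denoise-step Rerun updates through a SimpleQueue so the UI could yield intermediate results, while the direct call returns only the final frames. A generic, self-contained sketch of that retired producer/consumer pattern, with all names illustrative rather than taken from the app:

import threading
from queue import SimpleQueue

def worker(q: SimpleQueue) -> None:
    # producer: push per-step progress messages, then the final result
    for step in range(3):
        q.put(("progress", step))
    q.put(("done", ["frame0", "frame1"]))

def stream_updates():
    # consumer: drain the queue and yield incremental UI updates
    q: SimpleQueue = SimpleQueue()
    t = threading.Thread(target=worker, args=(q,))
    t.start()
    while True:
        match q.get():
            case ("done", result):
                t.join()
                yield ("final", result)
                return
            case ("progress", step):
                yield ("step", step)

for update in stream_updates():
    print(update)  # ("step", 0), ("step", 1), ("step", 2), ("final", [...])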