zejunyang committed
Commit • 4f3b622
1 Parent(s): bf49b76
debug
app.py CHANGED
@@ -93,12 +93,12 @@ pipe = Pose2VideoPipeline(
 )
 pipe = pipe.to("cuda", dtype=weight_dtype)
 
-lmk_extractor = LMKExtractor()
-vis = FaceMeshVisualizer()
+# lmk_extractor = LMKExtractor()
+# vis = FaceMeshVisualizer()
 
 frame_inter_model = init_frame_interpolation_model()
 
-@spaces.GPU
+@spaces.GPU
 def audio2video(input_audio, ref_img, headpose_video=None, size=512, steps=25, length=60, seed=42):
     print('=====Start processing======')
 
@@ -107,6 +107,9 @@ def audio2video(input_audio, ref_img, headpose_video=None, size=512, steps=25, length=60, seed=42):
     fi_step = 3
 
     generator = torch.manual_seed(seed)
+
+    lmk_extractor = LMKExtractor()
+    vis = FaceMeshVisualizer()
 
     width, height = size, size
 
@@ -226,7 +229,7 @@ def audio2video(input_audio, ref_img, headpose_video=None, size=512, steps=25, length=60, seed=42):
 
     return save_path.replace('_noaudio.mp4', '.mp4'), ref_image_pil
 
-@spaces.GPU
+@spaces.GPU
 def video2video(ref_img, source_video, size=512, steps=25, length=60, seed=42):
     print('=====Start processing======')
 
@@ -234,6 +237,9 @@ def video2video(ref_img, source_video, size=512, steps=25, length=60, seed=42):
     fi_step = 3
 
     generator = torch.manual_seed(seed)
+
+    lmk_extractor = LMKExtractor()
+    vis = FaceMeshVisualizer()
 
     width, height = size, size
 
@@ -247,7 +253,7 @@ def video2video(ref_img, source_video, size=512, steps=25, length=60, seed=42):
     save_dir.mkdir(exist_ok=True, parents=True)
 
     ref_image_np = cv2.cvtColor(ref_img, cv2.COLOR_RGB2BGR)
-    ref_image_np = crop_face(ref_image_np, lmk_extractor)
+    # ref_image_np = crop_face(ref_image_np, lmk_extractor)
     if ref_image_np is None:
         return None, Image.fromarray(ref_img)
 
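The pattern this commit applies is lazy initialization under ZeroGPU: on a ZeroGPU Space, a `@spaces.GPU`-decorated function runs in a GPU-attached worker process, so helpers built at import time in the CPU process (here `LMKExtractor()` and `FaceMeshVisualizer()`) can fail at call time; constructing them inside the decorated function defers the work until the GPU context actually exists. Below is a minimal sketch of that pattern, assuming a ZeroGPU Space. Only `spaces.GPU` from the `spaces` package is a real API here; `HeavyExtractor` and `process` are hypothetical stand-ins for this app's extractor objects and entry points.

import spaces
import torch


class HeavyExtractor:
    """Hypothetical stand-in for LMKExtractor / FaceMeshVisualizer."""

    def __call__(self, frame: torch.Tensor) -> torch.Tensor:
        # Placeholder work: collapse the channel dimension to one map.
        return frame.float().mean(dim=0, keepdim=True)


@spaces.GPU
def process(frame: torch.Tensor, seed: int = 42) -> torch.Tensor:
    # Constructed per call, inside the GPU-attached worker, mirroring
    # the move of LMKExtractor()/FaceMeshVisualizer() into
    # audio2video() and video2video() in the diff above.
    extractor = HeavyExtractor()
    torch.manual_seed(seed)  # seed inside the worker, as the diff does
    return extractor(frame)

The trade-off is that the helpers are rebuilt on every call; if construction is expensive, a common middle ground is a module-level slot initialized to None and filled on first use inside the decorated function.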