jhj0517 committed
Commit · cc3d06b · 1 Parent(s): 050ea7e

Remove meaningless inputs

- app.py: +1 -3
- modules/live_portrait/live_portrait_inferencer.py: +2 -15
app.py CHANGED
@@ -75,11 +75,9 @@ class App:
         btn_openfolder = gr.Button('📂')
         with gr.Accordion("Opt in features", visible=False):
             img_sample = gr.Image()
-            img_motion_link = gr.Image()
-            tb_exp = gr.Textbox()
 
         params = expression_parameters + [img_ref]
-        opt_in_features_params = [img_sample
+        opt_in_features_params = [img_sample]
 
         gr.on(
             triggers=[param.change for param in params],
modules/live_portrait/live_portrait_inferencer.py CHANGED
@@ -162,9 +162,7 @@ class LivePortraitInferencer:
                         sample_parts: str = SamplePart.ALL.value,
                         crop_factor: float = 2.3,
                         src_image: Optional[str] = None,
-                        sample_image: Optional[str] = None,
-                        motion_link: Optional[str] = None,
-                        add_exp: Optional['ExpressionSet'] = None) -> None:
+                        sample_image: Optional[str] = None,) -> None:
         if isinstance(model_type, ModelType):
             model_type = model_type.value
         if model_type not in [mode.value for mode in ModelType]:
@@ -178,17 +176,11 @@ class LivePortraitInferencer:
         try:
             rotate_yaw = -rotate_yaw
 
-
-            if isinstance(motion_link, np.ndarray) and motion_link:
-                self.psi = motion_link[0]
-                new_editor_link = motion_link.copy()
-            elif src_image is not None:
+            if src_image is not None:
                 if id(src_image) != id(self.src_image) or self.crop_factor != crop_factor:
                     self.crop_factor = crop_factor
                     self.psi = self.prepare_source(src_image, crop_factor)
                     self.src_image = src_image
-                new_editor_link = []
-                new_editor_link.append(self.psi)
             else:
                 return None
 
@@ -226,9 +218,6 @@ class LivePortraitInferencer:
             es.r = self.calc_fe(es.e, blink, eyebrow, wink, pupil_x, pupil_y, aaa, eee, woo, smile,
                                 rotate_pitch, rotate_yaw, rotate_roll)
 
-            if isinstance(add_exp, ExpressionSet):
-                es.add(add_exp)
-
             new_rotate = get_rotation_matrix(s_info['pitch'] + es.r[0], s_info['yaw'] + es.r[1],
                                              s_info['roll'] + es.r[2])
             x_d_new = (s_info['scale'] * (1 + es.s)) * ((s_exp + es.e) @ new_rotate) + s_info['t']
@@ -245,8 +234,6 @@ class LivePortraitInferencer:
             save_image(numpy_array=crop_out, output_path=temp_out_img_path)
             save_image(numpy_array=out, output_path=out_img_path)
 
-            new_editor_link.append(es)
-
             return out
         except Exception as e:
             raise
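The branch kept in the second hunk only re-runs prepare_source when the source image object or the crop factor changes. Below is a standalone sketch of that id()-based caching pattern; the class name SourceCache, the method get_psi, and the stubbed prepare_source are illustrative assumptions, not the inferencer's real API.

from typing import Optional
import numpy as np

class SourceCache:
    """Illustration of the identity-based caching retained by this commit:
    the expensive source preparation only reruns when the caller passes a
    different image object or a different crop factor."""

    def __init__(self):
        self.src_image: Optional[np.ndarray] = None
        self.crop_factor: Optional[float] = None
        self.psi = None

    def prepare_source(self, src_image: np.ndarray, crop_factor: float):
        # Stub for the real cropping/preprocessing step in the inferencer.
        return {"image": src_image, "crop_factor": crop_factor}

    def get_psi(self, src_image: Optional[np.ndarray], crop_factor: float):
        if src_image is not None:
            # Mirror of the retained check: recompute only when the image
            # object identity or the crop factor differs from the cached one.
            if id(src_image) != id(self.src_image) or self.crop_factor != crop_factor:
                self.crop_factor = crop_factor
                self.psi = self.prepare_source(src_image, crop_factor)
                self.src_image = src_image
            return self.psi
        return None

Comparing by id() means a re-uploaded but pixel-identical image still triggers recomputation; that matches the behavior of the code shown in the diff and keeps the check cheap compared to comparing array contents.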