Spaces: Running on Zero

Upload app.py

app.py (CHANGED)
@@ -8,7 +8,7 @@ from modelscope.pipelines import pipeline
 from modelscope.utils.constant import Tasks
 from dressing_sd.pipelines.pipeline_sd import PipIpaControlNet
 from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
-
+import spaces
 from torchvision import transforms
 import cv2
 from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
@@ -89,7 +89,7 @@ unet = UNet2DConditionModel.from_pretrained("./ckpt/unet").to(
 # image_face_fusion = pipeline('face_fusion_torch', model='damo/cv_unet_face_fusion_torch', model_revision='v1.0.3')
 
 #face_model
-app = FaceAnalysis(providers=[('CUDAExecutionProvider', {"device_id": args.device})])  ## use GPU 0; the default buffalo_l pack is fine
+app = FaceAnalysis(model_path='./ckpt/buffalo_l.zip', providers=[('CUDAExecutionProvider', {"device_id": args.device})])  ## use GPU 0; the default buffalo_l pack is fine
 app.prepare(ctx_id=0, det_size=(640, 640))
 
 # def ref proj weight
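This hunk loads the face model from a local checkpoint (./ckpt/buffalo_l.zip) rather than relying on insightface's default download location, avoiding a network fetch at startup. Note that model_path= is not a parameter of upstream insightface's FaceAnalysis (its signature takes name= and root=), so this presumably targets a patched or vendored copy bundled with the Space. For comparison, a minimal sketch of the stock insightface API; the root='./ckpt' cache directory here is an assumption:

import numpy as np
from insightface.app import FaceAnalysis

# Stock insightface: name= selects the model pack, root= the cache directory;
# the pack is downloaded and unpacked under <root>/models/<name> if missing.
face_app = FaceAnalysis(name='buffalo_l', root='./ckpt',
                        providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
face_app.prepare(ctx_id=0, det_size=(640, 640))

# get() runs detection and alignment on a BGR image and returns Face objects
# carrying bounding boxes, keypoints, and identity embeddings.
faces = face_app.get(np.zeros((640, 640, 3), dtype=np.uint8))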
@@ -201,7 +201,7 @@ def resize_img(input_image, max_side=640, min_side=512, size=None,
     return input_image
 
 @spaces.GPU
-def
+def dress_process(garm_img, face_img, pose_img, prompt, cloth_guidance_scale, caption_guidance_scale,
                   face_guidance_scale,self_guidance_scale, cross_guidance_scale,if_ipa, if_post, if_control, denoise_steps, seed=42):
     # prompt = prompt + ', confident smile expression, fashion, best quality, amazing quality, very aesthetic'
     if prompt is None:
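The import spaces added in the first hunk and the @spaces.GPU decorator here are the two halves of the ZeroGPU pattern (the Space runs "on Zero"): the process starts without a GPU, and hardware is attached only while a decorated function executes, so the GPU-heavy dress_process call is the right thing to decorate. A minimal sketch of the pattern in isolation (toy function, not the app's pipeline):

import spaces   # on ZeroGPU, import this before anything initializes CUDA
import torch

zero = torch.zeros(1)  # created on CPU at module import time

@spaces.GPU  # GPU attached per call; @spaces.GPU(duration=120) extends the default window
def run_on_gpu():
    # CUDA is only usable inside decorated functions on ZeroGPU hardware.
    return zero.to('cuda') + 1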
@@ -394,7 +394,7 @@ with image_blocks as demo:
             denoise_steps = gr.Number(label="Denoising Steps", minimum=20, maximum=50, value=30, step=1)
             seed = gr.Number(label="Seed", minimum=-1, maximum=2147483647, step=1, value=20240508)
 
-    try_button.click(fn=
-                     outputs=[image_out], api_name='
+    try_button.click(fn=dress_process, inputs=[garm_img, imgs, pose_img, prompt, cloth_guidance_scale, caption_guidance_scale, face_guidance_scale,self_guidance_scale, cross_guidance_scale, is_checked_face, is_checked_postprocess, is_checked_pose, denoise_steps, seed],
+                     outputs=[image_out], api_name='IMAGDressing-v1')
 
 image_blocks.launch()
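The last hunk completes the previously truncated try_button.click(...) call: the listed components are passed to dress_process positionally, one per parameter, and api_name='IMAGDressing-v1' exposes the event as a named API endpoint. A toy illustration of the same wiring (hypothetical components and handler, not the app's):

import gradio as gr

def greet(name, excited):
    # Parameters line up positionally with the inputs= list below.
    return f"Hello, {name}{'!' if excited else '.'}"

with gr.Blocks() as demo:
    name = gr.Textbox(label="Name")
    excited = gr.Checkbox(label="Excited")
    out = gr.Textbox(label="Greeting")
    btn = gr.Button("Greet")
    # api_name makes this event callable as /greet via gradio_client.
    btn.click(fn=greet, inputs=[name, excited], outputs=[out], api_name="greet")

demo.launch()

A client can then invoke the named endpoint with gradio_client.Client(...).predict(..., api_name="/greet"), which is presumably why the commit names the app's endpoint explicitly.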