Hugging Face Spaces commit 97e9c37 by yupeng.zhou (parent: 630a6cc). Commit message: "fix".

app.py CHANGED
@@ -432,21 +432,21 @@ sd_model_path = models_dict["Unstable"]#"SG161222/RealVisXL_V4.0"
 use_safetensors= False
 ### LOAD Stable Diffusion Pipeline
 pipe1 = StableDiffusionXLPipeline.from_pretrained(sd_model_path, torch_dtype=torch.float16, use_safetensors= use_safetensors)
-pipe1 = pipe1.to("
+pipe1 = pipe1.to("cpu")
 pipe1.enable_freeu(s1=0.6, s2=0.4, b1=1.1, b2=1.2)
 # pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
 pipe1.scheduler.set_timesteps(50)
 ###
 pipe2 = PhotoMakerStableDiffusionXLPipeline.from_pretrained(
     sd_model_path, torch_dtype=torch.float16, use_safetensors=use_safetensors)
-pipe2 = pipe2.to("
+pipe2 = pipe2.to("cpu")
 pipe2.load_photomaker_adapter(
     os.path.dirname(photomaker_path),
     subfolder="",
     weight_name=os.path.basename(photomaker_path),
     trigger_word="img" # define the trigger word
 )
-pipe2 = pipe2.to("
+pipe2 = pipe2.to("cpu")
 pipe2.enable_freeu(s1=0.6, s2=0.4, b1=1.1, b2=1.2)
 pipe2.fuse_lora()
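Both pipelines are now created on the CPU instead of being moved to the GPU at import time (the removed lines are truncated in the diff view, so their original device string is not visible). This matches the ZeroGPU workflow on Spaces, where CUDA is only available inside a function decorated with @spaces.GPU. A minimal sketch of that pattern, assuming the diffusers and spaces packages and a placeholder model id:

import torch
import spaces
from diffusers import StableDiffusionXLPipeline

# Load once at startup; the weights stay in CPU memory until a GPU is attached.
pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",  # placeholder model id, not the one used by this Space
    torch_dtype=torch.float16,
)
pipe = pipe.to("cpu")

@spaces.GPU(duration=120)  # a GPU is attached only while this function runs
def generate(prompt: str):
    pipe.to("cuda")                 # move the weights onto the borrowed GPU
    image = pipe(prompt).images[0]  # run inference
    pipe.to("cpu")                  # hand the GPU back
    return image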
@@ -482,13 +482,15 @@ def change_visiale_by_model_type(_model_type):


 ######### Image Generation ##############
-@spaces.GPU(duration=
+@spaces.GPU(duration=120)
 def process_generation(_sd_type,_model_type,_upload_images, _num_steps,style_name, _Ip_Adapter_Strength ,_style_strength_ratio, guidance_scale, seed_, sa32_, sa64_, id_length_, general_prompt, negative_prompt,prompt_array,G_height,G_width,_comic_type):
     _model_type = "Photomaker" if _model_type == "Using Ref Images" else "original"
     if _model_type == "Photomaker" and "img" not in general_prompt:
         raise gr.Error("Please add the triger word \" img \" behind the class word you want to customize, such as: man img or woman img")
     if _upload_images is None and _model_type != "original":
         raise gr.Error(f"Cannot find any input face image!")
+    if len(prompt_array) > 10:
+        raise gr.Error(f"No more than 10 prompts in huggface demo for Speed!")
     global sa32, sa64,id_length,total_length,attn_procs,unet,cur_model_type,device
     global write
     global cur_step,attn_count
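The generation handler is now decorated with @spaces.GPU(duration=120) and gains a guard that rejects requests with more than 10 prompts. Raising gr.Error inside a Gradio event handler aborts the run and surfaces the message to the user. A small validation sketch in that spirit, assuming the prompt box arrives as a newline-separated string (how prompt_array is actually parsed is not shown in this diff):

import gradio as gr

MAX_PROMPTS = 10  # mirrors the new cap added to process_generation

def validate_prompt_array(prompt_array: str):
    # gr.Error stops the event handler and is shown to the user as an error popup.
    prompts = [line for line in prompt_array.splitlines() if line.strip()]
    if len(prompts) > MAX_PROMPTS:
        raise gr.Error(f"No more than {MAX_PROMPTS} prompts are allowed in this demo.")
    return prompts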
@@ -500,11 +502,11 @@ def process_generation(_sd_type,_model_type,_upload_images, _num_steps,style_nam
     sd_model_path = models_dict[_sd_type]
     use_safe_tensor = True
     if _model_type == "original":
-        pipe = pipe1
+        pipe = pipe1.to(device)
         set_attention_processor(pipe.unet,id_length_,is_ipadapter = False)
     elif _model_type == "Photomaker":
-        pipe = pipe2
-
+        pipe = pipe2.to(device)
+        pipe.id_encoder.to(device)
         set_attention_processor(pipe.unet,id_length_,is_ipadapter = False)
     else:
         raise NotImplementedError("You should choice between original and Photomaker!",f"But you choice {_model_type}")
@@ -567,7 +569,13 @@ def process_generation(_sd_type,_model_type,_upload_images, _num_steps,style_nam
     captions = [caption.split('#')[-1] if "#" in caption else caption for caption in captions]
     from PIL import ImageFont
     total_results = get_comic(id_images + real_images, _comic_type,captions= captions,font=ImageFont.truetype("./fonts/Inkfree.ttf", int(45))) + total_results
-
+    if _model_type == "original":
+        pipe = pipe1.to("cpu")
+        set_attention_processor(pipe.unet,id_length_,is_ipadapter = False)
+    elif _model_type == "Photomaker":
+        pipe = pipe2.to("cpu")
+        pipe.id_encoder.to("cpu")
+        set_attention_processor(pipe.unet,id_length_,is_ipadapter = False)
     yield total_results
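This hunk closes the round trip: once the comic page is assembled, whichever pipeline was used (and, for PhotoMaker, its id_encoder) is moved back to the CPU before results are yielded, so the GPU slot is freed for the next request. A sketch of one way to make that round trip exception-safe; the wrapper and the empty_cache() call are illustrative additions, not part of this commit:

import torch

def run_on_gpu_then_release(pipe, *args, **kwargs):
    # Illustration only (not in the commit): a try/finally wrapper that guarantees
    # the pipeline returns to CPU memory even if generation raises.
    try:
        pipe.to("cuda")
        return pipe(*args, **kwargs)
    finally:
        pipe.to("cpu")
        torch.cuda.empty_cache()  # also drop cached CUDA allocations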
@@ -645,14 +653,14 @@ with gr.Blocks(css=css) as demo:
     G_height = gr.Slider(
         label="height",
         minimum=256,
-        maximum=
+        maximum=768,
         step=32,
         value=768,
     )
     G_width = gr.Slider(
         label="width",
         minimum=256,
-        maximum=
+        maximum=768,
         step=32,
         value=768,
     )
@@ -706,7 +714,7 @@ with gr.Blocks(css=css) as demo:
         "work in the company",
         "Take a walk next to the company at noon",
         "lying in bed at night"]),
-        "
+        "(No style)", "Using Ref Images",get_image_path_list('./examples/taylor'),768,768
         ],
         [0,0.5,0.5,2,"a man, wearing black jacket",
         "bad anatomy, bad hands, missing fingers, extra fingers, three hands, three legs, bad arms, missing legs, missing arms, poorly drawn face, bad face, fused face, cloned face, three crus, fused feet, fused thigh, extra crus, ugly fingers, horn, cartoon, cg, 3d, unreal, animate, amputation, disconnected limbs",
@@ -717,7 +725,7 @@ with gr.Blocks(css=css) as demo:
         "laughing happily",
         "lying in bed at night"
         ]),
-        "
+        "(No style)","Only Using Textual Description",get_image_path_list('./examples/taylor'),768,768
         ],
         [0,0.3,0.5,2,"a girl, wearing white shirt, black skirt, black tie, yellow hair",
         "bad anatomy, bad hands, missing fingers, extra fingers, three hands, three legs, bad arms, missing legs, missing arms, poorly drawn face, bad face, fused face, cloned face, three crus, fused feet, fused thigh, extra crus, ugly fingers, horn, cartoon, cg, 3d, unreal, animate, amputation, disconnected limbs",
@@ -729,13 +737,9 @@ with gr.Blocks(css=css) as demo:
         "look around in the park. # She looks around and enjoys the beauty of nature.",
         "[NC]leaf falls from the tree, landing on the sketchbook.",
         "picks up the leaf, examining its details closely.",
-        "starts sketching the leaf with intricate lines.",
-        "holds up the sketch drawing of the leaf.",
         "[NC]The brown squirrel appear.",
         "is very happy # She is very happy to see the squirrel again",
-        "[NC]The brown squirrel takes the cracker and scampers up a tree. # She gives the squirrel cracker",
-        "laughs and tucks the leaf into her book as a keepsake.",
-        "ready to leave.",]),
+        "[NC]The brown squirrel takes the cracker and scampers up a tree. # She gives the squirrel cracker"]),
         "Japanese Anime","Only Using Textual Description",get_image_path_list('./examples/taylor'),768,768
         ]
         ],