MonsterMMORPG committed
Commit a6f8521 · 1 Parent(s): 2bd9862

Upload web-ui.py

Files changed (1):
  1. web-ui.py +172 -0
web-ui.py ADDED
import argparse
import os
import random

import cv2
import gradio as gr
import numpy as np
import torch
from PIL import Image
from diffusers import StableDiffusionPipeline, DDIMScheduler, AutoencoderKL
from insightface.app import FaceAnalysis
from insightface.utils import face_align
from ip_adapter.ip_adapter_faceid import IPAdapterFaceIDPlus

# Argument parser for command line options
parser = argparse.ArgumentParser()
parser.add_argument("--share", action="store_true", help="Enable Gradio share option")
parser.add_argument("--num_images", type=int, default=1, help="Number of images to generate")
parser.add_argument("--cache_limit", type=int, default=1, help="Limit for model cache")
args = parser.parse_args()
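
# Example invocations (the flags are defined above; the values are illustrative):
#   python web-ui.py
#   python web-ui.py --share --num_images 4 --cache_limit 2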

# Model choices shown in the UI dropdown; add new model names here
static_model_names = [
    "SG161222/Realistic_Vision_V6.0_B1_noVAE",
    "stablediffusionapi/rev-animated-v122-eol",
    "Lykon/DreamShaper",
    "stablediffusionapi/toonyou",
    "stablediffusionapi/real-cartoon-3d",
    "KBlueLeaf/kohaku-v2.1",
    "nitrosocke/Ghibli-Diffusion",
    "Linaqruf/anything-v3.0",
    "jinaai/flat-2d-animerge",
    "stablediffusionapi/realcartoon3d",
    "stablediffusionapi/disney-pixar-cartoon",
    "stablediffusionapi/pastel-mix-stylized-anime",
    "stablediffusionapi/anything-v5",
    "SG161222/Realistic_Vision_V2.0",
    "SG161222/Realistic_Vision_V4.0_noVAE",
    "SG161222/Realistic_Vision_V5.1_noVAE",
    # Machine-specific local checkpoint folder; edit or remove on your system.
    r"C:\Users\King\Downloads\New folder\3D Animation Diffusion",
]

# Cache of loaded IP-Adapter pipelines, keyed by model name; entries are
# evicted in insertion order (FIFO) once max_cache_size is reached.
model_cache = {}
max_cache_size = args.cache_limit

# Load a model, wrap it with the FaceID Plus IP-Adapter, and cache the result.
def load_model(model_name):
    if model_name in model_cache:
        return model_cache[model_name]

    # Evict the oldest entry once the cache limit is reached.
    if len(model_cache) >= max_cache_size:
        model_cache.pop(next(iter(model_cache)))

    device = "cuda"
    noise_scheduler = DDIMScheduler(
        num_train_timesteps=1000,
        beta_start=0.00085,
        beta_end=0.012,
        beta_schedule="scaled_linear",
        clip_sample=False,
        set_alpha_to_one=False,
        steps_offset=1,
    )
    vae_model_path = "stabilityai/sd-vae-ft-mse"
    vae = AutoencoderKL.from_pretrained(vae_model_path).to(dtype=torch.float16)

    # Load the Stable Diffusion pipeline for the selected model.
    pipe = StableDiffusionPipeline.from_pretrained(
        model_name,
        torch_dtype=torch.float16,
        scheduler=noise_scheduler,
        vae=vae,
        feature_extractor=None,
        safety_checker=None,
    ).to(device)

    # FaceID Plus v2 weights; the .bin checkpoint is expected locally under
    # adapters/ (it is published in the h94/IP-Adapter-FaceID repo on Hugging Face).
    image_encoder_path = "h94/IP-Adapter/models/image_encoder"
    ip_ckpt = "adapters/ip-adapter-faceid-plusv2_sd15.bin"
    ip_model = IPAdapterFaceIDPlus(pipe, image_encoder_path, ip_ckpt, device)

    model_cache[model_name] = ip_model
    return ip_model
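
# Note: the eviction above is first-in-first-out, not least-recently-used.
# A minimal LRU variant, if wanted (sketch, untested):
#     from collections import OrderedDict
#     model_cache = OrderedDict()
#     model_cache.move_to_end(model_name)  # on each cache hit, before returning
#     model_cache.popitem(last=False)      # evict the least recently used entry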

# Detect a face in the input image and generate images conditioned on it.
def generate_image(input_image, positive_prompt, negative_prompt, width, height, model_name,
                   num_inference_steps, seed, randomize_seed, num_images, batch_size,
                   enable_shortcut, s_scale):
    saved_images = []

    # gr.Number components return floats, so cast the integer-valued inputs
    # (range() and the pipeline expect ints).
    width, height = int(width), int(height)
    num_inference_steps, seed = int(num_inference_steps), int(seed)
    num_images, batch_size = int(num_images), int(batch_size)

    # Load and prepare the model
    ip_model = load_model(model_name)

    # Convert the PIL input image to the BGR array that InsightFace expects.
    input_image = input_image.convert("RGB")
    input_image = cv2.cvtColor(np.array(input_image), cv2.COLOR_RGB2BGR)

    # Note: FaceAnalysis is re-created on every call; hoisting it to module
    # level would avoid reloading the detector each time.
    app = FaceAnalysis(
        name="buffalo_l", providers=["CUDAExecutionProvider", "CPUExecutionProvider"]
    )
    app.prepare(ctx_id=0, det_size=(640, 640))
    faces = app.get(input_image)
    if not faces:
        raise ValueError("No faces found in the image.")

    # ID embedding of the first detected face, plus an aligned 224px crop of it.
    faceid_embeds = torch.from_numpy(faces[0].normed_embedding).unsqueeze(0)
    face_image = face_align.norm_crop(input_image, landmark=faces[0].kps, image_size=224)

    for image_index in range(num_images):
        # Use a fresh random seed when requested, and for every image after the first.
        if randomize_seed or image_index > 0:
            seed = random.randint(0, 2**32 - 1)

        # Generate a batch of images. shortcut and s_scale are FaceID Plus v2
        # options: shortcut enables the v2 behavior and s_scale weights the
        # face-structure conditioning.
        generated_images = ip_model.generate(
            prompt=positive_prompt,
            negative_prompt=negative_prompt,
            faceid_embeds=faceid_embeds,
            face_image=face_image,
            num_samples=batch_size,
            shortcut=enable_shortcut,
            s_scale=s_scale,
            width=width,
            height=height,
            num_inference_steps=num_inference_steps,
            seed=seed,
        )

        # Save the generated images and collect their paths for the gallery.
        outputs_dir = "outputs"
        os.makedirs(outputs_dir, exist_ok=True)
        # Count existing files once, before saving, so filenames stay contiguous
        # and unique; counting inside the loop below could reuse a name on a
        # later run and silently overwrite an earlier image.
        existing = len(os.listdir(outputs_dir))
        for i, img in enumerate(generated_images, start=1):
            image_path = os.path.join(outputs_dir, f"generated_{existing + i}.png")
            img.save(image_path)
            saved_images.append(image_path)

    return saved_images, f"Saved images: {', '.join(saved_images)}", seed
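
# Programmatic use bypassing the UI (hypothetical values, sketch only):
#     from PIL import Image
#     paths, info, used_seed = generate_image(
#         Image.open("face.jpg"), "portrait photo", "blurry", 512, 768,
#         static_model_names[0], 30, 2023, False, 1, 1, True, 1.0)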

# Gradio interface, using the static list of models
with gr.Blocks() as demo:
    gr.Markdown("Developed by SECourses - only distributed on https://www.patreon.com/posts/95759342")
    with gr.Row():
        input_image = gr.Image(type="pil")
        generate_btn = gr.Button("Generate")
    with gr.Row():
        width = gr.Number(value=512, label="Width")
        height = gr.Number(value=768, label="Height")
    with gr.Row():
        num_inference_steps = gr.Number(value=30, label="Number of Inference Steps", step=1, minimum=10, maximum=100)
        seed = gr.Number(value=2023, label="Seed")
        randomize_seed = gr.Checkbox(value=True, label="Randomize Seed")
    with gr.Row():
        num_images = gr.Number(value=args.num_images, label="Number of Images to Generate", step=1, minimum=1)
        batch_size = gr.Number(value=1, label="Batch Size", step=1)
    with gr.Row():
        enable_shortcut = gr.Checkbox(value=True, label="Enable Shortcut")
        s_scale = gr.Number(value=1.0, label="Scale Factor (s_scale)", step=0.1, minimum=0.5, maximum=4.0)
    with gr.Row():
        positive_prompt = gr.Textbox(label="Positive Prompt")
        negative_prompt = gr.Textbox(label="Negative Prompt")
    with gr.Row():
        model_selector = gr.Dropdown(label="Select Model", choices=static_model_names, value=static_model_names[0])

    with gr.Column():
        output_gallery = gr.Gallery(label="Generated Images")
        output_text = gr.Textbox(label="Output Info")
        display_seed = gr.Textbox(label="Used Seed", interactive=False)

    generate_btn.click(
        generate_image,
        inputs=[input_image, positive_prompt, negative_prompt, width, height, model_selector,
                num_inference_steps, seed, randomize_seed, num_images, batch_size,
                enable_shortcut, s_scale],
        outputs=[output_gallery, output_text, display_seed],
    )

demo.launch(share=args.share, inbrowser=True)
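
# Environment note (assumed, not part of this commit): the script needs gradio,
# diffusers, transformers, insightface, onnxruntime-gpu, and opencv-python, plus
# the ip_adapter package from https://github.com/tencent-ailab/IP-Adapter.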