openfree committed on
Commit
6b64eec
1 Parent(s): d06f403

Create app-backup.py

Files changed (1)
  1. app-backup.py +288 -0
app-backup.py ADDED
@@ -0,0 +1,288 @@
+ import spaces
+ import time
+ import os
+
+ import gradio as gr
+ import torch
+ from einops import rearrange
+ from PIL import Image
+
+ from flux.cli import SamplingOptions
+ from flux.sampling import denoise, get_noise, get_schedule, prepare, unpack
+ from flux.util import load_ae, load_clip, load_flow_model, load_t5
+ from pulid.pipeline_flux import PuLIDPipeline
+ from pulid.utils import resize_numpy_image_long
+
+
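+ # Load the four FLUX.1 components (flow transformer, autoencoder, T5 and
+ # CLIP text encoders); with offload=True the two large modules start on
+ # CPU and are moved to the GPU stage by stage during sampling.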
+ def get_models(name: str, device: torch.device, offload: bool):
+     t5 = load_t5(device, max_length=128)
+     clip = load_clip(device)
+     model = load_flow_model(name, device="cpu" if offload else device)
+     model.eval()
+     ae = load_ae(name, device="cpu" if offload else device)
+     return model, ae, t5, clip
+
+
+ class FluxGenerator:
+     def __init__(self):
+         self.device = torch.device('cuda')
+         self.offload = False
+         self.model_name = 'flux-dev'
+         self.model, self.ae, self.t5, self.clip = get_models(
+             self.model_name,
+             device=self.device,
+             offload=self.offload,
+         )
+         self.pulid_model = PuLIDPipeline(self.model, 'cuda', weight_dtype=torch.bfloat16)
+         self.pulid_model.load_pretrain()
+
+
+ flux_generator = FluxGenerator()
+
+
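+ # flux_generator above is built once at import time, so each @spaces.GPU
+ # invocation below reuses the already-loaded weights instead of reloading
+ # them per request.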
+ @spaces.GPU
+ @torch.inference_mode()
+ def generate_image(
+     width,
+     height,
+     num_steps,
+     start_step,
+     guidance,
+     seed,
+     prompt,
+     id_image=None,
+     id_weight=1.0,
+     neg_prompt="",
+     true_cfg=1.0,
+     timestep_to_start_cfg=1,
+     max_sequence_length=128,
+ ):
+     flux_generator.t5.max_length = max_sequence_length
+
+     seed = int(seed)
+     if seed == -1:
+         seed = None
+
+     opts = SamplingOptions(
+         prompt=prompt,
+         width=width,
+         height=height,
+         num_steps=num_steps,
+         guidance=guidance,
+         seed=seed,
+     )
+
+     if opts.seed is None:
+         opts.seed = torch.Generator(device="cpu").seed()
+     print(f"Generating '{opts.prompt}' with seed {opts.seed}")
+     t0 = time.perf_counter()
+
+     use_true_cfg = abs(true_cfg - 1.0) > 1e-2
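+     # (true CFG needs a second, negative-prompt text pass; scales within
+     # 1e-2 of 1.0 fall back to the distilled guidance built into flux-dev)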
+
+     if id_image is not None:
+         id_image = resize_numpy_image_long(id_image, 1024)
+         id_embeddings, uncond_id_embeddings = flux_generator.pulid_model.get_id_embedding(id_image, cal_uncond=use_true_cfg)
+     else:
+         id_embeddings = None
+         uncond_id_embeddings = None
+
+     print(id_embeddings)  # debug output
+
+     # prepare input
+     x = get_noise(
+         1,
+         opts.height,
+         opts.width,
+         device=flux_generator.device,
+         dtype=torch.bfloat16,
+         seed=opts.seed,
+     )
+     print(x)  # debug output
+     timesteps = get_schedule(
+         opts.num_steps,
+         x.shape[-1] * x.shape[-2] // 4,
+         shift=True,
+     )
+
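+     # The schedule above uses x.shape[-2] * x.shape[-1] // 4 as the token
+     # count because the sampler packs 2x2 latent patches into one token;
+     # shift=True applies FLUX's resolution-dependent timestep shift.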
+     if flux_generator.offload:
+         flux_generator.t5, flux_generator.clip = flux_generator.t5.to(flux_generator.device), flux_generator.clip.to(flux_generator.device)
+     inp = prepare(t5=flux_generator.t5, clip=flux_generator.clip, img=x, prompt=opts.prompt)
+     inp_neg = prepare(t5=flux_generator.t5, clip=flux_generator.clip, img=x, prompt=neg_prompt) if use_true_cfg else None
+
+     # offload TEs to CPU, load model to GPU
+     if flux_generator.offload:
+         flux_generator.t5, flux_generator.clip = flux_generator.t5.cpu(), flux_generator.clip.cpu()
+         torch.cuda.empty_cache()
+         flux_generator.model = flux_generator.model.to(flux_generator.device)
+
+     # denoise initial noise
+     x = denoise(
+         flux_generator.model, **inp, timesteps=timesteps, guidance=opts.guidance, id=id_embeddings, id_weight=id_weight,
+         start_step=start_step, uncond_id=uncond_id_embeddings, true_cfg=true_cfg,
+         timestep_to_start_cfg=timestep_to_start_cfg,
+         neg_txt=inp_neg["txt"] if use_true_cfg else None,
+         neg_txt_ids=inp_neg["txt_ids"] if use_true_cfg else None,
+         neg_vec=inp_neg["vec"] if use_true_cfg else None,
+     )
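+     # id/id_weight inject the PuLID identity embedding from start_step
+     # onward; the neg_* tensors are only passed when true CFG is active.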
+
+     # offload model, load autoencoder to GPU
+     if flux_generator.offload:
+         flux_generator.model.cpu()
+         torch.cuda.empty_cache()
+         flux_generator.ae.decoder.to(x.device)
+
+     # decode latents to pixel space
+     x = unpack(x.float(), opts.height, opts.width)
+     with torch.autocast(device_type=flux_generator.device.type, dtype=torch.bfloat16):
+         x = flux_generator.ae.decode(x)
+
+     if flux_generator.offload:
+         flux_generator.ae.decoder.cpu()
+         torch.cuda.empty_cache()
+
+     t1 = time.perf_counter()
+
+     print(f"Done in {t1 - t0:.1f}s.")
+     # bring into PIL format
+     x = x.clamp(-1, 1)
+     # x = embed_watermark(x.float())
+     x = rearrange(x[0], "c h w -> h w c")
+
+     img = Image.fromarray((127.5 * (x + 1.0)).cpu().byte().numpy())
+     return img, str(opts.seed), flux_generator.pulid_model.debug_img_list
+
+
+ css = """
+ footer {
+     visibility: hidden;
+ }
+ """
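+
+ # create_demo builds the Blocks UI; its model_name/device/offload arguments
+ # mirror the CLI flags, but generation itself uses the global flux_generator.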
+ def create_demo(args, model_name: str, device: str = "cuda" if torch.cuda.is_available() else "cpu",
+                 offload: bool = False):
+     with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css) as demo:
+
+         with gr.Row():
+             with gr.Column():
+                 prompt = gr.Textbox(label="Prompt", value="portrait, color, cinematic")
+                 id_image = gr.Image(label="ID Image")
+                 id_weight = gr.Slider(0.0, 3.0, 1, step=0.05, label="ID weight")
+
+                 width = gr.Slider(256, 1536, 896, step=16, label="Width")
+                 height = gr.Slider(256, 1536, 1152, step=16, label="Height")
+                 num_steps = gr.Slider(1, 20, 20, step=1, label="Number of steps")
+                 start_step = gr.Slider(0, 10, 0, step=1, label="Timestep to start inserting ID")
+                 guidance = gr.Slider(1.0, 10.0, 4, step=0.1, label="Guidance")
+                 seed = gr.Textbox(-1, label="Seed (-1 for random)")
+                 max_sequence_length = gr.Slider(128, 512, 128, step=128,
+                                                 label="max_sequence_length for prompt (T5); smaller is faster")
+
+                 with gr.Accordion("Advanced Options (true CFG; true_cfg_scale=1 uses fake CFG, >1 uses true CFG. With true CFG, we recommend setting the guidance scale to 1.)", open=False):  # noqa: E501
+                     neg_prompt = gr.Textbox(
+                         label="Negative Prompt",
+                         value="bad quality, worst quality, text, signature, watermark, extra limbs")
+                     true_cfg = gr.Slider(1.0, 10.0, 1, step=0.1, label="True CFG scale")
+                     timestep_to_start_cfg = gr.Slider(0, 20, 1, step=1, label="Timestep to start CFG", visible=args.dev)
+
+                 generate_btn = gr.Button("Generate")
+
+             with gr.Column():
+                 output_image = gr.Image(label="Generated Image")
+                 seed_output = gr.Textbox(label="Used Seed")
+                 intermediate_output = gr.Gallery(label='Output', elem_id="gallery", visible=args.dev)
+                 # _CITE_ section removed
+
+         with gr.Row(), gr.Column():
+             gr.Markdown("## Examples")
+             example_inps = [
+                 [
+                     'a woman holding sign with glowing green text \"PuLID for FLUX\"',
+                     'example_inputs/liuyifei.png',
+                     4, 4, 2680261499100305976, 1
+                 ],
+                 [
+                     'portrait, side view',
+                     'example_inputs/liuyifei.png',
+                     4, 4, 1205240166692517553, 1
+                 ],
+                 [
+                     'white-haired woman with vr technology atmosphere, revolutionary exceptional magnum with remarkable details',  # noqa: E501
+                     'example_inputs/liuyifei.png',
+                     4, 4, 6349424134217931066, 1
+                 ],
+                 [
+                     'a young child is eating Icecream',
+                     'example_inputs/liuyifei.png',
+                     4, 4, 10606046113565776207, 1
+                 ],
+                 [
+                     'a man is holding a sign with text \"PuLID for FLUX\", winter, snowing, top of the mountain',
+                     'example_inputs/pengwei.jpg',
+                     4, 4, 2410129802683836089, 1
+                 ],
+                 [
+                     'portrait, candle light',
+                     'example_inputs/pengwei.jpg',
+                     4, 4, 17522759474323955700, 1
+                 ],
+                 [
+                     'profile shot dark photo of a 25-year-old male with smoke escaping from his mouth, the backlit smoke gives the image an ephemeral quality, natural face, natural eyebrows, natural skin texture, award winning photo, highly detailed face, atmospheric lighting, film grain, monochrome',  # noqa: E501
+                     'example_inputs/pengwei.jpg',
+                     4, 4, 17733156847328193625, 1
+                 ],
+                 [
+                     'American Comics, 1boy',
+                     'example_inputs/pengwei.jpg',
+                     1, 4, 13223174453874179686, 1
+                 ],
+                 [
+                     'portrait, pixar',
+                     'example_inputs/pengwei.jpg',
+                     1, 4, 9445036702517583939, 1
+                 ],
+             ]
+             gr.Examples(examples=example_inps, inputs=[prompt, id_image, start_step, guidance, seed, true_cfg],
+                         label='fake CFG')
+
+             example_inps = [
+                 [
+                     'portrait, made of ice sculpture',
+                     'example_inputs/lecun.jpg',
+                     1, 1, 3811899118709451814, 5
+                 ],
+             ]
+             gr.Examples(examples=example_inps, inputs=[prompt, id_image, start_step, guidance, seed, true_cfg],
+                         label='true CFG')
+
+         generate_btn.click(
+             fn=generate_image,
+             inputs=[width, height, num_steps, start_step, guidance, seed, prompt, id_image, id_weight, neg_prompt,
+                     true_cfg, timestep_to_start_cfg, max_sequence_length],
+             outputs=[output_image, seed_output, intermediate_output],
+         )
+
+     return demo
+
+
+ if __name__ == "__main__":
+     import argparse
+
+     parser = argparse.ArgumentParser(description="PuLID for FLUX.1-dev")
+     parser.add_argument("--name", type=str, default="flux-dev", choices=["flux-dev"],
+                         help="currently only flux-dev is supported")
+     parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu",
+                         help="Device to use")
+     parser.add_argument("--offload", action="store_true", help="Offload model to CPU when not in use")
+     parser.add_argument("--port", type=int, default=8080, help="Port to use")
+     parser.add_argument("--dev", action='store_true', help="Development mode")
+     parser.add_argument("--pretrained_model", type=str, help='for development')
+     args = parser.parse_args()
+
+     import huggingface_hub
+     huggingface_hub.login(os.getenv('HF_TOKEN'))
+
+     demo = create_demo(args, args.name, args.device, args.offload)
+     demo.launch(server_port=args.port)  # honor --port instead of Gradio's default
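+ # Example local runs (assumes HF_TOKEN grants access to the gated
+ # black-forest-labs/FLUX.1-dev weights and that the flux/ and pulid/
+ # packages plus example_inputs/ are on the path):
+ #   python app-backup.py
+ #   python app-backup.py --offload --port 8080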