Shaamallow committed
Commit 903c049
Parent: 62470d0

add init demo

Files changed (3)
  1. README.md +6 -7
  2. main.py +372 -0
  3. requirements.txt +13 -0
README.md CHANGED
@@ -1,12 +1,11 @@
  ---
- title: Noisy Style
- emoji: 🏃
- colorFrom: yellow
- colorTo: red
+ title: Noisy-Style
+ emoji: 🎨
+ colorFrom: blue
+ colorTo: pink
  sdk: gradio
- sdk_version: 4.44.0
+ sdk_version: 4.37.2
  app_file: app.py
  pinned: false
+ license: cc-by-nc-4.0
  ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
main.py ADDED
@@ -0,0 +1,372 @@
+ import os
+ import random
+ from typing import Optional
+
+ import gradio as gr
+ import numpy as np
+ import PIL.Image
+ import spaces
+ import torch
+ from diffusers import (AutoencoderKL, DDIMInverseScheduler, DDIMScheduler,
+                        StableDiffusionXLPipeline)
+ from torchvision.transforms import ToTensor
+
+ # pyright: reportPrivateImportUsage=false
+
+
+ DESCRIPTION = """
+ # 🎨 Noisy-Style 🎨
+ This is an interactive demo of noisy DDIM inversion on top of the Instant-Style styling method.
+
+ The method is proposed in [Controllability of diffusion models]() by *Eyal Benaroche, Clément Chadebec, Onur Tasar, and Benjamin Aubin* from Jasper Research, in the context of Eyal's internship with Ecole Polytechnique.
+
+ A style benchmark, [style-bench](https://gojasper.github.io/style-bench), is also provided to facilitate the evaluation of diffusion models for styling purposes.
+ """
+
+ OPEN_SOURCE_PROMO = """
+ If you enjoy this Space, please also support *open source* by giving a ⭐ to our repo [![GitHub Stars](https://img.shields.io/github/stars/gojasper/style-bench?style=social)](https://github.com/gojasper/style-bench)
+ """
+
+ DISCLAIMER = """
+ This demo is for research purposes only. Users are solely responsible for any content they create, and it is their obligation to ensure that it adheres to appropriate and ethical standards."""
+
+ if not torch.cuda.is_available():
+     DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
+
+ MAX_SEED = np.iinfo(np.int32).max
+ MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1024"))
+ USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE") == "1"
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ if gr.NO_RELOAD:
+     if torch.cuda.is_available():
+         # fp16-safe SDXL VAE (avoids NaNs when encoding/decoding in half precision)
+         vae = AutoencoderKL.from_pretrained(
+             "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
+         )
+         pipe = StableDiffusionXLPipeline.from_pretrained(
+             "stabilityai/stable-diffusion-xl-base-1.0",
+             vae=vae,
+             torch_dtype=torch.float16,
+             use_safetensors=True,
+             variant="fp16",
+         )
+         # the IP-Adapter feeds style-image features into the UNet's cross-attention
+         pipe.load_ip_adapter(
+             "h94/IP-Adapter",
+             subfolder="sdxl_models",
+             weight_name="ip-adapter_sdxl.safetensors",
+         )
+         pipe.to(device)
+
+ # DDIM for sampling, plus its inverse (image -> noise) built from the same config
+ forward_scheduler = DDIMScheduler.from_pretrained(
+     "stabilityai/stable-diffusion-xl-base-1.0", subfolder="scheduler"
+ )
+ invert_scheduler = DDIMInverseScheduler(**forward_scheduler.config)
+
+ css = """
+ h1 {
+     text-align: center;
+     display: block;
+ }
+ p {
+     text-align: justify;
+     display: block;
+ }
+ """
+
+
+ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
+     if randomize_seed:
+         seed = random.randint(0, MAX_SEED)
+     return seed
+
+
+ def img_to_latents(x: torch.Tensor, vae: AutoencoderKL) -> torch.Tensor:
+     # map pixels from [0, 1] to [-1, 1] before encoding
+     x = 2.0 * x - 1.0
+     posterior = vae.encode(x).latent_dist
+     # use the VAE's own scaling factor (0.13025 for the SDXL VAE,
+     # rather than hardcoding the SD1.x value 0.18215)
+     latents = posterior.mean * vae.config.scaling_factor
+     return latents
+
+
+ def invert_image(model, image: np.ndarray, n_steps: int, width: int, height: int):
+     model.scheduler = invert_scheduler
+
+     image = PIL.Image.fromarray(image).resize((width, height))
+     image_tensor = ToTensor()(image).to(model.device, dtype=torch.float16)
+     image_tensor = image_tensor.unsqueeze(0)
+     latent = img_to_latents(image_tensor, model.vae)
+
+     # disable the IP-Adapter during inversion so only the DDIM ODE is reversed
+     model.set_ip_adapter_scale(0)
+
+     inv_latents = model(
+         prompt="",
+         negative_prompt="",
+         ip_adapter_image=image,
+         guidance_scale=1.0,  # no classifier-free guidance while inverting
+         output_type="latent",
+         return_dict=False,
+         num_inference_steps=n_steps,
+         latents=latent,
+     )[0]
+
+     return inv_latents
+
+
+ @spaces.GPU
+ def generate(
+     prompt: str,
+     negative_prompt: str = "",
+     prompt_2: str = "",
+     negative_prompt_2: str = "",
+     use_negative_prompt: bool = False,
+     use_prompt_2: bool = False,
+     use_negative_prompt_2: bool = False,
+     seed: int = 0,
+     width: int = 1024,
+     height: int = 1024,
+     guidance_scale_base: float = 5.0,
+     num_inference_steps_base: int = 25,
+     style_image_value: Optional[np.ndarray] = None,
+     noise_scale: float = 1.5,
+ ) -> PIL.Image.Image:
+     torch.manual_seed(seed)
+
+     if not use_negative_prompt:
+         negative_prompt = None  # type: ignore
+     if not use_prompt_2:
+         prompt_2 = None  # type: ignore
+     if not use_negative_prompt_2:
+         negative_prompt_2 = None  # type: ignore
+
+     # fresh Gaussian noise in latent space (SDXL latents are NCHW at 1/8 resolution)
+     noise = torch.randn(1, 4, height // 8, width // 8).to(device, dtype=torch.float16)
+
+     if style_image_value is not None:
+         # invert the style image to its DDIM latent, then renoise it
+         latent = invert_image(pipe, style_image_value, 30, width, height)
+
+         # variance-preserving mix: Var(latent + s * noise) = 1 + s**2,
+         # so divide by sqrt(1 + s**2) to keep the latent at unit scale
+         latent = latent + noise_scale * noise
+         latent = latent / torch.sqrt(
+             torch.tensor(1 + noise_scale**2).to(device, dtype=torch.float16)
+         )
+     else:
+         latent = noise
+
+     # Instant-Style: apply the IP-Adapter only in the style-sensitive upsampling block
+     scale = {
+         "up": {"block_0": [0.0, 1.0, 0.0]},
+     }
+     pipe.set_ip_adapter_scale(scale)
+
+     pipe.scheduler = forward_scheduler
+     image = pipe(
+         prompt=prompt,
+         negative_prompt=negative_prompt,
+         ip_adapter_image=style_image_value,
+         latents=latent,
+         prompt_2=prompt_2,
+         negative_prompt_2=negative_prompt_2,
+         guidance_scale=guidance_scale_base,
+         num_inference_steps=num_inference_steps_base,
+         output_type="pil",
+     ).images[0]
+
+     return image
+
+
+ examples = [
+     "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
+     "An astronaut riding a green horse",
+ ]
+
+ with gr.Blocks(css=css) as demo:
+     gr.Markdown(DESCRIPTION)
+
+     gr.Markdown(OPEN_SOURCE_PROMO)
+
+     with gr.Row():
+         with gr.Column():
+             style_image = gr.Image(label="Style image")
+
+             noise_scale = gr.Slider(
+                 label="Noise Scale",
+                 minimum=0,
+                 maximum=5,
+                 step=0.1,
+                 value=1.5,
+             )
+
+         with gr.Column():
+             with gr.Row():
+                 prompt = gr.Text(
+                     label="Prompt",
+                     show_label=False,
+                     max_lines=1,
+                     placeholder="Enter your prompt",
+                     container=False,
+                 )
+                 run_button = gr.Button("Run", scale=0)
+
+             result = gr.Image(label="Result", show_label=False)
+
+             with gr.Accordion("Advanced options", open=False):
+                 with gr.Row():
+                     use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=False)
+                     use_prompt_2 = gr.Checkbox(label="Use prompt 2", value=False)
+                     use_negative_prompt_2 = gr.Checkbox(
+                         label="Use negative prompt 2", value=False
+                     )
+                 negative_prompt = gr.Text(
+                     label="Negative prompt",
+                     max_lines=1,
+                     placeholder="Enter a negative prompt",
+                     visible=False,
+                 )
+                 prompt_2 = gr.Text(
+                     label="Prompt 2",
+                     max_lines=1,
+                     placeholder="Enter your prompt",
+                     visible=False,
+                 )
+                 negative_prompt_2 = gr.Text(
+                     label="Negative prompt 2",
+                     max_lines=1,
+                     placeholder="Enter a negative prompt",
+                     visible=False,
+                 )
+
+                 seed = gr.Slider(
+                     label="Seed",
+                     minimum=0,
+                     maximum=MAX_SEED,
+                     step=1,
+                     value=0,
+                 )
+                 randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+                 with gr.Row():
+                     width = gr.Slider(
+                         label="Width",
+                         minimum=256,
+                         maximum=MAX_IMAGE_SIZE,
+                         step=32,
+                         value=1024,
+                     )
+                     height = gr.Slider(
+                         label="Height",
+                         minimum=256,
+                         maximum=MAX_IMAGE_SIZE,
+                         step=32,
+                         value=1024,
+                     )
+
+                 with gr.Row():
+                     guidance_scale_base = gr.Slider(
+                         label="Guidance scale for base",
+                         minimum=1,
+                         maximum=20,
+                         step=0.1,
+                         value=5.0,
+                     )
+                     num_inference_steps_base = gr.Slider(
+                         label="Number of inference steps for base",
+                         minimum=10,
+                         maximum=100,
+                         step=1,
+                         value=25,
+                     )
+                 # hidden refiner controls (not wired to `generate`)
+                 with gr.Row(visible=False) as refiner_params:
+                     guidance_scale_refiner = gr.Slider(
+                         label="Guidance scale for refiner",
+                         minimum=1,
+                         maximum=20,
+                         step=0.1,
+                         value=5.0,
+                     )
+                     num_inference_steps_refiner = gr.Slider(
+                         label="Number of inference steps for refiner",
+                         minimum=10,
+                         maximum=100,
+                         step=1,
+                         value=25,
+                     )
+
+     gr.Examples(
+         examples=examples,
+         inputs=prompt,
+         outputs=result,
+         fn=generate,
+     )
+
+     gr.Markdown("## Disclaimer")
+     gr.Markdown(DISCLAIMER)
+
+     use_negative_prompt.change(
+         fn=lambda x: gr.update(visible=x),
+         inputs=use_negative_prompt,
+         outputs=negative_prompt,
+         queue=False,
+         api_name=False,
+     )
+     use_prompt_2.change(
+         fn=lambda x: gr.update(visible=x),
+         inputs=use_prompt_2,
+         outputs=prompt_2,
+         queue=False,
+         api_name=False,
+     )
+     use_negative_prompt_2.change(
+         fn=lambda x: gr.update(visible=x),
+         inputs=use_negative_prompt_2,
+         outputs=negative_prompt_2,
+         queue=False,
+         api_name=False,
+     )
+
+     gr.on(
+         triggers=[
+             prompt.submit,
+             negative_prompt.submit,
+             prompt_2.submit,
+             negative_prompt_2.submit,
+             run_button.click,
+         ],
+         fn=randomize_seed_fn,
+         inputs=[seed, randomize_seed],
+         outputs=seed,
+         queue=False,
+         api_name=False,
+     ).then(
+         fn=generate,
+         inputs=[
+             prompt,
+             negative_prompt,
+             prompt_2,
+             negative_prompt_2,
+             use_negative_prompt,
+             use_prompt_2,
+             use_negative_prompt_2,
+             seed,
+             width,
+             height,
+             guidance_scale_base,
+             num_inference_steps_base,
+             style_image,
+             noise_scale,
+         ],
+         outputs=result,
+         api_name="run",
+     )
+
+ if __name__ == "__main__":
+     demo.launch()
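
The heart of the demo is the renoising step in `generate`: the DDIM-inverted style latent is mixed with fresh Gaussian noise as `(latent + s * noise) / sqrt(1 + s**2)`. A minimal standalone sketch (shapes are illustrative) of why the divisor is needed: for independent, roughly unit-variance tensors the sum has variance `1 + s**2`, so dividing by its square root restores the unit scale the sampler expects at its terminal timestep. At `s = 0` sampling starts from the inverted image itself; as `s` grows, the starting point approaches pure noise and the style is carried mostly by the IP-Adapter conditioning.

```python
import torch

s = 1.5  # noise_scale, the demo's default slider value

# stand-ins for the inverted latent and fresh noise; both are treated as
# approximately unit-variance, like SDXL latents at the final timestep
latent = torch.randn(1, 4, 128, 128)
noise = torch.randn(1, 4, 128, 128)

# Var(latent + s * noise) = 1 + s**2 for independent unit-variance tensors,
# so dividing by sqrt(1 + s**2) brings the mix back to unit variance
mixed = (latent + s * noise) / torch.sqrt(torch.tensor(1.0 + s**2))

print(f"latent std: {latent.std().item():.3f}, "
      f"mixed std: {mixed.std().item():.3f}")  # both ~1.0
```
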
requirements.txt ADDED
@@ -0,0 +1,13 @@
+ accelerate
+ diffusers
+ gradio==4.37.2
+ numpy==1.26.4
+ spaces
+ --extra-index-url https://download.pytorch.org/whl/cu118
+ torch==2.0.1
+ torchvision
+ transformers>=4.34.0
+ xformers
+ ftfy
+ peft==0.6.0
+ optimum
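
Because the run button's event chain is registered with `api_name="run"`, the Space also exposes a programmatic endpoint. A hedged sketch with `gradio_client` (the Space id and local file name are hypothetical; the positional argument order mirrors the `inputs` list wired to `generate`, and depending on the client version the image argument may need `handle_file` or a plain file path):

```python
from gradio_client import Client, handle_file  # handle_file needs a recent gradio_client

# hypothetical Space id; replace with the actual deployment
client = Client("Shaamallow/noisy-style")

result = client.predict(
    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",  # prompt
    "",      # negative_prompt
    "",      # prompt_2
    "",      # negative_prompt_2
    False,   # use_negative_prompt
    False,   # use_prompt_2
    False,   # use_negative_prompt_2
    0,       # seed
    1024,    # width
    1024,    # height
    5.0,     # guidance_scale_base
    25,      # num_inference_steps_base
    handle_file("style.png"),  # style_image (hypothetical local file)
    1.5,     # noise_scale
    api_name="/run",
)
print(result)  # local path to the image downloaded from the Space
```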