Thafx Manjushri committed on
Commit
2b98ab4
0 Parent(s):

Duplicate from Manjushri/SDXL-1.0-CPU


Co-authored-by: Manjushri Bodhisattva <[email protected]>

Files changed (4)
  1. .gitattributes +34 -0
  2. README.md +14 -0
  3. app.py +51 -0
  4. requirements.txt +8 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ title: SDXL 1.0 CPU
+ emoji: 🐢
+ colorFrom: green
+ colorTo: indigo
+ sdk: gradio
+ sdk_version: 3.35.2
+ app_file: app.py
+ pinned: false
+ license: mit
+ duplicated_from: Manjushri/SDXL-1.0-CPU
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,51 @@
+ import os
+ import gradio as gr
+ import torch
+ import modin.pandas as pd  # pulled in for the Space runtime; not used directly below
+ from diffusers import DiffusionPipeline
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ if torch.cuda.is_available():
+     # Tune the CUDA caching allocator before any large allocations are made.
+     os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:6000"
+     torch.cuda.max_memory_allocated(device=device)  # peak-memory query only; no side effect
+     torch.cuda.empty_cache()
+     pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
+     pipe.enable_xformers_memory_efficient_attention()
+     pipe = pipe.to(device)
+     pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+     torch.cuda.empty_cache()
+     refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16")
+     refiner.enable_xformers_memory_efficient_attention()
+     refiner.enable_sequential_cpu_offload()
+     refiner.unet = torch.compile(refiner.unet, mode="reduce-overhead", fullgraph=True)
+ else:
+     pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", use_safetensors=True)
+     pipe = pipe.to(device)
+     pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+     refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True)
+     refiner = refiner.to(device)
+     refiner.unet = torch.compile(refiner.unet, mode="reduce-overhead", fullgraph=True)
+
+ def genie(prompt, negative_prompt, height, width, scale, steps, seed, prompt_2, negative_prompt_2, high_noise_frac):
+     # A seed of 0 draws a fresh random seed; any other value makes the run reproducible.
+     generator = torch.manual_seed(torch.seed() if seed == 0 else seed)
+     # The base pipeline returns latents; the refiner finishes denoising from high_noise_frac onward.
+     int_image = pipe(prompt, prompt_2=prompt_2, negative_prompt_2=negative_prompt_2, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale, num_images_per_prompt=1, generator=generator, output_type="latent").images
+     image = refiner(prompt=prompt, prompt_2=prompt_2, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, image=int_image, denoising_start=high_noise_frac).images[0]
+     return image
+
+ gr.Interface(fn=genie, inputs=[gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'),
+     gr.Textbox(label='What you Do Not want the AI to generate.'),
+     gr.Slider(512, 1024, 768, step=128, label='Height'),
+     gr.Slider(512, 1024, 768, step=128, label='Width'),
+     gr.Slider(1, 15, 10, label='Guidance Scale'),
+     gr.Slider(25, maximum=50, value=25, step=1, label='Number of Iterations'),
+     gr.Slider(minimum=1, step=1, maximum=999999999999999999, randomize=True, label='Seed'),
+     gr.Textbox(label='Embedded Prompt'),
+     gr.Textbox(label='Embedded Negative Prompt'),
+     gr.Slider(minimum=.7, maximum=.99, value=.95, step=.01, label='Refiner Denoise Start %')],
+     outputs='image',
+     title="Stable Diffusion XL 1.0 CPU or GPU",
+     description="SDXL 1.0 CPU or GPU. Currently running on CPU. <br><br><b>WARNING:</b> Extremely Slow. 65s/Iteration. Expect 25-50 mins per image for 25-50 iterations respectively. This model is capable of producing NSFW (Softcore) images.",
+     article="If You Enjoyed this Demo and would like to Donate, you can send to any of these Wallets. <br>BTC: bc1qzdm9j73mj8ucwwtsjx4x4ylyfvr6kp7svzjn84 <br>3LWRoKYx6bCLnUrKEdnPo3FCSPQUSFDjFP <br>DOGE: DK6LRc4gfefdCTRk9xPD239N31jh9GjKez <br>SHIB (BEP20): 0xbE8f2f3B71DFEB84E5F7E3aae1909d60658aB891 <br>PayPal: https://www.paypal.me/ManjushriBodhisattva <br>ETH: 0xbE8f2f3B71DFEB84E5F7E3aae1909d60658aB891 <br>Code Monkey: <a href=\"https://huggingface.co/Manjushri\">Manjushri</a>").launch(debug=True, max_threads=80)
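For reference, a minimal standalone sketch of the base-to-refiner handoff that genie() performs, assuming the same diffusers SDXL checkpoints as above; it runs CPU-only at default precision, and the prompt, seed, and output filename are illustrative placeholders, not values from this Space:

import torch
from diffusers import DiffusionPipeline

base = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", use_safetensors=True)
refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True)

generator = torch.manual_seed(12345)  # illustrative seed
# The base pipeline stops at latents instead of decoding to pixels...
latents = base("an astronaut riding a horse", num_inference_steps=25, guidance_scale=10,
               generator=generator, output_type="latent").images
# ...and the refiner resumes the denoising schedule at denoising_start
# (the 'Refiner Denoise Start %' slider in the UI above).
image = refiner("an astronaut riding a horse", image=latents, denoising_start=0.95).images[0]
image.save("sdxl_test.png")  # illustrative output path

Handing the refiner latents rather than a decoded image skips a VAE decode/encode round trip, which is why genie() passes output_type="latent" to the base pipeline.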
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ torch
+ diffusers
+ transformers
+ accelerate
+ ftfy
+ xformers
+ modin[all]
+ invisible_watermark
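An optional sanity-check sketch, assuming these requirements have been installed with pip; it only confirms that the core packages import and whether CUDA is visible before launching app.py:

# Verify the installed stack imports and report versions plus CUDA visibility.
import torch, diffusers, transformers, accelerate
print("torch", torch.__version__, "- CUDA available:", torch.cuda.is_available())
print("diffusers", diffusers.__version__, "| transformers", transformers.__version__,
      "| accelerate", accelerate.__version__)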