lanzhiwang committed on
Commit 2e1269a
1 Parent(s): 9530eec
Files changed (3)
  1. app.py +81 -25
  2. app1.py +26 -0
  3. requirements.txt +5 -3
app.py CHANGED
@@ -1,26 +1,82 @@
-# from diffusers import DiffusionPipeline
-from diffusers import DDPMPipeline, DDIMPipeline, PNDMPipeline
-import torch
 import gradio as gr
-import random
-
-pipeline = DDPMPipeline.from_pretrained("google/ddpm-cat-256")
-# pipeline.to("cuda")
-
-def predict(steps, seed):
-    generator = torch.manual_seed(seed)
-    for i in range(1,steps):
-        yield pipeline(generator=generator, num_inference_steps=i).images[0]
-
-random_seed = random.randint(0, 2147483647)
-gr.Interface(
-    predict,
-    inputs=[
-        gr.inputs.Slider(1, 100, label='Inference Steps', default=5, step=1),
-        gr.inputs.Slider(0, 2147483647, label='Seed', default=random_seed, step=1),
-    ],
-    outputs=gr.Image(shape=[128,128], type="pil", elem_id="output_image"),
-    css="#output_image{width: 256px}",
-    title="Unconditional butterflies",
-    description="图片生成器",
-).queue().launch()
+from diffusers import DiffusionPipeline
+import torch
+# for the more complex generation variant
+from diffusers import DDPMScheduler, UNet2DModel
+from PIL import Image
+import numpy as np
+
+
+##############################################
+# Helper functions
+##############################################
+
+#######################################
+# Generate an image from the entered prompt - using the pipeline
+def erzeuge(prompt):
+    return pipeline(prompt).images #[0]
+
+########################################
+# Generate an image - not via the pipeline, but with more configuration options
+def erzeuge_komplex(prompt):
+    scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256")
+    model = UNet2DModel.from_pretrained("google/ddpm-cat-256").to("cuda")
+    scheduler.set_timesteps(50)
+
+    sample_size = model.config.sample_size
+    noise = torch.randn((1, 3, sample_size, sample_size)).to("cuda")
+    input = noise
+
+    for t in scheduler.timesteps:
+        with torch.no_grad():
+            noisy_residual = model(input, t).sample
+        prev_noisy_sample = scheduler.step(noisy_residual, t, input).prev_sample
+        input = prev_noisy_sample
+
+    image = (input / 2 + 0.5).clamp(0, 1)
+    image = image.cpu().permute(0, 2, 3, 1).numpy()[0]
+    image = Image.fromarray((image * 255).round().astype("uint8"))
+    return image
+
+######################################################
+# Load models
+#######################################
+# Alternative way to generate
+#gr.Interface.load("models/stabilityai/stable-diffusion-2").launch()
+
+#######################################
+# Alternative: load the model via a pipeline
+pipeline = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2")
+pipeline.to("cuda")
+
+########################################
+# Alternative: generate an image - not via the pipeline, but with more configuration options
+#scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256")
+#model = UNet2DModel.from_pretrained("google/ddpm-cat-256").to("cuda")
+
+
+######################################################
+# Build the Gradio UI
+######################################################
+with gr.Blocks() as demo:
+    with gr.Column(variant="panel"):
+        with gr.Row(variant="compact"):
+            text = gr.Textbox(
+                label="Deine Beschreibung:",
+                show_label=False,
+                max_lines=1,
+                placeholder="Bildbeschreibung",
+            ).style(
+                container=False,
+            )
+            btn = gr.Button("erzeuge Bild").style(full_width=False, min_width=100)
+
+        gallery = gr.Gallery(
+            label="Erzeugtes Bild", show_label=False, elem_id="gallery"
+        ).style(columns=[2], rows=[2], object_fit="contain", height="auto")
+
+    btn.click(erzeuge, inputs=[text], outputs=[gallery])
+    text.submit(erzeuge, inputs=[text], outputs=[gallery])
+
+if __name__ == "__main__":
+    demo.launch()
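Note on the rewritten generation path: `erzeuge` leaves seeding to the pipeline's default random state, while the removed `predict` exposed a seed slider. If reproducible output is wanted, `DiffusionPipeline.__call__` accepts `generator` and `num_inference_steps` arguments; a minimal sketch, reusing the `pipeline` object and `torch` import from app.py above (the `erzeuge_mit_seed` helper and its `seed`/`steps` defaults are illustrative, not part of this commit):

def erzeuge_mit_seed(prompt, seed=0, steps=25):
    # Seed a dedicated generator so the same prompt + seed reproduces the same image.
    generator = torch.Generator("cuda").manual_seed(seed)
    return pipeline(prompt, generator=generator, num_inference_steps=steps).images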
app1.py ADDED
@@ -0,0 +1,26 @@
+# from diffusers import DiffusionPipeline
+from diffusers import DDPMPipeline, DDIMPipeline, PNDMPipeline
+import torch
+import gradio as gr
+import random
+
+pipeline = DDPMPipeline.from_pretrained("google/ddpm-cat-256")
+# pipeline.to("cuda")
+
+def predict(steps, seed):
+    generator = torch.manual_seed(seed)
+    for i in range(1,steps):
+        yield pipeline(generator=generator, num_inference_steps=i).images[0]
+
+random_seed = random.randint(0, 2147483647)
+gr.Interface(
+    predict,
+    inputs=[
+        gr.inputs.Slider(1, 100, label='Inference Steps', default=5, step=1),
+        gr.inputs.Slider(0, 2147483647, label='Seed', default=random_seed, step=1),
+    ],
+    outputs=gr.Image(shape=[128,128], type="pil", elem_id="output_image"),
+    css="#output_image{width: 256px}",
+    title="Unconditional butterflies",
+    description="图片生成器",
+).queue().launch()
requirements.txt CHANGED
@@ -1,3 +1,5 @@
-diffusers==0.23.1
-gradio==4.4.1
-torch
+diffusers[torch]==0.23.1
+gradio==4.4.1
+torch==2.1.1
+accelerate
+transformers==4.29.1