ChenWu98 committed
Commit e602685
1 Parent(s): 90d82c3

Update app.py

Files changed (1)
  1. app.py +57 -95
app.py CHANGED
@@ -7,69 +7,39 @@ import streamlit as st
 
 is_colab = utils.is_google_colab()
 
-scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
-                          num_train_timesteps=1000, clip_sample=False, set_alpha_to_one=False)
-
-model_id_or_path = "CompVis/stable-diffusion-v1-4"
-pipe = CycleDiffusionPipeline.from_pretrained(model_id_or_path,
-                                              use_auth_token=st.secrets["USER_TOKEN"],
-                                              scheduler=scheduler)
-
-if torch.cuda.is_available():
-    pipe = pipe.to("cuda")
+if False:
+    scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
+                              num_train_timesteps=1000, clip_sample=False, set_alpha_to_one=False)
+
+    model_id_or_path = "CompVis/stable-diffusion-v1-4"
+    pipe = CycleDiffusionPipeline.from_pretrained(model_id_or_path,
+                                                  use_auth_token=st.secrets["USER_TOKEN"],
+                                                  scheduler=scheduler)
+
+    if torch.cuda.is_available():
+        pipe = pipe.to("cuda")
 
 device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"
 
 
-def inference(model_name, prompt, guidance, steps, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt="", inpaint_image=None):
-
-    global current_model
-    for model in models:
-        if model.name == model_name:
-            current_model = model
-            model_path = current_model.path
-
-    generator = torch.Generator('cuda').manual_seed(seed) if seed != 0 else None
-
-    if img is not None:
-        return img_to_img(model_path, prompt, neg_prompt, img, strength, guidance, steps, width, height, generator)
-    else:
-        return txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, generator, inpaint_image)
-
-
-def img_to_img(model_path, prompt, neg_prompt, img, strength, guidance, steps, width, height, generator=None):
-
-    global last_mode
-    global pipe
-    global current_model_path
-    if model_path != current_model_path or last_mode != "img2img":
-        current_model_path = model_path
-
-        if is_colab or current_model == custom_model:
-            pipe = StableDiffusionImg2ImgPipeline.from_pretrained(current_model_path, torch_dtype=torch.float16, scheduler=scheduler)
-        else:
-            pipe.to("cpu")
-            pipe = current_model.pipe_i2i
-
-        if torch.cuda.is_available():
-            pipe = pipe.to("cuda")
-        last_mode = "img2img"
-
-    prompt = current_model.prefix + prompt
+def inference(source_prompt, target_prompt, source_guidance_scale=1, guidance_scale=5, num_inference_steps=100,
+              width=512, height=512, seed=0, img=None, strength=0.7):
+
+    torch.manual_seed(seed)
+
     ratio = min(height / img.height, width / img.width)
-    img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)
-    result = pipe(
-        prompt,
-        negative_prompt = neg_prompt,
-        # num_images_per_prompt=n_images,
-        init_image = img,
-        num_inference_steps = int(steps),
-        strength = strength,
-        guidance_scale = guidance,
-        width = width,
-        height = height,
-        generator = generator)
-
+    img = img.resize((int(img.width * ratio), int(img.height * ratio)))
+
+    result = pipe(prompt=target_prompt,
+                  source_prompt=source_prompt,
+                  init_image=img,
+                  num_inference_steps=num_inference_steps,
+                  eta=0.1,
+                  strength=strength,
+                  guidance_scale=guidance_scale,
+                  source_guidance_scale=source_guidance_scale,
+                  ).images[0]
+
     return replace_nsfw_images(result)
 
 
@@ -99,60 +69,52 @@ with gr.Blocks(css=css) as demo:
         """
     )
     with gr.Row():
-
-        with gr.Column(scale=55):
-            with gr.Group():
-
-                with gr.Row():
-                    prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2, placeholder="Enter prompt. Style applied automatically").style(container=False)
-                    generate = gr.Button(value="Generate").style(rounded=(False, True, True, False))
-
-                image_out = gr.Image(height=512)
-                # gallery = gr.Gallery(
-                #     label="Generated images", show_label=False, elem_id="gallery"
-                # ).style(grid=[1], height="auto")
-
-        with gr.Column(scale=45):
-            with gr.Tab("Options"):
-                with gr.Group():
-                    neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image")
-
-                    # n_images = gr.Slider(label="Images", value=1, minimum=1, maximum=4, step=1)
-
-                    with gr.Row():
-                        guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15)
-                        steps = gr.Slider(label="Steps", value=25, minimum=2, maximum=75, step=1)
-
-                    with gr.Row():
-                        width = gr.Slider(label="Width", value=512, minimum=64, maximum=1024, step=8)
-                        height = gr.Slider(label="Height", value=512, minimum=64, maximum=1024, step=8)
-
-                    seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1)
-
-            with gr.Tab("Image to image"):
-                with gr.Group():
-                    image = gr.Image(label="Image", height=256, tool="editor", type="pil")
-                    strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5)
-
-            with gr.Tab("Inpainting"):
-                inpaint_image = gr.Image(source='upload', tool='sketch', type="pil", label="Upload").style(height=256)
-
-    model_name.change(lambda x: gr.update(visible = x == models[0].name), inputs=model_name, outputs=custom_model_group)
-    if is_colab:
-        custom_model_path.change(custom_model_changed, inputs=custom_model_path, outputs=None)
-    # n_images.change(lambda n: gr.Gallery().style(grid=[2 if n > 1 else 1], height="auto"), inputs=n_images, outputs=gallery)
-
-    inputs = [model_name, prompt, guidance, steps, width, height, seed, image, strength, neg_prompt, inpaint_image]
+
+        with gr.Column(scale=55):
+            with gr.Group():
+
+                with gr.Row():
+                    generate = gr.Button(value="Generate").style(rounded=(False, True, True, False))
+                    image = gr.Image(label="Source image", height=256, tool="editor", type="pil")
+
+                image_out = gr.Image(height=512)
+                # gallery = gr.Gallery(
+                #     label="Generated images", show_label=False, elem_id="gallery"
+                # ).style(grid=[1], height="auto")
+
+        with gr.Column(scale=45):
+            with gr.Tab("Options"):
+                with gr.Group():
+                    source_prompt = gr.Textbox(label="Source prompt", placeholder="Source prompt describes the input image")
+                    target_prompt = gr.Textbox(label="Target prompt", placeholder="Target prompt describes the output image")
+
+                    with gr.Row():
+                        source_guidance_scale = gr.Slider(label="Source guidance scale", value=1, minimum=1, maximum=10)
+                        guidance_scale = gr.Slider(label="Target guidance scale", value=5, minimum=1, maximum=10)
+
+                    with gr.Row():
+                        steps = gr.Slider(label="Number of inference steps", value=100, minimum=25, maximum=500, step=1)
+                        strength = gr.Slider(label="Strength", value=0.7, minimum=0.5, maximum=1, step=0.01)
+
+                    with gr.Row():
+                        width = gr.Slider(label="Width", value=512, minimum=64, maximum=1024, step=8)
+                        height = gr.Slider(label="Height", value=512, minimum=64, maximum=1024, step=8)
+
+                    seed = gr.Slider(0, 2147483647, label='Seed', value=0, step=1)
+
+    inputs = [source_prompt, target_prompt, source_guidance_scale, guidance_scale, num_inference_steps,
+              width, height, seed, img, strength]
     prompt.submit(inference, inputs=inputs, outputs=image_out)
     generate.click(inference, inputs=inputs, outputs=image_out)
 
-    ex = gr.Examples([
-        [models[1].name, "jason bateman disassembling the demon core", 7.5, 50],
-        [models[4].name, "portrait of dwayne johnson", 7.0, 75],
-        [models[5].name, "portrait of a beautiful alyx vance half life", 10, 50],
-        [models[6].name, "Aloy from Horizon: Zero Dawn, half body portrait, smooth, detailed armor, beautiful face, illustration", 7.0, 45],
-        [models[5].name, "fantasy portrait painting, digital art", 4.0, 30],
-    ], [model_name, prompt, guidance, steps, seed], image_out, inference, cache_examples=False)
+    ex = gr.Examples(
+        [
+            ["A", "B", 7.5, 50, None],  # TODO: load image from a file.
+            [],
+        ],
+        [source_prompt, target_prompt, source_guidance_scale, guidance_scale, num_inference_steps,
+         width, height, seed, img, strength],
+        image_out, inference, cache_examples=False)
 
     gr.Markdown('''
     Models by [@nitrosocke](https://huggingface.co/nitrosocke), [@haruu1367](https://twitter.com/haruu1367), [@Helixngc7293](https://twitter.com/DGSpitzer) and others. ❤️<br>
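
For reference, the CycleDiffusion inference path this commit wires up can be exercised outside the Gradio app roughly as follows. This is a minimal sketch assuming the diffusers version targeted by the Space (argument names such as init_image follow the code in the diff); the input file, prompts, and output path are placeholders and are not part of the commit.

# Minimal usage sketch (assumption: diffusers exposes CycleDiffusionPipeline with the
# `init_image` argument used in the diff; "horse.png" is a placeholder input image).
import torch
from PIL import Image
from diffusers import CycleDiffusionPipeline, DDIMScheduler

# CycleDiffusion expects a DDIM scheduler with the settings used in the commit.
scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
                          num_train_timesteps=1000, clip_sample=False, set_alpha_to_one=False)
pipe = CycleDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", scheduler=scheduler)
if torch.cuda.is_available():
    pipe = pipe.to("cuda")

torch.manual_seed(0)
img = Image.open("horse.png").convert("RGB").resize((512, 512))

# The source prompt describes the input image; the target prompt describes the desired output.
result = pipe(prompt="An oil painting of a horse",
              source_prompt="A photograph of a horse",
              init_image=img,
              num_inference_steps=100,
              eta=0.1,
              strength=0.7,
              guidance_scale=5,
              source_guidance_scale=1,
              ).images[0]
result.save("out.png")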