vilarin committed
Commit f14baf4
1 Parent(s): d0f928e

Update app.py

Files changed (1)
  1. app.py +43 -11
app.py CHANGED
@@ -1,17 +1,21 @@
 import gradio as gr
 import torch
-from diffusers import StableDiffusionXLPipeline, AutoencoderKL, KDPM2AncestralDiscreteScheduler
+from diffusers import StableDiffusionXLPipeline, AutoencoderKL, KDPM2AncestralDiscreteScheduler, UNet2DConditionModel
 from huggingface_hub import hf_hub_download
 import spaces
 from PIL import Image
 import requests
 from translatepy import Translator
+import numpy as np
+import random
+
 
 translator = Translator()
 
 # Constants
 model = "Corcelio/mobius"
 vae_model = "madebyollin/sdxl-vae-fp16-fix"
+MAX_SEED = np.iinfo(np.int32).max
 
 CSS = """
 .gradio-container {
@@ -37,7 +41,8 @@ vae = AutoencoderKL.from_pretrained(
 
 # Ensure model and scheduler are initialized in GPU-enabled function
 if torch.cuda.is_available():
-    pipe = StableDiffusionXLPipeline.from_pretrained(model, vae=vae, torch_dtype=torch.float16).to("cuda")
+    unet = UNet2DConditionModel.from_pretrained(model, subfolder="unet").to("cuda", torch.float16)
+    pipe = StableDiffusionXLPipeline.from_pretrained(model, vae=vae, unet=unet, torch_dtype=torch.float16).to("cuda")
 
     pipe.scheduler = KDPM2AncestralDiscreteScheduler.from_config(pipe.scheduler.config)
 
@@ -49,13 +54,21 @@ def generate_image(
     negative="low quality",
     width=1024,
     height=1024,
+    seed=-1,
+    nums=1,
     scale=1.5,
     steps=30,
     clip=3):
 
+    if seed == -1:
+        seed = random.randint(0, MAX_SEED)
+
+    generator = torch.Generator().manual_seed(seed)
+
     prompt = str(translator.translate(prompt, 'English'))
 
     print(f'prompt:{prompt}')
+
 
     image = pipe(
         prompt,
@@ -63,10 +76,12 @@ def generate_image(
         width=width,
         height=height,
         guidance_scale=scale,
+        generator = generator,
         num_inference_steps=steps,
+        num_images_per_prompt=nums,
         clip_skip=clip,
-    )
-    return image.images[0]
+        ).images
+    return image, seed
 
 
 examples = [
@@ -84,15 +99,15 @@ examples = [
 
 with gr.Blocks(css=CSS, js=JS, theme="soft") as demo:
     gr.HTML("<h1><center>Mobius💠</center></h1>")
-    gr.HTML("<p><center><a href='https://huggingface.co/Corcelio/mobius'>mobius</a> text-to-image generation</center><br><center>Multi-Languages. Adding default prompts to enhance.</center></p>")
+    gr.HTML("<p><center><a href='https://huggingface.co/Corcelio/mobius'>mobius</a> text-to-image generation</center><br><center>Adding default prompts to enhance.</center></p>")
    with gr.Group():
         with gr.Row():
-            prompt = gr.Textbox(label='Enter Your Prompt', value="best quality, HD, aesthetic", scale=6)
+            prompt = gr.Textbox(label='Enter Your Prompt(Multi-Languages)', value="best quality, HD, aesthetic", scale=6)
             submit = gr.Button(scale=1, variant='primary')
-    img = gr.Image(label='Mobius Generated Image')
+    img = gr.Gallery(label='Mobius Generated Image',columns = 1, preview=True)
     with gr.Accordion("Advanced Options", open=False):
         with gr.Row():
-            negative = gr.Textbox(label="Negative prompt", value="low quality")
+            negative = gr.Textbox(label="Negative prompt", value="low quality, ugly, blurry, poor face, bad anatomy")
         with gr.Row():
             width = gr.Slider(
                 label="Width",
@@ -108,6 +123,23 @@ with gr.Blocks(css=CSS, js=JS, theme="soft") as demo:
                 step=8,
                 value=1024,
             )
+        with gr.Row():
+            seed = gr.Slider(
+                label="Seed (-1 Get Random)",
+                minimum=-1,
+                maximum=MAX_SEED,
+                step=1,
+                value=-1,
+                scale=2,
+            )
+            nums = gr.Slider(
+                label="Image Numbers",
+                minimum=1,
+                maximum=4,
+                step=1,
+                value=1,
+                scale=1,
+            )
         with gr.Row():
             scale = gr.Slider(
                 label="Guidance",
@@ -129,7 +161,7 @@ with gr.Blocks(css=CSS, js=JS, theme="soft") as demo:
                 maximum=10,
                 step=1,
                 value=3,
-        )
+            )
     gr.Examples(
         examples=examples,
         inputs=prompt,
@@ -139,11 +171,11 @@ with gr.Blocks(css=CSS, js=JS, theme="soft") as demo:
     )
 
     prompt.submit(fn=generate_image,
-                  inputs=[prompt, negative, width, height, scale, steps, clip],
+                  inputs=[prompt, negative, width, height, seed, nums, scale, steps, clip],
                   outputs=img,
                   )
     submit.click(fn=generate_image,
-                 inputs=[prompt, negative, width, height, scale, steps, clip],
+                 inputs=[prompt, negative, width, height, seed, nums, scale, steps, clip],
                  outputs=img,
                  )
 
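
For readers skimming the diff, here is a minimal, self-contained sketch of the generation path this commit introduces: a random seed is drawn when the Seed slider is left at -1, a torch.Generator pins the run to that seed, and num_images_per_prompt returns a list of images for the new gr.Gallery output. The model IDs, scheduler, parameter defaults, and seed logic are taken from the diff above; the standalone generate() wrapper, the negative_prompt wiring, the example prompt, and the file-saving loop are assumptions added for illustration, and the Gradio UI, the translatepy step, and the `spaces` integration are omitted. A CUDA device is assumed.

```python
# Illustrative sketch of the seed / multi-image path added in this commit.
# Assumes a CUDA GPU; model IDs and defaults mirror app.py above.
import random

import numpy as np
import torch
from diffusers import (
    AutoencoderKL,
    KDPM2AncestralDiscreteScheduler,
    StableDiffusionXLPipeline,
    UNet2DConditionModel,
)

MODEL = "Corcelio/mobius"
VAE_MODEL = "madebyollin/sdxl-vae-fp16-fix"
MAX_SEED = np.iinfo(np.int32).max

# fp16 VAE and UNet, then the SDXL pipeline with the ancestral KDPM2 scheduler
vae = AutoencoderKL.from_pretrained(VAE_MODEL, torch_dtype=torch.float16)
unet = UNet2DConditionModel.from_pretrained(MODEL, subfolder="unet").to("cuda", torch.float16)
pipe = StableDiffusionXLPipeline.from_pretrained(
    MODEL, vae=vae, unet=unet, torch_dtype=torch.float16
).to("cuda")
pipe.scheduler = KDPM2AncestralDiscreteScheduler.from_config(pipe.scheduler.config)


def generate(prompt, negative="low quality", seed=-1, nums=1,
             width=1024, height=1024, scale=1.5, steps=30, clip=3):
    """Hypothetical wrapper mirroring generate_image() without the Gradio/translation layers."""
    # -1 means "pick a random seed", exactly what the new Seed slider does
    if seed == -1:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator().manual_seed(seed)

    images = pipe(
        prompt,
        negative_prompt=negative,       # assumed wiring; this line sits outside the diff context
        width=width,
        height=height,
        guidance_scale=scale,
        generator=generator,
        num_inference_steps=steps,
        num_images_per_prompt=nums,     # a list of PIL images feeds the new gr.Gallery
        clip_skip=clip,
    ).images
    return images, seed                 # returning the seed makes the batch reproducible


if __name__ == "__main__":
    imgs, used_seed = generate("a watercolor fox in the snow", nums=2)  # illustrative prompt
    for i, im in enumerate(imgs):
        im.save(f"mobius_{used_seed}_{i}.png")
```

Returning the seed alongside the image list is what makes a run repeatable: feeding the same value back through manual_seed() regenerates the same batch.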