None1145 committed
Commit
6eddcef
1 Parent(s): ab16c0f

Update app.py

Files changed (1)
  1. app.py +25 -12
app.py CHANGED
@@ -5,7 +5,9 @@ import time
 from optimum.intel import OVStableDiffusionXLPipeline
 import torch
 from diffusers import EulerDiscreteScheduler
-from diffusers import LCMScheduler
+from io import BytesIO
+from PIL import Image
+import base64
 
 model_id = "None1145/noobai-XL-Vpred-0.65s-openvino"
 
@@ -21,13 +23,9 @@ def reload_model(new_model_id):
     try:
         print(f"{model_id}...")
         pipe = OVStableDiffusionXLPipeline.from_pretrained(model_id, compile=False)
-        # pipe.to("gpu")
         if model_id == "None1145/noobai-XL-Vpred-0.65s-openvino":
             scheduler_args = {"prediction_type": "v_prediction", "rescale_betas_zero_snr": True}
             pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, **scheduler_args)
-            # pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config, **scheduler_args)
-            # pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")
-            # pipe.fuse_lora()
         pipe.reshape(batch_size=1, height=prev_height, width=prev_width, num_images_per_prompt=1)
         pipe.compile()
         print(f"{model_id}!!!")
@@ -35,7 +33,7 @@ def reload_model(new_model_id):
     except Exception as e:
         return f"Failed to load model: {str(e)}"
 reload_model(model_id)
-
+
 def infer(
     prompt,
     negative_prompt,
@@ -70,7 +68,11 @@ def infer(
         generator=generator,
     ).images[0]
 
-    return image, seed
+    # Save image as Base64
+    buffered = BytesIO()
+    image.save(buffered, format="PNG")
+    base64_image = base64.b64encode(buffered.getvalue()).decode("utf-8")
+    return image, seed, f"data:image/png;base64,{base64_image}"
 
 
 examples = ["murasame \(senren\), senren banka",]
@@ -99,6 +101,8 @@ with gr.Blocks() as img:
 
     result = gr.Image(label="Result", show_label=False)
 
+    base64_view = gr.HTML(label="Base64 Image Preview", interactive=True)
+
     with gr.Accordion("Advanced Settings", open=False):
         negative_prompt = gr.Text(
             label="Negative prompt",
@@ -144,7 +148,7 @@ with gr.Blocks() as img:
         )
 
     gr.Examples(examples=examples, inputs=[prompt])
-
+
     gr.Markdown("### Model Reload")
     with gr.Row():
         new_model_id = gr.Text(label="New Model ID", placeholder="Enter model ID", value=model_id)
@@ -157,8 +161,7 @@ with gr.Blocks() as img:
         outputs=reload_status,
     )
 
-    gr.on(
-        triggers=[run_button.click, prompt.submit],
+    run_button.click(
         fn=infer,
         inputs=[
             prompt,
@@ -170,8 +173,18 @@ with gr.Blocks() as img:
             guidance_scale,
             num_inference_steps,
         ],
-        outputs=[result, seed],
+        outputs=[result, seed, base64_view],
     )
 
+    # JavaScript logic to dynamically update HTML with Base64
+    js_script = """
+    <script>
+    function updateBase64(html_id, base64_src) {
+        document.getElementById(html_id).innerHTML = `<img src="${base64_src}" alt="Generated Image"/>`;
+    }
+    </script>
+    """
+    gr.HTML(js_script)
+
 if __name__ == "__main__":
-    img.queue(max_size=10, timeout=None).launch()
+    img.launch()
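Review note: the old `gr.on(triggers=[run_button.click, prompt.submit], ...)` also ran inference when Enter was pressed in the prompt box; the new `run_button.click(...)` responds to the button alone. If Enter-to-generate is still wanted, a sketch under the same Blocks context (the components between `negative_prompt` and `guidance_scale` are elided in the diff, so they are abbreviated here too):

    # Hypothetical re-attachment of the dropped trigger; reuse the exact
    # component list passed to run_button.click above.
    prompt.submit(
        fn=infer,
        inputs=[prompt, negative_prompt, guidance_scale, num_inference_steps],  # plus the elided components
        outputs=[result, seed, base64_view],
    )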
 
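Review note: browsers do not execute `<script>` elements inserted via `innerHTML`, and a `<script>` passed to `gr.HTML` is rendered as component content rather than run as part of the page, so `updateBase64` may never be defined. One hedged alternative, assuming Gradio 4.x where `gr.Blocks` accepts a `head` parameter, is to inject the helper into the page head at startup:

    # Sketch only: scripts placed in head= are part of the initial
    # document, so the function actually executes.
    js_script = """
    <script>
    function updateBase64(html_id, base64_src) {
        document.getElementById(html_id).innerHTML = `<img src="${base64_src}" alt="Generated Image"/>`;
    }
    </script>
    """
    with gr.Blocks(head=js_script) as img:
        ...  # same UI as above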