Update app.py

app.py (CHANGED)
```diff
@@ -4,6 +4,7 @@ import random
 from optimum.intel import OVStableDiffusionXLPipeline
 import torch
 from diffusers import EulerDiscreteScheduler
+from diffusers import LCMScheduler
 
 model_id = "None1145/noobai-XL-Vpred-0.65s-openvino"
 
```
```diff
@@ -19,9 +20,12 @@ def reload_model(new_model_id):
     try:
         print(f"{model_id}...")
         pipe = OVStableDiffusionXLPipeline.from_pretrained(model_id, compile=False)
-        if model_id == "None1145/noobai-XL-Vpred-0.65s-openvino"
+        if model_id == "None1145/noobai-XL-Vpred-0.65s-openvino":
             scheduler_args = {"prediction_type": "v_prediction", "rescale_betas_zero_snr": True}
-            pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, **scheduler_args)
+            # pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, **scheduler_args)
+            pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config, **scheduler_args)
+            pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")
+            pipe.fuse_lora()
         # pipe.to("gpu")
         pipe.reshape(batch_size=1, height=prev_height, width=prev_width, num_images_per_prompt=1)
         pipe.compile()
```
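Taken together, the hunk drops the v-prediction Euler scheduler in favor of LCMScheduler and fuses the LCM-LoRA adapter before the OpenVINO compile step. A minimal standalone sketch of the same setup, assuming fixed 1024x1024 shapes in place of the app's prev_height/prev_width:

```python
from optimum.intel import OVStableDiffusionXLPipeline
from diffusers import LCMScheduler

model_id = "None1145/noobai-XL-Vpred-0.65s-openvino"

# Load without compiling so the scheduler and LoRA can still be swapped.
pipe = OVStableDiffusionXLPipeline.from_pretrained(model_id, compile=False)

# The base model is v-prediction, so keep that in the LCM scheduler config.
scheduler_args = {"prediction_type": "v_prediction", "rescale_betas_zero_snr": True}
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config, **scheduler_args)

# Fuse the distilled LCM-LoRA weights into the model for few-step sampling.
pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")
pipe.fuse_lora()

# Fix static shapes (assumed 1024x1024 here), then compile for OpenVINO.
pipe.reshape(batch_size=1, height=1024, width=1024, num_images_per_prompt=1)
pipe.compile()
```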
```diff
@@ -82,6 +86,14 @@ with gr.Blocks() as img:
                 container=False,
             )
 
+            num_inference_steps = gr.Slider(
+                label="Number of inference steps",
+                minimum=1,
+                maximum=60,
+                step=1,
+                value=5,
+            )
+
             run_button = gr.Button("Run", scale=0, variant="primary")
 
         result = gr.Image(label="Result", show_label=False)
```
```diff
@@ -130,14 +142,6 @@ with gr.Blocks() as img:
                 value=5.0,
             )
 
-            num_inference_steps = gr.Slider(
-                label="Number of inference steps",
-                minimum=1,
-                maximum=60,
-                step=1,
-                value=28,
-            )
-
             gr.Examples(examples=examples, inputs=[prompt])
 
             gr.Markdown("### Model Reload")
```
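Net effect on the UI: the steps slider moves up next to the Run button and its default drops from 28 to 5. That matches the scheduler change, since LCM-LoRA is distilled for few-step sampling and typically needs only about 4 to 8 inference steps.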
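For completeness, a hedged sketch of a generation call once the pipeline above is compiled. The prompt, seed, and low guidance_scale are illustrative assumptions, not taken from this diff; common LCM-LoRA practice is a CFG around 1 to 2, while the app's own guidance slider still defaults to 5.0:

```python
import torch

# Continues the setup sketch above; `pipe` is the compiled OpenVINO pipeline.
generator = torch.Generator().manual_seed(42)  # hypothetical fixed seed
image = pipe(
    prompt="1girl, masterpiece, best quality",  # hypothetical prompt
    num_inference_steps=5,   # matches the new slider default
    guidance_scale=1.5,      # assumption based on LCM-LoRA practice
    generator=generator,
).images[0]
image.save("out.png")
```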