gokaygokay committed
Commit · ad1b9a7
1 Parent(s): 2991135

Update app.py
app.py CHANGED
@@ -135,14 +135,17 @@ lazy_realesrgan_x2 = LazyRealESRGAN(device, scale=2)
 lazy_realesrgan_x4 = LazyRealESRGAN(device, scale=4)
 
 @timer_func
-def resize_and_upscale(input_image, resolution):
-    scale = 2 if resolution <= 2048 else 4
+def resize_and_upscale(input_image, scale_factor):
     input_image = input_image.convert("RGB")
     W, H = input_image.size
-    k = float(resolution) / min(H, W)
+    target_size = int(min(H, W) * scale_factor)
+    scale = 2 if target_size <= 2048 else 4
+
+    k = float(target_size) / min(H, W)
     H = int(round(H * k / 64.0)) * 64
     W = int(round(W * k / 64.0)) * 64
     img = input_image.resize((W, H), resample=Image.LANCZOS)
+
     if scale == 2:
         img = lazy_realesrgan_x2.predict(img)
     else:
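The effect of the change is that both the target dimensions and the choice of Real-ESRGAN model are now derived from a relative scale_factor instead of an absolute resolution. A minimal sketch of the same sizing arithmetic, runnable on its own (the function name and sample dimensions are illustrative, not part of the commit):

```python
def preview_upscale_plan(w: int, h: int, scale_factor: float):
    # Scale the short side by scale_factor, as the new resize_and_upscale does.
    target_size = int(min(h, w) * scale_factor)
    # Small targets go through the x2 Real-ESRGAN model, larger ones through x4.
    esrgan_scale = 2 if target_size <= 2048 else 4
    # Snap both dimensions to multiples of 64 before the LANCZOS resize.
    k = float(target_size) / min(h, w)
    new_h = int(round(h * k / 64.0)) * 64
    new_w = int(round(w * k / 64.0)) * 64
    return new_w, new_h, esrgan_scale

print(preview_upscale_plan(1280, 720, 2))  # (2560, 1408, 2)
```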
@@ -166,18 +169,18 @@ def create_hdr_effect(original_image, hdr):
 lazy_pipe = LazyLoadPipeline()
 lazy_pipe.load()
 
-def prepare_image(input_image, resolution, hdr):
-    condition_image = resize_and_upscale(input_image, resolution)
+def prepare_image(input_image, scale_factor, hdr):
+    condition_image = resize_and_upscale(input_image, scale_factor)
     condition_image = create_hdr_effect(condition_image, hdr)
     return condition_image
 
 @spaces.GPU
 @timer_func
-def gradio_process_image(input_image, resolution, num_inference_steps, strength, hdr, guidance_scale):
+def gradio_process_image(input_image, scale_factor, num_inference_steps, strength, hdr, guidance_scale):
     print("Starting image processing...")
     torch.cuda.empty_cache()
 
-    condition_image = prepare_image(input_image, resolution, hdr)
+    condition_image = prepare_image(input_image, scale_factor, hdr)
 
     prompt = "masterpiece, best quality, highres"
     negative_prompt = "low quality, normal quality, ugly, blurry, blur, lowres, bad anatomy, bad hands, cropped, worst quality, verybadimagenegative_v1.3, JuggernautNegative-neg"
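The handler signatures keep the same positional shape; only the second argument changes meaning from an absolute resolution to a multiplier. A hedged sketch of calling the new preprocessing chain directly, outside Gradio (the import and file name are assumptions about the module layout, not from the commit):

```python
from PIL import Image
from app import prepare_image  # hypothetical import; assumes app.py is importable

img = Image.open("photo.jpg")  # placeholder file name
condition = prepare_image(img, scale_factor=2, hdr=0.3)
# condition is the upscaled, HDR-toned image that gradio_process_image
# passes on to the diffusion pipeline as its conditioning input.
```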
@@ -222,24 +225,24 @@ with gr.Blocks() as demo:
         with gr.Column():
             output_slider = ImageSlider(label="Before / After", type="numpy")
     with gr.Accordion("Advanced Options", open=False):
-        resolution = gr.Slider(...)
+        scale_factor = gr.Slider(minimum=1, maximum=4, value=2, step=0.1, label="Upscale Factor")
         num_inference_steps = gr.Slider(minimum=1, maximum=50, value=20, step=1, label="Number of Inference Steps")
         strength = gr.Slider(minimum=0, maximum=1, value=0.4, step=0.01, label="Strength")
         hdr = gr.Slider(minimum=0, maximum=1, value=0, step=0.1, label="HDR Effect")
         guidance_scale = gr.Slider(minimum=0, maximum=20, value=3, step=0.5, label="Guidance Scale")
 
     run_button.click(fn=gradio_process_image,
-                     inputs=[input_image, resolution, num_inference_steps, strength, hdr, guidance_scale],
-                     outputs=output_slider)
+                     inputs=[input_image, scale_factor, num_inference_steps, strength, hdr, guidance_scale],
+                     outputs=output_image)
 
     # Add examples with all required inputs
     gr.Examples(
         examples=[
-            ["image1.jpg", ...],
-            ["image2.png", ...],
-            ["image3.png", ...],
+            ["image1.jpg", 2, 20, 0.4, 0, 3],
+            ["image2.png", 16, 20, 0.4, 0, 3],
+            ["image3.png", 2, 20, 0.4, 0, 3],
         ],
-        inputs=[input_image, resolution, num_inference_steps, strength, hdr, guidance_scale],
+        inputs=[input_image, scale_factor, num_inference_steps, strength, hdr, guidance_scale],
         outputs=output_slider,
         fn=gradio_process_image,
         cache_examples=True,
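This wiring only works because the inputs= list, the handler signature, and each row in gr.Examples agree positionally. A self-contained sketch of that contract, with simplified stand-ins for the app's components and handler:

```python
import gradio as gr

def process(image, scale_factor, steps):
    # Stand-in for gradio_process_image: arguments arrive in the same
    # order as the components listed in inputs=[...].
    return f"scale_factor={scale_factor}, steps={steps}"

with gr.Blocks() as demo:
    image = gr.Image(type="pil")
    scale_factor = gr.Slider(minimum=1, maximum=4, value=2, step=0.1, label="Upscale Factor")
    steps = gr.Slider(minimum=1, maximum=50, value=20, step=1, label="Steps")
    out = gr.Textbox()
    run = gr.Button("Run")
    run.click(fn=process, inputs=[image, scale_factor, steps], outputs=out)
    # Each Examples row supplies one value per input component, in order.
    gr.Examples(
        examples=[["image1.jpg", 2, 20]],  # image path is a placeholder
        inputs=[image, scale_factor, steps],
        fn=process,
        outputs=out,
        cache_examples=False,  # True would run fn for every row at startup
    )

demo.launch()
```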