Spaces: Runtime error
Commit · 148bb9f
Parent(s): 5c80eae
Update app.py
app.py CHANGED
@@ -1,3 +1,4 @@
+
 import os
 # os.system('pip install pip==23.3.0')
 # os.system('pip uninstall spaces -y')
@@ -42,465 +43,14 @@ controlnet = ControlNetModel.from_pretrained(
     torch_dtype=torch.float16
 )

-
-
-
-
-
-
-
-    last_files = {} # Dictionary to store the last file for each path
-
-    for file_path in file_paths:
-        # Split the file path into directory and file components
-        directory, file_name = file_path.rsplit('/', 1)
-
-        # Update the last file for the current path
-        last_files[directory] = file_name
-
-    # Extract the last files from the dictionary
-    result = list(last_files.values())
-
-    return result
-
-def load_model(model_name):
-
-    if model_name == "":
-        gr.Warning("If you want to use a private model, you need to duplicate this space on your personal account.")
-        raise gr.Error("You forgot to define Model ID.")
-
-    # Get instance_prompt a.k.a trigger word
-    card = ModelCard.load(model_name)
-    repo_data = card.data.to_dict()
-    instance_prompt = repo_data.get("instance_prompt")
-
-    if instance_prompt is not None:
-        print(f"Trigger word: {instance_prompt}")
-    else:
-        instance_prompt = "no trigger word needed"
-        print(f"Trigger word: no trigger word needed")
-
-    # List all ".safetensors" files in repo
-    sfts_available_files = fs.glob(f"{model_name}/*safetensors")
-    sfts_available_files = get_files(sfts_available_files)
-
-    if sfts_available_files == []:
-        sfts_available_files = ["NO SAFETENSORS FILE"]
-
-    print(f"Safetensors available: {sfts_available_files}")
-
-    return model_name, "Model Ready", gr.update(choices=sfts_available_files, value=sfts_available_files[0], visible=True), gr.update(value=instance_prompt, visible=True)
-
-def custom_model_changed(model_name, previous_model):
-    if model_name == "" and previous_model == "" :
-        status_message = ""
-    elif model_name != previous_model:
-        status_message = "model changed, please reload before any new run"
-    else:
-        status_message = "model ready"
-    return status_message
-
-def resize_image(input_path, output_path, target_height):
-    # Open the input image
-    img = Image.open(input_path)
-
-    # Calculate the aspect ratio of the original image
-    original_width, original_height = img.size
-    original_aspect_ratio = original_width / original_height
-
-    # Calculate the new width while maintaining the aspect ratio and the target height
-    new_width = int(target_height * original_aspect_ratio)
-
-    # Resize the image while maintaining the aspect ratio and fixing the height
-    img = img.resize((new_width, target_height), Image.LANCZOS)
-
-    # Save the resized image
-    img.save(output_path)
-
-    return output_path
-
-def predict(image):
-    inputs = feature_extractor(images=image, return_tensors="pt")
-    with torch.no_grad():
-        outputs = modeld(**inputs)
-        predicted_depth = outputs.predicted_depth
-    # interpolate to original size
-    prediction = torch.nn.functional.interpolate(
-        predicted_depth.unsqueeze(1),
-        size=image.size[::-1],
-        mode="bicubic",
-        align_corners=False,
-    )
-    # visualize the prediction
-    output = prediction.squeeze().cpu().numpy()
-    formatted = (output * 255 / np.max(output)).astype("uint8")
-    depth_image = Image.fromarray(formatted)
-    depth_image.save(f"depth.png")
-    return depth_image
-
-
-@spaces.GPU
-def infer(use_custom_model, model_name, weight_name, custom_lora_weight, image_in, prompt, negative_prompt, preprocessor, controlnet_conditioning_scale, guidance_scale, inf_steps, seed, progress=gr.Progress(track_tqdm=True)):
-
-    pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
-        "stabilityai/stable-diffusion-xl-base-1.0",
-        controlnet=controlnet,
-        vae=vae,
-        torch_dtype=torch.float16,
-        variant="fp16",
-        use_safetensors=True
-    )
-
-    pipe.to(device)
-
-    prompt = prompt
-    negative_prompt = negative_prompt
-
-    if seed < 0 :
-        seed = random.randint(0, 423538377342)
-
-    generator = torch.Generator(device=device).manual_seed(seed)
-
-    if image_in == None:
-        raise gr.Error("You forgot to upload a source image.")
-
-    image_in = resize_image(image_in, "resized_input.jpg", 1024)
-
-    if preprocessor == "canny":
-
-        image = load_image(image_in)
-
-        image = np.array(image)
-        image = cv2.Canny(image, 100, 200)
-        image = image[:, :, None]
-        image = np.concatenate([image, image, image], axis=2)
-        image = Image.fromarray(image)
-
-    if use_custom_model:
-
-        if model_name == "":
-            raise gr.Error("you forgot to set a custom model name.")
-
-        custom_model = model_name
-
-        # This is where you load your trained weights
-        if weight_name == "NO SAFETENSORS FILE":
-            pipe.load_lora_weights(
-                custom_model,
-                low_cpu_mem_usage = True,
-                use_auth_token = True
-            )
-
-        else:
-            pipe.load_lora_weights(
-                custom_model,
-                weight_name = weight_name,
-                low_cpu_mem_usage = True,
-                use_auth_token = True
-            )
-
-        lora_scale=custom_lora_weight
-
-        images = pipe(
-            prompt,
-            negative_prompt=negative_prompt,
-            image=image,
-            controlnet_conditioning_scale=float(controlnet_conditioning_scale),
-            guidance_scale = float(guidance_scale),
-            num_inference_steps=inf_steps,
-            generator=generator,
-            cross_attention_kwargs={"scale": lora_scale}
-        ).images
-    else:
-        images = pipe(
-            prompt,
-            negative_prompt=negative_prompt,
-            image=image,
-            controlnet_conditioning_scale=float(controlnet_conditioning_scale),
-            guidance_scale = float(guidance_scale),
-            num_inference_steps=inf_steps,
-            generator=generator,
-        ).images
-
-    images[0].save(f"result.png")
-    print("HELP")
-    predict(images[0])
-    # create_visual_demo();
-    return f"result.png", seed
-
-
-css="""
-.{
-    height: 20%;
-}
-#col-container{
-    margin: 0 auto;
-    max-width: 720px;
-    text-align: left;
-}
-div#warning-duplicate {
-    background-color: #ebf5ff;
-    padding: 0 10px 5px;
-    margin: 20px 0;
-}
-div#warning-duplicate > .gr-prose > h2, div#warning-duplicate > .gr-prose > p {
-    color: #0f4592!important;
-}
-div#warning-duplicate strong {
-    color: #0f4592;
-}
-p.actions {
-    display: flex;
-    align-items: center;
-    margin: 20px 0;
-}
-div#warning-duplicate .actions a {
-    display: inline-block;
-    margin-right: 10px;
-}
-button#load_model_btn{
-    height: 45px !important;
-    border: none;
-    background-color: #99F6E4; !important;
-    border-radius: 10px !important;
-    padding: 10px !important;
-    cursor: pointer;
-    display: block;
-    position: relative;
-    top: -20px;
-    z-index: 100;
-}
-#status_info{
-    font-size: 0.9em;
-}
-"""
-
-theme = gr.themes.Soft(
-    primary_hue="teal",
-    secondary_hue="gray",
-).set(
-    body_text_color_dark='*neutral_800',
-    background_fill_primary_dark='*neutral_50',
-    background_fill_secondary_dark='*neutral_50',
-    border_color_accent_dark='*primary_300',
-    border_color_primary_dark='*neutral_200',
-    color_accent_soft_dark='*neutral_50',
-    link_text_color_dark='*secondary_600',
-    link_text_color_active_dark='*secondary_600',
-    link_text_color_hover_dark='*secondary_700',
-    link_text_color_visited_dark='*secondary_500',
-    code_background_fill_dark='*neutral_100',
-    shadow_spread_dark='6px',
-    block_background_fill_dark='white',
-    block_label_background_fill_dark='*primary_100',
-    block_label_text_color_dark='*primary_500',
-    block_title_text_color_dark='*primary_500',
-    checkbox_background_color_dark='*background_fill_primary',
-    checkbox_background_color_selected_dark='*primary_600',
-    checkbox_border_color_dark='*neutral_100',
-    checkbox_border_color_focus_dark='*primary_500',
-    checkbox_border_color_hover_dark='*neutral_300',
-    checkbox_border_color_selected_dark='*primary_600',
-    checkbox_label_background_fill_selected_dark='*primary_500',
-    checkbox_label_text_color_selected_dark='white',
-    error_background_fill_dark='#fef2f2',
-    error_border_color_dark='#b91c1c',
-    error_text_color_dark='#b91c1c',
-    error_icon_color_dark='#b91c1c',
-    input_background_fill_dark='white',
-    input_background_fill_focus_dark='*secondary_500',
-    input_border_color_dark='*neutral_50',
-    input_border_color_focus_dark='*secondary_300',
-    input_placeholder_color_dark='*neutral_400',
-    slider_color_dark='*primary_500',
-    stat_background_fill_dark='*primary_300',
-    table_border_color_dark='*neutral_300',
-    table_even_background_fill_dark='white',
-    table_odd_background_fill_dark='*neutral_50',
-    button_primary_background_fill_dark='*primary_500',
-    button_primary_background_fill_hover_dark='*primary_400',
-    button_primary_border_color_dark='*primary_00',
-    button_secondary_background_fill_dark='whiite',
-    button_secondary_background_fill_hover_dark='*neutral_100',
-    button_secondary_border_color_dark='*neutral_200',
-    button_secondary_text_color_dark='*neutral_800'
-)
-
-#examples = [["examples/" + img] for img in os.listdir("examples/")]
-im = gr.Image(visible=False)
-
-with gr.Blocks(theme=theme, css=css) as demo:
-    with gr.Row():
-        with gr.Column(elem_id="col-container"):
-
-            gr.HTML("""
-            <h2 style="text-align: left;">Choose a Style</h2>
-            <p style="text-align: left;">Our Pretrained Models can be found on Huggingface</p>
-            """)
-
-            use_custom_model = gr.Checkbox(label="Use a custom pre-trained LoRa model ? (optional)", visible = False, value=False, info="To use a private model, you'll need to duplicate the space with your own access token.")
-
-            with gr.Blocks(visible=False) as custom_model_box:
-                with gr.Row():
-                    with gr.Column():
-                        if not is_shared_ui:
-                            your_username = api.whoami()["name"]
-                            my_models = api.list_models(author=your_username, filter=["diffusers", "stable-diffusion-xl", 'lora'])
-                            model_names = [item.modelId for item in my_models]
-
-                        if not is_shared_ui:
-                            custom_model = gr.Dropdown(
-                                label = "Your custom model ID",
-                                info="You can pick one of your private models",
-                                choices = model_names,
-                                allow_custom_value = True
-                                #placeholder = "username/model_id"
-                            )
-                        else:
-                            custom_model = gr.Textbox(
-                                label="Your custom model ID",
-                                placeholder="your_username/your_trained_model_name",
-                                info="Make sure your model is set to PUBLIC"
-                            )
-
-                        weight_name = gr.Dropdown(
-                            label="Safetensors file",
-                            #value="pytorch_lora_weights.safetensors",
-                            info="specify which one if model has several .safetensors files",
-                            allow_custom_value=True,
-                            visible = False
-                        )
-                    with gr.Column():
-                        with gr.Group():
-                            # load_model_btn = gr.Button("Load my model", elem_id="load_model_btn")
-                            previous_model = gr.Textbox(
-                                visible = False
-                            )
-
-                            model_status = gr.Textbox(
-                                label = "model status",
-                                show_label = False,
-                                elem_id = "status_info"
-                            )
-                        trigger_word = gr.Textbox(label="Trigger word", interactive=False, visible=False)
-
-            load_model_btn = gr.Button("Load my model", elem_id="load_model_btn")
-            image_in = gr.Image(sources="upload", type="filepath", value=( "shop1.jpg"))
-            # gr.Examples(
-            #     examples=[[os.path.join(os.path.dirname(__file__), "shop2.jpg")],[os.path.join(os.path.dirname(__file__), "shop3.jpg")]], inputs=im)
-
-
-        with gr.Column(elem_id="col-container"):
-            gr.HTML("""
-            <h2 style="text-align: left;">Input a Prompt!</h2>
-            <p style="text-align: left;">Negative prompts and other settings can be found in advanced options</p>
-            """)
-
-            with gr.Row():
-
-                with gr.Column():
-                    # with gr.Group():
-                    prompt = gr.Textbox(label="Prompt", placeholder="Add your trigger word here + prompt")
-
-                    with gr.Accordion(label="Advanced Options", open=False, visible=False):
-                        # with gr.Group():
-                        negative_prompt = gr.Textbox(label="Negative prompt", value="extra digit, fewer digits, cropped, worst quality, low quality, glitch, deformed, mutated, ugly, disfigured")
-                        guidance_scale = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=8.8)
-                        inf_steps = gr.Slider(label="Inference Steps", minimum="25", maximum="50", step=1, value=25)
-                        custom_lora_weight = gr.Slider(label="Custom model weights", minimum=0.1, maximum=0.9, step=0.1, value=0.7)
-                        preprocessor = gr.Dropdown(label="Preprocessor", choices=["canny"], value="canny", interactive=False, info="For the moment, only canny is available")
-                        controlnet_conditioning_scale = gr.Slider(label="Controlnet conditioning Scale", minimum=0.1, maximum=0.9, step=0.01, value=0.3)
-                        seed = gr.Slider(
-                            label="Seed",
-                            info = "-1 denotes a random seed",
-                            minimum=-1,
-                            maximum=423538377342,
-                            step=1,
-                            value=-1
-                        )
-                        last_used_seed = gr.Number(
-                            label = "Last used seed",
-                            info = "the seed used in the last generation",
-                        )
-
-                    submit_btn = gr.Button("Submit")
-
-                    # label = gr.Label(label="Loader")
-                    # submit_btn.click(infer, outputs=[label])
-
-                    result = gr.Image(label="Result", visible=False)
-
-                    use_custom_model.change(
-                        fn = check_use_custom_or_no,
-                        inputs =[use_custom_model],
-                        outputs = [custom_model_box],
-                        queue = False
-                    )
-                    custom_model.blur(
-                        fn=custom_model_changed,
-                        inputs = [custom_model, previous_model],
-                        outputs = [model_status],
-                        queue = False
-                    )
-                    load_model_btn.click(
-                        fn = load_model,
-                        inputs=[custom_model],
-                        outputs = [previous_model, model_status, weight_name, trigger_word],
-                        queue = False
-                    )
-                    submit_btn.click(
-                        fn = infer,
-                        inputs = [use_custom_model,custom_model, weight_name, custom_lora_weight, image_in, prompt, negative_prompt, preprocessor, controlnet_conditioning_scale, guidance_scale, inf_steps, seed],
-                        outputs = [result, last_used_seed]
-                    )
-
-
-    # return demo
-
-
-demo.queue().launch(share=True)import os
-# os.system('pip install pip==23.3.0')
-# os.system('pip uninstall spaces -y')
-# os.system('pip install spaces==0.18.0')
-# os.system('pip install gradio==4.0.2')
-
-
-import gradio as gr
-from huggingface_hub import login, HfFileSystem, HfApi, ModelCard
-import os
-import spaces
-import random
-import torch
-
-from transformers import GLPNFeatureExtractor, GLPNForDepthEstimation
-from transformers import AutoFeatureExtractor, AutoModelForDepthEstimation
-feature_extractor = AutoFeatureExtractor.from_pretrained("Intel/dpt-large")
-modeld = AutoModelForDepthEstimation.from_pretrained("Intel/dpt-large")
-
-# from depthGAN.app import create_visual_demo
-
-is_shared_ui = False
-hf_token = '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'
-login(token=hf_token)
-
-fs = HfFileSystem(token=hf_token)
-api = HfApi()
-
-device="cuda" if torch.cuda.is_available() else "cpu"
-
-from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline, AutoencoderKL
-from diffusers.utils import load_image
-from PIL import Image
-import torch
-import numpy as np
-import cv2
-
-vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
-
-controlnet = ControlNetModel.from_pretrained(
-    "diffusers/controlnet-canny-sdxl-1.0",
-    torch_dtype=torch.float16
-)
+# for file naming
+counter_file_path = "counter.txt"
+if os.path.exists(counter_file_path):
+    with open(counter_file_path, "r") as file:
+        counter = int(file.read())
+else:
+    counter = 0

 def check_use_custom_or_no(value):
     if value is True:
@@ -579,7 +129,7 @@ def resize_image(input_path, output_path, target_height):

     return output_path

-def predict(image):
+def predict(image, counter):
     inputs = feature_extractor(images=image, return_tensors="pt")
     with torch.no_grad():
         outputs = modeld(**inputs)
@@ -595,7 +145,7 @@ def predict(image):
     output = prediction.squeeze().cpu().numpy()
     formatted = (output * 255 / np.max(output)).astype("uint8")
     depth_image = Image.fromarray(formatted)
-    depth_image.save(f"depth.png")
+    depth_image.save(f"viteGradio/images/depth{counter}.png")
     return depth_image


@@ -682,11 +232,15 @@ def infer(use_custom_model, model_name, weight_name, custom_lora_weight, image_i
             generator=generator,
         ).images

-
+    global counter
+    images[0].save(f"viteGradio/images/result{counter}.png")
     print("HELP")
-    predict(images[0])
+    predict(images[0], counter)
+    counter+=1
+    with open(counter_file_path, "w") as file:
+        file.write(str(counter))
     # create_visual_demo();
-    return f"result.png", seed
+    return f"viteGradio/images/result{counter-1}.png", seed


 css="""
@@ -888,7 +442,7 @@ with gr.Blocks(theme=theme, css=css) as demo:
                     # label = gr.Label(label="Loader")
                     # submit_btn.click(infer, outputs=[label])

-                    result = gr.Image(label="Result", visible=False)
+                    result = gr.Image(label="Result", visible=True)

                    use_custom_model.change(
                        fn = check_use_custom_or_no,