Spaces:
Running
Running
salomonsky
committed on
Commit
•
a521e90
1
Parent(s):
8fb8596
Update app.py
Browse files
app.py
CHANGED
@@ -37,37 +37,43 @@ def get_upscale_finegrain(prompt, img_path, upscale_factor):
|
|
37 |
return None
|
38 |
|
39 |
async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora):
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
|
|
44 |
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
|
|
|
|
59 |
else:
|
60 |
return [image_path, image_path]
|
61 |
-
|
62 |
-
return [
|
|
|
|
|
|
|
63 |
|
64 |
async def improve_prompt(prompt):
|
65 |
try:
|
66 |
-
instruction = "
|
67 |
-
formatted_prompt = f"{
|
68 |
-
response = llm_client.text_generation(formatted_prompt, max_new_tokens=300)
|
69 |
improved_text = response['generated_text'].strip() if 'generated_text' in response else response.strip()
|
70 |
-
|
71 |
return improved_text
|
72 |
except Exception as e:
|
73 |
return f"Error mejorando el prompt: {e}"
|
@@ -90,20 +96,22 @@ with gr.Blocks(css=css, theme="Nymbo/Nymbo_Theme") as demo:
|
|
90 |
process_lora = gr.Checkbox(label="Procesar LORA")
|
91 |
process_upscale = gr.Checkbox(label="Procesar Escalador")
|
92 |
|
93 |
-
upscale_factor = gr.Radio(label="Factor de Escala", choices=[2, 4, 8], value=2)
|
94 |
-
|
95 |
improved_prompt = gr.Textbox(label="Prompt Mejorado", interactive=False)
|
96 |
-
|
97 |
improve_btn = gr.Button("Mejora mi prompt")
|
98 |
-
improve_btn.click(fn=improve_prompt, inputs=[prompt], outputs=improved_prompt)
|
|
|
|
|
99 |
|
100 |
with gr.Accordion(label="Opciones Avanzadas", open=False):
|
101 |
width = gr.Slider(label="Ancho", minimum=512, maximum=1280, step=8, value=1280)
|
102 |
height = gr.Slider(label="Alto", minimum=512, maximum=1280, step=8, value=768)
|
|
|
103 |
scales = gr.Slider(label="Escalado", minimum=1, maximum=20, step=1, value=10)
|
104 |
steps = gr.Slider(label="Pasos", minimum=1, maximum=100, step=1, value=20)
|
105 |
seed = gr.Number(label="Semilla", value=-1)
|
|
|
|
|
106 |
|
107 |
btn = gr.Button("Generar")
|
108 |
-
btn.click(fn=gen, inputs=[prompt, basemodel_choice, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model_choice, process_lora], outputs=output_res)
|
109 |
demo.launch()
|
|
|
37 |
return None
|
38 |
|
39 |
async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora):
    """Generate an image from *prompt*, optionally applying a LoRA and an upscaler.

    Returns a two-element list for the Gradio gallery:
    ``[base_image_path, upscaled_image_path]`` on success, or
    ``[error_message, None]`` on failure.
    """
    try:
        # Wrap the base model with the LoRA only when the user asked for it.
        model = enable_lora(lora_model, basemodel) if process_lora else basemodel

        # Enrich the user's prompt with the LLM-improved version.
        improved_prompt = await improve_prompt(prompt)
        combined_prompt = f"{prompt} {improved_prompt}"

        image, seed = await generate_image(combined_prompt, model, "", width, height, scales, steps, seed)

        # generate_image signals failure by returning an error string
        # instead of an image object — TODO confirm against its definition.
        if isinstance(image, str) and image.startswith("Error"):
            return [image, None]

        image_path = "temp_image.jpg"
        image.save(image_path, format="JPEG")

        if process_upscale:
            upscale_image_path = get_upscale_finegrain(combined_prompt, image_path, upscale_factor)
            if upscale_image_path is not None:
                upscale_image = Image.open(upscale_image_path)
                upscale_image.save("upscale_image.jpg", format="JPEG")
                return [image_path, "upscale_image.jpg"]

        # No upscaling requested, or the upscaler failed: show the base image twice.
        return [image_path, image_path]
    except Exception as e:
        # Surface any failure to the UI instead of crashing the event handler.
        return [f"Error: {e}", None]


def error_handler(err):
    """Format an error value as the UI-facing message string."""
    return f"Error: {err}"
|
69 |
|
70 |
async def improve_prompt(prompt):
    """Ask the LLM client to expand *prompt* into a detailed English image prompt.

    Returns the improved prompt text, or a Spanish error message string
    ("Error mejorando el prompt: ...") on any failure.
    """
    try:
        instruction = "With this idea, describe in English a detailed img2vid prompt in a single paragraph of up to 300 characters, developing atmosphere, characters, lighting, and cameras."
        formatted_prompt = f"{prompt}: {instruction}"
        # NOTE(review): removed the previous `language="english"` kwarg —
        # InferenceClient.text_generation() accepts no such parameter, so the
        # call always raised TypeError and fell through to the except branch.
        response = llm_client.text_generation(formatted_prompt, max_new_tokens=300)
        # text_generation may return either a dict-like payload or a bare string
        # depending on configuration — handle both.
        improved_text = response['generated_text'].strip() if 'generated_text' in response else response.strip()
        return improved_text
    except Exception as e:
        return f"Error mejorando el prompt: {e}"
|
|
|
96 |
process_lora = gr.Checkbox(label="Procesar LORA")
|
97 |
process_upscale = gr.Checkbox(label="Procesar Escalador")
|
98 |
|
|
|
|
|
99 |
improved_prompt = gr.Textbox(label="Prompt Mejorado", interactive=False)
|
|
|
100 |
improve_btn = gr.Button("Mejora mi prompt")
|
101 |
+
improve_btn.click(fn=lambda prompt: improve_prompt(prompt), inputs=[prompt], outputs=[improved_prompt, prompt])
|
102 |
+
reset_btn = gr.Button("Reset")
|
103 |
+
reset_btn.click(fn=lambda: [prompt.update(""), improved_prompt.update("")], inputs=None, outputs=[prompt, improved_prompt])
|
104 |
|
105 |
with gr.Accordion(label="Opciones Avanzadas", open=False):
|
106 |
width = gr.Slider(label="Ancho", minimum=512, maximum=1280, step=8, value=1280)
|
107 |
height = gr.Slider(label="Alto", minimum=512, maximum=1280, step=8, value=768)
|
108 |
+
upscale_factor = gr.Radio(label="Factor de Escala", choices=[2, 4, 8], value=2)
|
109 |
scales = gr.Slider(label="Escalado", minimum=1, maximum=20, step=1, value=10)
|
110 |
steps = gr.Slider(label="Pasos", minimum=1, maximum=100, step=1, value=20)
|
111 |
seed = gr.Number(label="Semilla", value=-1)
|
112 |
+
reset_advanced = gr.Button("Reset")
|
113 |
+
reset_advanced.click(fn=lambda: [width.update(1280), height.update(768), scales.update(10), steps.update(20), seed.update(-1)], inputs=None, outputs=[width, height, scales, steps, seed])
|
114 |
|
115 |
btn = gr.Button("Generar")
|
116 |
+
btn.click(fn=gen, inputs=[prompt, basemodel_choice, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model_choice, process_lora], outputs=[output_res], error_handler=error_handler)
|
117 |
demo.launch()
|