salomonsky committed
Commit: 5d264e2 · Parent: 67e8080
Update app.py
app.py
CHANGED
@@ -38,7 +38,11 @@ def get_upscale_finegrain(prompt, img_path, upscale_factor):
 
 async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora):
     model = enable_lora(lora_model, basemodel) if process_lora else basemodel
-
+
+    improved_prompt = await improve_prompt(prompt)
+    combined_prompt = f"{prompt} {improved_prompt}"
+
+    image, seed = await generate_image(combined_prompt, model, "", width, height, scales, steps, seed)
 
     if isinstance(image, str) and image.startswith("Error"):
         return [image, None]
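The reworked gen first awaits the prompt-improvement helper, concatenates the result with the user's original text, and only then calls the image generator. Below is a minimal, self-contained sketch of that ordering; the stub bodies of improve_prompt and generate_image are placeholders for illustration, not the Space's real helpers.

import asyncio

async def improve_prompt(prompt: str) -> str:
    # Placeholder: the real app asks an LLM for an enriched prompt.
    return f"cinematic lighting, detailed, high quality ({prompt})"

async def generate_image(prompt: str) -> str:
    # Placeholder: the real app calls the FLUX model and returns an image.
    return f"<image for: {prompt}>"

async def gen(prompt: str) -> str:
    # Same ordering as the commit: improve first, then combine, then generate.
    improved_prompt = await improve_prompt(prompt)
    combined_prompt = f"{prompt} {improved_prompt}"
    return await generate_image(combined_prompt)

if __name__ == "__main__":
    print(asyncio.run(gen("a lighthouse at dusk")))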
@@ -47,7 +51,7 @@ async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora):
     image.save(image_path, format="JPEG")
 
     if process_upscale:
-        upscale_image_path = get_upscale_finegrain(
+        upscale_image_path = get_upscale_finegrain(combined_prompt, image_path, upscale_factor)
         if upscale_image_path is not None:
             upscale_image = Image.open(upscale_image_path)
             upscale_image.save("upscale_image.jpg", format="JPEG")
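After this change the upscaler receives the combined prompt together with the saved image path and the selected factor, and its result is only used when the helper returns a path. A rough sketch of that guard follows; the dummy get_upscale_finegrain here stands in for the real Finegrain call and its return convention (a file path or None) is an assumption based on the surrounding code.

from typing import Optional
from PIL import Image

def get_upscale_finegrain(prompt: str, img_path: str, upscale_factor: int) -> Optional[str]:
    # Dummy stand-in: the real helper runs the upscaler and returns the
    # path of the upscaled file, or None when upscaling fails.
    return None

def maybe_upscale(combined_prompt: str, image_path: str, upscale_factor: int) -> str:
    upscale_image_path = get_upscale_finegrain(combined_prompt, image_path, upscale_factor)
    if upscale_image_path is not None:
        # Persist the upscaled result next to the original, as the app does.
        Image.open(upscale_image_path).save("upscale_image.jpg", format="JPEG")
        return "upscale_image.jpg"
    return image_path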
@@ -57,9 +61,9 @@ async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora):
     else:
         return [image_path, image_path]
 
-def improve_prompt(prompt):
+async def improve_prompt(prompt):
     try:
-        instruction = "Mejora mi prompt
+        instruction = "Mejora mi prompt para texto a imagen en inglés con estilo, cinematografía, cámaras, atmósfera e iluminación para la mejor calidad, de máximo 200 palabras."
         formatted_prompt = f"{instruction}: {prompt}"
         response = llm_client.text_generation(formatted_prompt, max_new_tokens=200)
         improved_text = response['generated_text'].strip() if 'generated_text' in response else response.strip()
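improve_prompt is now a coroutine so gen can await it directly. A sketch of the same pattern is below, assuming llm_client is a huggingface_hub.InferenceClient (the commit does not show how it is constructed, and the model name is purely illustrative). InferenceClient.text_generation normally returns a plain string, so the sketch simply strips it and falls back to the original prompt on error.

from huggingface_hub import InferenceClient

# Assumption: the Space builds a client like this above the diffed region.
llm_client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")  # hypothetical model

async def improve_prompt(prompt: str) -> str:
    instruction = "Mejora mi prompt para texto a imagen en inglés con estilo, cinematografía, cámaras, atmósfera e iluminación para la mejor calidad, de máximo 200 palabras."
    try:
        response = llm_client.text_generation(f"{instruction}: {prompt}", max_new_tokens=200)
        # text_generation returns a string unless details=True is requested.
        return response.strip()
    except Exception:
        # Fall back to the user's prompt rather than failing the generation.
        return prompt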
@@ -79,13 +83,18 @@ with gr.Blocks(css=css, theme="Nymbo/Nymbo_Theme") as demo:
         output_res = ImageSlider(label="Flux / Upscaled")
         with gr.Column(scale=2):
             prompt = gr.Textbox(label="Descripción de imágen")
-            improved_prompt = gr.Textbox(label="Mejorada mi idea", interactive=False)
-            improve_btn = gr.Button("Mejora mi prompt")
             basemodel_choice = gr.Dropdown(label="Modelo", choices=["black-forest-labs/FLUX.1-schnell", "black-forest-labs/FLUX.1-DEV"], value="black-forest-labs/FLUX.1-schnell")
             lora_model_choice = gr.Dropdown(label="LORA Realismo", choices=["Shakker-Labs/FLUX.1-dev-LoRA-add-details", "XLabs-AI/flux-RealismLora"], value="XLabs-AI/flux-RealismLora")
+
             with gr.Row():
-
-
+                process_lora = gr.Checkbox(label="Procesar LORA")
+                process_upscale = gr.Checkbox(label="Procesar Escalador")
+
+            upscale_factor = gr.Radio(label="Factor de Escala", choices=[2, 4, 8], value=2)
+
+            improved_prompt = gr.Textbox(label="Prompt Mejorado", interactive=False)
+
+            improve_btn = gr.Button("Mejora mi prompt")
             improve_btn.click(fn=improve_prompt, inputs=[prompt], outputs=improved_prompt)
 
     with gr.Accordion(label="Opciones Avanzadas", open=False):
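The layout now groups the two processing toggles in a gr.Row, moves the upscale factor next to them, and keeps the improved prompt as a read-only textbox driven by its own button. A stripped-down sketch of that wiring is below; gr.Button.click accepts async handlers, so a coroutine like the one above can be bound directly, and the echo handler here is only an illustration.

import gradio as gr

async def improve_prompt(prompt: str) -> str:
    # Illustration only: echo back a tagged version of the prompt.
    return f"{prompt}, cinematic, detailed"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Descripción de imágen")
    with gr.Row():
        process_lora = gr.Checkbox(label="Procesar LORA")
        process_upscale = gr.Checkbox(label="Procesar Escalador")
    upscale_factor = gr.Radio(label="Factor de Escala", choices=[2, 4, 8], value=2)
    improved_prompt = gr.Textbox(label="Prompt Mejorado", interactive=False)
    improve_btn = gr.Button("Mejora mi prompt")
    improve_btn.click(fn=improve_prompt, inputs=[prompt], outputs=improved_prompt)

if __name__ == "__main__":
    demo.launch()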
@@ -93,9 +102,8 @@ with gr.Blocks(css=css, theme="Nymbo/Nymbo_Theme") as demo:
         height = gr.Slider(label="Alto", minimum=512, maximum=1280, step=8, value=768)
         scales = gr.Slider(label="Escalado", minimum=1, maximum=20, step=1, value=10)
         steps = gr.Slider(label="Pasos", minimum=1, maximum=100, step=1, value=20)
-        upscale_factor = gr.Radio(label="Factor de Escala", choices=[2, 4, 8], value=2)
         seed = gr.Number(label="Semilla", value=-1)
 
     btn = gr.Button("Generar")
-    btn.click(fn=gen, inputs=[
-    demo.launch()
+    btn.click(fn=gen, inputs=[prompt, basemodel_choice, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model_choice, process_lora], outputs=output_res)
+demo.launch()
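The generate button now passes every control to gen, and the order of the inputs list has to match gen's positional parameters exactly, since Gradio maps them by position. A reduced sketch of that contract follows; the four-argument signature, the "Ancho" label and the textbox output are simplifications for illustration, not the Space's real components.

import gradio as gr

async def gen(prompt, width, height, seed):
    # Reduced signature for illustration; the real gen takes eleven arguments
    # in the same order as the inputs list passed to btn.click below.
    return f"{prompt} @ {int(width)}x{int(height)} (seed={seed})"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Descripción de imágen")
    width = gr.Slider(label="Ancho", minimum=512, maximum=1280, step=8, value=768)
    height = gr.Slider(label="Alto", minimum=512, maximum=1280, step=8, value=768)
    seed = gr.Number(label="Semilla", value=-1)
    output = gr.Textbox(label="Resultado")
    btn = gr.Button("Generar")
    # Positional mapping: inputs[i] feeds the i-th parameter of gen.
    btn.click(fn=gen, inputs=[prompt, width, height, seed], outputs=output)

demo.launch()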