salomonsky committed
Commit ffe0681 • 1 Parent(s): 2713519
Update app.py
app.py CHANGED
@@ -3,7 +3,6 @@ import gradio as gr
 import numpy as np
 import random
 from huggingface_hub import AsyncInferenceClient, InferenceClient
-import asyncio
 from PIL import Image
 from gradio_client import Client, handle_file
 from gradio_imageslider import ImageSlider
@@ -41,8 +40,8 @@ async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_fac
     model = enable_lora(lora_model, basemodel) if process_lora else basemodel
     image, seed = await generate_image(prompt, model, "", width, height, scales, steps, seed)
 
-    if image
-        return [
+    if isinstance(image, str) and image.startswith("Error"):
+        return [image, None]
 
     image_path = "temp_image.jpg"
     image.save(image_path, format="JPEG")
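Note: the guard in the hunk above relies on generate_image returning either a PIL image or an error string; that contract is not shown in this diff, so the sketch below is only a minimal illustration of the pattern, with a hypothetical stub in place of the real helper.

```python
from PIL import Image

def generate_image_stub(prompt: str):
    # Hypothetical stand-in: assume the real helper returns a PIL image on
    # success or an error string ("Error: ...") on failure.
    if not prompt:
        return "Error: empty prompt"
    return Image.new("RGB", (64, 64), color="gray")

def gen_sketch(prompt: str):
    image = generate_image_stub(prompt)
    # Bail out early when the helper reported an error string instead of an image.
    if isinstance(image, str) and image.startswith("Error"):
        return [image, None]
    image_path = "temp_image.jpg"
    image.save(image_path, format="JPEG")
    # Placeholder return; the real gen() continues with optional upscaling.
    return [image_path, image_path]
```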
@@ -60,10 +59,10 @@ async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_fac
 
 def improve_prompt(prompt):
     try:
-        instruction = "Mejora mi prompt para texto a imagen en inglés con estilo, cinematografía, cámaras, atmósfera e iluminación para la mejor calidad, de máximo 200 palabras."
+        instruction = "Mejora mi prompt y desarrolla mi idea para texto a imagen en inglés con estilo para el modelo FLUX, cinematografía, cámaras, atmósfera e iluminación para la mejor calidad, de máximo 200 palabras."
         formatted_prompt = f"{instruction}: {prompt}"
         response = llm_client.text_generation(formatted_prompt, max_new_tokens=200)
-        improved_text = response.strip()
+        improved_text = response['generated_text'].strip() if 'generated_text' in response else response.strip()
 
         return improved_text
     except Exception as e:
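Note: in huggingface_hub, InferenceClient.text_generation returns a plain string by default and only returns a detailed output object (exposing generated_text) when called with details=True. The sketch below illustrates both call styles; the model id and function names are placeholders, not taken from this Space.

```python
from huggingface_hub import InferenceClient

# Placeholder model id for illustration; the Space configures its own llm_client elsewhere.
llm_client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.2")

def improve_prompt_sketch(prompt: str) -> str:
    # Default call (details=False): the client returns a plain str.
    text = llm_client.text_generation(f"Improve this prompt: {prompt}", max_new_tokens=200)
    return text.strip()

def improve_prompt_detailed(prompt: str) -> str:
    # With details=True the client returns an object exposing .generated_text.
    out = llm_client.text_generation(prompt, max_new_tokens=200, details=True)
    return out.generated_text.strip()
```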
@@ -80,15 +79,13 @@ with gr.Blocks(css=css, theme="Nymbo/Nymbo_Theme") as demo:
             output_res = ImageSlider(label="Flux / Upscaled")
         with gr.Column(scale=2):
             prompt = gr.Textbox(label="Descripción de imágen")
+            improved_prompt = gr.Textbox(label="Mejorada mi idea", interactive=False)
+            improve_btn = gr.Button("Mejora mi prompt")
             basemodel_choice = gr.Dropdown(label="Modelo", choices=["black-forest-labs/FLUX.1-schnell", "black-forest-labs/FLUX.1-DEV"], value="black-forest-labs/FLUX.1-schnell")
             lora_model_choice = gr.Dropdown(label="LORA Realismo", choices=["Shakker-Labs/FLUX.1-dev-LoRA-add-details", "XLabs-AI/flux-RealismLora"], value="XLabs-AI/flux-RealismLora")
             process_lora = gr.Checkbox(label="Procesar LORA")
             process_upscale = gr.Checkbox(label="Procesar Escalador")
             upscale_factor = gr.Radio(label="Factor de Escala", choices=[2, 4, 8], value=2)
-
-            improved_prompt = gr.Textbox(label="Prompt Mejorado", interactive=False)
-
-            improve_btn = gr.Button("Mejora mi prompt")
             improve_btn.click(fn=improve_prompt, inputs=[prompt], outputs=improved_prompt)
 
     with gr.Accordion(label="Opciones Avanzadas", open=False):
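Note: the components in the last hunk follow the standard gradio Blocks pattern, where elements render in the order they are defined and a Button is wired to a callback via .click. A minimal, self-contained sketch of just that wiring, with a placeholder callback standing in for the Space's LLM-backed improve_prompt:

```python
import gradio as gr

def improve_prompt(prompt: str) -> str:
    # Placeholder for the Space's real LLM-backed rewriter.
    return f"cinematic lighting, detailed: {prompt}"

with gr.Blocks() as demo:
    with gr.Column(scale=2):
        prompt = gr.Textbox(label="Descripción de imágen")
        improved_prompt = gr.Textbox(label="Mejorada mi idea", interactive=False)
        improve_btn = gr.Button("Mejora mi prompt")
        # Components render in definition order inside the Column; the click
        # handler fills improved_prompt from the contents of prompt.
        improve_btn.click(fn=improve_prompt, inputs=[prompt], outputs=improved_prompt)

if __name__ == "__main__":
    demo.launch()
```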