# Flux text-to-image demo (Hugging Face Space): prompt improvement via
# Mixtral, Flux generation, optional LoRA, and Finegrain upscaling.
import os
import random

import gradio as gr
import numpy as np
from huggingface_hub import AsyncInferenceClient, InferenceClient
from PIL import Image

from gradio_client import Client, handle_file
from gradio_imageslider import ImageSlider

# Largest value accepted as an RNG seed by the inference backends.
MAX_SEED = np.iinfo(np.int32).max

# Read from the Space's secrets; either may be None when unset.
HF_TOKEN = os.environ.get("HF_TOKEN")
HF_TOKEN_UPSCALER = os.environ.get("HF_TOKEN_UPSCALER")

# Async client for text-to-image; sync client for LLM-based prompt improvement.
client = AsyncInferenceClient()
llm_client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
def enable_lora(lora_add, basemodel):
    """Return the LoRA repo id when one is given, otherwise the base model.

    An empty/falsy `lora_add` means "no LoRA selected", so the base model
    is used as-is.
    """
    return lora_add if lora_add else basemodel
async def generate_image(prompt, model, lora_word, width, height, scales, steps, seed):
    """Generate a single image with the async inference client.

    Returns:
        (PIL.Image, int): the generated image and the seed actually used,
        or (str, None) with an error message when generation fails.
        A seed of -1 requests a random seed in [0, MAX_SEED].
    """
    try:
        if seed == -1:
            seed = random.randint(0, MAX_SEED)
        seed = int(seed)
        # Append the LoRA trigger word so the adapter activates.
        text = prompt + "," + lora_word
        image = await client.text_to_image(
            prompt=text,
            height=height,
            width=width,
            guidance_scale=scales,
            num_inference_steps=steps,
            model=model,
        )
        return image, seed
    except Exception as e:
        # Caller (gen) detects failure by the "Error" string prefix.
        return f"Error al generar imagen: {e}", None
def get_upscale_finegrain(prompt, img_path, upscale_factor):
    """Upscale the image at `img_path` via the Finegrain enhancer Space.

    Returns the path to the enhanced image, or None on any failure.
    Best-effort by design: the caller falls back to the original image.
    """
    try:
        # Renamed from `client` to avoid shadowing the module-level
        # AsyncInferenceClient of the same name.
        upscaler = Client("finegrain/finegrain-image-enhancer", hf_token=HF_TOKEN_UPSCALER)
        result = upscaler.predict(
            input_image=handle_file(img_path),
            prompt=prompt,
            negative_prompt="",
            seed=42,
            upscale_factor=upscale_factor,
            controlnet_scale=0.6,
            controlnet_decay=1,
            condition_scale=6,
            tile_width=112,
            tile_height=144,
            denoise_strength=0.35,
            num_inference_steps=18,
            solver="DDIM",
            api_name="/process",
        )
        # The endpoint returns a pair; index 1 is the enhanced image path.
        return result[1]
    except Exception:
        # Deliberate silent fallback: any failure simply disables upscaling.
        return None
async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora):
    """Full pipeline: improve the prompt, generate, and optionally upscale.

    Returns a [left, right] pair of image paths for the ImageSlider
    (original vs. upscaled), or [error_message, None] when generation fails.
    """
    model = enable_lora(lora_model, basemodel) if process_lora else basemodel
    improved_prompt = await improve_prompt(prompt)
    combined_prompt = f"{prompt} {improved_prompt}"
    image, seed = await generate_image(combined_prompt, model, "", width, height, scales, steps, seed)

    # generate_image signals failure with an "Error..." string instead of an image.
    if isinstance(image, str) and image.startswith("Error"):
        return [image, None]

    image_path = "temp_image.jpg"
    image.save(image_path, format="JPEG")

    if not process_upscale:
        # Upscaling disabled: show the same image on both slider sides.
        return [image_path, image_path]

    upscale_image_path = get_upscale_finegrain(combined_prompt, image_path, upscale_factor)
    if upscale_image_path is None:
        # Upscaler failed (best-effort); fall back to the original image.
        return [image_path, image_path]

    upscale_image = Image.open(upscale_image_path)
    upscale_image.save("upscale_image.jpg", format="JPEG")
    return [image_path, "upscale_image.jpg"]
async def improve_prompt(prompt):
    """Ask the LLM to expand/translate the prompt into a detailed English one.

    Returns the improved prompt text, or an error message string on failure.
    """
    try:
        instruction = "create a prompt with my idea and translate it into English (improve my text and add it), adding detailed descriptions of character, style, cinematography, cameras, atmosphere, and lighting for the best quality and realism, up max to 300 words."
        formatted_prompt = f"{instruction}: {prompt}"
        response = llm_client.text_generation(formatted_prompt, max_new_tokens=300)
        # text_generation returns a plain string by default; some client
        # versions return a dict with 'generated_text'. The original
        # `'generated_text' in response` on a str was a substring check,
        # not a key check — guard with isinstance instead.
        if isinstance(response, dict) and 'generated_text' in response:
            return response['generated_text'].strip()
        return response.strip()
    except Exception as e:
        return f"Error mejorando el prompt: {e}"
# Static CSS: center the layout and cap its width.
css = """
#col-container{ margin: 0 auto; max-width: 1024px;}
"""

# NOTE(review): the pasted source lost all indentation; the nesting below is
# reconstructed from the widget order — confirm against the live Space layout.
with gr.Blocks(css=css, theme="Nymbo/Nymbo_Theme") as demo:
    with gr.Column(elem_id="col-container"):
        with gr.Row():
            with gr.Column(scale=3):
                # Before/after slider: generated image vs. upscaled image.
                output_res = ImageSlider(label="Flux / Upscaled")
            with gr.Column(scale=2):
                prompt = gr.Textbox(label="Descripción de imágen")
                basemodel_choice = gr.Dropdown(
                    label="Modelo",
                    choices=["black-forest-labs/FLUX.1-schnell", "black-forest-labs/FLUX.1-DEV"],
                    value="black-forest-labs/FLUX.1-schnell",
                )
                lora_model_choice = gr.Dropdown(
                    label="LORA Realismo",
                    choices=["Shakker-Labs/FLUX.1-dev-LoRA-add-details", "XLabs-AI/flux-RealismLora"],
                    value="XLabs-AI/flux-RealismLora",
                )
                with gr.Row():
                    process_lora = gr.Checkbox(label="Procesar LORA")
                    process_upscale = gr.Checkbox(label="Procesar Escalador")
                upscale_factor = gr.Radio(label="Factor de Escala", choices=[2, 4, 8], value=2)
                improved_prompt = gr.Textbox(label="Prompt Mejorado", interactive=False)
                improve_btn = gr.Button("Mejora mi prompt")
                improve_btn.click(fn=improve_prompt, inputs=[prompt], outputs=improved_prompt)

        with gr.Accordion(label="Opciones Avanzadas", open=False):
            width = gr.Slider(label="Ancho", minimum=512, maximum=1280, step=8, value=1280)
            height = gr.Slider(label="Alto", minimum=512, maximum=1280, step=8, value=768)
            scales = gr.Slider(label="Escalado", minimum=1, maximum=20, step=1, value=10)
            steps = gr.Slider(label="Pasos", minimum=1, maximum=100, step=1, value=20)
            seed = gr.Number(label="Semilla", value=-1)

        btn = gr.Button("Generar")
        btn.click(
            fn=gen,
            inputs=[prompt, basemodel_choice, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model_choice, process_lora],
            outputs=output_res,
        )

demo.launch()