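"""Gradio Space: text-to-image with FLUX via the Hugging Face Inference API.

Generates an image from a (translated) prompt with FLUX.1 schnell or dev,
optionally routing through a realism LoRA repo, and optionally upscales the
result with the FineGrain enhancer Space or local GFPGAN face restoration.
The original and upscaled images are shown side by side in an ImageSlider.
"""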
import os
import torch
import gradio as gr
import numpy as np
import random
from huggingface_hub import AsyncInferenceClient
from translatepy import Translator
import requests
import re
import asyncio
from PIL import Image
from gradio_client import Client, handle_file
from huggingface_hub import login
from gradio_imageslider import ImageSlider
from gfpgan import GFPGANer
MAX_SEED = np.iinfo(np.int32).max
HF_TOKEN = os.environ.get("HF_TOKEN")
HF_TOKEN_UPSCALER = os.environ.get("HF_TOKEN_UPSCALER")

# Download the GFPGAN face-restoration weights on first run.
if not os.path.exists('GFPGANv1.4.pth'):
    os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth -P .")

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model_path = 'GFPGANv1.4.pth'
gfpgan = GFPGANer(
    model_path=model_path,
    upscale=4,
    arch='clean',
    channel_multiplier=2,
    device=device
)

def enable_lora(lora_add, basemodel):
    return basemodel if not lora_add else lora_add

async def generate_image(prompt, model, lora_word, width, height, scales, steps, seed):
    try:
        if seed == -1:
            seed = random.randint(0, MAX_SEED)
        seed = int(seed)
        # Translate the prompt to English and append the LoRA trigger word.
        text = str(Translator().translate(prompt, 'English')) + "," + lora_word
        client = AsyncInferenceClient()
        image = await client.text_to_image(
            prompt=text,
            height=height,
            width=width,
            guidance_scale=scales,
            num_inference_steps=steps,
            model=model
        )
        return image, seed
    except Exception as e:
        print(f"Error generating image: {e}")
        return None, None
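
# Example standalone call (hypothetical usage outside Gradio, assuming the HF_TOKEN
# environment variable grants access to the selected model):
# image, used_seed = asyncio.run(
#     generate_image("a red fox in the snow", "black-forest-labs/FLUX.1-schnell", "", 1024, 768, 3.5, 24, -1)
# )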

def get_upscale_finegrain(prompt, img_path, upscale_factor):
    try:
        client = Client("finegrain/finegrain-image-enhancer", hf_token=HF_TOKEN_UPSCALER)
        result = client.predict(
            input_image=handle_file(img_path), prompt=prompt, negative_prompt="", seed=42,
            upscale_factor=upscale_factor, controlnet_scale=0.6, controlnet_decay=1, condition_scale=6,
            tile_width=112, tile_height=144, denoise_strength=0.35, num_inference_steps=18,
            solver="DDIM", api_name="/process"
        )
        # The remote Space returns (original, upscaled); keep the upscaled image.
        return result[1]
    except Exception as e:
        print(f"Error upscaling image: {e}")
        return None

def get_upscale_gfpgan(prompt, img_path):
    try:
        # GFPGANer expects a BGR numpy array and returns (cropped_faces, restored_faces, restored_img).
        img = np.array(Image.open(img_path).convert("RGB"))[:, :, ::-1]
        _, _, restored = gfpgan.enhance(img, has_aligned=False, only_center_face=False, paste_back=True)
        # Convert the restored BGR array back to a PIL image so the caller can save it.
        return Image.fromarray(restored[:, :, ::-1])
    except Exception as e:
        print(f"Error upscaling image: {e}")
        return None

async def gen(prompt, basemodel, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model, process_lora, upscale_model):
    model = enable_lora(lora_model, basemodel) if process_lora else basemodel
    image, seed = await generate_image(prompt, model, "", width, height, scales, steps, seed)
    if image is None:
        return [None, None]
    image_path = "temp_image.jpg"
    image.save(image_path, format="JPEG")
    if process_upscale:
        if upscale_model == "FineGrain":
            upscale_image = get_upscale_finegrain(prompt, image_path, upscale_factor)
        else:
            upscale_image = get_upscale_gfpgan(prompt, image_path)
        if upscale_image is None:
            # Upscaling failed; show the base image on both sides of the slider.
            return [image_path, image_path]
        upscale_image_path = "upscale_image.jpg"
        upscale_image.save(upscale_image_path, format="JPEG")
        return [image_path, upscale_image_path]
    else:
        return [image_path, image_path]
css = """
#col-container{ margin: 0 auto; max-width: 1024px;}
"""
with gr.Blocks(css=css, theme="Nymbo/Nymbo_Theme") as demo:
    with gr.Column(elem_id="col-container"):
        with gr.Row():
            with gr.Column(scale=3):
                output_res = ImageSlider(label="Flux / Upscaled")
            with gr.Column(scale=2):
                prompt = gr.Textbox(label="Image description")
                basemodel_choice = gr.Dropdown(label="Model", choices=["black-forest-labs/FLUX.1-schnell", "black-forest-labs/FLUX.1-dev"], value="black-forest-labs/FLUX.1-schnell")
                lora_model_choice = gr.Dropdown(label="Realism LoRA", choices=["Shakker-Labs/FLUX.1-dev-LoRA-add-details", "XLabs-AI/flux-RealismLora"], value="XLabs-AI/flux-RealismLora")
                process_lora = gr.Checkbox(label="Apply LoRA")
                process_upscale = gr.Checkbox(label="Apply upscaler")
                upscale_factor = gr.Radio(label="Upscale factor", choices=[2, 4, 8], value=2)
                upscale_model = gr.Radio(label="Upscale model", choices=["FineGrain", "GFPGAN"], value="GFPGAN")
                with gr.Accordion(label="Advanced options", open=False):
                    width = gr.Slider(label="Width", minimum=512, maximum=1280, step=8, value=1280)
                    height = gr.Slider(label="Height", minimum=512, maximum=1280, step=8, value=768)
                    scales = gr.Slider(label="Guidance scale", minimum=3.5, maximum=7, step=0.1, value=3.5)
                    steps = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=24)
                    seed = gr.Slider(label="Seed", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
                submit_btn = gr.Button("Create", scale=1)
    submit_btn.click(
        fn=lambda: None,
        inputs=None,
        outputs=[output_res],
        queue=False
    ).then(
        fn=gen,
        inputs=[prompt, basemodel_choice, width, height, scales, steps, seed, upscale_factor, process_upscale, lora_model_choice, process_lora, upscale_model],
        outputs=[output_res]
    )

demo.launch()
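
# Dependencies implied by the imports above (a sketch of a requirements list; exact
# package pins are assumptions, not taken from the original Space):
#   torch, gradio, numpy, pillow, requests, huggingface_hub, translatepy,
#   gradio_client, gradio_imageslider, gfpgan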