import os
import random
import asyncio
import numpy as np
import yaml
import cv2
import streamlit as st
import insightface
from pathlib import Path
from PIL import Image
from insightface.app import FaceAnalysis
from huggingface_hub import InferenceClient, AsyncInferenceClient
from gradio_client import Client, handle_file

MAX_SEED = np.iinfo(np.int32).max
DATA_PATH = Path("./data"); DATA_PATH.mkdir(exist_ok=True)
HF_TOKEN_UPSCALER = os.environ.get("HF_TOKEN_UPSCALER")
client, llm_client = AsyncInferenceClient(), InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
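# The Inference API clients pick up a token from the HF_TOKEN environment variable or a
# cached `huggingface-cli login`, if one is present; HF_TOKEN_UPSCALER is only forwarded
# to the finegrain upscaler Space below.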
try:
    with open("config.yaml") as f:
        credentials = yaml.safe_load(f)
except Exception as e:
    st.error(f"Error al cargar config: {e}"); credentials = {"username": "", "password": ""}
def prepare_face_app():
    app = FaceAnalysis(name='buffalo_l'); app.prepare(ctx_id=0, det_size=(640, 640))
    return app, insightface.model_zoo.get_model('onix.onnx')

app, swapper = prepare_face_app()
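# Text-to-image call against the serverless Inference API: a seed of -1 is replaced by a
# random one, and a string response (error payload) yields (None, None).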
async def generate_image(prompt, model, w, h, scale, steps, seed):
    seed = random.randint(0, MAX_SEED) if seed == -1 else seed
    image = await client.text_to_image(prompt=prompt, height=h, width=w, guidance_scale=scale, num_inference_steps=steps, model=model)
    return (image, seed) if not isinstance(image, str) else (None, None)
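# Upscaling goes through the finegrain/finegrain-image-enhancer Space via gradio_client.
# The Space's predict() is assumed to return multiple outputs, with the upscaled image
# path at index 1 (as used below); any failure is swallowed and reported as None.
# Hypothetical usage sketch:
#   path = get_upscale_finegrain("a portrait", "./data/image_42.jpg", upscale_factor=2)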
def get_upscale_finegrain(prompt, img_path, upscale_factor):
    try:
        result = Client("finegrain/finegrain-image-enhancer", hf_token=HF_TOKEN_UPSCALER).predict(input_image=handle_file(img_path), prompt=prompt, upscale_factor=upscale_factor)
        return result[1] if isinstance(result, (list, tuple)) and len(result) > 1 else None
    except Exception:
        return None
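# End-to-end generation pipeline: optionally enrich the prompt with the LLM, generate the
# image, persist image + prompt, optionally upscale, and return [image_path, prompt_path]
# as strings, or [None, None] on failure.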
async def gen(prompt, basemodel, w, h, scales, steps, seed, upscale_factor, process_upscale, process_enhancer, language):
    combined_prompt = f"{prompt} {await improve_prompt(prompt, language)}" if process_enhancer else prompt
    image, seed = await generate_image(combined_prompt, basemodel, w, h, scales, steps, seed)
    if image is None:
        st.error("Error al generar imagen")
        return [None, None]
    image_path = save_image(image, seed); prompt_file_path = save_prompt(combined_prompt, seed)
    if process_upscale:
        upscale_image_path = get_upscale_finegrain(combined_prompt, image_path, upscale_factor)
        if upscale_image_path:
            upscaled = Image.open(upscale_image_path)
            if upscaled.mode == 'RGBA': upscaled = upscaled.convert('RGB')
            upscaled.save(DATA_PATH / f"upscale_image_{seed}.jpg", format="JPEG")
            image_path.unlink()
            return [str(DATA_PATH / f"upscale_image_{seed}.jpg"), str(prompt_file_path)]
    return [str(image_path), str(prompt_file_path)]
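# Prompt enhancer: asks the Mixtral endpoint for a richer txt2img prompt in the chosen
# language and truncates the reply to 500 characters.
# Hypothetical standalone usage, assuming a valid HF token is configured:
#   richer = asyncio.run(improve_prompt("un gato astronauta", "es"))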
async def improve_prompt(prompt, language):
    instruction = "With this idea, describe in English a detailed txt2img prompt in 500 characters at most..." if language == "en" else "Con esta idea, describe en español un prompt detallado de txt2img..."
    # llm_client is a synchronous InferenceClient, so the call is not awaited; text_generation returns the generated text as a string.
    response = llm_client.text_generation(f"{prompt}: {instruction}", max_new_tokens=500)
    return response.strip()[:500]
def save_image(image, seed):
    if image.mode == 'RGBA': image = image.convert('RGB')
    image_path = DATA_PATH / f"image_{seed}.jpg"
    image.save(image_path, format="JPEG")
    return image_path

def save_prompt(prompt_text, seed):
    prompt_file_path = DATA_PATH / f"prompt_{seed}.txt"
    prompt_file_path.write_text(prompt_text)
    return prompt_file_path
def get_storage():
    files = [file for file in DATA_PATH.glob("*.jpg") if file.is_file()]
    total_size = sum(file.stat().st_size for file in files) / (1024.0 ** 3)
    return files, f"Uso total: {total_size:.3f} GB"

def delete_image(image_path):
    try:
        Path(image_path).unlink(); st.success(f"Imagen {image_path} borrada.")
    except Exception as e:
        st.error(f"Error al borrar imagen: {e}")
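# Single-user gate: credentials are compared in plain text against the username and
# password loaded from config.yaml.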
def delete_all_images():
    for file in DATA_PATH.glob("*.jpg"):
        file.unlink()
    st.success("Todas las imágenes han sido borradas.")
def authenticate_user(username, password, credentials):
    return username == credentials["username"] and password == credentials["password"]

def login_form(credentials):
    if 'authenticated' not in st.session_state: st.session_state['authenticated'] = False
    if not st.session_state['authenticated']:
        username = st.text_input("Usuario"); password = st.text_input("Contraseña", type='password')
        if st.button("Iniciar Sesión"):
            if authenticate_user(username, password, credentials):
                st.session_state['authenticated'] = True; st.success("Inicio de sesión exitoso.")
            else:
                st.error("Credenciales incorrectas.")
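# Uploaded images are stored in DATA_PATH with a placeholder prompt file keyed by the file
# stem; the gallery's prompt lookup uses the last underscore-separated token of the stem,
# so uploads whose names contain extra underscores show "Prompt no encontrado".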
def upload_image():
    uploaded_file = st.sidebar.file_uploader("Sube una imagen", type=["png", "jpg", "jpeg"])
    if uploaded_file:
        image_path = DATA_PATH / uploaded_file.name
        with open(image_path, "wb") as f:
            f.write(uploaded_file.getbuffer())
        st.sidebar.success(f"Imagen {uploaded_file.name} cargada correctamente.")
        return image_path, save_prompt("#uploadedbyuser", image_path.stem)
    return None
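# Gallery: six-column grid with a prompt preview plus per-image delete and face-swap buttons.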
def gallery():
    files, usage = get_storage()
    st.sidebar.write(f"{usage}")
    if st.sidebar.button("Borrar Todas las Imágenes"): delete_all_images()
    cols = st.columns(6)
    for idx, file in enumerate(files):
        with cols[idx % 6]:
            st.image(str(file))
            try:
                prompt_file_path = DATA_PATH / f"prompt_{file.stem.split('_')[-1]}.txt"
                st.write(f"Prompt: {prompt_file_path.read_text()}")
            except FileNotFoundError:
                st.write("Prompt no encontrado.")
            st.button(f"Borrar Imagen {file.name}", on_click=delete_image, args=(file,))
            if st.button(f"Swap Face en {file.name}"): upload_source_and_swap(file)
def face_swap(image_path, source_image_path):
    try:
        img_dest, img_src = cv2.imread(str(image_path)), cv2.imread(str(source_image_path))
        source_faces, dest_faces = app.get(img_src), app.get(img_dest)
        if not source_faces: st.error("No se encontraron caras en la imagen source."); return None
        if not dest_faces: st.error("No se encontraron caras en la imagen destino."); return None
        # Paste the first detected source face onto the first detected destination face.
        swapped = swapper.get(img_dest, dest_faces[0], source_faces[0], paste_back=True)
        swapped_img_path = DATA_PATH / f"swapped_{Path(image_path).stem}.jpg"
        cv2.imwrite(str(swapped_img_path), swapped)
        return swapped_img_path
    except Exception as e: st.error(f"Error en face swap: {e}"); return None
def upload_source_and_swap(image_path):
    source_image = st.file_uploader("Sube la imagen source para face swap", type=["png", "jpg", "jpeg"])
    if source_image:
        source_image_path = DATA_PATH / source_image.name
        with open(source_image_path, "wb") as f:
            f.write(source_image.getbuffer())
        st.success(f"Imagen source {source_image.name} cargada correctamente.")
        swapped_image_path = face_swap(image_path, source_image_path)
        if swapped_image_path: st.image(str(swapped_image_path), caption="Imagen con Face Swap", use_column_width=True)
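# Streamlit re-executes the script on every interaction; asyncio.run(main()) drives the
# async generation calls within each run.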
async def main():
    st.set_page_config(layout="wide")
    login_form(credentials)
    if not st.session_state['authenticated']: st.warning("Por favor, inicia sesión para acceder a la aplicación."); return
    prompt = st.sidebar.text_input("Descripción de la imagen", max_chars=900)
    process_enhancer, language = st.sidebar.checkbox("Mejorar Prompt", value=False), st.sidebar.selectbox("Idioma", ["en", "es"])
    basemodel, format_option, process_upscale = st.sidebar.selectbox("Modelo Base", ["black-forest-labs/FLUX.1-dev", "black-forest-labs/FLUX.1-schnell"]), st.sidebar.selectbox("Formato", ["9:16", "16:9"]), st.sidebar.checkbox("Procesar Escalador", value=False)
    upscale_factor, scales, steps, seed = st.sidebar.selectbox("Factor de Escala", [2, 4, 8], index=0), st.sidebar.slider("Escalado", 1, 20, 10), st.sidebar.slider("Pasos", 1, 100, 20), st.sidebar.number_input("Semilla", value=-1)
    w, h = (1080, 1920) if format_option == "9:16" else (1920, 1080)
    upload_image()
    image_path, prompt_file_path = None, None
    if st.sidebar.button("Generar Imagen"):
        with st.spinner("Generando..."):
            image_path, prompt_file_path = await gen(prompt, basemodel, w, h, scales, steps, seed, upscale_factor, process_upscale, process_enhancer, language)
        if image_path:
            st.image(image_path, caption="Imagen Generada")
            st.write(f"Prompt: {open(prompt_file_path).read()}")
            st.success("Imagen generada y almacenada.")
    gallery()

if __name__ == "__main__":
    asyncio.run(main())