# Save ZeroGPU's limited resources: switch to the HF Inference API.
import os
import random

import gradio as gr
import numpy as np
from huggingface_hub import AsyncInferenceClient
from translatepy import Translator

translator = Translator()
HF_TOKEN = os.environ.get("HF_TOKEN", None)

# Constants
basemodel = "black-forest-labs/FLUX.1-dev"
MAX_SEED = np.iinfo(np.int32).max

CSS = """
footer {
    visibility: hidden;
}
"""

JS = """function () {
    const gradioURL = window.location.href;
    if (!gradioURL.endsWith('?__theme=dark')) {
        window.location.replace(gradioURL + '?__theme=dark');
    }
}"""


def enable_lora(lora_add):
    # Fall back to the base FLUX model when no LoRA repo is given.
    return lora_add if lora_add else basemodel


async def generate_image(
    prompt: str,
    model: str,
    lora_word: str,
    width: int = 768,
    height: int = 1024,
    scales: float = 3.5,
    steps: int = 24,
    seed: int = -1,
):
    if seed == -1:
        seed = random.randint(0, MAX_SEED)
    seed = int(seed)
    print(f"prompt: {prompt}")

    # Translate the prompt to English, then append the LoRA trigger word.
    text = str(translator.translate(prompt, "English")) + "," + lora_word

    # Authenticate the client with HF_TOKEN so requests are not anonymous.
    client = AsyncInferenceClient(token=HF_TOKEN)
    try:
        image = await client.text_to_image(
            prompt=text,
            height=height,
            width=width,
            guidance_scale=scales,
            num_inference_steps=steps,
            model=model,
        )
    except Exception as e:
        raise gr.Error(f"Image generation failed: {e}")
    return image, seed


async def gen(
    prompt: str,
    lora_add: str = "",
    lora_word: str = "",
    width: int = 768,
    height: int = 1024,
    scales: float = 3.5,
    steps: int = 24,
    seed: int = -1,
    progress=gr.Progress(track_tqdm=True),
):
    model = enable_lora(lora_add)
    print(model)
    image, seed = await generate_image(prompt, model, lora_word, width, height, scales, steps, seed)
    return image, seed


examples = [
    [
        "A cartoon-style blonde European-American woman wearing sunglasses standing in front of the Arc de Triomphe to take a selfie, upper body, artistic style",
        "Shakker-Labs/FLUX.1-dev-LoRA-blended-realistic-illustration",
        "artistic style blends elements of reality and illustration",
    ],
    [
        "1980s anime screengrab, VHS quality, a woman with her face glitching and distorted, a halo above her head",
        "dataautogpt3/FLUX-SyntheticAnime",
        "1980s anime screengrab, VHS quality",
    ],
    [
        "photograph, background of Earth from space, red car on the Moon watching Earth",
        "martintomov/retrofuturism-flux",
        "retrofuturism",
    ],
    [
        "a living room interior",
        "fofr/flux-80s-cyberpunk",
        "80s cyberpunk",
    ],
]

# Gradio Interface
with gr.Blocks(css=CSS, js=JS, theme="Nymbo/Nymbo_Theme") as demo:
    gr.HTML("<h1><center>Flux Lab Light</center></h1>")
") gr.HTML("

Powered By HF Inference API

") with gr.Row(): with gr.Column(scale=4): with gr.Row(): img = gr.Image(type="filepath", label='flux Generated Image', height=600) with gr.Row(): prompt = gr.Textbox(label='Enter Your Prompt (Multi-Languages)', placeholder="Enter prompt...", scale=6) sendBtn = gr.Button(scale=1, variant='primary') with gr.Accordion("Advanced Options", open=True): with gr.Column(scale=1): width = gr.Slider( label="Width", minimum=512, maximum=1280, step=8, value=768, ) height = gr.Slider( label="Height", minimum=512, maximum=1280, step=8, value=1024, ) scales = gr.Slider( label="Guidance", minimum=3.5, maximum=7, step=0.1, value=3.5, ) steps = gr.Slider( label="Steps", minimum=1, maximum=100, step=1, value=24, ) seed = gr.Slider( label="Seeds", minimum=-1, maximum=MAX_SEED, step=1, value=-1, ) lora_add = gr.Textbox( label="Add Flux LoRA", info="Copy the HF LoRA model name here", lines=1, placeholder="Please use Warm status model", ) lora_word = gr.Textbox( label="Add Flux LoRA Trigger Word", info="Add the Trigger Word", lines=1, value="", ) gr.Examples( examples=examples, inputs=[prompt,lora_add,lora_word], outputs=[img, seed], fn=gen, cache_examples="lazy", examples_per_page=4, ) gr.on( triggers=[ prompt.submit, sendBtn.click, ], fn=gen, inputs=[ prompt, lora_add, lora_word, width, height, scales, steps, seed ], outputs=[img, seed], api_name="run", ) demo.queue().launch()