# Save ZeroGPU's limited quota by switching to the serverless Inference API
import os
import re
import random
import asyncio

import gradio as gr
import numpy as np
import requests
from huggingface_hub import AsyncInferenceClient
from translatepy import Translator
from PIL import Image

translator = Translator()
HF_TOKEN = os.environ.get("HF_TOKEN", None)

# Constants
basemodel = "black-forest-labs/FLUX.1-dev"
MAX_SEED = np.iinfo(np.int32).max

CSS = """
footer {
    visibility: hidden;
}
"""

JS = """function () {
  gradioURL = window.location.href
  if (!gradioURL.endsWith('?__theme=dark')) {
    window.location.replace(gradioURL + '?__theme=dark');
  }
}"""


def enable_lora(lora_add):
    # Fall back to the base FLUX.1-dev model when no LoRA repo is given
    if not lora_add:
        return basemodel
    else:
        return lora_add


async def generate_image(
    prompt: str,
    model: str,
    lora_word: str,
    width: int = 768,
    height: int = 1024,
    scales: float = 3.5,
    steps: int = 24,
    seed: int = -1,
):
    # -1 means "pick a random seed"
    if seed == -1:
        seed = random.randint(0, MAX_SEED)
    seed = int(seed)
    print(f'prompt:{prompt}')

    # Translate the prompt to English and append the LoRA trigger word
    text = str(translator.translate(prompt, 'English')) + lora_word

    # Authenticate with the HF token from the environment (if set)
    client = AsyncInferenceClient(token=HF_TOKEN)
    image = await client.text_to_image(
        prompt=text,
        height=height,
        width=width,
        guidance_scale=scales,
        num_inference_steps=steps,
        model=model,
    )
    print(image)
    return image, seed


async def gen(
    prompt: str,
    lora_add: str = "",
    lora_word: str = "",
    width: int = 768,
    height: int = 1024,
    scales: float = 3.5,
    steps: int = 24,
    seed: int = -1,
    progress=gr.Progress(track_tqdm=True),
):
    model = enable_lora(lora_add)
    print(model)
    image, seed = await generate_image(prompt, model, lora_word, width, height, scales, steps, seed)
    return image, seed


examples = [
    [
        "A cartoon-style blonde European-American woman wearing sunglasses stands in front of the Arc de Triomphe to take a selfie, upper body, artistic style",
        "Shakker-Labs/FLUX.1-dev-LoRA-blended-realistic-illustration",
        "artistic style blends elements of reality and illustration",
    ],
    [
        "A cartoon-style European woman wearing glasses is eating a table of seafood, including lobster, oysters, and other shellfish, in a well-lit modern restaurant. The background of the restaurant is very blurry, and she is holding the utensils ready to eat. There is a glass of red wine and various dishes on the table. The illustrations contrast with the real food and environment, creating a unique mixed-media effect and a high-angle perspective",
        "Shakker-Labs/FLUX.1-dev-LoRA-blended-realistic-illustration",
        "artistic style blends elements of reality and illustration",
    ],
    [
        "A cartoon-style European man opens his hands and takes a selfie under the Sydney Opera House",
        "Shakker-Labs/FLUX.1-dev-LoRA-blended-realistic-illustration",
        "artistic style blends elements of reality and illustration",
    ],
    [
        "Against the backdrop of the Eiffel Tower, a cartoon-style European woman wearing a delicate white floral dress stands there, with the iconic tower clearly visible under the azure sky, capturing the romantic charm of Paris. As she takes photos against this stunning background, her flowing hair adds a dreamy atmosphere",
        "Shakker-Labs/FLUX.1-dev-LoRA-blended-realistic-illustration",
        "artistic style blends elements of reality and illustration",
    ],
]

# Gradio Interface
with gr.Blocks(css=CSS, js=JS, theme="Nymbo/Nymbo_Theme") as demo:
    gr.HTML("<h1><center>Flux Labs</center></h1>")
    gr.HTML("<p><center>Add the LoRA model on the menu</center></p>")
    with gr.Row():
        with gr.Column(scale=4):
            with gr.Row():
                img = gr.Image(type="filepath", label='Flux Generated Image', height=600)
            with gr.Row():
                prompt = gr.Textbox(label='Enter Your Prompt (Multi-Languages)', placeholder="Enter prompt...", scale=6)
                sendBtn = gr.Button(scale=1, variant='primary')
            with gr.Accordion("Advanced Options", open=True):
                with gr.Column(scale=1):
                    width = gr.Slider(
                        label="Width",
                        minimum=512,
                        maximum=1280,
                        step=8,
                        value=768,
                    )
                    height = gr.Slider(
                        label="Height",
                        minimum=512,
                        maximum=1280,
                        step=8,
                        value=1024,
                    )
                    scales = gr.Slider(
                        label="Guidance",
                        minimum=3.5,
                        maximum=7,
                        step=0.1,
                        value=3.5,
                    )
                    steps = gr.Slider(
                        label="Steps",
                        minimum=1,
                        maximum=100,
                        step=1,
                        value=24,
                    )
                    seed = gr.Slider(
                        label="Seeds",
                        minimum=-1,
                        maximum=MAX_SEED,
                        step=1,
                        value=-1,
                    )
                    lora_add = gr.Textbox(
                        label="Add Flux LoRA",
                        info="Copy the HF LoRA model name here",
                        lines=1,
                        value="Shakker-Labs/FLUX.1-dev-LoRA-blended-realistic-illustration",
                    )
                    lora_word = gr.Textbox(
                        label="Add Flux LoRA Trigger Word",
                        info="Add the Trigger Word",
                        lines=1,
                        value="",
                    )
            gr.Examples(
                examples=examples,
                inputs=[prompt, lora_add, lora_word],
                outputs=[img, seed],
                fn=gen,
                cache_examples="lazy",
                examples_per_page=4,
            )

    gr.on(
        triggers=[
            prompt.submit,
            sendBtn.click,
        ],
        fn=gen,
        inputs=[
            prompt,
            lora_add,
            lora_word,
            width,
            height,
            scales,
            steps,
            seed,
        ],
        outputs=[img, seed],
        api_name="run",
    )

demo.queue().launch()