# Save limited ZeroGPU resources by switching to the Hugging Face Inference API
import os
import gradio as gr
import numpy as np
import random
from huggingface_hub import AsyncInferenceClient
from translatepy import Translator

translator = Translator()
HF_TOKEN = os.environ.get("HF_TOKEN", None)
# Constants
basemodel = "black-forest-labs/FLUX.1-dev"
MAX_SEED = np.iinfo(np.int32).max

CSS = """
footer {
    visibility: hidden;
}
"""

JS = """function () {
  gradioURL = window.location.href
  if (!gradioURL.endsWith('?__theme=dark')) {
    window.location.replace(gradioURL + '?__theme=dark');
  }
}"""

def enable_lora(lora_add):
    # Use the user-supplied LoRA repo id if given, otherwise fall back to the base model
    return lora_add if lora_add else basemodel
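
# Note: the serverless Inference API can serve a FLUX LoRA repo id directly as the
# `model` argument of text_to_image, so swapping in the repo id is all enable_lora
# needs to do; the LoRA trigger word is appended to the prompt in generate_image.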

async def generate_image(
    prompt:str,
    model:str,
    lora_word:str,
    width:int=768,
    height:int=1024,
    scales:float=3.5,
    steps:int=24,
    seed:int=-1):

    if seed == -1:
        seed = random.randint(0, MAX_SEED)
    seed = int(seed)
    print(f'prompt:{prompt}')
    
    # Translate the prompt to English and append the LoRA trigger word
    text = str(translator.translate(prompt, 'English')) + "," + lora_word

    client = AsyncInferenceClient(token=HF_TOKEN)
    try:
        image = await client.text_to_image(
            prompt=text,
            height=height,
            width=width,
            guidance_scale=scales,
            num_inference_steps=steps,
            model=model,
        )
    except Exception as e:
        raise gr.Error(f"Image generation failed: {e}")
    
    return image, seed
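
# A minimal standalone sketch (not used by the Gradio app) showing how generate_image
# could be called outside the UI; the prompt and output filename are placeholders.
#
#   import asyncio
#   image, used_seed = asyncio.run(generate_image("a red car on the Moon", basemodel, ""))
#   image.save("flux_demo.png")
#   print(f"seed used: {used_seed}")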

async def gen(
    prompt:str,
    lora_add:str="",
    lora_word:str="",
    width:int=768,
    height:int=1024,
    scales:float=3.5,
    steps:int=24,
    seed:int=-1,
    progress=gr.Progress(track_tqdm=True)
):
    model = enable_lora(lora_add)
    print(model)
    image, seed = await generate_image(prompt, model, lora_word, width, height, scales, steps, seed)
    return image, seed
     
examples = [
    ["a seal holding a beach ball in a pool","bingbangboom/flux_dreamscape","in the style of BSstyle004"],
    ["1980s anime screengrab, VHS quality, a woman with her face glitching and disorted, a halo above her head","dataautogpt3/FLUX-SyntheticAnime","1980s anime screengrab, VHS quality"],
    ["photograph, background of Earth from space, red car on the Moon watching Earth","martintomov/retrofuturism-flux","retrofuturism"],
    ["a living room interior","fofr/flux-80s-cyberpunk","80s cyberpunk"],
    ["Shrek, a lovable green ogre with a big smile, sitting on a moss-covered rock while enjoying a plate of freshly picked vegetables, in a magical forest filled with whimsical creatures, dappled sunlight filtering through the trees, surrounded by curious fairies peeking out from behind leaves","alvarobartt/ghibli-characters-flux-lora","Ghibli style"],
    ["a tourist in London, illustration in the style of VCTRNDRWNG, Victorian-era drawing","dvyio/flux-lora-victorian-drawing","illustration in the style of VCTRNDRWNG"],
    ["an African American and a caucasian man petting a cat at a busy electronic store. flikr photo from 2012. three people working in the background","kudzueye/boreal-flux-dev-v2","photo"],
    ["mgwr/cine, woman silhouette, morning light, sun rays, indoor scene, soft focus, golden hour, stretching pose, peaceful mood, cozy atmosphere, window light, shadows and highlights, backlit figure, minimalistic interior, warm tones, contemplative moment, calm energy, serene environment, yoga-inspired, elegant posture, natural light beams, artistic composition","mgwr/Cine-Aesthetic","atmospheric lighting and a dreamy, surreal vibe"]
]
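
# Each example row supplies [prompt, LoRA repo id, trigger word]; the remaining
# generation settings fall back to the defaults defined in gen() when an example runs.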

# Gradio Interface

with gr.Blocks(css=CSS, js=JS, theme="Nymbo/Nymbo_Theme") as demo:
    gr.HTML("<h1><center>Flux Lab Light</center></h1>")
    gr.HTML("<p><center>Powered By HF Inference API</center></p>")
    with gr.Row():
        with gr.Column(scale=4):
            with gr.Row():
                img = gr.Image(type="filepath", label='FLUX Generated Image', height=600)
            with gr.Row():
                prompt = gr.Textbox(label='Enter Your Prompt (Multi-Languages)', placeholder="Enter prompt...", scale=6)
                sendBtn = gr.Button("Send", scale=1, variant='primary')
        with gr.Accordion("Advanced Options", open=True):
            with gr.Column(scale=1):
                width = gr.Slider(
                    label="Width",
                    minimum=512,
                    maximum=1280,
                    step=8,
                    value=768,
                )
                height = gr.Slider(
                    label="Height",
                    minimum=512,
                    maximum=1280,
                    step=8,
                    value=1024,
                )
                scales = gr.Slider(
                    label="Guidance",
                    minimum=3.5,
                    maximum=7,
                    step=0.1,
                    value=3.5,
                )
                steps = gr.Slider(
                    label="Steps",
                    minimum=1,
                    maximum=100,
                    step=1,
                    value=24,
                )
                seed = gr.Slider(
                    label="Seed (-1 for random)",
                    minimum=-1,
                    maximum=MAX_SEED,
                    step=1,
                    value=-1,
                )
                lora_add = gr.Textbox(
                    label="Add Flux LoRA",
                    info="Paste the Hugging Face LoRA repo id here",
                    lines=1,
                    placeholder="Use a model with Warm status",
                )
                lora_word = gr.Textbox(
                    label="Add Flux LoRA Trigger Word",
                    info="Trigger word for the LoRA, appended to the prompt",
                    lines=1,
                    value="",
                )

    gr.Examples(
        examples=examples,
        inputs=[prompt,lora_add,lora_word],
        outputs=[img, seed],
        fn=gen,
        cache_examples="lazy",
        examples_per_page=4,
    )

    # Generate on Enter in the prompt box or on clicking the Send button
    gr.on(
        triggers=[
            prompt.submit,
            sendBtn.click,
        ],
        fn=gen,
        inputs=[
            prompt,
            lora_add,
            lora_word,
            width, 
            height, 
            scales, 
            steps, 
            seed
        ],
        outputs=[img, seed]
    )
    
if __name__ == "__main__":
    demo.queue(api_open=False).launch(show_api=False, share=False)
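
# To run locally (a sketch; the filename and token value below are placeholders):
#   export HF_TOKEN=hf_xxxxxxxx   # Hugging Face token used by the Inference API client
#   python app.py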