import gradio as gr
import torch
from diffusers import DiffusionPipeline
from config import *
from helpers import *

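# Every *_change handler below follows the same pattern: write the new value
# into the shared config state, then return the updated config together with
# its string representation and the regenerated code snippet for the UI.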
def device_change(device, config):
    
    config = set_config(config, 'device', device)
    
    return config, str(config), assemble_code(config)

def models_change(model, scheduler, config):
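    # the model selection also drives the safetensors flag, the refiner model
    # and the default scheduler, so this handler updates several widgets at once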

    config = set_config(config, 'model', model)

    use_safetensors = "False"
    refiner = ""
    
    # a model is selected (on the UI init run, model is None or an empty list)
    if not isinstance(model, list) and str(model) != 'None':
        
        use_safetensors = str(models[model]['use_safetensors'])
        model_description = models[model]['description']
        refiner = models[model]['refiner']
                
        # if no scheduler is selected, choose the default one for this model
        if scheduler is None:
            
            scheduler = models[model]['scheduler']

    else:

        model_description = 'Please select a model.'
        
    config["use_safetensors"] = str(use_safetensors)
    config["scheduler"] = str(scheduler)
    config["refiner"] = str(refiner)
    
    # safety_checker_change(in_safety_checker.value, config)
    # requires_safety_checker_change(in_requires_safety_checker.value, config)

    return model_description, refiner, use_safetensors, scheduler, config, str(config), assemble_code(config)

def data_type_change(data_type, config):

    config = set_config(config, 'data_type', data_type)

    return config, str(config), assemble_code(config)

def tensorfloat32_change(allow_tensorfloat32, config):  
        
    config = set_config(config, 'allow_tensorfloat32', allow_tensorfloat32)

    return config, str(config), assemble_code(config)

def inference_steps_change(inference_steps, config):
    
    config = set_config(config, 'inference_steps', inference_steps)

    return config, str(config), assemble_code(config)

def manual_seed_change(manual_seed, config):
    
    config = set_config(config, 'manual_seed', manual_seed)

    return config, str(config), assemble_code(config)

def guidance_scale_change(guidance_scale, config):

    config = set_config(config, 'guidance_scale', guidance_scale)

    return config, str(config), assemble_code(config)

def prompt_change(prompt, config):
    
    config = set_config(config, 'prompt', prompt)

    return config, str(config), assemble_code(config)

def negative_prompt_change(negative_prompt, config):
    
    config = set_config(config, 'negative_prompt', negative_prompt)
    
    return config, str(config), assemble_code(config)

def variant_change(variant, config):
    
    config = set_config(config, 'variant', variant)

    return config, str(config), assemble_code(config)
    
def safety_checker_change(safety_checker, config):
        
    config = set_config(config, 'safety_checker', safety_checker)

    return config, str(config), assemble_code(config)

def requires_safety_checker_change(requires_safety_checker, config):

    config = set_config(config, 'requires_safety_checker', requires_safety_checker)

    return config, str(config), assemble_code(config)

def schedulers_change(scheduler, config):
    
    if str(scheduler) != 'None' and not isinstance(scheduler, list):
        
        scheduler_description = schedulers[scheduler]
        
    else:
        scheduler_description = 'Please select a scheduler.'
        
    config = set_config(config, 'scheduler', scheduler)

    return scheduler_description, config, str(config), assemble_code(config)
    
def run_inference(config, config_history, progress=gr.Progress(track_tqdm=True)):
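    # builds a diffusers pipeline from the current config, optionally chains a
    # refiner pipeline, and returns the image plus the updated config history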
    
    # str_config = str_config.replace("'", '"').replace('None', 'null').replace('False', 'false')
    # config = json.loads(str_config)

    if str(config["model"]) != 'None' and str(config["scheduler"]) != 'None':
        
        progress((1,3), desc="Preparing pipeline initialization...")
        
        torch.backends.cuda.matmul.allow_tf32 = get_bool(config["allow_tensorfloat32"]) # TensorFloat-32 is faster but slightly less accurate, see https://huggingface.co/docs/diffusers/main/en/optimization/fp16
        
        progress((2,3), desc="Initializing pipeline...")
                    
        pipeline = DiffusionPipeline.from_pretrained(
            config["model"], 
            use_safetensors = get_bool(config["use_safetensors"]), 
            torch_dtype = get_data_type(config["data_type"]), 
            variant = get_variant(config["variant"])).to(config["device"])
        
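        # the refiner (as used e.g. by Stable Diffusion XL) reuses the base
        # pipeline's second text encoder and VAE to save memory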
        if config['refiner'] != '':
            refiner = DiffusionPipeline.from_pretrained(
                    config['refiner'],
                    text_encoder_2=pipeline.text_encoder_2,
                    vae=pipeline.vae,
                    torch_dtype=get_data_type(config["data_type"]),
                    use_safetensors=get_bool(config["use_safetensors"]), 
                    variant = get_variant(config["variant"])).to(config["device"])
            
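        # setting safety_checker to None disables the NSFW filter on the output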
        if str(config["safety_checker"]).lower() == 'false':
            pipeline.safety_checker = None 

        pipeline.requires_safety_checker = get_bool(config["requires_safety_checker"])
            
        pipeline.scheduler = get_scheduler(config["scheduler"], pipeline.scheduler.config)
        
        # check for None/'' before the numeric comparison to avoid a TypeError;
        # a negative or empty seed means "random", i.e. an unseeded generator
        if config["manual_seed"] is None or config["manual_seed"] == '' or int(config["manual_seed"]) < 0:
            generator = torch.Generator(config["device"])
        else:
            generator = torch.Generator(config["device"]).manual_seed(int(config["manual_seed"]))
        
        progress((3,3), desc="Creating the result...")

        # the pipeline returns a list of images; only the first one is shown
        images = pipeline(
            prompt = config["prompt"],
            negative_prompt = config["negative_prompt"],
            generator = generator,
            num_inference_steps = int(config["inference_steps"]),
            guidance_scale = float(config["guidance_scale"])).images

        if config['refiner'] != '':
            images = refiner(
                prompt = config["prompt"],
                num_inference_steps = int(config["inference_steps"]),
                image=images,
            ).images

        config_history.append(config.copy())

        return images[0], dict_list_to_markdown_table(config_history), config_history
    
    else:

        # out_image is a gr.Image and cannot render a plain string, so surface
        # the problem as a Gradio error instead
        raise gr.Error("Please select a model AND a scheduler.")

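# the available models, schedulers and devices come from the app configuration;
# load_app_config() is provided by the wildcard imports above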
appConfig = load_app_config()
models = appConfig.get("models", {})
schedulers = appConfig.get("schedulers", {})
devices = appConfig.get("devices", [])

# interface
with gr.Blocks(analytics_enabled=False) as demo:
    
    
    config = gr.State(value=get_initial_config())
    config_history = gr.State(value=[])
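    # config is the single shared state every change handler writes to;
    # config_history collects one snapshot per inference run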

    gr.Markdown('''## Text-2-Image Playground
                <small>by <a target="_blank" href="https://www.linkedin.com/in/nickyreinert/">Nicky Reinert</a> | 
                home base: https://huggingface.co/spaces/n42/pictero
                </small>''')
    gr.Markdown("### Device specific settings")
    with gr.Row():
        in_devices = gr.Dropdown(label="Device:", value=config.value["device"], choices=devices, filterable=True, multiselect=False, allow_custom_value=True)
        in_data_type = gr.Radio(label="Data Type:", value=config.value["data_type"], choices=["bfloat16", "float16", "float32"], info="`bfloat16` is not supported on MPS devices right now; `float16` may also not be supported on all devices. Half-precision weights save GPU memory, see https://huggingface.co/docs/diffusers/main/en/optimization/fp16")
        in_allow_tensorfloat32 = gr.Radio(label="Allow TensorFloat32:", value=config.value["allow_tensorfloat32"], choices=["True", "False"], info="Not supported on MPS devices right now; TensorFloat-32 is faster but slightly less accurate, see https://huggingface.co/docs/diffusers/main/en/optimization/fp16")
        in_variant = gr.Radio(label="Variant:", value=config.value["variant"], choices=["fp16", None], info="Half-precision weights save GPU memory, but not all models provide them, see https://huggingface.co/docs/diffusers/main/en/optimization/fp16")
        
    gr.Markdown("### Model specific settings")
    with gr.Row():
        in_models = gr.Dropdown(choices=list(models.keys()), label="Model")
        out_model_description = gr.Textbox(value="", label="Description")
    with gr.Row():
        with gr.Column(scale=1):
            in_use_safetensors = gr.Radio(label="Use safe tensors:", choices=["True", "False"], interactive=False)
            in_model_refiner = gr.Textbox(value="", label="Refiner")
        with gr.Column(scale=1):
            in_safety_checker = gr.Radio(label="Enable safety checker:", value=config.value["safety_checker"], choices=["True", "False"])
            in_requires_safety_checker = gr.Radio(label="Requires safety checker:", value=config.value["requires_safety_checker"], choices=["True", "False"])

    gr.Markdown("### Scheduler")
    with gr.Row():
        in_schedulers = gr.Dropdown(choices=list(schedulers.keys()), label="Scheduler", info="see https://huggingface.co/docs/diffusers/using-diffusers/loading#schedulers" )
        out_scheduler_description = gr.Textbox(value="", label="Description")
    
    gr.Markdown("### Adapters")
    with gr.Row():
        gr.Markdown('Choose an adapter.')
        
    gr.Markdown("### Inference settings")
    with gr.Row():
        in_prompt = gr.TextArea(label="Prompt", value=config.value["prompt"])
        in_negative_prompt = gr.TextArea(label="Negative prompt", value=config.value["negative_prompt"])
    with gr.Row():
        in_inference_steps = gr.Number(label="Inference steps", value=config.value["inference_steps"])
        in_manual_seed = gr.Number(label="Manual seed", value=config.value["manual_seed"], info="Set this to -1 or leave it empty to get a random result on every run. A fixed value reproduces the same image on every run.")
        in_guidance_scale = gr.Slider(minimum=0, maximum=20, step=0.1, label="Guidance Scale", value=config.value["guidance_scale"], info="A low guidance scale leads to faster inference, with the drawback that negative prompts have no effect on the denoising process; values around 7.5 are typical.")
        
    gr.Markdown("### Output")
    with gr.Row():
        btn_start_pipeline = gr.Button(value="Run inferencing")
    with gr.Row():
        # out_result = gr.Textbox(label="Status", value="")
        out_image = gr.Image()
        out_code = gr.Code(assemble_code(config.value), label="Code")
    with gr.Row():
        out_config = gr.Code(value=str(config.value), label="Current config")
    with gr.Row():
        out_config_history = gr.Markdown(dict_list_to_markdown_table(config_history.value))
    
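    # wire every input widget to its handler; each handler refreshes the shared
    # config, its string representation and the generated code snippet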
    in_devices.change(fn=device_change, inputs=[in_devices, config], outputs=[config, out_config, out_code])
    in_data_type.change(data_type_change, inputs=[in_data_type, config], outputs=[config, out_config, out_code])
    in_allow_tensorfloat32.change(tensorfloat32_change, inputs=[in_allow_tensorfloat32, config], outputs=[config, out_config, out_code])
    in_variant.change(variant_change, inputs=[in_variant, config], outputs=[config, out_config, out_code])
    in_models.change(models_change, inputs=[in_models, in_schedulers, config], outputs=[out_model_description, in_model_refiner, in_use_safetensors, in_schedulers, config, out_config, out_code])
    in_safety_checker.change(safety_checker_change, inputs=[in_safety_checker, config], outputs=[config, out_config, out_code])
    in_requires_safety_checker.change(requires_safety_checker_change, inputs=[in_requires_safety_checker, config], outputs=[config, out_config, out_code])
    in_schedulers.change(schedulers_change, inputs=[in_schedulers, config], outputs=[out_scheduler_description, config, out_config, out_code])
    in_inference_steps.change(inference_steps_change, inputs=[in_inference_steps, config], outputs=[config, out_config, out_code])
    in_manual_seed.change(manual_seed_change, inputs=[in_manual_seed, config], outputs=[config, out_config, out_code])
    in_guidance_scale.change(guidance_scale_change, inputs=[in_guidance_scale, config], outputs=[config, out_config, out_code])
    in_prompt.change(prompt_change, inputs=[in_prompt, config], outputs=[config, out_config, out_code])
    in_negative_prompt.change(negative_prompt_change, inputs=[in_negative_prompt, config], outputs=[config, out_config, out_code])
    btn_start_pipeline.click(run_inference, inputs=[config, config_history], outputs=[out_image, out_config_history, config_history])

    # on page load, populate all relevant input fields from the initial config;
    # GET parameters, if present, override the initial config
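    # hypothetical example (actual parameter names depend on get_config_from_url):
    # https://.../?device=cpu&prompt=a+cat+wearing+a+hat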
    demo.load(fn=get_config_from_url,
        inputs=[config], 
        outputs=[
            in_models,
            in_devices,
            in_use_safetensors,
            in_data_type,
            in_model_refiner,
            in_variant,
            in_safety_checker,
            in_requires_safety_checker,
            in_schedulers,
            in_prompt,
            in_negative_prompt,
            in_inference_steps,
            in_manual_seed,
            in_guidance_scale
            ])

demo.launch()