import base64

import gradio as gr
import torch
from diffusers import (
    DiffusionPipeline,
    DDPMScheduler,
    DDIMScheduler,
    PNDMScheduler,
    LMSDiscreteScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    DPMSolverMultistepScheduler,
)

from config import Config

config = Config()
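

# Each *_change handler below persists one UI value into the shared Config
# object and returns the updated config readout plus the regenerated
# copy-paste code snippet shown in the UI.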
def device_change(device):
    return config.set_config('device', device), config.assemble_code()


def models_change(model, scheduler):

    # The radio component expects the string values "True"/"False".
    use_safetensors = "False"

    # `model` is a single dropdown value; guard against the initial empty state.
    if not isinstance(model, list) and model is not None:

        use_safetensors = str(config.model_configs[model]['use_safetensors'])

        # Fall back to the model's default scheduler if none is selected yet.
        if scheduler is None:
            scheduler = config.model_configs[model]['scheduler']

    # Re-apply the current safety checker settings so they persist across a model switch.
    safety_checker_change(config.current["safety_checker"])
    requires_safety_checker_change(config.current["requires_safety_checker"])

    return use_safetensors, scheduler, config.set_config('model', model), config.assemble_code()


def data_type_change(data_type):
    return config.set_config('data_type', data_type), config.assemble_code()


def get_data_type(str_data_type):
    # Resolve the radio-button string to the corresponding torch dtype.
    if str_data_type == "bfloat16":
        return torch.bfloat16
    return torch.float16


def tensorfloat32_change(allow_tensorfloat32):
    return config.set_config('allow_tensorfloat32', allow_tensorfloat32), config.assemble_code()


def inference_steps_change(inference_steps):
    return config.set_config('inference_steps', inference_steps), config.assemble_code()


def manual_seed_change(manual_seed):
    return config.set_config('manual_seed', manual_seed), config.assemble_code()


def guidance_scale_change(guidance_scale):
    return config.set_config('guidance_scale', guidance_scale), config.assemble_code()


def prompt_change(prompt):
    return config.set_config('prompt', prompt), config.assemble_code()


def negative_prompt_change(negative_prompt):
    return config.set_config('negative_prompt', negative_prompt), config.assemble_code()


def variant_change(variant):
    return config.set_config('variant', variant), config.assemble_code()


def safety_checker_change(safety_checker):
    return config.set_config('safety_checker', safety_checker), config.assemble_code()


def requires_safety_checker_change(requires_safety_checker):
    return config.set_config('requires_safety_checker', requires_safety_checker), config.assemble_code()


def schedulers_change(scheduler):
    return config.get_scheduler_description(scheduler), config.set_config('scheduler', scheduler), config.assemble_code()


def get_tensorfloat32(allow_tensorfloat32):
    # Normalize the radio-button value ("True"/"False" or a bool) to a real bool.
    return str(allow_tensorfloat32).lower() == 'true'
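

# Scheduler selection: map the dropdown string to its diffusers scheduler class.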
def get_scheduler(scheduler, config):

    schedulers = {
        "DDPMScheduler": DDPMScheduler,
        "DDIMScheduler": DDIMScheduler,
        "PNDMScheduler": PNDMScheduler,
        "LMSDiscreteScheduler": LMSDiscreteScheduler,
        "EulerAncestralDiscreteScheduler": EulerAncestralDiscreteScheduler,
        "EulerDiscreteScheduler": EulerDiscreteScheduler,
        "DPMSolverMultistepScheduler": DPMSolverMultistepScheduler,
    }

    # Unknown names fall back to DPMSolverMultistepScheduler.
    scheduler_class = schedulers.get(scheduler, DPMSolverMultistepScheduler)
    return scheduler_class.from_config(config)
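

# Populate the config with its initial defaults before building the UI.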
config.set_inital_config()

# Every completed run is appended here and rendered as the shareable history table.
config_history = []
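

# Run one full text-to-image generation with the currently selected settings
# and append the settings to the history on success.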
def run_inference(model,
                  device,
                  use_safetensors,
                  data_type,
                  variant,
                  safety_checker,
                  requires_safety_checker,
                  scheduler,
                  prompt,
                  negative_prompt,
                  inference_steps,
                  manual_seed,
                  guidance_scale,
                  progress=gr.Progress(track_tqdm=True)):

    if config.current["model"] is not None and config.current["scheduler"] is not None:

        progress((1, 3), desc="Preparing pipeline initialization...")

        torch.backends.cuda.matmul.allow_tf32 = config.current["allow_tensorfloat32"]

        progress((2, 3), desc="Initializing pipeline...")

        pipeline = DiffusionPipeline.from_pretrained(
            config.current["model"],
            use_safetensors=config.current["use_safetensors"],
            torch_dtype=get_data_type(config.current["data_type"]),
            variant=variant).to(config.current["device"])

        # Disable the safety checker if it was switched off; the stored value
        # may be None or the string 'False'.
        if config.current["safety_checker"] is None or str(config.current["safety_checker"]).lower() == 'false':
            pipeline.safety_checker = None

        pipeline.requires_safety_checker = config.current["requires_safety_checker"]

        pipeline.scheduler = get_scheduler(scheduler, pipeline.scheduler.config)

        # An empty or negative seed means "random"; otherwise seed deterministically.
        if manual_seed is None or manual_seed == '' or int(manual_seed) < 0:
            generator = torch.Generator(device)
        else:
            generator = torch.manual_seed(int(manual_seed))

        progress((3, 3), desc="Creating the result...")

        image = pipeline(
            prompt=prompt,
            negative_prompt=negative_prompt,
            generator=generator,
            num_inference_steps=int(inference_steps),
            guidance_scale=float(guidance_scale)).images[0]

        config_history.append(config.current.copy())

        return image, dict_list_to_markdown_table(config_history)

    else:

        # Surface the message as an error toast instead of feeding a string
        # into the Image component.
        raise gr.Error("Please select a model AND a scheduler.")
def dict_list_to_markdown_table(config_history):

    if not config_history:
        return ""

    headers = list(config_history[0].keys())
    markdown_table = "| share | " + " | ".join(headers) + " |\n"
    markdown_table += "| --- | " + " | ".join(["---"] * len(headers)) + " |\n"

    for entry in config_history:
        encoded_config = base64.b64encode(str(entry).encode()).decode()
        share_link = f'<a target="_blank" href="?config={encoded_config}">📎</a>'
        markdown_table += f"| {share_link} | " + " | ".join(str(entry.get(key, "")) for key in headers) + " |\n"

    # Wrap in a scrollable container so wide tables don't break the layout.
    markdown_table = '<div style="overflow-x: auto;">\n\n' + markdown_table + '</div>'

    return markdown_table
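

# A minimal sketch of how such a share link could be decoded back into a dict
# (assuming, as above, that the config was stringified from a plain dict of
# simple literals; the app's actual decoding lives in config.py):
#
#     import ast
#     decoded = ast.literal_eval(base64.b64decode(encoded_config).decode())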


with gr.Blocks() as demo:

    gr.Markdown('''## Text-2-Image Playground
                <small>by <a target="_blank" href="https://www.linkedin.com/in/nickyreinert/">Nicky Reinert</a> |
                home base: https://huggingface.co/spaces/n42/pictero
                </small>''')
    gr.Markdown("### Device specific settings")
    with gr.Row():
        in_devices = gr.Dropdown(label="Device:", value=config.current["device"], choices=config.devices, filterable=True, multiselect=False, allow_custom_value=True)
        in_data_type = gr.Radio(label="Data Type:", value=config.current["data_type"], choices=["bfloat16", "float16"], info="`bfloat16` is currently not supported on MPS devices; half-precision weights save GPU memory, see https://huggingface.co/docs/diffusers/main/en/optimization/fp16")
        in_allow_tensorfloat32 = gr.Radio(label="Allow TensorFloat32:", value=config.current["allow_tensorfloat32"], choices=[True, False], info="Currently not supported on MPS devices; TensorFloat-32 is faster, but results in slightly less accurate computations, see https://huggingface.co/docs/diffusers/main/en/optimization/fp16")
        in_variant = gr.Radio(label="Variant:", value=config.current["variant"], choices=["fp16", None], info="Half-precision weights save GPU memory, but not all models offer a fp16 variant, see https://huggingface.co/docs/diffusers/main/en/optimization/fp16")

    gr.Markdown("### Model specific settings")
    with gr.Row():
        models = list(config.model_configs.keys())
        in_models = gr.Dropdown(choices=models, label="Model")
    with gr.Row():
        with gr.Column(scale=1):
            in_use_safetensors = gr.Radio(label="Use safe tensors:", choices=["True", "False"], interactive=False)
        with gr.Column(scale=1):
            in_safety_checker = gr.Radio(label="Enable safety checker:", value=config.current["safety_checker"], choices=[True, False])
            in_requires_safety_checker = gr.Radio(label="Requires safety checker:", value=config.current["requires_safety_checker"], choices=[True, False])

    gr.Markdown("### Scheduler")
    with gr.Row():
        schedulers = list(config.scheduler_configs.keys())
        in_schedulers = gr.Dropdown(choices=schedulers, label="Scheduler", info="see https://huggingface.co/docs/diffusers/using-diffusers/loading#schedulers")
        out_scheduler_description = gr.Textbox(value="", label="Description")

    gr.Markdown("### Adapters")
    with gr.Row():
        gr.Markdown('Choose an adapter.')

    gr.Markdown("### Inference settings")
    with gr.Row():
        in_prompt = gr.TextArea(label="Prompt", value=config.current["prompt"])
        in_negative_prompt = gr.TextArea(label="Negative prompt", value=config.current["negative_prompt"])
    with gr.Row():
        in_inference_steps = gr.Number(label="Inference steps", value=config.current["inference_steps"])
        in_manual_seed = gr.Number(label="Manual seed", value=config.current["manual_seed"], info="Set this to -1 or leave it empty to use a random seed for every run. A fixed value makes runs reproducible and yields the same image each time")
        # Classifier-free guidance typically ranges up to ~20; 7.5 is a common default.
        in_guidance_scale = gr.Slider(minimum=0, maximum=20, step=0.01, label="Guidance Scale", value=config.current["guidance_scale"], info="A low guidance scale leads to a faster inference time, with the drawback that negative prompts don't have any effect on the denoising process.")

    gr.Markdown("### Output")
    with gr.Row():
        btn_start_pipeline = gr.Button(value="Run inference")
    with gr.Row():
        out_image = gr.Image()
        out_code = gr.Code(config.assemble_code(), label="Code")
    with gr.Row():
        out_current_config = gr.Code(value=str(config.current), label="Current config")
    with gr.Row():
        out_config_history = gr.Markdown(dict_list_to_markdown_table(config_history))

    # Wire every input widget to its handler; each updates the config readout and the code snippet.
    in_devices.change(device_change, inputs=[in_devices], outputs=[out_current_config, out_code])
    in_data_type.change(data_type_change, inputs=[in_data_type], outputs=[out_current_config, out_code])
    in_allow_tensorfloat32.change(tensorfloat32_change, inputs=[in_allow_tensorfloat32], outputs=[out_current_config, out_code])
    in_variant.change(variant_change, inputs=[in_variant], outputs=[out_current_config, out_code])
    in_models.change(models_change, inputs=[in_models, in_schedulers], outputs=[in_use_safetensors, in_schedulers, out_current_config, out_code])
    in_safety_checker.change(safety_checker_change, inputs=[in_safety_checker], outputs=[out_current_config, out_code])
    in_requires_safety_checker.change(requires_safety_checker_change, inputs=[in_requires_safety_checker], outputs=[out_current_config, out_code])
    in_schedulers.change(schedulers_change, inputs=[in_schedulers], outputs=[out_scheduler_description, out_current_config, out_code])
    in_inference_steps.change(inference_steps_change, inputs=[in_inference_steps], outputs=[out_current_config, out_code])
    in_manual_seed.change(manual_seed_change, inputs=[in_manual_seed], outputs=[out_current_config, out_code])
    in_guidance_scale.change(guidance_scale_change, inputs=[in_guidance_scale], outputs=[out_current_config, out_code])
    in_prompt.change(prompt_change, inputs=[in_prompt], outputs=[out_current_config, out_code])
    in_negative_prompt.change(negative_prompt_change, inputs=[in_negative_prompt], outputs=[out_current_config, out_code])
    btn_start_pipeline.click(run_inference, inputs=[
        in_models,
        in_devices,
        in_use_safetensors,
        in_data_type,
        in_variant,
        in_safety_checker,
        in_requires_safety_checker,
        in_schedulers,
        in_prompt,
        in_negative_prompt,
        in_inference_steps,
        in_manual_seed,
        in_guidance_scale
    ], outputs=[
        out_image,
        out_config_history])

    # Restore the UI state on page load.
    demo.load(fn=config.init_config, inputs=out_current_config,
              outputs=[
                  in_models,
                  in_devices,
                  in_use_safetensors,
                  in_data_type,
                  in_variant,
                  in_safety_checker,
                  in_requires_safety_checker,
                  in_schedulers,
                  in_prompt,
                  in_negative_prompt,
                  in_inference_steps,
                  in_manual_seed,
                  in_guidance_scale
              ])

demo.launch()