import gradio as gr
import torch
from diffusers import DiffusionPipeline

from config import *
from helpers import *
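
# `config` and `helpers` are local modules of this Space; the wildcard imports
# provide the functions used below (set_config, assemble_code, get_initial_config,
# get_bool, get_data_type, get_variant, get_scheduler, get_config_from_url,
# load_app_config and dict_list_to_markdown_table).

# Each *_change handler writes a single widget value into the shared config state
# and returns the updated config, its string form and the generated code snippet.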
def device_change(device, config):
config = set_config(config, 'device', device)
return config, str(config), assemble_code(config)

def models_change(model, scheduler, config):
    config = set_config(config, 'model', model)
    use_safetensors = False
    refiner = ""
    # model may be unset, e.g. during the initial UI run
    if not isinstance(model, list) and str(model) != 'None':
        use_safetensors = str(models[model]['use_safetensors'])
        model_description = models[model]['description']
        refiner = models[model]['refiner']
        # if no scheduler is selected, choose the default one for this model
        if scheduler is None:
            scheduler = models[model]['scheduler']
    else:
        model_description = 'Please select a model.'
    config["use_safetensors"] = str(use_safetensors)
    config["scheduler"] = str(scheduler)
    config["refiner"] = str(refiner)
    return model_description, refiner, use_safetensors, scheduler, config, str(config), assemble_code(config)

def data_type_change(data_type, config):
    config = set_config(config, 'data_type', data_type)
    return config, str(config), assemble_code(config)

def tensorfloat32_change(allow_tensorfloat32, config):
    config = set_config(config, 'allow_tensorfloat32', allow_tensorfloat32)
    return config, str(config), assemble_code(config)

def inference_steps_change(inference_steps, config):
    config = set_config(config, 'inference_steps', inference_steps)
    return config, str(config), assemble_code(config)

def manual_seed_change(manual_seed, config):
    config = set_config(config, 'manual_seed', manual_seed)
    return config, str(config), assemble_code(config)

def guidance_scale_change(guidance_scale, config):
    config = set_config(config, 'guidance_scale', guidance_scale)
    return config, str(config), assemble_code(config)

def prompt_change(prompt, config):
    config = set_config(config, 'prompt', prompt)
    return config, str(config), assemble_code(config)

def negative_prompt_change(negative_prompt, config):
    config = set_config(config, 'negative_prompt', negative_prompt)
    return config, str(config), assemble_code(config)

def variant_change(variant, config):
    config = set_config(config, 'variant', variant)
    return config, str(config), assemble_code(config)

def safety_checker_change(safety_checker, config):
    config = set_config(config, 'safety_checker', safety_checker)
    return config, str(config), assemble_code(config)

def requires_safety_checker_change(requires_safety_checker, config):
    config = set_config(config, 'requires_safety_checker', requires_safety_checker)
    return config, str(config), assemble_code(config)

def schedulers_change(scheduler, config):
    if str(scheduler) != 'None' and not isinstance(scheduler, list):
        scheduler_description = schedulers[scheduler]
    else:
        scheduler_description = 'Please select a scheduler.'
    config = set_config(config, 'scheduler', scheduler)
    return scheduler_description, config, str(config), assemble_code(config)

def run_inference(config, config_history, progress=gr.Progress(track_tqdm=True)):
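    """Build a DiffusionPipeline from the current config, generate an image and append the used config to the history."""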
if str(config["model"]) != 'None' and str(config["scheduler"]) != 'None':
progress((1,3), desc="Preparing pipeline initialization...")
        # use TensorFloat-32 for matrix multiplications: faster, but slightly less
        # accurate computations, see https://huggingface.co/docs/diffusers/main/en/optimization/fp16
        torch.backends.cuda.matmul.allow_tf32 = get_bool(config["allow_tensorfloat32"])
progress((2,3), desc="Initializing pipeline...")
        pipeline = DiffusionPipeline.from_pretrained(
            config["model"],
            use_safetensors=get_bool(config["use_safetensors"]),
            torch_dtype=get_data_type(config["data_type"]),
            variant=get_variant(config["variant"])).to(config["device"])
        # optionally initialize a refiner pipeline that reuses the base model's text encoder and VAE
        if config['refiner'] != '':
            refiner = DiffusionPipeline.from_pretrained(
                config['refiner'],
                text_encoder_2=pipeline.text_encoder_2,
                vae=pipeline.vae,
                torch_dtype=get_data_type(config["data_type"]),
                use_safetensors=get_bool(config["use_safetensors"]),
                variant=get_variant(config["variant"])).to(config["device"])
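        # setting the safety checker to None disables the NSFW filter entirely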
if str(config["safety_checker"]).lower() == 'false':
pipeline.safety_checker = None
pipeline.requires_safety_checker = get_bool(config["requires_safety_checker"])
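        # swap in the selected scheduler, reusing the pipeline's current scheduler config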
pipeline.scheduler = get_scheduler(config["scheduler"], pipeline.scheduler.config)
if config["manual_seed"] < 0 or config["manual_seed"] is None or config["manual_seed"] == '':
generator = torch.Generator(config["device"])
else:
generator = torch.manual_seed(int(config["manual_seed"]))
progress((3,3), desc="Creating the result...")
        # the pipeline returns a list of images; the first one is shown in the UI
        image = pipeline(
            prompt=config["prompt"],
            negative_prompt=config["negative_prompt"],
            generator=generator,
            num_inference_steps=int(config["inference_steps"]),
            guidance_scale=float(config["guidance_scale"])).images
        # if a refiner is configured, run it on the base pipeline's output
        if config['refiner'] != '':
            image = refiner(
                prompt=config["prompt"],
                num_inference_steps=int(config["inference_steps"]),
                image=image,
            ).images
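        # Note: this hands the decoded images to the refiner (image-to-image style).
        # The diffusers SDXL docs also describe a latent handoff, where the base
        # pipeline runs with output_type="latent" and denoising_end/denoising_start
        # split the steps between both pipelines; that variant is not used here.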
config_history.append(config.copy())
return image[0], dict_list_to_markdown_table(config_history), config_history
else:
return "Please select a model AND a scheduler.", None, config_history
appConfig = load_app_config()
models = appConfig.get("models", {})
schedulers = appConfig.get("schedulers", {})
devices = appConfig.get("devices", [])

# interface
with gr.Blocks(analytics_enabled=False) as demo:
config = gr.State(value=get_initial_config())
config_history = gr.State(value=[])
gr.Markdown('''## Text-2-Image Playground
<small>by <a target="_blank" href="https://www.linkedin.com/in/nickyreinert/">Nicky Reinert</a> |
home base: https://huggingface.co/spaces/n42/pictero
</small>''')
gr.Markdown("### Device specific settings")
with gr.Row():
in_devices = gr.Dropdown(label="Device:", value=config.value["device"], choices=devices, filterable=True, multiselect=False, allow_custom_value=True)
        in_data_type = gr.Radio(label="Data Type:", value=config.value["data_type"], choices=["bfloat16", "float16", "float32"], info="`bfloat16` is currently not supported on MPS devices; `float16` may also be unsupported on some devices. Half-precision weights save GPU memory, see https://huggingface.co/docs/diffusers/main/en/optimization/fp16")
        in_allow_tensorfloat32 = gr.Radio(label="Allow TensorFloat32:", value=config.value["allow_tensorfloat32"], choices=["True", "False"], info="Not supported on MPS devices; TensorFloat-32 is faster, but results in slightly less accurate computations, see https://huggingface.co/docs/diffusers/main/en/optimization/fp16")
        in_variant = gr.Radio(label="Variant:", value=config.value["variant"], choices=["fp16", None], info="Loading half-precision weights saves GPU memory, but not all models provide them, see https://huggingface.co/docs/diffusers/main/en/optimization/fp16")
gr.Markdown("### Model specific settings")
with gr.Row():
in_models = gr.Dropdown(choices=list(models.keys()), label="Model")
out_model_description = gr.Textbox(value="", label="Description")
with gr.Row():
with gr.Column(scale=1):
            in_use_safetensors = gr.Radio(label="Use safetensors:", choices=["True", "False"], interactive=False)
in_model_refiner = gr.Textbox(value="", label="Refiner")
with gr.Column(scale=1):
in_safety_checker = gr.Radio(label="Enable safety checker:", value=config.value["safety_checker"], choices=["True", "False"])
in_requires_safety_checker = gr.Radio(label="Requires safety checker:", value=config.value["requires_safety_checker"], choices=["True", "False"])
gr.Markdown("### Scheduler")
with gr.Row():
in_schedulers = gr.Dropdown(choices=list(schedulers.keys()), label="Scheduler", info="see https://huggingface.co/docs/diffusers/using-diffusers/loading#schedulers" )
out_scheduler_description = gr.Textbox(value="", label="Description")
gr.Markdown("### Adapters")
with gr.Row():
gr.Markdown('Choose an adapter.')
gr.Markdown("### Inference settings")
with gr.Row():
in_prompt = gr.TextArea(label="Prompt", value=config.value["prompt"])
in_negative_prompt = gr.TextArea(label="Negative prompt", value=config.value["negative_prompt"])
with gr.Row():
in_inference_steps = gr.Number(label="Inference steps", value=config.value["inference_steps"])
        in_manual_seed = gr.Number(label="Manual seed", value=config.value["manual_seed"], info="Set this to -1 or leave it empty to use a random seed. A fixed value will reproduce the same image on every run")
        in_guidance_scale = gr.Slider(minimum=0, maximum=15, step=0.5, label="Guidance Scale", value=config.value["guidance_scale"], info="A guidance scale of 1 or below disables classifier-free guidance: inference is faster, but the negative prompt has no effect on the denoising process. Higher values follow the prompt more closely.")
gr.Markdown("### Output")
with gr.Row():
        btn_start_pipeline = gr.Button(value="Run inference")
with gr.Row():
# out_result = gr.Textbox(label="Status", value="")
out_image = gr.Image()
out_code = gr.Code(assemble_code(config.value), label="Code")
with gr.Row():
out_config = gr.Code(value=str(config.value), label="Current config")
with gr.Row():
out_config_history = gr.Markdown(dict_list_to_markdown_table(config_history.value))
in_devices.change(fn=device_change, inputs=[in_devices, config], outputs=[config, out_config, out_code])
in_data_type.change(data_type_change, inputs=[in_data_type, config], outputs=[config, out_config, out_code])
in_allow_tensorfloat32.change(tensorfloat32_change, inputs=[in_allow_tensorfloat32, config], outputs=[config, out_config, out_code])
in_variant.change(variant_change, inputs=[in_variant, config], outputs=[config, out_config, out_code])
in_models.change(models_change, inputs=[in_models, in_schedulers, config], outputs=[out_model_description, in_model_refiner, in_use_safetensors, in_schedulers, config, out_config, out_code])
in_safety_checker.change(safety_checker_change, inputs=[in_safety_checker, config], outputs=[config, out_config, out_code])
in_requires_safety_checker.change(requires_safety_checker_change, inputs=[in_requires_safety_checker, config], outputs=[config, out_config, out_code])
in_schedulers.change(schedulers_change, inputs=[in_schedulers, config], outputs=[out_scheduler_description, config, out_config, out_code])
in_inference_steps.change(inference_steps_change, inputs=[in_inference_steps, config], outputs=[config, out_config, out_code])
in_manual_seed.change(manual_seed_change, inputs=[in_manual_seed, config], outputs=[config, out_config, out_code])
in_guidance_scale.change(guidance_scale_change, inputs=[in_guidance_scale, config], outputs=[config, out_config, out_code])
in_prompt.change(prompt_change, inputs=[in_prompt, config], outputs=[config, out_config, out_code])
in_negative_prompt.change(negative_prompt_change, inputs=[in_negative_prompt, config], outputs=[config, out_config, out_code])
btn_start_pipeline.click(run_inference, inputs=[config, config_history], outputs=[out_image, out_config_history, config_history])
    # populate all relevant input fields from the initial config;
    # if a GET parameter is set, it overrides the corresponding initial config value
demo.load(fn=get_config_from_url,
inputs=[config],
outputs=[
in_models,
in_devices,
in_use_safetensors,
in_data_type,
in_model_refiner,
in_variant,
in_safety_checker,
in_requires_safety_checker,
in_schedulers,
in_prompt,
in_negative_prompt,
in_inference_steps,
in_manual_seed,
in_guidance_scale
])
demo.launch()