nickyreinert-vml commited on
Commit
29cefb0
·
1 Parent(s): f7b4ac7

bug fix: when AutoEncoder is not set, the process stops

Browse files
Files changed (3) hide show
  1. app.py +2 -3
  2. appConfig.json +0 -1
  3. config.py +1 -1
app.py CHANGED
@@ -274,7 +274,6 @@ def run_inference(config, config_history, pipeline, progress=gr.Progress(track_t
274
 
275
  if str(config["model"]) != 'None' and str(config["model"]) != 'null' and str(config["model"]) != '' and str(config["scheduler"]) != 'None':
276
 
277
- print(config["model"])
278
  progress(1, desc="Initializing pipeline...")
279
 
280
  torch.cuda.empty_cache()
@@ -297,7 +296,7 @@ def run_inference(config, config_history, pipeline, progress=gr.Progress(track_t
297
  if str(config["attention_slicing"]).lower() == 'true': pipeline.enable_attention_slicing()
298
 
299
  # AUTO ENCODER
300
- if str(config["auto_encoder"]).lower() != 'none' and str(config["auto_encoder"]).lower() != 'null':
301
  pipeline.vae = AutoencoderKL.from_pretrained(config["auto_encoder"], torch_dtype=get_data_type(config["data_type"])).to(config["device"])
302
 
303
  if str(config["enable_vae_slicing"]).lower() != 'false': pipeline.enable_vae_slicing()
@@ -472,7 +471,7 @@ with gr.Blocks(analytics_enabled=False) as demo:
472
 gr.Markdown("**VAE** stands for Variational Auto Encoders. An 'autoencoder' is an artificial neural network that is able to encode input data and decode it to output data to basically recreate the input. The VAE, however, adds a couple of additional layers of complexity to create new and unique output.")
473
  with gr.Row():
474
  with gr.Column():
475
- in_auto_encoders = gr.Dropdown(value="None", choices=list(auto_encoders.keys()), label="Auto encoder", info="leave empty to not add an auto encoder")
476
  out_auto_encoder_description = gr.Textbox(value="", label="Description")
477
  with gr.Column():
478
  in_enable_vae_slicing = gr.Radio(label="Enable VAE slicing:", value=config.value["enable_vae_slicing"], choices=["True", "False"], info="decoding the batches of latents one image at a time, which may reduce memory usage, see https://huggingface.co/docs/diffusers/main/en/optimization/memory")
 
274
 
275
  if str(config["model"]) != 'None' and str(config["model"]) != 'null' and str(config["model"]) != '' and str(config["scheduler"]) != 'None':
276
 
 
277
  progress(1, desc="Initializing pipeline...")
278
 
279
  torch.cuda.empty_cache()
 
296
  if str(config["attention_slicing"]).lower() == 'true': pipeline.enable_attention_slicing()
297
 
298
  # AUTO ENCODER
299
+ if str(config["auto_encoder"]).lower() != 'none' and str(config["auto_encoder"]).lower() != 'null' and str(config["auto_encoder"]).lower() != '':
300
  pipeline.vae = AutoencoderKL.from_pretrained(config["auto_encoder"], torch_dtype=get_data_type(config["data_type"])).to(config["device"])
301
 
302
  if str(config["enable_vae_slicing"]).lower() != 'false': pipeline.enable_vae_slicing()
 
471
 gr.Markdown("**VAE** stands for Variational Auto Encoders. An 'autoencoder' is an artificial neural network that is able to encode input data and decode it to output data to basically recreate the input. The VAE, however, adds a couple of additional layers of complexity to create new and unique output.")
472
  with gr.Row():
473
  with gr.Column():
474
+ in_auto_encoders = gr.Dropdown(value="", choices=list(auto_encoders.keys()), label="Auto encoder", info="leave empty to not add an auto encoder")
475
  out_auto_encoder_description = gr.Textbox(value="", label="Description")
476
  with gr.Column():
477
  in_enable_vae_slicing = gr.Radio(label="Enable VAE slicing:", value=config.value["enable_vae_slicing"], choices=["True", "False"], info="decoding the batches of latents one image at a time, which may reduce memory usage, see https://huggingface.co/docs/diffusers/main/en/optimization/memory")
appConfig.json CHANGED
@@ -65,7 +65,6 @@
65
  ],
66
  "refiners": ["stabilityai/stable-diffusion-xl-refiner-1.0"],
67
  "auto_encoders": {
68
- "None": "",
69
  "stabilityai/sdxl-vae": "finetuned auto encoder for stable diffusion models, see https://huggingface.co/stabilityai/sdxl-vae",
70
  "madebyollin/sdxl-vae-fp16-fix": "stable diffusion models encoder with fp16 precision, see https://huggingface.co/madebyollin/sdxl-vae-fp16-fix",
71
  "stabilityai/sd-vae-ft-mse": "works best with CompVis/stable-diffusion-v1-4, see https://huggingface.co/stabilityai/sd-vae-ft-mse"
 
65
  ],
66
  "refiners": ["stabilityai/stable-diffusion-xl-refiner-1.0"],
67
  "auto_encoders": {
 
68
  "stabilityai/sdxl-vae": "finetuned auto encoder for stable diffusion models, see https://huggingface.co/stabilityai/sdxl-vae",
69
  "madebyollin/sdxl-vae-fp16-fix": "stable diffusion models encoder with fp16 precision, see https://huggingface.co/madebyollin/sdxl-vae-fp16-fix",
70
  "stabilityai/sd-vae-ft-mse": "works best with CompVis/stable-diffusion-v1-4, see https://huggingface.co/stabilityai/sd-vae-ft-mse"
config.py CHANGED
@@ -179,7 +179,7 @@ def assemble_code(str_config):
179
  if str(config["cpu_offload"]).lower() != 'false': code.append("pipeline.enable_model_cpu_offload()")
180
 
181
  # AUTO ENCODER
182
- if str(config["auto_encoder"]).lower() != 'none':
183
  code.append(f'pipeline.vae = AutoencoderKL.from_pretrained("{config["auto_encoder"]}", torch_dtype=data_type).to(device)')
184
 
185
  if str(config["enable_vae_slicing"]).lower() != 'false': code.append("pipeline.enable_vae_slicing()")
 
179
  if str(config["cpu_offload"]).lower() != 'false': code.append("pipeline.enable_model_cpu_offload()")
180
 
181
  # AUTO ENCODER
182
+ if str(config["auto_encoder"]).lower() != 'none' and str(config["auto_encoder"]).lower() != 'null' and str(config["auto_encoder"]).lower() != '':
183
  code.append(f'pipeline.vae = AutoencoderKL.from_pretrained("{config["auto_encoder"]}", torch_dtype=data_type).to(device)')
184
 
185
  if str(config["enable_vae_slicing"]).lower() != 'false': code.append("pipeline.enable_vae_slicing()")