Add "null" checks alongside "None" in condition tests
Browse files
app.py
CHANGED
@@ -32,7 +32,7 @@ def models_change(model, scheduler, config):
|
|
32 |
trigger_token = ""
|
33 |
|
34 |
# no model selected (because this is UI init run)
|
35 |
-
if type(model) != list and str(model) != 'None':
|
36 |
|
37 |
use_safetensors = str(models[model]['use_safetensors'])
|
38 |
model_description = models[model]['description']
|
@@ -137,7 +137,7 @@ def requires_safety_checker_change(requires_safety_checker, config):
|
|
137 |
|
138 |
def auto_encoders_change(auto_encoder, config):
|
139 |
|
140 |
-
if str(auto_encoder) != 'None' and type(auto_encoder) != list:
|
141 |
|
142 |
auto_encoder_description = auto_encoders[auto_encoder]
|
143 |
|
@@ -150,7 +150,7 @@ def auto_encoders_change(auto_encoder, config):
|
|
150 |
|
151 |
def schedulers_change(scheduler, config):
|
152 |
|
153 |
-
if str(scheduler) != 'None' and type(scheduler) != list:
|
154 |
|
155 |
scheduler_description = schedulers[scheduler]
|
156 |
|
@@ -163,7 +163,7 @@ def schedulers_change(scheduler, config):
|
|
163 |
|
164 |
def adapters_textual_inversion_change(adapter_textual_inversion, config):
|
165 |
|
166 |
-
if str(adapter_textual_inversion) != 'None' and type(adapter_textual_inversion) != list:
|
167 |
|
168 |
adapter_textual_inversion_description = adapters['textual_inversion'][adapter_textual_inversion]['description']
|
169 |
in_adapters_textual_inversion_token = adapters['textual_inversion'][adapter_textual_inversion]['token']
|
@@ -187,7 +187,7 @@ def run_inference(config, config_history, progress=gr.Progress(track_tqdm=True))
|
|
187 |
# str_config = str_config.replace("'", '"').replace('None', 'null').replace('False', 'false')
|
188 |
# config = json.loads(str_config)
|
189 |
|
190 |
-
if str(config["model"]) != 'None' and str(config["scheduler"]) != 'None':
|
191 |
|
192 |
progress((1,3), desc="Preparing pipeline initialization...")
|
193 |
|
@@ -206,14 +206,14 @@ def run_inference(config, config_history, progress=gr.Progress(track_tqdm=True))
|
|
206 |
pipeline.enable_model_cpu_offload()
|
207 |
|
208 |
# AUTO ENCODER
|
209 |
-
if str(config["auto_encoder"]).lower() != 'none':
|
210 |
pipeline.vae = AutoencoderKL.from_pretrained(config["auto_encoder"], torch_dtype=get_data_type(config["data_type"])).to(config["device"])
|
211 |
|
212 |
if str(config["enable_vae_slicing"]).lower() != 'false': pipeline.enable_vae_slicing()
|
213 |
if str(config["enable_vae_tiling"]).lower() != 'false': pipeline.enable_vae_tiling()
|
214 |
|
215 |
# INIT REFINER
|
216 |
-
if config['refiner'].lower() != 'none':
|
217 |
refiner = DiffusionPipeline.from_pretrained(
|
218 |
config['refiner'],
|
219 |
text_encoder_2=pipeline.text_encoder_2,
|
@@ -243,7 +243,7 @@ def run_inference(config, config_history, progress=gr.Progress(track_tqdm=True))
|
|
243 |
|
244 |
# ADAPTERS
|
245 |
# TEXTUAL INVERSION
|
246 |
-
if str(config["adapter_textual_inversion"]).lower() != 'none':
|
247 |
pipeline.load_textual_inversion(config["adapter_textual_inversion"], token=config["adapter_textual_inversion_token"])
|
248 |
|
249 |
progress((3,3), desc="Creating the result...")
|
@@ -257,7 +257,7 @@ def run_inference(config, config_history, progress=gr.Progress(track_tqdm=True))
|
|
257 |
num_inference_steps = int(config["inference_steps"]),
|
258 |
guidance_scale = float(config["guidance_scale"])).images
|
259 |
|
260 |
-
if config['refiner'].lower() != 'none':
|
261 |
image = refiner(
|
262 |
prompt = prompt,
|
263 |
num_inference_steps = int(config["inference_steps"]),
|
|
|
32 |
trigger_token = ""
|
33 |
|
34 |
# no model selected (because this is UI init run)
|
35 |
+
if type(model) != list and str(model) != 'None' and str(model) != 'null':
|
36 |
|
37 |
use_safetensors = str(models[model]['use_safetensors'])
|
38 |
model_description = models[model]['description']
|
|
|
137 |
|
138 |
def auto_encoders_change(auto_encoder, config):
|
139 |
|
140 |
+
if str(auto_encoder) != 'None' and str(auto_encoder) != 'null' and type(auto_encoder) != list:
|
141 |
|
142 |
auto_encoder_description = auto_encoders[auto_encoder]
|
143 |
|
|
|
150 |
|
151 |
def schedulers_change(scheduler, config):
|
152 |
|
153 |
+
if str(scheduler) != 'None' and str(scheduler) != 'null' and type(scheduler) != list:
|
154 |
|
155 |
scheduler_description = schedulers[scheduler]
|
156 |
|
|
|
163 |
|
164 |
def adapters_textual_inversion_change(adapter_textual_inversion, config):
|
165 |
|
166 |
+
if str(adapter_textual_inversion) != 'None' and str(adapter_textual_inversion) != 'null' and type(adapter_textual_inversion) != list:
|
167 |
|
168 |
adapter_textual_inversion_description = adapters['textual_inversion'][adapter_textual_inversion]['description']
|
169 |
in_adapters_textual_inversion_token = adapters['textual_inversion'][adapter_textual_inversion]['token']
|
|
|
187 |
# str_config = str_config.replace("'", '"').replace('None', 'null').replace('False', 'false')
|
188 |
# config = json.loads(str_config)
|
189 |
|
190 |
+
if str(config["model"]) != 'None' and str(config["model"]) != 'null' and str(config["scheduler"]) != 'None' and str(config["scheduler"]) != 'null':
|
191 |
|
192 |
progress((1,3), desc="Preparing pipeline initialization...")
|
193 |
|
|
|
206 |
pipeline.enable_model_cpu_offload()
|
207 |
|
208 |
# AUTO ENCODER
|
209 |
+
if str(config["auto_encoder"]).lower() != 'none' and str(config["auto_encoder"]).lower() != 'null':
|
210 |
pipeline.vae = AutoencoderKL.from_pretrained(config["auto_encoder"], torch_dtype=get_data_type(config["data_type"])).to(config["device"])
|
211 |
|
212 |
if str(config["enable_vae_slicing"]).lower() != 'false': pipeline.enable_vae_slicing()
|
213 |
if str(config["enable_vae_tiling"]).lower() != 'false': pipeline.enable_vae_tiling()
|
214 |
|
215 |
# INIT REFINER
|
216 |
+
if str(config['refiner']).lower() != 'none' and str(config['refiner']).lower() != 'null':
|
217 |
refiner = DiffusionPipeline.from_pretrained(
|
218 |
config['refiner'],
|
219 |
text_encoder_2=pipeline.text_encoder_2,
|
|
|
243 |
|
244 |
# ADAPTERS
|
245 |
# TEXTUAL INVERSION
|
246 |
+
if str(config["adapter_textual_inversion"]).lower() != 'none' and str(config["adapter_textual_inversion"]).lower() != 'null':
|
247 |
pipeline.load_textual_inversion(config["adapter_textual_inversion"], token=config["adapter_textual_inversion_token"])
|
248 |
|
249 |
progress((3,3), desc="Creating the result...")
|
|
|
257 |
num_inference_steps = int(config["inference_steps"]),
|
258 |
guidance_scale = float(config["guidance_scale"])).images
|
259 |
|
260 |
+
if str(config['refiner']).lower() != 'none' and str(config['refiner']).lower() != 'null':
|
261 |
image = refiner(
|
262 |
prompt = prompt,
|
263 |
num_inference_steps = int(config["inference_steps"]),
|