Add center_crop variable everywhere

#3
by dkebudi - opened
Files changed (1)
  1. app.py +4 -1
app.py CHANGED
@@ -246,13 +246,13 @@ def start_training(
     commands = [
         "pretrained_model_name_or_path=stabilityai/stable-diffusion-xl-base-1.0",
         "pretrained_vae_model_name_or_path=madebyollin/sdxl-vae-fp16-fix",
-        f"center_crop={bool(True)}",
         f"instance_prompt={concept_sentence}",
         f"dataset_name=./{dataset_folder}",
         "caption_column=prompt",
         f"output_dir={slugged_lora_name}",
         f"mixed_precision={mixed_precision}",
         f"resolution={int(resolution)}",
+        f"center_crop={bool(True)}",
         f"train_batch_size={int(train_batch_size)}",
         f"repeats={int(repeats)}",
         f"gradient_accumulation_steps={int(gradient_accumulation_steps)}",
@@ -393,6 +393,7 @@ def start_training_og(
     text_encoder_learning_rate,
     seed,
     resolution,
+    center_crop,
     num_train_epochs,
     checkpointing_steps,
     prior_loss_weight,
@@ -434,6 +435,7 @@ def start_training_og(
         f"--output_dir={slugged_lora_name}",
         f"--mixed_precision={mixed_precision}",
         f"--resolution={int(resolution)}",
+        f"--center_crop={bool(True)}",
         f"--train_batch_size={int(train_batch_size)}",
         f"--repeats={int(repeats)}",
         f"--gradient_accumulation_steps={int(gradient_accumulation_steps)}",
@@ -1034,6 +1036,7 @@ If you prefer command line, you can run our [training script]({training_script_u
     text_encoder_learning_rate,
     seed,
     resolution,
+    center_crop,
     num_train_epochs,
     checkpointing_steps,
     prior_loss_weight,
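For context: in diffusers-style SDXL LoRA training scripts, a `center_crop` flag like the one added here usually switches image preprocessing from a random crop to a deterministic center crop after resizing. A minimal sketch of that behavior, assuming torchvision transforms (the helper name is hypothetical and the upstream script's exact preprocessing may differ):

```python
from torchvision import transforms


def build_image_transforms(resolution: int, center_crop: bool = True):
    # Hypothetical helper: resize the image to `resolution`, then crop to a square.
    # With center_crop=True the crop is deterministic (center of the image);
    # otherwise a random crop is taken, which adds mild augmentation during training.
    crop = (
        transforms.CenterCrop(resolution)
        if center_crop
        else transforms.RandomCrop(resolution)
    )
    return transforms.Compose(
        [
            transforms.Resize(resolution, interpolation=transforms.InterpolationMode.BILINEAR),
            crop,
            transforms.ToTensor(),
            transforms.Normalize([0.5], [0.5]),
        ]
    )
```

Note that `f"center_crop={bool(True)}"` simply renders as the string `"center_crop=True"`, since `bool(True)` is just `True`; the wrapper adds nothing but does no harm.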