{
"concepts": [
{
"instance_prompt": "wral",
"class_prompt": "",
"instance_data_dir": "E:/Classroom/wral",
"class_data_dir": "",
"flip_p": "",
"do_not_balance": 0,
"use_sub_dirs": 0
},
{
"instance_prompt": "reem",
"class_prompt": "",
"instance_data_dir": "E:/Classroom/reem",
"class_data_dir": "",
"flip_p": "",
"do_not_balance": 0,
"use_sub_dirs": 0
},
{
"instance_prompt": "peth",
"class_prompt": "",
"instance_data_dir": "E:/Classroom/peth",
"class_data_dir": "",
"flip_p": "",
"do_not_balance": 0,
"use_sub_dirs": 0
},
{
"instance_prompt": "inem",
"class_prompt": "",
"instance_data_dir": "E:/Classroom/inem",
"class_data_dir": "",
"flip_p": "",
"do_not_balance": 0,
"use_sub_dirs": 0
},
{
"instance_prompt": "gera",
"class_prompt": "",
"instance_data_dir": "E:/Classroom/gera",
"class_data_dir": "",
"flip_p": "",
"do_not_balance": 0,
"use_sub_dirs": 0
},
{
"instance_prompt": "yoh",
"class_prompt": "",
"instance_data_dir": "E:/Classroom/yoh",
"class_data_dir": "",
"flip_p": "",
"do_not_balance": 0,
"use_sub_dirs": 0
},
{
"instance_prompt": "emit",
"class_prompt": "",
"instance_data_dir": "E:/Classroom/emit",
"class_data_dir": "",
"flip_p": "",
"do_not_balance": 0,
"use_sub_dirs": 0
},
{
"instance_prompt": "enei",
"class_prompt": "",
"instance_data_dir": "E:/Classroom/enei",
"class_data_dir": "",
"flip_p": "",
"do_not_balance": 0,
"use_sub_dirs": 0
},
{
"instance_prompt": "ucn",
"class_prompt": "",
"instance_data_dir": "E:/Classroom/ucn",
"class_data_dir": "",
"flip_p": "",
"do_not_balance": 0,
"use_sub_dirs": 0
}
],
"sample_prompts": [
"gera, wral, 1girl, solo, breasts, cleavage, ponytail, formal, suit, long hair, brown eyes, hair ornament, large breasts, brown hair, looking at viewer, upper body",
"inem, wral, 1girl, solo, bow, jacket, shirt, indoors, bowtie, parody, bangs, blazer, long hair, blonde hair, purple eyes, school uniform, white shirt, red jacket, looking at viewer, upper body, blue bow, closed mouth, collared shirt, hair between eyes",
"peth, wral, 1girl, solo, braid, ribbon, bow, classroom, indoors, jacket, desk, bowtie, shirt, bangs, long hair, hair ribbon, white ribbon, looking at viewer, school uniform, brown hair, closed mouth, school desk, white shirt, blue bow",
"reem, wral, 1girl, solo, smile, jacket, bow, bowtie, blazer, school uniform, short hair, brown hair, looking at viewer, blue bow, red jacket",
"yoh, wral, 1boy, solo, jacket, shirt, male focus, brown hair, looking to the side, school uniform, red jacket, brown eyes, yellow eyes, grey background"
],
"add_controlled_seed_to_sample": [
"66"
],
"model_path": "E:\\ST\\models\\model",
"vae_path": "",
"output_path": "E:/MORE COOK",
"send_telegram_updates": 0,
"telegram_token": "",
"telegram_chat_id": "",
"resolution": "512",
"batch_size": "8",
"train_epocs": "200",
"mixed_precision": "fp16",
"use_8bit_adam": 1,
"use_gradient_checkpointing": 0,
"accumulation_steps": "1",
"learning_rate": "3e-6",
"warmup_steps": "0",
"learning_rate_scheduler": "constant",
"regenerate_latent_cache": 0,
"train_text_encoder": 1,
"with_prior_loss_preservation": 0,
"prior_loss_preservation_weight": "1.0",
"use_image_names_as_captions": 0,
"auto_balance_concept_datasets": 0,
"add_class_images_to_dataset": 0,
"number_of_class_images": "0",
"save_every_n_epochs": "20",
"number_of_samples_to_generate": "2",
"sample_height": "512",
"sample_width": "512",
"sample_random_aspect_ratio": 0,
"sample_on_training_start": 1,
"aspect_ratio_bucketing": 1,
"seed": "3434559",
"dataset_repeats": "1",
"limit_text_encoder_training": "15%",
"use_text_files_as_captions": 1,
"ckpt_version": null,
"convert_to_ckpt_after_training": 1,
"execute_post_conversion": 1,
"disable_cudnn_benchmark": 1,
"sample_step_interval": "900",
"conditional_dropout": "4%",
"clip_penultimate": 1,
"use_ema": 0,
"aspect_ratio_bucketing_mode": "Duplicate Fill",
"dynamic_bucketing_mode": "Duplicate",
"model_variant": "Regular",
"fallback_mask_prompt": "",
"attention": "xformers",
"batch_prompt_sampling": 0,
"shuffle_dataset_per_epoch": 0
}