world_size: 4
experiment_name: 'FFHQH_SEHARM_1024_FINAL'

datasets:
  iharmony: "/home/jovyan/datasets/iHarmony4/"
  crop_size: 1024
  # dataset_root: "/home/jovyan/datasets/iHarmony4/"
  ffhqh: "/home/jovyan/datasets/synthetic_ffhq/"
  dataset_root: "/home/jovyan/datasets/synthetic_ffhq/"
  ffhq: "/home/jovyan/datasets/ffhq/images1024x1024/"

isTrain: True
task_for_oneformer: "matting"
datasets_to_use_test: ["FFHQH"]  # other options: "IhdDataset", "HCOCO", "HFlickr", "Hday2night", "HAdobe5k", "FFHQH"
dataset_to_use: "FFHQH"
pretrained_model: 'microsoft/swin-base-patch4-window7-224'  # alternatives: "microsoft/swinv2-tiny-patch4-window8-256", "nielsr/mask2former-swin-base-youtubevis-2021", "nvidia/mit-b2"

batch_size: 4
num_workers: 4
log_dir: "log"
checkpoint_dir: "checkpoints"
load_pretrained: True
# checkpoint: "/home/jovyan/efremyan/harmonization/best_50.pth"
# checkpoint: "/home/jovyan/efremyan/harmonization/checkpoints/01_August_13_38/epoch-13.pth"
checkpoint: "/home/jovyan/efremyan/harmonization/checkpoints/03_August_23_36/epoch-42.pth"

distributed_addr: "localhost"
# distributed_port: "12356"
# image_size: 512

lr: 1e-5
epochs: 300
disable_validation: False
disable_mixed_precision: False
log_image_interval: 10
log_image_number: 4
save_model_interval: 1

lambda_losses:
  default: 1.
  Color: 0.
  PSNR: 1.
  LPIPS: 0.5
  L2: 1.
  L1: 0.
  softmax: 1e-4

model:
  grid_counts: [10, 5, 1]
  enc_sizes: [3, 16, 32, 64, 128, 256, 512]
  init_weights: [0.5, 0.5]  # weights of the local gridded image and the globally harmonized image
  init_value: 0.8
  skips: True
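
# --- Loading sketch (an assumption, kept as comments so the YAML stays valid; the repo's
# actual loader may differ, e.g. OmegaConf) ---
# A minimal way to read this file with PyYAML; "config.yaml" is a placeholder file name:
#
#   import yaml
#
#   with open("config.yaml") as f:
#       cfg = yaml.safe_load(f)
#
#   cfg["model"]["grid_counts"]      # -> [10, 5, 1]
#   cfg["lambda_losses"]["LPIPS"]    # -> 0.5
#
# Note: plain PyYAML resolves `lr: 1e-5` to the string "1e-5" (its float resolver requires a
# decimal point), so the loader should cast such values if PyYAML is used.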