abc committed on
Commit bd600ff · 1 Parent(s): c797d8e

Delete append_module.py

Files changed (1)
  1. append_module.py +0 -504
append_module.py DELETED
@@ -1,504 +0,0 @@
- import argparse
- import json
- import shutil
- import time
- from typing import Dict, List, NamedTuple, Tuple
- from accelerate import Accelerator
- from torch.autograd.function import Function
- import glob
- import math
- import os
- import random
- import hashlib
- from io import BytesIO
-
- from tqdm import tqdm
- import torch
- from torchvision import transforms
- from transformers import CLIPTokenizer
- import diffusers
- from diffusers import DDPMScheduler, StableDiffusionPipeline
- import albumentations as albu
- import numpy as np
- from PIL import Image
- import cv2
- from einops import rearrange
- from torch import einsum
- import safetensors.torch
-
- import library.model_util as model_util
- import library.train_util as train_util
-
- #============================================================================================================
- # A provisional version of AdafactorSchedule that allows initial_lr to be set per param group (per layer)
- #============================================================================================================
- from torch.optim.lr_scheduler import LambdaLR
- class AdafactorSchedule_append(LambdaLR):
-     """
-     Since [`~optimization.Adafactor`] performs its own scheduling, if the training loop relies on a scheduler (e.g.,
-     for logging), this class creates a proxy object that retrieves the current lr values from the optimizer.
-
-     It returns `initial_lr` during startup and the actual `lr` during stepping.
-     """
-
-     def __init__(self, optimizer, initial_lr=0.0):
-         def lr_lambda(_):
-             return initial_lr
-
-         for group in optimizer.param_groups:
-             if not isinstance(initial_lr, list):
-                 group["initial_lr"] = initial_lr
-             else:
-                 group["initial_lr"] = initial_lr.pop(0)
-         super().__init__(optimizer, lr_lambda)
-         for group in optimizer.param_groups:
-             del group["initial_lr"]
-
-     def get_lr(self):
-         opt = self.optimizer
-         lrs = [
-             opt._get_lr(group, opt.state[group["params"][0]])
-             for group in opt.param_groups
-             if group["params"][0].grad is not None
-         ]
-         if len(lrs) == 0:
-             lrs = self.base_lrs  # if called before stepping
-         return lrs
-
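A minimal usage sketch of the proxy scheduler above, assuming two parameter groups (text encoder and U-Net); the stub modules and the initial learning rates are placeholders, not values taken from this module.

import torch
from transformers.optimization import Adafactor

text_encoder_stub = torch.nn.Linear(4, 4)  # stands in for the text encoder LoRA params
unet_stub = torch.nn.Linear(4, 4)          # stands in for the U-Net LoRA params

optimizer = Adafactor(
    [{"params": text_encoder_stub.parameters()},
     {"params": unet_stub.parameters()}],
    lr=None, scale_parameter=True, relative_step=True, warmup_init=True,
)
# One initial_lr per param group; these values are only reported until stepping starts,
# after which get_lr() returns the lrs computed internally by Adafactor.
lr_scheduler = AdafactorSchedule_append(optimizer, initial_lr=[5e-5, 1e-4])
print(lr_scheduler.get_lr())  # the per-group initial_lr values before the first optimizer.step()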
- #============================================================================================================
- # Based on code from model_util
- #============================================================================================================
- def make_bucket_resolutions_fix(max_reso, min_reso, min_size=256, max_size=1024, divisible=64, step=1):
-     max_width, max_height = max_reso
-     max_area = (max_width // divisible) * (max_height // divisible)
-
-     min_width, min_height = min_reso
-     min_area = (min_width // divisible) * (min_height // divisible)
-
-     area_size_list = []
-     area_size_resos_list = []
-     _max_area = max_area
-
-     while True:
-         resos = set()
-         size = int(math.sqrt(_max_area)) * divisible
-         resos.add((size, size))
-
-         size = min_size
-         while size <= max_size:
-             width = size
-             height = min(max_size, (_max_area // (width // divisible)) * divisible)
-             resos.add((width, height))
-             resos.add((height, width))
-
-             # # make additional resos
-             # if width >= height and width - divisible >= min_size:
-             #     resos.add((width - divisible, height))
-             #     resos.add((height, width - divisible))
-             # if height >= width and height - divisible >= min_size:
-             #     resos.add((width, height - divisible))
-             #     resos.add((height - divisible, width))
-
-             size += divisible
-
-         resos = list(resos)
-         resos.sort()
-
-         # aspect_ratios = [w / h for w, h in resos]
-         area_size_list.append(_max_area)
-         area_size_resos_list.append(resos)
-         # area_size_ratio_list.append(aspect_ratios)
-
-         _max_area -= step
-         if _max_area < min_area:
-             break
-     return area_size_resos_list, area_size_list
-
- #============================================================================================================
- # Based on code from train_util
- #============================================================================================================
- class BucketManager_append(train_util.BucketManager):
-     def __init__(self, no_upscale, max_reso, min_size, max_size, reso_steps, min_reso=None, area_step=None) -> None:
-         super().__init__(no_upscale, max_reso, min_size, max_size, reso_steps)
-         print("created BucketManager_append")
-         if min_reso is None:
-             self.min_reso = None
-             self.min_area = None
-         else:
-             self.min_reso = min_reso
-             self.min_area = min_reso[0] * min_reso[1]
-         self.area_step = area_step
-         self.area_sizes_flag = False
-
-     def make_buckets(self):
-         if self.min_reso:
-             print(f"make_resolution append")
-             resos, area_sizes = make_bucket_resolutions_fix(self.max_reso, self.min_reso, self.min_size, self.max_size, self.reso_steps, self.area_step)
-             self.set_predefined_resos(resos, area_sizes)
-         else:
-             resos = model_util.make_bucket_resolutions(self.max_reso, self.min_size, self.max_size, self.reso_steps)
-             self.set_predefined_resos(resos)
-
-     def set_predefined_resos(self, resos, area_sizes=None):
-         # Store the resolution and aspect ratio info used when choosing from the predefined sizes
-         if area_sizes:
-             self.area_sizes_flag = True
-             self.predefined_area_sizes = np.array(area_sizes.copy())
-             self.predefined_resos_list = resos.copy()
-             self.predefined_resos_set_list = [set(reso) for reso in resos]
-             self.predefined_aspect_ratios_list = [np.array([w / h for w, h in reso]) for reso in resos]
-             self.predefined_resos = None
-             self.predefined_resos_set = None
-             self.predefined_aspect_ratios = None
-         else:
-             self.area_sizes_flag = False
-             self.predefined_area_sizes = None
-             self.predefined_resos = resos.copy()
-             self.predefined_resos_set = set(resos)
-             self.predefined_aspect_ratios = np.array([w / h for w, h in resos])
-
-     def select_bucket(self, image_width, image_height):
-         # Compute the image area size
-         area_size = (image_width // 64) * (image_height // 64)
-         aspect_ratio = image_width / image_height
-         bucket_size_id = None
-         # Determine the area group of the image so the extended bucket sizes can be used
-         if self.area_sizes_flag:
-             size_errors = self.predefined_area_sizes - area_size
-             bucket_size_id = np.abs(size_errors).argmin()
-             # Search a small neighborhood of area sizes to settle on the one to use
-             search_size_range = 1
-             bucket_size_id_list = [bucket_size_id]
-             for i in range(search_size_range):
-                 if bucket_size_id - i < 0:
-                     bucket_size_id_list.append(bucket_size_id + i + 1)
-                 elif bucket_size_id + 1 + i >= len(self.predefined_resos_list):
-                     bucket_size_id_list.append(bucket_size_id - i - 1)
-                 else:
-                     bucket_size_id_list.append(bucket_size_id - i - 1)
-                     bucket_size_id_list.append(bucket_size_id + i + 1)
-             _min_error = 1000.
-             _min_id = bucket_size_id
-             for now_size_id in bucket_size_id_list:
-                 self.predefined_aspect_ratios = self.predefined_aspect_ratios_list[now_size_id]
-                 ar_errors = self.predefined_aspect_ratios - aspect_ratio
-                 ar_error = np.abs(ar_errors).min()
-                 if _min_error > ar_error:
-                     _min_error = ar_error
-                     _min_id = now_size_id
-                 if _min_error == 0.:
-                     break
-             bucket_size_id = _min_id
-             del _min_error, _min_id, ar_error  # clean up temporaries
-             self.predefined_resos = self.predefined_resos_list[bucket_size_id]
-             self.predefined_resos_set = self.predefined_resos_set_list[bucket_size_id]
-             self.predefined_aspect_ratios = self.predefined_aspect_ratios_list[bucket_size_id]
-         # -- from here on the original processing is unchanged
-         if not self.no_upscale:
-             # The same aspect ratio may already exist (when preprocessed for fine tuning with no_upscale=True), so prefer an identical resolution
-             reso = (image_width, image_height)
-             if reso in self.predefined_resos_set:
-                 pass
-             else:
-                 ar_errors = self.predefined_aspect_ratios - aspect_ratio
-                 predefined_bucket_id = np.abs(ar_errors).argmin()  # the resolution with the smallest aspect ratio error other than the exact one
-                 reso = self.predefined_resos[predefined_bucket_id]
-
-             ar_reso = reso[0] / reso[1]
-             if aspect_ratio > ar_reso:  # image is wider than the bucket -> match the height
-                 scale = reso[1] / image_height
-             else:
-                 scale = reso[0] / image_width
-
-             resized_size = (int(image_width * scale + .5), int(image_height * scale + .5))
-             # print("use predef", image_width, image_height, reso, resized_size)
-         else:
-             if image_width * image_height > self.max_area:
-                 # The image is too large, so pick the bucket assuming it will be shrunk while keeping the aspect ratio
-                 resized_width = math.sqrt(self.max_area * aspect_ratio)
-                 resized_height = self.max_area / resized_width
-                 assert abs(resized_width / resized_height - aspect_ratio) < 1e-2, "aspect is illegal"
-
-                 # Round the resized short or long side to a multiple of reso_steps, choosing whichever gives the smaller aspect ratio error
-                 # same logic as the original bucketing
-                 b_width_rounded = self.round_to_steps(resized_width)
-                 b_height_in_wr = self.round_to_steps(b_width_rounded / aspect_ratio)
-                 ar_width_rounded = b_width_rounded / b_height_in_wr
-
-                 b_height_rounded = self.round_to_steps(resized_height)
-                 b_width_in_hr = self.round_to_steps(b_height_rounded * aspect_ratio)
-                 ar_height_rounded = b_width_in_hr / b_height_rounded
-
-                 # print(b_width_rounded, b_height_in_wr, ar_width_rounded)
-                 # print(b_width_in_hr, b_height_rounded, ar_height_rounded)
-
-                 if abs(ar_width_rounded - aspect_ratio) < abs(ar_height_rounded - aspect_ratio):
-                     resized_size = (b_width_rounded, int(b_width_rounded / aspect_ratio + .5))
-                 else:
-                     resized_size = (int(b_height_rounded * aspect_ratio + .5), b_height_rounded)
-                 # print(resized_size)
-             else:
-                 resized_size = (image_width, image_height)  # no resize needed
-
-             # Make the bucket size no larger than the image (crop instead of pad)
-             bucket_width = resized_size[0] - resized_size[0] % self.reso_steps
-             bucket_height = resized_size[1] - resized_size[1] % self.reso_steps
-             # print("use arbitrary", image_width, image_height, resized_size, bucket_width, bucket_height)
-
-             reso = (bucket_width, bucket_height)
-
-         self.add_if_new_reso(reso)
-
-         ar_error = (reso[0] / reso[1]) - aspect_ratio
-         return reso, resized_size, ar_error
-
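A hypothetical standalone sketch of how the extended manager above might be exercised; the resolutions and the sample image size are placeholders, and in real training the manager is constructed by the dataset class below rather than by hand.

manager = BucketManager_append(
    no_upscale=False, max_reso=(512, 512), min_size=256, max_size=768,
    reso_steps=64, min_reso=(448, 448), area_step=4,
)
manager.make_buckets()  # builds one bucket set per area step via make_bucket_resolutions_fix

# Pick a bucket for a 1200x800 image: the nearest area group is chosen first,
# then the closest aspect ratio inside that group.
reso, resized_size, ar_error = manager.select_bucket(1200, 800)
print(f"bucket {reso}, resize to {resized_size}, aspect ratio error {ar_error:.4f}")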
- class DreamBoothDataset(train_util.DreamBoothDataset):
-     def __init__(self, batch_size, train_data_dir, reg_data_dir, tokenizer, max_token_length, caption_extension, shuffle_caption, shuffle_keep_tokens, resolution, enable_bucket, min_bucket_reso, max_bucket_reso, bucket_reso_steps, bucket_no_upscale, prior_loss_weight, flip_aug, color_aug, face_crop_aug_range, random_crop, debug_dataset, min_resolution=None, area_step=None) -> None:
-         print("use append DreamBoothDataset")
-         self.min_resolution = min_resolution
-         self.area_step = area_step
-         super().__init__(batch_size, train_data_dir, reg_data_dir, tokenizer, max_token_length, caption_extension, shuffle_caption, shuffle_keep_tokens,
-                          resolution, enable_bucket, min_bucket_reso, max_bucket_reso, bucket_reso_steps, bucket_no_upscale, prior_loss_weight,
-                          flip_aug, color_aug, face_crop_aug_range, random_crop, debug_dataset)
-     def make_buckets(self):
-         '''
-         Must be called even when bucketing is disabled (a single bucket is created).
-         min_size and max_size are ignored when enable_bucket is False
-         '''
-         print("loading image sizes.")
-         for info in tqdm(self.image_data.values()):
-             if info.image_size is None:
-                 info.image_size = self.get_image_size(info.absolute_path)
-
-         if self.enable_bucket:
-             print("make buckets")
-         else:
-             print("prepare dataset")
-
-         # Create buckets and assign each image to one
-         if self.enable_bucket:
-             if self.bucket_manager is None:  # already initialized when fine tuning with bucket info defined in the metadata
-                 # ======================================================================change
-                 if self.min_resolution:
-                     self.bucket_manager = BucketManager_append(self.bucket_no_upscale, (self.width, self.height),
-                                                                self.min_bucket_reso, self.max_bucket_reso, self.bucket_reso_steps, self.min_resolution, self.area_step)
-                 else:
-                     self.bucket_manager = train_util.BucketManager(self.bucket_no_upscale, (self.width, self.height),
-                                                                    self.min_bucket_reso, self.max_bucket_reso, self.bucket_reso_steps)
-                 # ======================================================================change
-                 if not self.bucket_no_upscale:
-                     self.bucket_manager.make_buckets()
-                 else:
-                     print("min_bucket_reso and max_bucket_reso are ignored if bucket_no_upscale is set, because bucket reso is defined by image size automatically / bucket_no_upscaleが指定された場合は、bucketの解像度は画像サイズから自動計算されるため、min_bucket_resoとmax_bucket_resoは無視されます")
-
-             img_ar_errors = []
-             for image_info in self.image_data.values():
-                 image_width, image_height = image_info.image_size
-                 image_info.bucket_reso, image_info.resized_size, ar_error = self.bucket_manager.select_bucket(image_width, image_height)
-
-                 # print(image_info.image_key, image_info.bucket_reso)
-                 img_ar_errors.append(abs(ar_error))
-
-             self.bucket_manager.sort()
-         else:
-             self.bucket_manager = train_util.BucketManager(False, (self.width, self.height), None, None, None)
-             self.bucket_manager.set_predefined_resos([(self.width, self.height)])  # a single fixed-size bucket only
-             for image_info in self.image_data.values():
-                 image_width, image_height = image_info.image_size
-                 image_info.bucket_reso, image_info.resized_size, _ = self.bucket_manager.select_bucket(image_width, image_height)
-
-         for image_info in self.image_data.values():
-             for _ in range(image_info.num_repeats):
-                 self.bucket_manager.add_image(image_info.bucket_reso, image_info.image_key)
-
-         # Print and store the bucket info
-         if self.enable_bucket:
-             self.bucket_info = {"buckets": {}}
-             print("number of images (including repeats) / 各bucketの画像枚数(繰り返し回数を含む)")
-             for i, (reso, bucket) in enumerate(zip(self.bucket_manager.resos, self.bucket_manager.buckets)):
-                 count = len(bucket)
-                 if count > 0:
-                     self.bucket_info["buckets"][i] = {"resolution": reso, "count": len(bucket)}
-                     print(f"bucket {i}: resolution {reso}, count: {len(bucket)}")
-
-             img_ar_errors = np.array(img_ar_errors)
-             mean_img_ar_error = np.mean(np.abs(img_ar_errors))
-             self.bucket_info["mean_img_ar_error"] = mean_img_ar_error
-             print(f"mean ar error (without repeats): {mean_img_ar_error}")
-
-         # Build an index for data references; this index is used when shuffling the dataset
-         self.buckets_indices: List[train_util.BucketBatchIndex] = []
-         for bucket_index, bucket in enumerate(self.bucket_manager.buckets):
-             batch_count = int(math.ceil(len(bucket) / self.batch_size))
-             for batch_index in range(batch_count):
-                 self.buckets_indices.append(train_util.BucketBatchIndex(bucket_index, self.batch_size, batch_index))
-
-         # The block below was reverted because the number of batches per bucket grew so much that it caused confusion.
-         #  Since step order is randomized during training, having the same image twice in one batch is presumably not that harmful.
-         #
-         # # As buckets become finer-grained, more buckets end up containing only a single kind of image, which means
-         # # a whole batch can be filled with the same image, which is clearly undesirable,
-         # # so the batch size is limited to the number of distinct images.
-         # # Even so, the same image can still land in the same batch, so fewer repeats should give better shuffle quality?
-         # # TODO: a mechanism to reuse regularization images across epochs
-         # num_of_image_types = len(set(bucket))
-         # bucket_batch_size = min(self.batch_size, num_of_image_types)
-         # batch_count = int(math.ceil(len(bucket) / bucket_batch_size))
-         # # print(bucket_index, num_of_image_types, bucket_batch_size, batch_count)
-         # for batch_index in range(batch_count):
-         #     self.buckets_indices.append(BucketBatchIndex(bucket_index, bucket_batch_size, batch_index))
-         # (end of reverted block)
-
-         self.shuffle_buckets()
-         self._length = len(self.buckets_indices)
-
- class FineTuningDataset(train_util.FineTuningDataset):
-     def __init__(self, json_file_name, batch_size, train_data_dir, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens, resolution, enable_bucket, min_bucket_reso, max_bucket_reso, bucket_reso_steps, bucket_no_upscale, flip_aug, color_aug, face_crop_aug_range, random_crop, dataset_repeats, debug_dataset) -> None:
-         train_util.glob_images = glob_images
-         super().__init__(json_file_name, batch_size, train_data_dir, tokenizer, max_token_length, shuffle_caption, shuffle_keep_tokens,
-                          resolution, enable_bucket, min_bucket_reso, max_bucket_reso, bucket_reso_steps, bucket_no_upscale, flip_aug, color_aug, face_crop_aug_range,
-                          random_crop, dataset_repeats, debug_dataset)
-
- def glob_images(directory, base="*", npz_flag=True):
-     img_paths = []
-     dots = []
-     for ext in train_util.IMAGE_EXTENSIONS:
-         dots.append(ext)
-     if npz_flag:
-         dots.append(".npz")
-     for ext in dots:
-         if base == '*':
-             img_paths.extend(glob.glob(os.path.join(glob.escape(directory), base + ext)))
-         else:
-             img_paths.extend(glob.glob(glob.escape(os.path.join(directory, base + ext))))
-     return img_paths
-
- #============================================================================================================
- # networks.lora
- #============================================================================================================
- from networks.lora import LoRANetwork
- def replace_prepare_optimizer_params(networks):
-     def prepare_optimizer_params(self, text_encoder_lr, unet_lr, scheduler_lr=None, loranames=None):
-
-         def enumerate_params(loras, lora_name=None):
-             params = []
-             for lora in loras:
-                 if lora_name is not None:
-                     if lora_name in lora.lora_name:
-                         params.extend(lora.parameters())
-                 else:
-                     params.extend(lora.parameters())
-             return params
-
-         self.requires_grad_(True)
-         all_params = []
-         ret_scheduler_lr = []
-
-         textencoder_names = [None]
-         unet_names = [None]
-         if loranames is not None:
-             if "text_encoder" in loranames:
-                 textencoder_names = loranames["text_encoder"]
-             if "unet" in loranames:
-                 unet_names = loranames["unet"]
-
-         if self.text_encoder_loras:
-             for textencoder_name in textencoder_names:
-                 param_data = {'params': enumerate_params(self.text_encoder_loras, lora_name=textencoder_name)}
-                 if text_encoder_lr is not None:
-                     param_data['lr'] = text_encoder_lr
-                 if scheduler_lr is not None:
-                     ret_scheduler_lr.append(scheduler_lr[0])
-                 all_params.append(param_data)
-
-         if self.unet_loras:
-             for unet_name in unet_names:
-                 param_data = {'params': enumerate_params(self.unet_loras, lora_name=unet_name)}
-                 if unet_lr is not None:
-                     param_data['lr'] = unet_lr
-                 if scheduler_lr is not None:
-                     ret_scheduler_lr.append(scheduler_lr[1])
-                 all_params.append(param_data)
-
-         return all_params, ret_scheduler_lr
-
-     LoRANetwork.prepare_optimizer_params = prepare_optimizer_params
-
- #============================================================================================================
- # Newly added
- #============================================================================================================
- def add_append_arguments(parser: argparse.ArgumentParser):
-     # for train_network_opt.py
-     parser.add_argument("--optimizer", type=str, default="AdamW", choices=["AdamW", "RAdam", "AdaBound", "AdaBelief", "AggMo", "AdamP", "Adastand", "Adastand_belief", "Apollo", "Lamb", "Ranger", "RangerVA", "Lookahead_Adam", "Lookahead_DiffGrad", "Yogi", "NovoGrad", "QHAdam", "DiffGrad", "MADGRAD", "Adafactor"], help="optimizer to use")
-     parser.add_argument("--optimizer_arg", type=str, default=None, nargs='*')
-     parser.add_argument("--split_lora_networks", action="store_true")
-     parser.add_argument("--split_lora_level", type=int, default=0, help="how finely to split the LoRA networks: 0 = U-Net only, split per block; 1 = U-Net split into coarse groups; 2 = split per layer including the text encoder")
-     parser.add_argument("--min_resolution", type=str, default=None)
-     parser.add_argument("--area_step", type=int, default=1)
-     parser.add_argument("--config", type=str, default=None)
-
- def create_split_names(split_flag, split_level):
-     split_names = None
-     if split_flag:
-         split_names = {}
-         text_encoder_names = [None]
-         unet_names = ["lora_unet_mid_block"]
-         if split_level == 1:
-             unet_names.append(f"lora_unet_down_blocks_")
-             unet_names.append(f"lora_unet_up_blocks_")
-         elif split_level == 2 or split_level == 0:
-             if split_level == 2:
-                 text_encoder_names = []
-                 for i in range(12):
-                     text_encoder_names.append(f"lora_te_text_model_encoder_layers_{i}_")
-             for i in range(3):
-                 unet_names.append(f"lora_unet_down_blocks_{i}")
-                 unet_names.append(f"lora_unet_up_blocks_{i+1}")
-         split_names["text_encoder"] = text_encoder_names
-         split_names["unet"] = unet_names
-     return split_names
-
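For illustration, the dict returned for split_level=1 follows directly from the branches above; it is the loranames mapping that the patched prepare_optimizer_params turns into one optimizer param group per name prefix.

split_names = create_split_names(split_flag=True, split_level=1)
# {'text_encoder': [None],
#  'unet': ['lora_unet_mid_block', 'lora_unet_down_blocks_', 'lora_unet_up_blocks_']}
print(split_names)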
- def get_config(parser):
-     args = parser.parse_args()
-     if args.config is not None and (not args.config == ""):
-         import yaml
-         import datetime
-         if os.path.splitext(args.config)[-1] == ".yaml":
-             args.config = os.path.splitext(args.config)[0]
-         config_path = f"./{args.config}.yaml"
-         if os.path.exists(config_path):
-             print(f"loading settings from {config_path} ...")
-             margs, rest = parser.parse_known_args()
-             with open(config_path, mode="r") as f:
-                 configs = yaml.unsafe_load(f)
-             # Pull a dict out of argparse so the values can be handled as variables
-             args_dic = vars(args)
-             # Find arguments that were changed from their defaults on the command line
-             change_def_dic = {}
-             args_type_dic = {}
-             for key, v in args_dic.items():
-                 if not parser.get_default(key) == v:
-                     change_def_dic[key] = v
-             # Get the data type declared for each argument
-             for key, act in parser._option_string_actions.items():
-                 if key == "-h": continue
-                 key = key[2:]
-                 args_type_dic[key] = act.type
-             # Check the data types and assign each config value into args
-             for key, v in configs.items():
-                 if key in args_dic:
-                     if args_dic[key] is not None:
-                         new_type = type(args_dic[key])
-                         if (not type(v) == new_type) and (not new_type == list):
-                             v = new_type(v)
-                 else:
-                     if v is not None:
-                         if not type(v) == args_type_dic[key]:
-                             v = args_type_dic[key](v)
-                 args_dic[key] = v
-             # Finally, re-apply the values that were explicitly changed on the command line
-             for key, v in change_def_dic.items():
-                 args_dic[key] = v
-         else:
-             print(f"{config_path} was not found")
-     return args
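A hypothetical end-to-end sketch of how these helpers might be driven from a training script; the parser here only registers the appended arguments, and the YAML file name and values are placeholders. Command-line options still win because values that differ from their defaults are re-applied after the config is merged.

import argparse

parser = argparse.ArgumentParser()
add_append_arguments(parser)
# ... the base training arguments would be registered here as well ...

# my_settings.yaml (hypothetical), loaded when --config my_settings is passed:
#   optimizer: Adafactor
#   split_lora_networks: true
#   split_lora_level: 1
#   min_resolution: "320,320"
#   area_step: 2

args = get_config(parser)
split_names = create_split_names(args.split_lora_networks, args.split_lora_level)
print(args.optimizer, split_names)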