Bluwynd committed
Commit bde6c13 · verified · Parent(s): 7feb6b5

Upload autotrain.ipynb

Files changed (1)
  1. autotrain.ipynb +673 -0
autotrain.ipynb ADDED
@@ -0,0 +1,673 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "import zipfile\n",
+ "import shutil\n",
+ "from subprocess import getoutput\n",
+ "from IPython.utils import capture\n",
+ "import random\n",
+ "import concurrent.futures\n",
+ "from tqdm import tqdm\n",
+ "from PIL import Image\n",
+ "import time\n",
+ "import re\n",
+ "import json\n",
+ "import glob\n",
+ "import gdown\n",
+ "import requests\n",
+ "import subprocess\n",
+ "from urllib.parse import urlparse, unquote\n",
+ "from pathlib import Path\n",
+ "import toml\n",
+ "\n",
+ "# root_dir\n",
+ "root_dir = \"/content\"\n",
+ "deps_dir = os.path.join(root_dir, \"deps\")\n",
+ "repo_dir = os.path.join(root_dir, \"Kohya-Colab\")\n",
+ "training_dir = os.path.join(root_dir, \"Dreamboot-Config\")\n",
+ "pretrained_model = os.path.join(root_dir, \"pretrained_model\")\n",
+ "vae_dir = os.path.join(root_dir, \"vae\")\n",
+ "config_dir = os.path.join(training_dir, \"config\")\n",
+ "\n",
+ "# repo_dir\n",
+ "accelerate_config = os.path.join(repo_dir, \"accelerate_config/config.yaml\")\n",
+ "tools_dir = os.path.join(repo_dir, \"tools\")\n",
+ "finetune_dir = os.path.join(repo_dir, \"finetune\")\n",
+ "\n",
+ "for store in [\"root_dir\", \"deps_dir\", \"repo_dir\", \"training_dir\", \"pretrained_model\", \"vae_dir\", \"accelerate_config\", \"tools_dir\", \"finetune_dir\", \"config_dir\"]:\n",
+ "    with capture.capture_output() as cap:\n",
+ "        %store {store}\n",
+ "    del cap\n",
+ "\n",
+ "repo_url = \"https://github.com/phamhungd/Kohya-Colab\"\n",
+ "bitsandbytes_main_py = \"/usr/local/lib/python3.10/dist-packages/bitsandbytes/cuda_setup/main.py\"\n",
+ "branch = \"\"\n",
+ "verbose = False\n",
+ "\n",
+ "def read_file(filename):\n",
+ "    with open(filename, \"r\") as f:\n",
+ "        contents = f.read()\n",
+ "    return contents\n",
+ "\n",
+ "\n",
+ "def write_file(filename, contents):\n",
+ "    with open(filename, \"w\") as f:\n",
+ "        f.write(contents)\n",
+ "\n",
+ "\n",
+ "def clone_repo(url):\n",
+ "    if not os.path.exists(repo_dir):\n",
+ "        os.chdir(root_dir)\n",
+ "        !git clone {url} {repo_dir}\n",
+ "    else:\n",
+ "        os.chdir(repo_dir)\n",
+ "        if branch:\n",
+ "            !git pull origin {branch}\n",
+ "        else:\n",
+ "            !git pull\n",
+ "\n",
+ "\n",
+ "def install_dependencies():\n",
+ "    s = getoutput('nvidia-smi')\n",
+ "\n",
+ "    if 'T4' in s:\n",
+ "        !sed -i \"s@cpu@cuda@\" library/model_util.py\n",
+ "\n",
+ "    !pip install {'-q' if not verbose else ''} --upgrade -r requirements.txt\n",
+ "\n",
+ "    from accelerate.utils import write_basic_config\n",
+ "\n",
+ "    if not os.path.exists(accelerate_config):\n",
+ "        write_basic_config(save_location=accelerate_config)\n",
+ "\n",
+ "\n",
+ "def remove_bitsandbytes_message(filename):\n",
+ "    welcome_message = \"\"\"\n",
+ "def evaluate_cuda_setup():\n",
+ "    print('')\n",
+ "    print('='*35 + 'BUG REPORT' + '='*35)\n",
+ "    print('Welcome to bitsandbytes. For bug reports, please submit your error trace to: https://github.com/TimDettmers/bitsandbytes/issues')\n",
+ "    print('For effortless bug reporting copy-paste your error into this form: https://docs.google.com/forms/d/e/1FAIpQLScPB8emS3Thkp66nvqwmjTEgxp8Y9ufuWTzFyr9kJ5AoI47dQ/viewform?usp=sf_link')\n",
+ "    print('='*80)\"\"\"\n",
+ "\n",
+ "    new_welcome_message = \"\"\"\n",
+ "def evaluate_cuda_setup():\n",
+ "    import os\n",
+ "    if 'BITSANDBYTES_NOWELCOME' not in os.environ or str(os.environ['BITSANDBYTES_NOWELCOME']) == '0':\n",
+ "        print('')\n",
+ "        print('=' * 35 + 'BUG REPORT' + '=' * 35)\n",
+ "        print('Welcome to bitsandbytes. For bug reports, please submit your error trace to: https://github.com/TimDettmers/bitsandbytes/issues')\n",
+ "        print('For effortless bug reporting copy-paste your error into this form: https://docs.google.com/forms/d/e/1FAIpQLScPB8emS3Thkp66nvqwmjTEgxp8Y9ufuWTzFyr9kJ5AoI47dQ/viewform?usp=sf_link')\n",
+ "        print('To hide this message, set the BITSANDBYTES_NOWELCOME variable like so: export BITSANDBYTES_NOWELCOME=1')\n",
+ "        print('=' * 80)\"\"\"\n",
+ "\n",
+ "    contents = read_file(filename)\n",
+ "    new_contents = contents.replace(welcome_message, new_welcome_message)\n",
+ "    write_file(filename, new_contents)\n",
+ "\n",
+ "\n",
+ "def main():\n",
+ "    os.chdir(root_dir)\n",
+ "\n",
+ "    for dir in [\n",
+ "        deps_dir,\n",
+ "        training_dir,\n",
+ "        config_dir,\n",
+ "        pretrained_model,\n",
+ "        vae_dir\n",
+ "    ]:\n",
+ "        os.makedirs(dir, exist_ok=True)\n",
+ "\n",
+ "    clone_repo(repo_url)\n",
+ "\n",
+ "    if branch:\n",
+ "        os.chdir(repo_dir)\n",
+ "        status = os.system(f\"git checkout {branch}\")\n",
+ "        if status != 0:\n",
+ "            raise Exception(\"Failed to checkout branch or commit\")\n",
+ "\n",
+ "    os.chdir(repo_dir)\n",
+ "\n",
+ "    !apt install aria2 {'-qq' if not verbose else ''}\n",
+ "\n",
+ "    install_dependencies()\n",
+ "    time.sleep(3)\n",
+ "\n",
+ "    remove_bitsandbytes_message(bitsandbytes_main_py)\n",
+ "\n",
+ "    os.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\n",
+ "    os.environ[\"BITSANDBYTES_NOWELCOME\"] = \"1\"\n",
+ "    os.environ[\"SAFETENSORS_FAST_GPU\"] = \"1\"\n",
+ "\n",
+ "    cuda_path = \"/usr/local/cuda-11.8/targets/x86_64-linux/lib/\"\n",
+ "    ld_library_path = os.environ.get(\"LD_LIBRARY_PATH\", \"\")\n",
+ "    os.environ[\"LD_LIBRARY_PATH\"] = f\"{ld_library_path}:{cuda_path}\"\n",
+ "\n",
+ "main()\n",
+ "\n",
+ "\n",
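+ "# NOTE: train_data_dir, Threshold, NoAutoCaption and CustomCaption used below are\n",
+ "# not defined in this cell; they are assumed to come from Colab form fields in\n",
+ "# other cells of this notebook.\n",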
+ "print(f\"Your train data directory: {train_data_dir}\")\n",
+ "\n",
+ "os.chdir(finetune_dir)\n",
+ "\n",
+ "config = {\n",
+ "    \"_train_data_dir\": train_data_dir,\n",
+ "    \"batch_size\": 8,\n",
+ "    \"repo_id\": \"SmilingWolf/wd-v1-4-convnextv2-tagger-v2\",\n",
+ "    \"recursive\": True,\n",
+ "    \"remove_underscore\": True,\n",
+ "    \"general_threshold\": Threshold,\n",
+ "    \"character_threshold\": 0.35,\n",
+ "    \"caption_extension\": \".txt\",\n",
+ "    \"max_data_loader_n_workers\": 2,\n",
+ "    \"debug\": True,\n",
+ "}\n",
+ "\n",
+ "args = \"\"\n",
+ "for k, v in config.items():\n",
+ "    if k.startswith(\"_\"):\n",
+ "        args += f'\"{v}\" '\n",
+ "    elif isinstance(v, str):\n",
+ "        args += f'--{k}=\"{v}\" '\n",
+ "    elif isinstance(v, bool) and v:\n",
+ "        args += f\"--{k} \"\n",
+ "    elif isinstance(v, float) and not isinstance(v, bool):\n",
+ "        args += f\"--{k}={v} \"\n",
+ "    elif isinstance(v, int) and not isinstance(v, bool):\n",
+ "        args += f\"--{k}={v} \"\n",
+ "\n",
+ "final_args = f\"python tag_images_by_wd14_tagger.py {args}\"\n",
+ "if not NoAutoCaption:\n",
+ "    !{final_args}\n",
+ "\n",
+ "os.chdir(root_dir)\n",
+ "\n",
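+ "# Prepend the custom tag to every caption file; images that have no caption yet\n",
+ "# get an empty .txt file created first.\n",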
+ "extension = \".txt\"\n",
+ "custom_tag = CustomCaption\n",
+ "\n",
+ "def read_file(filename):\n",
+ "    with open(filename, \"r\") as f:\n",
+ "        contents = f.read()\n",
+ "    return contents\n",
+ "\n",
+ "def write_file(filename, contents):\n",
+ "    with open(filename, \"w\") as f:\n",
+ "        f.write(contents)\n",
+ "\n",
+ "def process_tags(filename, custom_tag, append, remove_tag):\n",
+ "    contents = read_file(filename)\n",
+ "    tags = [tag.strip() for tag in contents.split(',')]\n",
+ "    custom_tags = [tag.strip() for tag in custom_tag.split(',')]\n",
+ "\n",
+ "    for custom_tag in custom_tags:\n",
+ "        custom_tag = custom_tag.replace(\"_\", \" \")\n",
+ "        if remove_tag:\n",
+ "            while custom_tag in tags:\n",
+ "                tags.remove(custom_tag)\n",
+ "        else:\n",
+ "            if custom_tag not in tags:\n",
+ "                if append:\n",
+ "                    tags.append(custom_tag)\n",
+ "                else:\n",
+ "                    tags.insert(0, custom_tag)\n",
+ "\n",
+ "    contents = ', '.join(tags)\n",
+ "    write_file(filename, contents)\n",
+ "\n",
+ "def process_directory(train_data_dir, tag, append, remove_tag, recursive):\n",
+ "    for filename in os.listdir(train_data_dir):\n",
+ "        file_path = os.path.join(train_data_dir, filename)\n",
+ "        if os.path.isdir(file_path) and recursive:\n",
+ "            process_directory(file_path, tag, append, remove_tag, recursive)\n",
+ "        elif filename.endswith(extension):\n",
+ "            process_tags(file_path, tag, append, remove_tag)\n",
+ "\n",
+ "if not any(\n",
+ "    [filename.endswith(extension) for filename in os.listdir(train_data_dir)]\n",
+ "):\n",
+ "    for filename in os.listdir(train_data_dir):\n",
+ "        if filename.endswith((\".png\", \".jpg\", \".jpeg\", \".webp\", \".bmp\")):\n",
+ "            open(\n",
+ "                os.path.join(train_data_dir, os.path.splitext(filename)[0] + extension),\n",
+ "                \"w\",\n",
+ "            ).close()\n",
+ "if not NoAutoCaption:\n",
+ "    process_directory(train_data_dir, custom_tag, False, False, True)\n",
+ "\n",
+ "# 3. Setting\n",
+ "\n",
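+ "# Model may be a preset name from MODEL_URLS, a direct download URL, or a path\n",
+ "# under /content/drive.\n",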
+ "MODEL_URLS = {\n",
+ "    \"RealisticVision-v51\": \"https://huggingface.co/phamhungd/GuoZovya/resolve/main/RealisticVision-v51.safetensors\",\n",
+ "    \"Anything-v3\": \"https://huggingface.co/cag/anything-v3-1/resolve/main/anything-v3-1.safetensors\",\n",
+ "    \"Chilloutmix\": \"https://civitai.com/api/download/models/11745\",\n",
+ "}\n",
+ "MODEL_URL = MODEL_URLS.get(Model, Model)\n",
+ "drive_dir = os.path.join(root_dir, \"drive/MyDrive\")\n",
+ "\n",
+ "def get_supported_extensions():\n",
+ "    return (\".ckpt\", \".safetensors\", \".pt\", \".pth\")\n",
+ "\n",
+ "def get_filename(url, quiet=True):\n",
+ "    extensions = get_supported_extensions()\n",
+ "\n",
+ "    if url.startswith(drive_dir) or url.endswith(extensions):\n",
+ "        filename = os.path.basename(url)\n",
+ "    else:\n",
+ "        response = requests.get(url, stream=True)\n",
+ "        response.raise_for_status()\n",
+ "\n",
+ "        if 'content-disposition' in response.headers:\n",
+ "            content_disposition = response.headers['content-disposition']\n",
+ "            filename = re.findall('filename=\"?([^\"]+)\"?', content_disposition)[0]\n",
+ "        else:\n",
+ "            url_path = urlparse(url).path\n",
+ "            filename = unquote(os.path.basename(url_path))\n",
+ "\n",
+ "    if filename.endswith(extensions):\n",
+ "        return filename\n",
+ "    else:\n",
+ "        return None\n",
+ "\n",
+ "def get_most_recent_file(directory):\n",
+ "    files = glob.glob(os.path.join(directory, \"*\"))\n",
+ "    if not files:\n",
+ "        return None\n",
+ "    most_recent_file = max(files, key=os.path.getmtime)\n",
+ "    basename = os.path.basename(most_recent_file)\n",
+ "\n",
+ "    return most_recent_file\n",
+ "\n",
+ "def parse_args(config):\n",
+ "    args = []\n",
+ "\n",
+ "    for k, v in config.items():\n",
+ "        if k.startswith(\"_\"):\n",
+ "            args.append(f\"{v}\")\n",
+ "        elif isinstance(v, str):\n",
+ "            args.append(f'--{k}={v}')\n",
+ "        elif isinstance(v, bool) and v:\n",
+ "            args.append(f\"--{k}\")\n",
+ "        elif isinstance(v, float) and not isinstance(v, bool):\n",
+ "            args.append(f\"--{k}={v}\")\n",
+ "        elif isinstance(v, int) and not isinstance(v, bool):\n",
+ "            args.append(f\"--{k}={v}\")\n",
+ "\n",
+ "    return args\n",
+ "\n",
+ "def aria2_download(dir, filename, url):\n",
+ "    aria2_config = {\n",
+ "        \"console-log-level\": \"error\",\n",
+ "        \"summary-interval\": 10,\n",
+ "        \"continue\": True,\n",
+ "        \"max-connection-per-server\": 16,\n",
+ "        \"min-split-size\": \"1M\",\n",
+ "        \"split\": 16,\n",
+ "        \"dir\": dir,\n",
+ "        \"out\": filename,\n",
+ "        \"_url\": url,\n",
+ "    }\n",
+ "    aria2_args = parse_args(aria2_config)\n",
+ "    subprocess.run([\"aria2c\", *aria2_args])\n",
+ "\n",
+ "def gdown_download(url, dst, filepath):\n",
+ "    if \"/uc?id/\" in url:\n",
+ "        return gdown.download(url, filepath, quiet=False)\n",
+ "    elif \"/file/d/\" in url:\n",
+ "        return gdown.download(url=url, output=filepath, quiet=False, fuzzy=True)\n",
+ "    elif \"/drive/folders/\" in url:\n",
+ "        os.chdir(dst)\n",
+ "        return gdown.download_folder(url, quiet=True, use_cookies=False)\n",
+ "\n",
+ "def download(url, dst):\n",
+ "    print(f\"Starting download from {url}\")\n",
+ "    filename = get_filename(url)\n",
+ "    filepath = os.path.join(dst, filename)\n",
+ "\n",
+ "    if \"drive.google.com\" in url:\n",
+ "        gdown_download(url, dst, filepath)\n",
+ "    else:\n",
+ "        if \"huggingface.co\" in url and \"/blob/\" in url:\n",
+ "            url = url.replace(\"/blob/\", \"/resolve/\")\n",
+ "        aria2_download(dst, filename, url)\n",
+ "\n",
+ "    print(f\"Download finished: {filepath}\")\n",
+ "    return filepath\n",
+ "\n",
+ "def get_gpu_name():\n",
+ "    try:\n",
+ "        return subprocess.check_output(\"nvidia-smi --query-gpu=name --format=csv,noheader,nounits\", shell=True).decode('ascii').strip()\n",
+ "    except Exception:\n",
+ "        return None\n",
+ "\n",
+ "def main():\n",
+ "    global model_path, vae_path\n",
+ "    model_path, vae_path = None, None\n",
+ "    download_targets = {\n",
+ "        \"model\": (MODEL_URL, pretrained_model),\n",
+ "    }\n",
+ "    for target, (url, dst) in download_targets.items():\n",
+ "        if url and not url.startswith(f\"PASTE {target.upper()} URL OR GDRIVE PATH HERE\"):\n",
+ "            filepath = download(url, dst)\n",
+ "            if target == \"model\":\n",
+ "                model_path = filepath\n",
+ "            print()\n",
+ "    if model_path:\n",
+ "        print(f\"Selected model: {model_path}\")\n",
+ "\n",
+ "if Model.startswith(\"/content/drive/\"):\n",
+ "    model_path = Model\n",
+ "    print(f\"Diffusers model loaded: {Model}\")\n",
+ "else:\n",
+ "    main()\n",
+ "\n",
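+ "# Always fetch the sd-vae-ft-mse VAE and point training at it.\n",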
+ "!aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.ckpt -d /content/VAE -o VAE84EMA.vae.pt\n",
+ "vae = \"/content/VAE/VAE84EMA.vae.pt\"\n",
+ "\n",
+ "# Dataset Config\n",
+ "\n",
+ "activation_word = \"sd vn\"\n",
+ "caption_extension = \".txt\"\n",
+ "token_to_captions = False\n",
+ "dataset_repeats = Repeats\n",
+ "keep_tokens = 0\n",
+ "flip_aug = False\n",
+ "\n",
+ "if ',' in activation_word or ' ' in activation_word:\n",
+ "    words = activation_word.replace(',', ' ').split()\n",
+ "    class_token = words[-1]\n",
+ "\n",
+ "\n",
+ "def read_file(filename):\n",
+ "    with open(filename, \"r\") as f:\n",
+ "        contents = f.read()\n",
+ "    return contents\n",
+ "\n",
+ "\n",
+ "def write_file(filename, contents):\n",
+ "    with open(filename, \"w\") as f:\n",
+ "        f.write(contents)\n",
+ "\n",
+ "\n",
+ "def get_supported_images(folder):\n",
+ "    supported_extensions = (\".png\", \".jpg\", \".jpeg\", \".webp\", \".bmp\")\n",
+ "    return [file for ext in supported_extensions for file in glob.glob(f\"{folder}/*{ext}\")]\n",
+ "\n",
+ "\n",
+ "def get_subfolders_with_supported_images(folder):\n",
+ "    subfolders = [os.path.join(folder, subfolder) for subfolder in os.listdir(folder) if os.path.isdir(os.path.join(folder, subfolder))]\n",
+ "    return [subfolder for subfolder in subfolders if len(get_supported_images(subfolder)) > 0]\n",
+ "\n",
+ "\n",
+ "def process_tags(filename, custom_tag, remove_tag):\n",
+ "    contents = read_file(filename)\n",
+ "    tags = [tag.strip() for tag in contents.split(',')]\n",
+ "    custom_tags = [tag.strip() for tag in custom_tag.split(',')]\n",
+ "\n",
+ "    for custom_tag in custom_tags:\n",
+ "        custom_tag = custom_tag.replace(\"_\", \" \")\n",
+ "        # if remove_tag:\n",
+ "        #     while custom_tag in tags:\n",
+ "        #         tags.remove(custom_tag)\n",
+ "        # else:\n",
+ "        if custom_tag not in tags:\n",
+ "            tags.insert(0, custom_tag)\n",
+ "\n",
+ "    contents = ', '.join(tags)\n",
+ "    write_file(filename, contents)\n",
+ "\n",
+ "\n",
+ "def process_folder_recursively(folder):\n",
+ "    for root, _, files in os.walk(folder):\n",
+ "        for file in files:\n",
+ "            if file.endswith(caption_extension):\n",
+ "                file_path = os.path.join(root, file)\n",
+ "                extracted_class_token = get_class_token_from_folder_name(root, folder)\n",
+ "                train_supported_images = get_supported_images(train_data_dir)\n",
+ "                tag = extracted_class_token if extracted_class_token else activation_word if train_supported_images else \"\"\n",
+ "                if tag:\n",
+ "                    process_tags(file_path, tag, remove_tag=(not token_to_captions))\n",
+ "\n",
+ "\n",
+ "def get_num_repeats(folder):\n",
+ "    folder_name = os.path.basename(folder)\n",
+ "    try:\n",
+ "        repeats, _ = folder_name.split('_', 1)\n",
+ "        num_repeats = int(repeats)\n",
+ "    except ValueError:\n",
+ "        num_repeats = dataset_repeats\n",
+ "\n",
+ "    return num_repeats\n",
+ "\n",
+ "\n",
+ "def get_class_token_from_folder_name(folder, parent_folder):\n",
+ "    if folder == parent_folder:\n",
+ "        return class_token\n",
+ "\n",
+ "    folder_name = os.path.basename(folder)\n",
+ "    try:\n",
+ "        _, concept = folder_name.split('_', 1)\n",
+ "        return concept\n",
+ "    except ValueError:\n",
+ "        return \"\"\n",
+ "\n",
+ "train_supported_images = get_supported_images(train_data_dir)\n",
+ "train_subfolders = get_subfolders_with_supported_images(train_data_dir)\n",
+ "\n",
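+ "# Build the kohya-ss dataset config: the top-level folder and every\n",
+ "# \"<repeats>_<concept>\" subfolder become subsets with their own class tokens.\n",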
+ "subsets = []\n",
+ "config = {\n",
+ "    \"general\": {\n",
+ "        \"enable_bucket\": True,\n",
+ "        \"caption_extension\": caption_extension,\n",
+ "        \"shuffle_caption\": True,\n",
+ "        \"keep_tokens\": keep_tokens,\n",
+ "        \"bucket_reso_steps\": 64,\n",
+ "        \"bucket_no_upscale\": False,\n",
+ "    },\n",
+ "    \"datasets\": [\n",
+ "        {\n",
+ "            \"resolution\": resolution,\n",
+ "            \"min_bucket_reso\": 320 if resolution > 640 else 256,\n",
+ "            \"max_bucket_reso\": 1280 if resolution > 640 else 1024,\n",
+ "            \"caption_dropout_rate\": 0,\n",
+ "            \"caption_tag_dropout_rate\": 0,\n",
+ "            \"caption_dropout_every_n_epochs\": 0,\n",
+ "            \"flip_aug\": flip_aug,\n",
+ "            \"color_aug\": False,\n",
+ "            \"face_crop_aug_range\": None,\n",
+ "            \"subsets\": subsets,\n",
+ "        }\n",
+ "    ],\n",
+ "}\n",
+ "\n",
+ "if token_to_captions and keep_tokens < 2:\n",
+ "    keep_tokens = 1\n",
+ "\n",
+ "process_folder_recursively(train_data_dir)\n",
+ "\n",
+ "if train_supported_images:\n",
+ "    subsets.append({\n",
+ "        \"image_dir\": train_data_dir,\n",
+ "        \"class_tokens\": activation_word,\n",
+ "        \"num_repeats\": dataset_repeats,\n",
+ "    })\n",
+ "\n",
+ "for subfolder in train_subfolders:\n",
+ "    num_repeats = get_num_repeats(subfolder)\n",
+ "    extracted_class_token = get_class_token_from_folder_name(subfolder, train_data_dir)\n",
+ "    subsets.append({\n",
+ "        \"image_dir\": subfolder,\n",
+ "        \"class_tokens\": extracted_class_token if extracted_class_token else None,\n",
+ "        \"num_repeats\": num_repeats,\n",
+ "    })\n",
+ "\n",
+ "for subset in subsets:\n",
+ "    if not glob.glob(f\"{subset['image_dir']}/*.txt\"):\n",
+ "        subset[\"class_tokens\"] = activation_word\n",
+ "\n",
+ "dataset_config = os.path.join(config_dir, \"dataset_config.toml\")\n",
+ "\n",
+ "for key in config:\n",
+ "    if isinstance(config[key], dict):\n",
+ "        for sub_key in config[key]:\n",
+ "            if config[key][sub_key] == \"\":\n",
+ "                config[key][sub_key] = None\n",
+ "    elif config[key] == \"\":\n",
+ "        config[key] = None\n",
+ "\n",
+ "config_str = toml.dumps(config)\n",
+ "\n",
+ "with open(dataset_config, \"w\") as f:\n",
+ "    f.write(config_str)\n",
+ "\n",
+ "print(config_str)\n",
+ "\n",
+ "# Config\n",
+ "optimizer_args = False\n",
+ "conv_dim = 4\n",
+ "conv_alpha = 1\n",
+ "\n",
+ "network_module = \"networks.lora\"\n",
+ "network_args = \"\"\n",
+ "\n",
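+ "# Training config: LoRA (network_dim 64, network_alpha 32) with AdamW8bit and a\n",
+ "# constant LR schedule. unet_lr, text_encoder_lr, Batch_size, num_epochs,\n",
+ "# save_n_epochs_type_value, output_dir and Loraname are assumed Colab form fields.\n",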
+ "config = {\n",
+ "    \"model_arguments\": {\n",
+ "        \"v2\": False,\n",
+ "        \"v_parameterization\": False,\n",
+ "        \"pretrained_model_name_or_path\": model_path,\n",
+ "        \"vae\": vae,\n",
+ "    },\n",
+ "    \"additional_network_arguments\": {\n",
+ "        \"no_metadata\": False,\n",
+ "        \"unet_lr\": float(unet_lr),\n",
+ "        \"text_encoder_lr\": float(text_encoder_lr),\n",
+ "        \"network_module\": network_module,\n",
+ "        \"network_dim\": 64,\n",
+ "        \"network_alpha\": 32,\n",
+ "        \"training_comment\": \"sdvn.me\",\n",
+ "    },\n",
+ "    \"optimizer_arguments\": {\n",
+ "        \"optimizer_type\": \"AdamW8bit\",\n",
+ "        \"optimizer_args\": eval(optimizer_args) if optimizer_args else None,\n",
+ "        \"learning_rate\": unet_lr,\n",
+ "        \"max_grad_norm\": 1.0,\n",
+ "        \"lr_scheduler\": \"constant\",\n",
+ "    },\n",
+ "    \"dataset_arguments\": {\n",
+ "        \"cache_latents\": True,\n",
+ "        \"debug_dataset\": False,\n",
+ "        \"vae_batch_size\": Batch_size,\n",
+ "    },\n",
+ "    \"training_arguments\": {\n",
+ "        \"output_dir\": output_dir,\n",
+ "        \"output_name\": Loraname,\n",
+ "        \"save_precision\": \"fp16\",\n",
+ "        \"save_every_n_epochs\": save_n_epochs_type_value,\n",
+ "        \"train_batch_size\": Batch_size,\n",
+ "        \"max_token_length\": 225,\n",
+ "        \"mem_eff_attn\": False,\n",
+ "        \"xformers\": True,\n",
+ "        \"max_train_epochs\": num_epochs,\n",
+ "        \"max_data_loader_n_workers\": 8,\n",
+ "        \"persistent_data_loader_workers\": True,\n",
+ "        \"gradient_checkpointing\": False,\n",
+ "        \"gradient_accumulation_steps\": 1,\n",
+ "        \"mixed_precision\": \"fp16\",\n",
+ "        \"clip_skip\": 2,\n",
+ "        \"logging_dir\": \"/content/Dreamboot-Config/logs\",\n",
+ "        \"log_prefix\": Loraname,\n",
+ "        \"lowram\": True,\n",
+ "        \"training_comment\": \"train by sdvn.me train 1click\",\n",
+ "    },\n",
+ "    \"sample_prompt_arguments\": {\n",
+ "        \"sample_every_n_steps\": 200,\n",
+ "        \"sample_every_n_epochs\": 1,\n",
+ "        \"sample_sampler\": \"euler\",\n",
+ "    },\n",
+ "    \"dreambooth_arguments\": {\n",
+ "        \"prior_loss_weight\": 1,\n",
+ "    },\n",
+ "    \"saving_arguments\": {\n",
+ "        \"save_model_as\": \"safetensors\",\n",
+ "    },\n",
+ "}\n",
+ "SamplePrompt = f\"{Loraname}, front view, masterpiece, best quality\"\n",
+ "sample_str = f\"\"\"\n",
+ "  {SamplePrompt}\\\n",
+ "  --n lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry \\\n",
+ "  --w 512 \\\n",
+ "  --h 768 \\\n",
+ "  --l 7 \\\n",
+ "  --s 28\n",
+ "\"\"\"\n",
+ "config_path = os.path.join(config_dir, \"config_file.toml\")\n",
+ "prompt_path = os.path.join(config_dir, \"sample_prompt.txt\")\n",
+ "\n",
+ "for key in config:\n",
+ "    if isinstance(config[key], dict):\n",
+ "        for sub_key in config[key]:\n",
+ "            if config[key][sub_key] == \"\":\n",
+ "                config[key][sub_key] = None\n",
+ "    elif config[key] == \"\":\n",
+ "        config[key] = None\n",
+ "\n",
+ "config_str = toml.dumps(config)\n",
+ "\n",
+ "def write_file(filename, contents):\n",
+ "    with open(filename, \"w\") as f:\n",
+ "        f.write(contents)\n",
+ "\n",
+ "write_file(config_path, config_str)\n",
+ "write_file(prompt_path, sample_str)\n",
+ "\n",
+ "print(config_str)\n",
+ "\n",
+ "os.chdir(repo_dir)\n",
+ "\n",
+ "\n",
+ "train_file = \"train_network.py\"\n",
+ "ConfigFolder = \"/content/Dreamboot-Config/config\"\n",
+ "sample_prompt = f\"{ConfigFolder}/sample_prompt.txt\"\n",
+ "config_file = f\"{ConfigFolder}/config_file.toml\"\n",
+ "dataset_config = f\"{ConfigFolder}/dataset_config.toml\"\n",
+ "accelerate_conf = {\n",
+ "    \"config_file\": accelerate_config,\n",
+ "    \"num_cpu_threads_per_process\": 1,\n",
+ "}\n",
+ "\n",
+ "train_conf = {\n",
+ "    \"sample_prompts\": sample_prompt,\n",
+ "    \"dataset_config\": dataset_config,\n",
+ "    \"config_file\": config_file\n",
+ "}\n",
+ "\n",
+ "def make_args(config):\n",
+ "    args = \"\"\n",
+ "    for k, v in config.items():\n",
+ "        if k.startswith(\"_\"):\n",
+ "            args += f'\"{v}\" '\n",
+ "        elif isinstance(v, str):\n",
+ "            args += f'--{k}=\"{v}\" '\n",
+ "        elif isinstance(v, bool) and v:\n",
+ "            args += f\"--{k} \"\n",
+ "        elif isinstance(v, float) and not isinstance(v, bool):\n",
+ "            args += f\"--{k}={v} \"\n",
+ "        elif isinstance(v, int) and not isinstance(v, bool):\n",
+ "            args += f\"--{k}={v} \"\n",
+ "\n",
+ "    return args\n",
+ "\n",
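+ "# NOTE: the launch command is only assembled below; presumably it is executed in\n",
+ "# a later cell, e.g. with !{final_args}.\n",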
+ "accelerate_args = make_args(accelerate_conf)\n",
+ "train_args = make_args(train_conf)\n",
+ "final_args = f\"accelerate launch {accelerate_args} {train_file} {train_args}\"\n"
+ ]
+ }
+ ],
+ "metadata": {
+ "language_info": {
+ "name": "python"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+ }