{ "cells": [ { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import os\n", "import zipfile\n", "import shutil\n", "from subprocess import getoutput\n", "from IPython.utils import capture\n", "import random\n", "import concurrent.futures\n", "from tqdm import tqdm\n", "from PIL import Image\n", "import time\n", "import re\n", "import json\n", "import glob\n", "import gdown\n", "import requests\n", "import subprocess\n", "from urllib.parse import urlparse, unquote\n", "from pathlib import Path\n", "import toml\n", "\n", "#root_dir\n", "root_dir = \"/content\"\n", "deps_dir = os.path.join(root_dir,\"deps\")\n", "repo_dir = os.path.join(root_dir,\"Kohya-Colab\")\n", "training_dir = os.path.join(root_dir,\"Dreamboot-Config\")\n", "pretrained_model = os.path.join(root_dir,\"pretrained_model\")\n", "vae_dir = os.path.join(root_dir,\"vae\")\n", "config_dir = os.path.join(training_dir,\"config\")\n", "\n", "#repo_dir\n", "accelerate_config = os.path.join(repo_dir, \"accelerate_config/config.yaml\")\n", "tools_dir = os.path.join(repo_dir,\"tools\")\n", "finetune_dir = os.path.join(repo_dir,\"finetune\")\n", "\n", "for store in [\"root_dir\", \"deps_dir\", \"repo_dir\", \"training_dir\", \"pretrained_model\", \"vae_dir\", \"accelerate_config\", \"tools_dir\", \"finetune_dir\", \"config_dir\"]:\n", " with capture.capture_output() as cap:\n", " %store {store}\n", " del cap\n", "\n", "repo_url = \"https://github.com/phamhungd/Kohya-Colab\"\n", "bitsandytes_main_py = \"/usr/local/lib/python3.10/dist-packages/bitsandbytes/cuda_setup/main.py\"\n", "branch = \"\"\n", "verbose = False\n", "\n", "def read_file(filename):\n", " with open(filename, \"r\") as f:\n", " contents = f.read()\n", " return contents\n", "\n", "\n", "def write_file(filename, contents):\n", " with open(filename, \"w\") as f:\n", " f.write(contents)\n", "\n", "\n", "def clone_repo(url):\n", " if not os.path.exists(repo_dir):\n", " os.chdir(root_dir)\n", " !git clone {url} {repo_dir}\n", " else:\n", " os.chdir(repo_dir)\n", " !git pull origin {branch} if branch else !git pull\n", "\n", "\n", "def install_dependencies():\n", " s = getoutput('nvidia-smi')\n", "\n", " if 'T4' in s:\n", " !sed -i \"s@cpu@cuda@\" library/model_util.py\n", "\n", " !pip install {'-q' if not verbose else ''} --upgrade -r requirements.txt\n", "\n", " from accelerate.utils import write_basic_config\n", "\n", " if not os.path.exists(accelerate_config):\n", " write_basic_config(save_location=accelerate_config)\n", "\n", "\n", "def remove_bitsandbytes_message(filename):\n", " welcome_message = \"\"\"\n", "def evaluate_cuda_setup():\n", " print('')\n", " print('='*35 + 'BUG REPORT' + '='*35)\n", " print('Welcome to bitsandbytes. For bug reports, please submit your error trace to: https://github.com/TimDettmers/bitsandbytes/issues')\n", " print('For effortless bug reporting copy-paste your error into this form: https://docs.google.com/forms/d/e/1FAIpQLScPB8emS3Thkp66nvqwmjTEgxp8Y9ufuWTzFyr9kJ5AoI47dQ/viewform?usp=sf_link')\n", " print('='*80)\"\"\"\n", "\n", " new_welcome_message = \"\"\"\n", "def evaluate_cuda_setup():\n", " import os\n", " if 'BITSANDBYTES_NOWELCOME' not in os.environ or str(os.environ['BITSANDBYTES_NOWELCOME']) == '0':\n", " print('')\n", " print('=' * 35 + 'BUG REPORT' + '=' * 35)\n", " print('Welcome to bitsandbytes. For bug reports, please submit your error trace to: https://github.com/TimDettmers/bitsandbytes/issues')\n", " print('For effortless bug reporting copy-paste your error into this form: https://docs.google.com/forms/d/e/1FAIpQLScPB8emS3Thkp66nvqwmjTEgxp8Y9ufuWTzFyr9kJ5AoI47dQ/viewform?usp=sf_link')\n", " print('To hide this message, set the BITSANDBYTES_NOWELCOME variable like so: export BITSANDBYTES_NOWELCOME=1')\n", " print('=' * 80)\"\"\"\n", "\n", " contents = read_file(filename)\n", " new_contents = contents.replace(welcome_message, new_welcome_message)\n", " write_file(filename, new_contents)\n", "\n", "\n", "def main():\n", " os.chdir(root_dir)\n", "\n", " for dir in [\n", " deps_dir,\n", " training_dir,\n", " config_dir,\n", " pretrained_model,\n", " vae_dir\n", " ]:\n", " os.makedirs(dir, exist_ok=True)\n", "\n", " clone_repo(repo_url)\n", "\n", " if branch:\n", " os.chdir(repo_dir)\n", " status = os.system(f\"git checkout {branch}\")\n", " if status != 0:\n", " raise Exception(\"Failed to checkout branch or commit\")\n", "\n", " os.chdir(repo_dir)\n", "\n", " !apt install aria2 {'-qq' if not verbose else ''}\n", "\n", " install_dependencies()\n", " time.sleep(3)\n", "\n", " remove_bitsandbytes_message(bitsandytes_main_py)\n", "\n", " os.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\n", " os.environ[\"BITSANDBYTES_NOWELCOME\"] = \"1\"\n", " os.environ[\"SAFETENSORS_FAST_GPU\"] = \"1\"\n", "\n", " cuda_path = \"/usr/local/cuda-11.8/targets/x86_64-linux/lib/\"\n", " ld_library_path = os.environ.get(\"LD_LIBRARY_PATH\", \"\")\n", " os.environ[\"LD_LIBRARY_PATH\"] = f\"{ld_library_path}:{cuda_path}\"\n", "\n", "main()\n", "\n", "\n", "print(f\"Your train data directory : {train_data_dir}\")\n", "\n", "os.chdir(finetune_dir)\n", "\n", "config = {\n", " \"_train_data_dir\": train_data_dir,\n", " \"batch_size\": 8,\n", " \"repo_id\": \"SmilingWolf/wd-v1-4-convnextv2-tagger-v2\",\n", " \"recursive\": True,\n", " \"remove_underscore\": True,\n", " \"general_threshold\": 0.75,\n", " \"character_threshold\": 0.25,\n", " \"caption_extension\": \".txt\",\n", " \"max_data_loader_n_workers\": 2,\n", " \"debug\": True,\n", "}\n", "\n", "args = \"\"\n", "for k, v in config.items():\n", " if k.startswith(\"_\"):\n", " args += f'\"{v}\" '\n", " elif isinstance(v, str):\n", " args += f'--{k}=\"{v}\" '\n", " elif isinstance(v, bool) and v:\n", " args += f\"--{k} \"\n", " elif isinstance(v, float) and not isinstance(v, bool):\n", " args += f\"--{k}={v} \"\n", " elif isinstance(v, int) and not isinstance(v, bool):\n", " args += f\"--{k}={v} \"\n", "\n", "final_args = f\"python tag_images_by_wd14_tagger.py {args}\"\n", "if not NoAutoCaption :\n", " !{final_args}\n", "\n", "os.chdir(root_dir)\n", "\n", "extension = \".txt\"\n", "custom_tag = CustomCaption\n", "\n", "def read_file(filename):\n", " with open(filename, \"r\") as f:\n", " contents = f.read()\n", " return contents\n", "\n", "def write_file(filename, contents):\n", " with open(filename, \"w\") as f:\n", " f.write(contents)\n", "\n", "def process_tags(filename, custom_tag, append, remove_tag):\n", " contents = read_file(filename)\n", " tags = [tag.strip() for tag in contents.split(',')]\n", " custom_tags = [tag.strip() for tag in custom_tag.split(',')]\n", "\n", " for custom_tag in custom_tags:\n", " custom_tag = custom_tag.replace(\"_\", \" \")\n", " if remove_tag:\n", " while custom_tag in tags:\n", " tags.remove(custom_tag)\n", " else:\n", " if custom_tag not in tags:\n", " if append:\n", " tags.append(custom_tag)\n", " else:\n", " tags.insert(0, custom_tag)\n", "\n", " contents = ', '.join(tags)\n", " write_file(filename, contents)\n", "\n", "def process_directory(train_data_dir, tag, append, remove_tag, recursive):\n", " for filename in os.listdir(train_data_dir):\n", " file_path = os.path.join(train_data_dir, filename)\n", " if os.path.isdir(file_path) and recursive:\n", " process_directory(file_path, tag, append, remove_tag, recursive)\n", " elif filename.endswith(extension):\n", " process_tags(file_path, tag, append, remove_tag)\n", "\n", "if not any(\n", " [filename.endswith(extension) for filename in os.listdir(train_data_dir)]\n", "):\n", " for filename in os.listdir(train_data_dir):\n", " if filename.endswith((\".png\", \".jpg\", \".jpeg\", \".webp\", \".bmp\")):\n", " open(\n", " os.path.join(train_data_dir, filename.split(\".\")[0] + extension),\n", " \"w\",\n", " ).close()\n", "if not NoAutoCaption :\n", " process_directory(train_data_dir, custom_tag, False, False, True)\n", "\n", "#3.Setting\n", "\n", "MODEL_URLS = {\n", " \"GSMaletoPhotoreal_v4\" : \"https://civitai.com/api/download/models/164715\",\n", " \"GSMaletoFusion_v1\" : \"https://civitai.com/api/download/models/138518\",\n", " \"GSMaletoAnime_v1\" : \"https://civitai.com/api/download/models/503605\",\n", "}\n", "MODEL_URL = MODEL_URLS.get(Model, Model)\n", "drive_dir = os.path.join(root_dir, \"drive/MyDrive\")\n", "def get_supported_extensions():\n", " return tuple([\".ckpt\", \".safetensors\", \".pt\", \".pth\"])\n", "\n", "def get_filename(url, quiet=True):\n", " extensions = get_supported_extensions()\n", "\n", " if url.startswith(drive_dir) or url.endswith(tuple(extensions)):\n", " filename = os.path.basename(url)\n", " else:\n", " response = requests.get(url, stream=True)\n", " response.raise_for_status()\n", "\n", " if 'content-disposition' in response.headers:\n", " content_disposition = response.headers['content-disposition']\n", " filename = re.findall('filename=\"?([^\"]+)\"?', content_disposition)[0]\n", " else:\n", " url_path = urlparse(url).path\n", " filename = unquote(os.path.basename(url_path))\n", "\n", " if filename.endswith(tuple(get_supported_extensions())):\n", " return filename\n", " else:\n", " return None\n", "\n", "def get_most_recent_file(directory):\n", " files = glob.glob(os.path.join(directory, \"*\"))\n", " if not files:\n", " return None\n", " most_recent_file = max(files, key=os.path.getmtime)\n", " basename = os.path.basename(most_recent_file)\n", "\n", " return most_recent_file\n", "\n", "def parse_args(config):\n", " args = []\n", "\n", " for k, v in config.items():\n", " if k.startswith(\"_\"):\n", " args.append(f\"{v}\")\n", " elif isinstance(v, str) and v is not None:\n", " args.append(f'--{k}={v}')\n", " elif isinstance(v, bool) and v:\n", " args.append(f\"--{k}\")\n", " elif isinstance(v, float) and not isinstance(v, bool):\n", " args.append(f\"--{k}={v}\")\n", " elif isinstance(v, int) and not isinstance(v, bool):\n", " args.append(f\"--{k}={v}\")\n", "\n", " return args\n", "def aria2_download(dir, filename, url):\n", " aria2_config = {\n", " \"console-log-level\" : \"error\",\n", " \"summary-interval\" : 10,\n", " \"continue\" : True,\n", " \"max-connection-per-server\" : 16,\n", " \"min-split-size\" : \"1M\",\n", " \"split\" : 16,\n", " \"dir\" : dir,\n", " \"out\" : filename,\n", " \"_url\" : url,\n", " }\n", " aria2_args = parse_args(aria2_config)\n", " subprocess.run([\"aria2c\", *aria2_args])\n", "\n", "def gdown_download(url, dst, filepath):\n", " if \"/uc?id/\" in url:\n", " return gdown.download(url, filepath, quiet=False)\n", " elif \"/file/d/\" in url:\n", " return gdown.download(url=url, output=filepath, quiet=False, fuzzy=True)\n", " elif \"/drive/folders/\" in url:\n", " os.chdir(dst)\n", " return gdown.download_folder(url, quiet=True, use_cookies=False)\n", "\n", "def download(url, dst):\n", " print(f\"Starting downloading from {url}\")\n", " filename = get_filename(url)\n", " filepath = os.path.join(dst, filename)\n", "\n", " if \"drive.google.com\" in url:\n", " gdown = gdown_download(url, dst, filepath)\n", " else:\n", " if \"huggingface.co\" in url and \"/blob/\" in url:\n", " url = url.replace(\"/blob/\", \"/resolve/\")\n", " aria2_download(dst, filename, url)\n", "\n", " print(f\"Download finished: {filepath}\")\n", " return filepath\n", "\n", "def get_gpu_name():\n", " try:\n", " return subprocess.check_output(\"nvidia-smi --query-gpu=name --format=csv,noheader,nounits\", shell=True).decode('ascii').strip()\n", " except:\n", " return None\n", "\n", "def main():\n", " global model_path, vae_path\n", " model_path, vae_path = None, None\n", " download_targets = {\n", " \"model\": (MODEL_URL, pretrained_model),\n", " }\n", " for target, (url, dst) in download_targets.items():\n", " if url and not url.startswith(f\"PASTE {target.upper()} URL OR GDRIVE PATH HERE\"):\n", " filepath = download(url, dst)\n", " if target == \"model\":\n", " model_path = filepath\n", " print()\n", " if model_path:\n", " print(f\"Selected model: {model_path}\")\n", "\n", "if Model.startswith(\"/content/drive/\"):\n", " model_path = Model\n", " print(f\"Diffusers model is loaded : {Model}\")\n", "else:\n", " main()\n", "\n", "!aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.ckpt -d /content/VAE -o VAE84EMA.vae.pt\n", "vae = \"/content/VAE/VAE84EMA.vae.pt\"\n", "\n", "#Dataset Config\n", "\n", "activation_word = \"sd vn\"\n", "caption_extension = \".txt\"\n", "token_to_captions = False\n", "dataset_repeats = Repeats\n", "keep_tokens = 0\n", "flip_aug = False\n", "\n", "if ',' in activation_word or ' ' in activation_word:\n", " words = activation_word.replace(',', ' ').split()\n", " class_token = words[-1]\n", "\n", "\n", "def read_file(filename):\n", " with open(filename, \"r\") as f:\n", " contents = f.read()\n", " return contents\n", "\n", "\n", "def write_file(filename, contents):\n", " with open(filename, \"w\") as f:\n", " f.write(contents)\n", "\n", "\n", "def get_supported_images(folder):\n", " supported_extensions = (\".png\", \".jpg\", \".jpeg\", \".webp\", \".bmp\")\n", " return [file for ext in supported_extensions for file in glob.glob(f\"{folder}/*{ext}\")]\n", "\n", "\n", "def get_subfolders_with_supported_images(folder):\n", " subfolders = [os.path.join(folder, subfolder) for subfolder in os.listdir(folder) if os.path.isdir(os.path.join(folder, subfolder))]\n", " return [subfolder for subfolder in subfolders if len(get_supported_images(subfolder)) > 0]\n", "\n", "\n", "def process_tags(filename, custom_tag, remove_tag):\n", " contents = read_file(filename)\n", " tags = [tag.strip() for tag in contents.split(',')]\n", " custom_tags = [tag.strip() for tag in custom_tag.split(',')]\n", "\n", " for custom_tag in custom_tags:\n", " custom_tag = custom_tag.replace(\"_\", \" \")\n", " # if remove_tag:\n", " # while custom_tag in tags:\n", " # tags.remove(custom_tag)\n", " # else:\n", " if custom_tag not in tags:\n", " tags.insert(0, custom_tag)\n", "\n", " contents = ', '.join(tags)\n", " write_file(filename, contents)\n", "\n", "\n", "def process_folder_recursively(folder):\n", " for root, _, files in os.walk(folder):\n", " for file in files:\n", " if file.endswith(caption_extension):\n", " file_path = os.path.join(root, file)\n", " extracted_class_token = get_class_token_from_folder_name(root, folder)\n", " train_supported_images = get_supported_images(train_data_dir)\n", " tag = extracted_class_token if extracted_class_token else activation_word if train_supported_images else \"\"\n", " if not tag == \"\":\n", " process_tags(file_path, tag, remove_tag=(not token_to_captions))\n", "\n", "\n", "def get_num_repeats(folder):\n", " folder_name = os.path.basename(folder)\n", " try:\n", " repeats, _ = folder_name.split('_', 1)\n", " num_repeats = int(repeats)\n", " except ValueError:\n", " num_repeats = dataset_repeats\n", "\n", " return num_repeats\n", "\n", "\n", "def get_class_token_from_folder_name(folder, parent_folder):\n", " if folder == parent_folder:\n", " return class_token\n", "\n", " folder_name = os.path.basename(folder)\n", " try:\n", " _, concept = folder_name.split('_', 1)\n", " return concept\n", " except ValueError:\n", " return \"\"\n", "\n", "train_supported_images = get_supported_images(train_data_dir)\n", "train_subfolders = get_subfolders_with_supported_images(train_data_dir)\n", "\n", "subsets = []\n", "config = {\n", " \"general\": {\n", " \"enable_bucket\": True,\n", " \"caption_extension\": caption_extension,\n", " \"shuffle_caption\": True,\n", " \"keep_tokens\": keep_tokens,\n", " \"bucket_reso_steps\": 64,\n", " \"bucket_no_upscale\": False,\n", " },\n", " \"datasets\": [\n", " {\n", " \"resolution\": resolution,\n", " \"min_bucket_reso\": 320 if resolution > 640 else 256,\n", " \"max_bucket_reso\": 1280 if resolution > 640 else 1024,\n", " \"caption_dropout_rate\": 0,\n", " \"caption_tag_dropout_rate\": 0,\n", " \"caption_dropout_every_n_epochs\": 0,\n", " \"flip_aug\": flip_aug,\n", " \"color_aug\": False,\n", " \"face_crop_aug_range\": None,\n", " \"subsets\": subsets,\n", " }\n", " ],\n", "}\n", "\n", "if token_to_captions and keep_tokens < 2:\n", " keep_tokens = 1\n", "\n", "process_folder_recursively(train_data_dir)\n", "\n", "if train_supported_images:\n", " subsets.append({\n", " \"image_dir\": train_data_dir,\n", " \"class_tokens\": activation_word,\n", " \"num_repeats\": dataset_repeats,\n", " })\n", "\n", "for subfolder in train_subfolders:\n", " num_repeats = get_num_repeats(subfolder)\n", " extracted_class_token = get_class_token_from_folder_name(subfolder, train_data_dir)\n", " subsets.append({\n", " \"image_dir\": subfolder,\n", " \"class_tokens\": extracted_class_token if extracted_class_token else None,\n", " \"num_repeats\": num_repeats,\n", " })\n", "\n", "for subset in subsets:\n", " if not glob.glob(f\"{subset['image_dir']}/*.txt\"):\n", " subset[\"class_tokens\"] = activation_word\n", "\n", "dataset_config = os.path.join(config_dir, \"dataset_config.toml\")\n", "\n", "for key in config:\n", " if isinstance(config[key], dict):\n", " for sub_key in config[key]:\n", " if config[key][sub_key] == \"\":\n", " config[key][sub_key] = None\n", " elif config[key] == \"\":\n", " config[key] = None\n", "\n", "config_str = toml.dumps(config)\n", "\n", "with open(dataset_config, \"w\") as f:\n", " f.write(config_str)\n", "\n", "print(config_str)\n", "\n", "#Config\n", "optimizer_args = False\n", "conv_dim = 4\n", "conv_alpha = 1\n", "\n", "network_module = \"networks.lora\"\n", "network_args = \"\"\n", "\n", "config = {\n", " \"model_arguments\": {\n", " \"v2\": False,\n", " \"v_parameterization\": False,\n", " \"pretrained_model_name_or_path\": model_path,\n", " \"vae\": vae,\n", " },\n", " \"additional_network_arguments\": {\n", " \"no_metadata\": False,\n", " \"unet_lr\": float(unet_lr),\n", " \"text_encoder_lr\": float(text_encoder_lr),\n", " \"network_module\": network_module,\n", " \"network_dim\": 64,\n", " \"network_alpha\": 48,\n", " \"training_comment\": \"GSGI Trainer\",\n", " },\n", " \"optimizer_arguments\": {\n", " \"optimizer_type\": \"AdamW8bit\",\n", " \"optimizer_args\": eval(optimizer_args) if optimizer_args else None,\n", " \"learning_rate\": unet_lr,\n", " \"max_grad_norm\": 1.0,\n", " \"lr_scheduler\": \"cosine_with_restarts\",\n", " \"lr_scheduler_num_cycles\": 4,\n", " },\n", " \"dataset_arguments\": {\n", " \"cache_latents\": True,\n", " \"debug_dataset\": False,\n", " \"vae_batch_size\": Batch_size,\n", " },\n", " \"training_arguments\": {\n", " \"output_dir\": output_dir,\n", " \"output_name\": Loraname,\n", " \"save_precision\": \"fp16\",\n", " \"save_every_n_epochs\": save_n_epochs_type_value,\n", " \"train_batch_size\": Batch_size,\n", " \"max_token_length\": 225,\n", " \"mem_eff_attn\": False,\n", " \"xformers\": True,\n", " \"max_train_epochs\": num_epochs,\n", " \"max_data_loader_n_workers\": 8,\n", " \"persistent_data_loader_workers\": True,\n", " \"gradient_checkpointing\": False,\n", " \"gradient_accumulation_steps\": 1,\n", " \"mixed_precision\": \"fp16\",\n", " \"clip_skip\": 1,\n", " \"logging_dir\": \"/content/Dreamboot-Config/logs\",\n", " \"log_prefix\": Loraname,\n", " \"lowram\": True,\n", " \"training_comment\" : \"train by GSGI Trainer\",\n", " },\n", " \"sample_prompt_arguments\": {\n", " \"sample_every_n_steps\": 200,\n", " \"sample_every_n_epochs\": 1,\n", " \"sample_sampler\": \"euler\",\n", " },\n", " \"dreambooth_arguments\": {\n", " \"prior_loss_weight\": 1,\n", " },\n", " \"saving_arguments\": {\n", " \"save_model_as\": \"safetensors\",\n", " },\n", "}\n", "SamplePrompt = f\"{Loraname},front view, masterpiece,best quality\"\n", "sample_str = f\"\"\"\n", " {SamplePrompt}\\\n", " --n lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry \\\n", " --w 512 \\\n", " --h 768 \\\n", " --l 7 \\\n", " --s 30\n", "\"\"\"\n", "config_path = os.path.join(config_dir, \"config_file.toml\")\n", "prompt_path = os.path.join(config_dir, \"sample_prompt.txt\")\n", "\n", "for key in config:\n", " if isinstance(config[key], dict):\n", " for sub_key in config[key]:\n", " if config[key][sub_key] == \"\":\n", " config[key][sub_key] = None\n", " elif config[key] == \"\":\n", " config[key] = None\n", "\n", "config_str = toml.dumps(config)\n", "\n", "def write_file(filename, contents):\n", " with open(filename, \"w\") as f:\n", " f.write(contents)\n", "\n", "write_file(config_path, config_str)\n", "write_file(prompt_path, sample_str)\n", "\n", "print(config_str)\n", "\n", "os.chdir(repo_dir)\n", "\n", "\n", "train_file = \"train_network.py\"\n", "ConfigFolder = \"/content/Dreamboot-Config/config\"\n", "sample_prompt = f\"{ConfigFolder}/sample_prompt.txt\"\n", "config_file = f\"{ConfigFolder}/config_file.toml\"\n", "dataset_config = f\"{ConfigFolder}/dataset_config.toml\"\n", "accelerate_conf = {\n", " \"config_file\" : accelerate_config,\n", " \"num_cpu_threads_per_process\" : 1,\n", "}\n", "\n", "train_conf = {\n", " \"sample_prompts\" : sample_prompt,\n", " \"dataset_config\" : dataset_config,\n", " \"config_file\" : config_file\n", "}\n", "\n", "def train(config):\n", " args = \"\"\n", " for k, v in config.items():\n", " if k.startswith(\"_\"):\n", " args += f'\"{v}\" '\n", " elif isinstance(v, str):\n", " args += f'--{k}=\"{v}\" '\n", " elif isinstance(v, bool) and v:\n", " args += f\"--{k} \"\n", " elif isinstance(v, float) and not isinstance(v, bool):\n", " args += f\"--{k}={v} \"\n", " elif isinstance(v, int) and not isinstance(v, bool):\n", " args += f\"--{k}={v} \"\n", "\n", " return args\n", "\n", "accelerate_args = train(accelerate_conf)\n", "train_args = train(train_conf)\n", "final_args = f\"accelerate launch {accelerate_args} {train_file} {train_args}\"\n" ] } ], "metadata": { "language_info": { "name": "python" } }, "nbformat": 4, "nbformat_minor": 2 }