diff --git a/user-baichuan2-13b-v2-3.6/README.md b/user-baichuan2-13b-v2-3.6/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..ce1bd2761c3523956c8b33648178f270f7e4ebb5
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/README.md
@@ -0,0 +1,23 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- _load_in_8bit: False
+- _load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: float16
+- load_in_4bit: True
+- load_in_8bit: False
+### Framework versions
+
+
+- PEFT 0.4.0
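The config list above maps onto a `transformers` `BitsAndBytesConfig` roughly as follows — a minimal sketch, not part of this commit; the leading-underscore `_load_in_*` entries are private attributes that the public `load_in_4bit`/`load_in_8bit` arguments populate:

```python
# Sketch: the 4-bit NF4 quantization config described in the README above.
# Assumes a transformers version with bitsandbytes support installed.
import torch
from transformers import BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    load_in_8bit=False,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.float16,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
)
```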
diff --git a/user-baichuan2-13b-v2-3.6/adapter_config.json b/user-baichuan2-13b-v2-3.6/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..28839560afde65524c5af65f2743a33b6d957f2b
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/adapter_config.json
@@ -0,0 +1,24 @@
+{
+ "auto_mapping": null,
+ "base_model_name_or_path": "/home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.05,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 16,
+ "revision": null,
+ "target_modules": [
+ "o_proj",
+ "W_pack",
+ "down_proj",
+ "up_proj",
+ "gate_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+}
\ No newline at end of file
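The adapter config above describes a LoRA with `r=16`, `lora_alpha=16`, and dropout 0.05, targeting the fused QKV projection `W_pack` plus the attention output and MLP projections. A hedged sketch of attaching it with `peft`, assuming a local or Hub copy of Baichuan2-13B-Chat in place of the machine-specific `base_model_name_or_path` recorded above:

```python
# Sketch: loading the saved LoRA adapter on the 4-bit quantized base model.
# Model and adapter paths are illustrative placeholders, not from this repo.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    "baichuan-inc/Baichuan2-13B-Chat",
    trust_remote_code=True,
    quantization_config=BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_use_double_quant=True,
        bnb_4bit_compute_dtype=torch.float16,
    ),
    device_map="auto",
)
model = PeftModel.from_pretrained(base, "user-baichuan2-13b-v2-3.6")
model.eval()  # matches "inference_mode": true in the adapter config
```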
diff --git a/user-baichuan2-13b-v2-3.6/adapter_model.safetensors b/user-baichuan2-13b-v2-3.6/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..04113614ba6ebf4eb4c1993f91a14d6aae25e9b5
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5fa2928717257a823e5ece47fa40497bfd62df2de1dad1d22b7189be1eaae1fc
+size 223203704
diff --git a/user-baichuan2-13b-v2-3.6/all_results.json b/user-baichuan2-13b-v2-3.6/all_results.json
new file mode 100644
index 0000000000000000000000000000000000000000..66781f3367a3cc573706a06639c9eb0c9e2d9855
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/all_results.json
@@ -0,0 +1,7 @@
+{
+ "epoch": 1.0,
+ "train_loss": 0.5017403132133569,
+ "train_runtime": 75900.5046,
+ "train_samples_per_second": 0.102,
+ "train_steps_per_second": 0.006
+}
\ No newline at end of file
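As a quick consistency check on `all_results.json`: throughput times runtime recovers the approximate number of training samples, and inverting the step rate gives the per-step cost. Pure arithmetic on the values above:

```python
# Sketch: secondary quantities implied by all_results.json.
train_runtime = 75900.5046          # seconds, ~21.1 hours
samples_per_second = 0.102
steps_per_second = 0.006

print(samples_per_second * train_runtime)  # ~7742 samples in the single epoch
print(steps_per_second * train_runtime)    # ~455; trainer_state reports 482 steps
                                           # (the rates here are rounded)
print(1 / steps_per_second)                # ~167 s per optimizer step
```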
diff --git a/user-baichuan2-13b-v2-3.6/checkpoint-200/README.md b/user-baichuan2-13b-v2-3.6/checkpoint-200/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..11816908935e06260bec6426c6abdf1783e1374a
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/checkpoint-200/README.md
@@ -0,0 +1,23 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- _load_in_8bit: False
+- _load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: float16
+- load_in_4bit: True
+- load_in_8bit: False
+### Framework versions
+
+
+- PEFT 0.4.0
diff --git a/user-baichuan2-13b-v2-3.6/checkpoint-200/adapter_config.json b/user-baichuan2-13b-v2-3.6/checkpoint-200/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..28839560afde65524c5af65f2743a33b6d957f2b
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/checkpoint-200/adapter_config.json
@@ -0,0 +1,24 @@
+{
+ "auto_mapping": null,
+ "base_model_name_or_path": "/home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.05,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 16,
+ "revision": null,
+ "target_modules": [
+ "o_proj",
+ "W_pack",
+ "down_proj",
+ "up_proj",
+ "gate_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+}
\ No newline at end of file
diff --git a/user-baichuan2-13b-v2-3.6/checkpoint-200/adapter_model.safetensors b/user-baichuan2-13b-v2-3.6/checkpoint-200/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..9bba54592b8e6008e62b1d22955fa9356550d9cc
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/checkpoint-200/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:38a30f766c18c946f733b21a20bbed87b6c9fc7fcd352632d9f10275b9bcafec
+size 223203704
diff --git a/user-baichuan2-13b-v2-3.6/checkpoint-200/optimizer.pt b/user-baichuan2-13b-v2-3.6/checkpoint-200/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..8966b0e0e1148fdedd5afdd48481fc6b82e81ff7
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/checkpoint-200/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:244f09da90f8637279a30da6c1ab06000f824a132e5721b107eae147dab5ec76
+size 446541509
diff --git a/user-baichuan2-13b-v2-3.6/checkpoint-200/rng_state.pth b/user-baichuan2-13b-v2-3.6/checkpoint-200/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..86590d966fe35b2b58b54caab340144dda59245c
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/checkpoint-200/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:82057afe356e5d07dcf306e6c4328a22809f76704092cbfcfc589f5f6ca4ecfb
+size 14575
diff --git a/user-baichuan2-13b-v2-3.6/checkpoint-200/scheduler.pt b/user-baichuan2-13b-v2-3.6/checkpoint-200/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1fa77436bae6936f033fce17b6fefa64f3f9eab7
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/checkpoint-200/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7d91431adcc602a99f3b7dfb114d8f98fcd9283452a4e00f4d0e20d836e409d
+size 627
diff --git a/user-baichuan2-13b-v2-3.6/checkpoint-200/special_tokens_map.json b/user-baichuan2-13b-v2-3.6/checkpoint-200/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..4835406b0da9f2bed097895a32056ca040085b4f
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/checkpoint-200/special_tokens_map.json
@@ -0,0 +1,30 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true
+ },
+ "pad_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true
+ },
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true
+ }
+}
diff --git a/user-baichuan2-13b-v2-3.6/checkpoint-200/tokenization_baichuan.py b/user-baichuan2-13b-v2-3.6/checkpoint-200/tokenization_baichuan.py
new file mode 100644
index 0000000000000000000000000000000000000000..c529657fd26b20eaff4903faba8b56f2c39c55d1
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/checkpoint-200/tokenization_baichuan.py
@@ -0,0 +1,258 @@
+# Copyright (c) 2023, Baichuan Intelligent Technology. All rights reserved.
+
+import os
+from shutil import copyfile
+from typing import Any, Dict, List, Optional, Tuple
+
+import sentencepiece as spm
+from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
+from transformers.utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+    "vocab_file": {},
+    "tokenizer_file": {},
+}
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {}
+
+
+class BaichuanTokenizer(PreTrainedTokenizer):
+    """
+    Construct a Baichuan tokenizer. Based on byte-level Byte-Pair-Encoding.
+
+    Args:
+        vocab_file (`str`):
+            Path to the vocabulary file.
+    """
+
+    vocab_files_names = VOCAB_FILES_NAMES
+    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+    model_input_names = ["input_ids", "attention_mask"]
+
+    def __init__(
+        self,
+        vocab_file,
+        unk_token="<unk>",
+        bos_token="<s>",
+        eos_token="</s>",
+        pad_token=None,
+        sp_model_kwargs: Optional[Dict[str, Any]] = None,
+        add_bos_token=True,
+        add_eos_token=False,
+        clean_up_tokenization_spaces=False,
+        **kwargs,
+    ):
+        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+        bos_token = (
+            AddedToken(bos_token, lstrip=False, rstrip=False)
+            if isinstance(bos_token, str)
+            else bos_token
+        )
+        eos_token = (
+            AddedToken(eos_token, lstrip=False, rstrip=False)
+            if isinstance(eos_token, str)
+            else eos_token
+        )
+        unk_token = (
+            AddedToken(unk_token, lstrip=False, rstrip=False)
+            if isinstance(unk_token, str)
+            else unk_token
+        )
+        pad_token = (
+            AddedToken(pad_token, lstrip=False, rstrip=False)
+            if isinstance(pad_token, str)
+            else pad_token
+        )
+        self.vocab_file = vocab_file
+        self.add_bos_token = add_bos_token
+        self.add_eos_token = add_eos_token
+        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+        self.sp_model.Load(vocab_file)
+        super().__init__(
+            bos_token=bos_token,
+            eos_token=eos_token,
+            unk_token=unk_token,
+            pad_token=pad_token,
+            add_bos_token=add_bos_token,
+            add_eos_token=add_eos_token,
+            sp_model_kwargs=self.sp_model_kwargs,
+            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+            **kwargs,
+        )
+
+    def __getstate__(self):
+        state = self.__dict__.copy()
+        state["sp_model"] = None
+        return state
+
+    def __setstate__(self, d):
+        self.__dict__ = d
+        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+        self.sp_model.Load(self.vocab_file)
+
+    @property
+    def vocab_size(self):
+        """Returns vocab size"""
+        return self.sp_model.get_piece_size()
+
+    def get_vocab(self):
+        """Returns vocab as a dict"""
+        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+        vocab.update(self.added_tokens_encoder)
+        return vocab
+
+    def _tokenize(self, text):
+        """Returns a tokenized string."""
+        return self.sp_model.encode(text, out_type=str)
+
+    def _convert_token_to_id(self, token):
+        """Converts a token (str) to an id using the vocab."""
+        return self.sp_model.piece_to_id(token)
+
+    def _convert_id_to_token(self, index):
+        """Converts an index (integer) to a token (str) using the vocab."""
+        token = self.sp_model.IdToPiece(index)
+        return token
+
+    def convert_tokens_to_string(self, tokens):
+        """Converts a sequence of tokens (strings) into a single string."""
+        current_sub_tokens = []
+        out_string = ""
+        prev_is_special = False
+        for i, token in enumerate(tokens):
+            # make sure that special tokens are not decoded using sentencepiece model
+            if token in self.all_special_tokens:
+                if not prev_is_special and i != 0:
+                    out_string += " "
+                out_string += self.sp_model.decode(current_sub_tokens) + token
+                prev_is_special = True
+                current_sub_tokens = []
+            else:
+                current_sub_tokens.append(token)
+                prev_is_special = False
+        out_string += self.sp_model.decode(current_sub_tokens)
+        return out_string
+
+    def save_vocabulary(
+        self, save_directory, filename_prefix: Optional[str] = None
+    ) -> Tuple[str]:
+        """
+        Save the vocabulary and special tokens file to a directory.
+
+        Args:
+            save_directory (`str`):
+                The directory in which to save the vocabulary.
+
+        Returns:
+            `Tuple[str]`: Paths to the files saved.
+        """
+        if not os.path.isdir(save_directory):
+            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+            return
+        out_vocab_file = os.path.join(
+            save_directory,
+            (filename_prefix + "-" if filename_prefix else "")
+            + VOCAB_FILES_NAMES["vocab_file"],
+        )
+
+        if os.path.abspath(self.vocab_file) != os.path.abspath(
+            out_vocab_file
+        ) and os.path.isfile(self.vocab_file):
+            copyfile(self.vocab_file, out_vocab_file)
+        elif not os.path.isfile(self.vocab_file):
+            with open(out_vocab_file, "wb") as fi:
+                content_spiece_model = self.sp_model.serialized_model_proto()
+                fi.write(content_spiece_model)
+
+        return (out_vocab_file,)
+
+    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+        eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+        output = bos_token_id + token_ids_0 + eos_token_id
+
+        if token_ids_1 is not None:
+            output = output + bos_token_id + token_ids_1 + eos_token_id
+
+        return output
+
+    def get_special_tokens_mask(
+        self,
+        token_ids_0: List[int],
+        token_ids_1: Optional[List[int]] = None,
+        already_has_special_tokens: bool = False,
+    ) -> List[int]:
+        """
+        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+        special tokens using the tokenizer `prepare_for_model` method.
+
+        Args:
+            token_ids_0 (`List[int]`):
+                List of IDs.
+            token_ids_1 (`List[int]`, *optional*):
+                Optional second list of IDs for sequence pairs.
+            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+                Whether or not the token list is already formatted with special tokens for the model.
+
+        Returns:
+            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+        """
+        if already_has_special_tokens:
+            return super().get_special_tokens_mask(
+                token_ids_0=token_ids_0,
+                token_ids_1=token_ids_1,
+                already_has_special_tokens=True,
+            )
+
+        bos_token_id = [1] if self.add_bos_token else []
+        eos_token_id = [1] if self.add_eos_token else []
+
+        if token_ids_1 is None:
+            return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
+        return (
+            bos_token_id
+            + ([0] * len(token_ids_0))
+            + eos_token_id
+            + bos_token_id
+            + ([0] * len(token_ids_1))
+            + eos_token_id
+        )
+
+    def create_token_type_ids_from_sequences(
+        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+    ) -> List[int]:
+        """
+        Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
+        sequence pair mask has the following format:
+
+        ```
+        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+        | first sequence    | second sequence |
+        ```
+
+        if token_ids_1 is None, only returns the first portion of the mask (0s).
+
+        Args:
+            token_ids_0 (`List[int]`):
+                List of ids.
+            token_ids_1 (`List[int]`, *optional*):
+                Optional second list of IDs for sequence pairs.
+
+        Returns:
+            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+        """
+        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+        eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+        output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
+
+        if token_ids_1 is not None:
+            output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
+
+        return output
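A short usage sketch for the tokenizer defined above, loaded through the `auto_map` entry in `tokenizer_config.json` (the directory path follows this repo's layout; the sample text is illustrative):

```python
# Sketch: exercising BaichuanTokenizer via AutoTokenizer.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(
    "user-baichuan2-13b-v2-3.6/checkpoint-200",
    trust_remote_code=True,  # needed to execute tokenization_baichuan.py
)
# add_bos_token/add_eos_token are false in tokenizer_config.json, so no
# special tokens are prepended or appended here.
ids = tok("hello world").input_ids
print(tok.convert_ids_to_tokens(ids))
print(tok.decode(ids))
```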
diff --git a/user-baichuan2-13b-v2-3.6/checkpoint-200/tokenizer.model b/user-baichuan2-13b-v2-3.6/checkpoint-200/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..b3902c4521d7f34868ac76dd16150ff5ca41b000
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/checkpoint-200/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:79452955be6b419a65984273a9f08af86042e1c2a75ee3ba989cbf620a133cc2
+size 2001107
diff --git a/user-baichuan2-13b-v2-3.6/checkpoint-200/tokenizer_config.json b/user-baichuan2-13b-v2-3.6/checkpoint-200/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..46070f6c3ec83e940bb7cf87705b41b203509aa4
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/checkpoint-200/tokenizer_config.json
@@ -0,0 +1,44 @@
+{
+ "add_bos_token": false,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true,
+ "special": true
+ }
+ },
+ "auto_map": {
+ "AutoTokenizer": [
+ "tokenization_baichuan.BaichuanTokenizer",
+ null
+ ]
+ },
+ "bos_token": "",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "model_max_length": 4096,
+ "pad_token": "",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "BaichuanTokenizer",
+ "unk_token": ""
+}
diff --git a/user-baichuan2-13b-v2-3.6/checkpoint-200/trainer_state.json b/user-baichuan2-13b-v2-3.6/checkpoint-200/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..3a7c6547971901d0f9ae086611fb9e9f75707ac4
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/checkpoint-200/trainer_state.json
@@ -0,0 +1,161 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.41450777202072536,
+ "eval_steps": 500,
+ "global_step": 200,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.02,
+ "grad_norm": 4.99941873550415,
+ "learning_rate": 2e-05,
+ "loss": 9.9329,
+ "step": 10
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 1.741065502166748,
+ "learning_rate": 4e-05,
+ "loss": 11.0746,
+ "step": 20
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 1.4727320671081543,
+ "learning_rate": 6e-05,
+ "loss": 2.7159,
+ "step": 30
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.1335960477590561,
+ "learning_rate": 8e-05,
+ "loss": 0.3969,
+ "step": 40
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.0014472692273557186,
+ "learning_rate": 0.0001,
+ "loss": 0.0032,
+ "step": 50
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.0010780546581372619,
+ "learning_rate": 0.0001,
+ "loss": 0.0002,
+ "step": 60
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 1.03132963180542,
+ "learning_rate": 0.0001,
+ "loss": 0.0002,
+ "step": 70
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.008827299810945988,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 80
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.0002956670359708369,
+ "learning_rate": 0.0001,
+ "loss": 0.0001,
+ "step": 90
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.0003419867134653032,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 100
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.0003681881644297391,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 110
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.0002884200366679579,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 120
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.00011985149467363954,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 130
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.0003195986500941217,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 140
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.00010149635636480525,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 150
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.00010508792183827609,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 160
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.00011793687008321285,
+ "learning_rate": 0.0001,
+ "loss": 0.006,
+ "step": 170
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 8.076676749624312e-05,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 180
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.0007808339432813227,
+ "learning_rate": 0.0001,
+ "loss": 0.006,
+ "step": 190
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.11711683869361877,
+ "learning_rate": 0.0001,
+ "loss": 0.003,
+ "step": 200
+ }
+ ],
+ "logging_steps": 10,
+ "max_steps": 482,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 1,
+ "save_steps": 100,
+ "total_flos": 3.335841878562816e+17,
+ "train_batch_size": 1,
+ "trial_name": null,
+ "trial_params": null
+}
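The fractional `epoch` in the header above is `global_step` divided by the (fractional) number of optimizer steps per epoch; inverting it from the values in this file gives roughly 482.5 steps per epoch, consistent with the reported `max_steps` of 482. A quick check, using only numbers from this file:

```python
# Sketch: recovering steps-per-epoch from trainer_state.json.
global_step, epoch = 200, 0.41450777202072536
print(global_step / epoch)  # ~482.5 optimizer steps per epoch; max_steps is 482
```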
diff --git a/user-baichuan2-13b-v2-3.6/checkpoint-200/training_args.bin b/user-baichuan2-13b-v2-3.6/checkpoint-200/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..969e0e491f106769e00accc686da59e7e3816367
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/checkpoint-200/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1abab34fc571ab2be46c8abdf765b96b9a09ab4144528e95270f1af465c0f19c
+size 4475
diff --git a/user-baichuan2-13b-v2-3.6/checkpoint-300/README.md b/user-baichuan2-13b-v2-3.6/checkpoint-300/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..ce1bd2761c3523956c8b33648178f270f7e4ebb5
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/checkpoint-300/README.md
@@ -0,0 +1,23 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- _load_in_8bit: False
+- _load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: float16
+- load_in_4bit: True
+- load_in_8bit: False
+### Framework versions
+
+
+- PEFT 0.4.0
diff --git a/user-baichuan2-13b-v2-3.6/checkpoint-300/adapter_config.json b/user-baichuan2-13b-v2-3.6/checkpoint-300/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..28839560afde65524c5af65f2743a33b6d957f2b
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/checkpoint-300/adapter_config.json
@@ -0,0 +1,24 @@
+{
+ "auto_mapping": null,
+ "base_model_name_or_path": "/home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.05,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 16,
+ "revision": null,
+ "target_modules": [
+ "o_proj",
+ "W_pack",
+ "down_proj",
+ "up_proj",
+ "gate_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+}
\ No newline at end of file
diff --git a/user-baichuan2-13b-v2-3.6/checkpoint-300/adapter_model.safetensors b/user-baichuan2-13b-v2-3.6/checkpoint-300/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..aea0912dd95e5d416dc0d265bb1286747be5398b
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/checkpoint-300/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3abb43148c7b50ee5cbd1d6ca51b35ce5a55e74d837bf441cb247d3ea6a7c56a
+size 223203704
diff --git a/user-baichuan2-13b-v2-3.6/checkpoint-300/optimizer.pt b/user-baichuan2-13b-v2-3.6/checkpoint-300/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ac6754d3049eebb212d286cc89613b31864d63d8
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/checkpoint-300/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6626b117cfcf7dfb7c1d3cf79bd87cac30bfdbb5b6d6138c5282bda6d59f86cb
+size 446541893
diff --git a/user-baichuan2-13b-v2-3.6/checkpoint-300/rng_state.pth b/user-baichuan2-13b-v2-3.6/checkpoint-300/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..aeec14df08cdf263ad7596242ce6cae13e286a6c
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/checkpoint-300/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:939982b34814a29d20630db8d8bb9ca8ffaaca659852333f9bf40ca5f715ffd0
+size 14575
diff --git a/user-baichuan2-13b-v2-3.6/checkpoint-300/scheduler.pt b/user-baichuan2-13b-v2-3.6/checkpoint-300/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c0ec470706774d069b46e7066f05152b9106d5dc
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/checkpoint-300/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:47e1286a69d6cf9d5865b0808d9a438a85cffe270c5273c3518b2a5557084aa5
+size 627
diff --git a/user-baichuan2-13b-v2-3.6/checkpoint-300/special_tokens_map.json b/user-baichuan2-13b-v2-3.6/checkpoint-300/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..4835406b0da9f2bed097895a32056ca040085b4f
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/checkpoint-300/special_tokens_map.json
@@ -0,0 +1,30 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true
+ },
+ "pad_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true
+ },
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true
+ }
+}
diff --git a/user-baichuan2-13b-v2-3.6/checkpoint-300/tokenization_baichuan.py b/user-baichuan2-13b-v2-3.6/checkpoint-300/tokenization_baichuan.py
new file mode 100644
index 0000000000000000000000000000000000000000..c529657fd26b20eaff4903faba8b56f2c39c55d1
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/checkpoint-300/tokenization_baichuan.py
@@ -0,0 +1,258 @@
+# Copyright (c) 2023, Baichuan Intelligent Technology. All rights reserved.
+
+import os
+from shutil import copyfile
+from typing import Any, Dict, List, Optional, Tuple
+
+import sentencepiece as spm
+from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
+from transformers.utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+    "vocab_file": {},
+    "tokenizer_file": {},
+}
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {}
+
+
+class BaichuanTokenizer(PreTrainedTokenizer):
+    """
+    Construct a Baichuan tokenizer. Based on byte-level Byte-Pair-Encoding.
+
+    Args:
+        vocab_file (`str`):
+            Path to the vocabulary file.
+    """
+
+    vocab_files_names = VOCAB_FILES_NAMES
+    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+    model_input_names = ["input_ids", "attention_mask"]
+
+    def __init__(
+        self,
+        vocab_file,
+        unk_token="<unk>",
+        bos_token="<s>",
+        eos_token="</s>",
+        pad_token=None,
+        sp_model_kwargs: Optional[Dict[str, Any]] = None,
+        add_bos_token=True,
+        add_eos_token=False,
+        clean_up_tokenization_spaces=False,
+        **kwargs,
+    ):
+        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+        bos_token = (
+            AddedToken(bos_token, lstrip=False, rstrip=False)
+            if isinstance(bos_token, str)
+            else bos_token
+        )
+        eos_token = (
+            AddedToken(eos_token, lstrip=False, rstrip=False)
+            if isinstance(eos_token, str)
+            else eos_token
+        )
+        unk_token = (
+            AddedToken(unk_token, lstrip=False, rstrip=False)
+            if isinstance(unk_token, str)
+            else unk_token
+        )
+        pad_token = (
+            AddedToken(pad_token, lstrip=False, rstrip=False)
+            if isinstance(pad_token, str)
+            else pad_token
+        )
+        self.vocab_file = vocab_file
+        self.add_bos_token = add_bos_token
+        self.add_eos_token = add_eos_token
+        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+        self.sp_model.Load(vocab_file)
+        super().__init__(
+            bos_token=bos_token,
+            eos_token=eos_token,
+            unk_token=unk_token,
+            pad_token=pad_token,
+            add_bos_token=add_bos_token,
+            add_eos_token=add_eos_token,
+            sp_model_kwargs=self.sp_model_kwargs,
+            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+            **kwargs,
+        )
+
+    def __getstate__(self):
+        state = self.__dict__.copy()
+        state["sp_model"] = None
+        return state
+
+    def __setstate__(self, d):
+        self.__dict__ = d
+        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+        self.sp_model.Load(self.vocab_file)
+
+    @property
+    def vocab_size(self):
+        """Returns vocab size"""
+        return self.sp_model.get_piece_size()
+
+    def get_vocab(self):
+        """Returns vocab as a dict"""
+        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+        vocab.update(self.added_tokens_encoder)
+        return vocab
+
+    def _tokenize(self, text):
+        """Returns a tokenized string."""
+        return self.sp_model.encode(text, out_type=str)
+
+    def _convert_token_to_id(self, token):
+        """Converts a token (str) to an id using the vocab."""
+        return self.sp_model.piece_to_id(token)
+
+    def _convert_id_to_token(self, index):
+        """Converts an index (integer) to a token (str) using the vocab."""
+        token = self.sp_model.IdToPiece(index)
+        return token
+
+    def convert_tokens_to_string(self, tokens):
+        """Converts a sequence of tokens (strings) into a single string."""
+        current_sub_tokens = []
+        out_string = ""
+        prev_is_special = False
+        for i, token in enumerate(tokens):
+            # make sure that special tokens are not decoded using sentencepiece model
+            if token in self.all_special_tokens:
+                if not prev_is_special and i != 0:
+                    out_string += " "
+                out_string += self.sp_model.decode(current_sub_tokens) + token
+                prev_is_special = True
+                current_sub_tokens = []
+            else:
+                current_sub_tokens.append(token)
+                prev_is_special = False
+        out_string += self.sp_model.decode(current_sub_tokens)
+        return out_string
+
+    def save_vocabulary(
+        self, save_directory, filename_prefix: Optional[str] = None
+    ) -> Tuple[str]:
+        """
+        Save the vocabulary and special tokens file to a directory.
+
+        Args:
+            save_directory (`str`):
+                The directory in which to save the vocabulary.
+
+        Returns:
+            `Tuple[str]`: Paths to the files saved.
+        """
+        if not os.path.isdir(save_directory):
+            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+            return
+        out_vocab_file = os.path.join(
+            save_directory,
+            (filename_prefix + "-" if filename_prefix else "")
+            + VOCAB_FILES_NAMES["vocab_file"],
+        )
+
+        if os.path.abspath(self.vocab_file) != os.path.abspath(
+            out_vocab_file
+        ) and os.path.isfile(self.vocab_file):
+            copyfile(self.vocab_file, out_vocab_file)
+        elif not os.path.isfile(self.vocab_file):
+            with open(out_vocab_file, "wb") as fi:
+                content_spiece_model = self.sp_model.serialized_model_proto()
+                fi.write(content_spiece_model)
+
+        return (out_vocab_file,)
+
+    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+        eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+        output = bos_token_id + token_ids_0 + eos_token_id
+
+        if token_ids_1 is not None:
+            output = output + bos_token_id + token_ids_1 + eos_token_id
+
+        return output
+
+    def get_special_tokens_mask(
+        self,
+        token_ids_0: List[int],
+        token_ids_1: Optional[List[int]] = None,
+        already_has_special_tokens: bool = False,
+    ) -> List[int]:
+        """
+        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+        special tokens using the tokenizer `prepare_for_model` method.
+
+        Args:
+            token_ids_0 (`List[int]`):
+                List of IDs.
+            token_ids_1 (`List[int]`, *optional*):
+                Optional second list of IDs for sequence pairs.
+            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+                Whether or not the token list is already formatted with special tokens for the model.
+
+        Returns:
+            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+        """
+        if already_has_special_tokens:
+            return super().get_special_tokens_mask(
+                token_ids_0=token_ids_0,
+                token_ids_1=token_ids_1,
+                already_has_special_tokens=True,
+            )
+
+        bos_token_id = [1] if self.add_bos_token else []
+        eos_token_id = [1] if self.add_eos_token else []
+
+        if token_ids_1 is None:
+            return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
+        return (
+            bos_token_id
+            + ([0] * len(token_ids_0))
+            + eos_token_id
+            + bos_token_id
+            + ([0] * len(token_ids_1))
+            + eos_token_id
+        )
+
+    def create_token_type_ids_from_sequences(
+        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+    ) -> List[int]:
+        """
+        Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
+        sequence pair mask has the following format:
+
+        ```
+        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+        | first sequence    | second sequence |
+        ```
+
+        if token_ids_1 is None, only returns the first portion of the mask (0s).
+
+        Args:
+            token_ids_0 (`List[int]`):
+                List of ids.
+            token_ids_1 (`List[int]`, *optional*):
+                Optional second list of IDs for sequence pairs.
+
+        Returns:
+            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+        """
+        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+        eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+        output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
+
+        if token_ids_1 is not None:
+            output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
+
+        return output
diff --git a/user-baichuan2-13b-v2-3.6/checkpoint-300/tokenizer.model b/user-baichuan2-13b-v2-3.6/checkpoint-300/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..b3902c4521d7f34868ac76dd16150ff5ca41b000
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/checkpoint-300/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:79452955be6b419a65984273a9f08af86042e1c2a75ee3ba989cbf620a133cc2
+size 2001107
diff --git a/user-baichuan2-13b-v2-3.6/checkpoint-300/tokenizer_config.json b/user-baichuan2-13b-v2-3.6/checkpoint-300/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..46070f6c3ec83e940bb7cf87705b41b203509aa4
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/checkpoint-300/tokenizer_config.json
@@ -0,0 +1,44 @@
+{
+ "add_bos_token": false,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true,
+ "special": true
+ }
+ },
+ "auto_map": {
+ "AutoTokenizer": [
+ "tokenization_baichuan.BaichuanTokenizer",
+ null
+ ]
+ },
+ "bos_token": "",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "model_max_length": 4096,
+ "pad_token": "",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "BaichuanTokenizer",
+ "unk_token": ""
+}
diff --git a/user-baichuan2-13b-v2-3.6/checkpoint-300/trainer_state.json b/user-baichuan2-13b-v2-3.6/checkpoint-300/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..e96110738f04e29f7a0804ed5ceeb666ccb1a317
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/checkpoint-300/trainer_state.json
@@ -0,0 +1,231 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.6217616580310881,
+ "eval_steps": 500,
+ "global_step": 300,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.02,
+ "grad_norm": 4.99941873550415,
+ "learning_rate": 2e-05,
+ "loss": 9.9329,
+ "step": 10
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 1.741065502166748,
+ "learning_rate": 4e-05,
+ "loss": 11.0746,
+ "step": 20
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 1.4727320671081543,
+ "learning_rate": 6e-05,
+ "loss": 2.7159,
+ "step": 30
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.1335960477590561,
+ "learning_rate": 8e-05,
+ "loss": 0.3969,
+ "step": 40
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.0014472692273557186,
+ "learning_rate": 0.0001,
+ "loss": 0.0032,
+ "step": 50
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.0010780546581372619,
+ "learning_rate": 0.0001,
+ "loss": 0.0002,
+ "step": 60
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 1.03132963180542,
+ "learning_rate": 0.0001,
+ "loss": 0.0002,
+ "step": 70
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.008827299810945988,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 80
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.0002956670359708369,
+ "learning_rate": 0.0001,
+ "loss": 0.0001,
+ "step": 90
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.0003419867134653032,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 100
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.0003681881644297391,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 110
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.0002884200366679579,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 120
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.00011985149467363954,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 130
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.0003195986500941217,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 140
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.00010149635636480525,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 150
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.00010508792183827609,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 160
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.00011793687008321285,
+ "learning_rate": 0.0001,
+ "loss": 0.006,
+ "step": 170
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 8.076676749624312e-05,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 180
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.0007808339432813227,
+ "learning_rate": 0.0001,
+ "loss": 0.006,
+ "step": 190
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.11711683869361877,
+ "learning_rate": 0.0001,
+ "loss": 0.003,
+ "step": 200
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.0002039404644165188,
+ "learning_rate": 0.0001,
+ "loss": 0.0001,
+ "step": 210
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.00873592495918274,
+ "learning_rate": 0.0001,
+ "loss": 0.0209,
+ "step": 220
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 3.0506539344787598,
+ "learning_rate": 0.0001,
+ "loss": 0.0201,
+ "step": 230
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.05903371796011925,
+ "learning_rate": 0.0001,
+ "loss": 0.0026,
+ "step": 240
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.0002484666183590889,
+ "learning_rate": 0.0001,
+ "loss": 0.0001,
+ "step": 250
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.0003493047261144966,
+ "learning_rate": 0.0001,
+ "loss": 0.0001,
+ "step": 260
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.0008058947860263288,
+ "learning_rate": 0.0001,
+ "loss": 0.0001,
+ "step": 270
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.0004198936221655458,
+ "learning_rate": 0.0001,
+ "loss": 0.0001,
+ "step": 280
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.0002983050071634352,
+ "learning_rate": 0.0001,
+ "loss": 0.0001,
+ "step": 290
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.0002279053587699309,
+ "learning_rate": 0.0001,
+ "loss": 0.0001,
+ "step": 300
+ }
+ ],
+ "logging_steps": 10,
+ "max_steps": 482,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 1,
+ "save_steps": 100,
+ "total_flos": 5.004587702980301e+17,
+ "train_batch_size": 1,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/user-baichuan2-13b-v2-3.6/checkpoint-300/training_args.bin b/user-baichuan2-13b-v2-3.6/checkpoint-300/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..969e0e491f106769e00accc686da59e7e3816367
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/checkpoint-300/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1abab34fc571ab2be46c8abdf765b96b9a09ab4144528e95270f1af465c0f19c
+size 4475
diff --git a/user-baichuan2-13b-v2-3.6/checkpoint-400/README.md b/user-baichuan2-13b-v2-3.6/checkpoint-400/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..ce1bd2761c3523956c8b33648178f270f7e4ebb5
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/checkpoint-400/README.md
@@ -0,0 +1,23 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- _load_in_8bit: False
+- _load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: float16
+- load_in_4bit: True
+- load_in_8bit: False
+### Framework versions
+
+
+- PEFT 0.4.0
diff --git a/user-baichuan2-13b-v2-3.6/checkpoint-400/adapter_config.json b/user-baichuan2-13b-v2-3.6/checkpoint-400/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..28839560afde65524c5af65f2743a33b6d957f2b
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/checkpoint-400/adapter_config.json
@@ -0,0 +1,24 @@
+{
+ "auto_mapping": null,
+ "base_model_name_or_path": "/home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.05,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 16,
+ "revision": null,
+ "target_modules": [
+ "o_proj",
+ "W_pack",
+ "down_proj",
+ "up_proj",
+ "gate_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+}
\ No newline at end of file
diff --git a/user-baichuan2-13b-v2-3.6/checkpoint-400/adapter_model.safetensors b/user-baichuan2-13b-v2-3.6/checkpoint-400/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..f3afde65cd9e15c749e5b03949b4c52122cdc529
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/checkpoint-400/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8a761a3ff3848cad96ebcb4d93d82af85ae86c2703ec031d10338d35d93aff15
+size 223203704
diff --git a/user-baichuan2-13b-v2-3.6/checkpoint-400/optimizer.pt b/user-baichuan2-13b-v2-3.6/checkpoint-400/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c80dd3ff91474f7c2fa252c45e3b5c57ff5495ad
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/checkpoint-400/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b55a4a50d2c66907dc3b95e3f979aac9b320a415e3a978859a220e686a76d9a
+size 446541893
diff --git a/user-baichuan2-13b-v2-3.6/checkpoint-400/rng_state.pth b/user-baichuan2-13b-v2-3.6/checkpoint-400/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..fb4103c7996f6eb32e8f09ed5bf87da7e516ed5b
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/checkpoint-400/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:54a11743bad40439e1bbf592fb8f66b1a3c8dbde2539b8897aec5e85c29fcc1c
+size 14575
diff --git a/user-baichuan2-13b-v2-3.6/checkpoint-400/scheduler.pt b/user-baichuan2-13b-v2-3.6/checkpoint-400/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a4b5df1f2812048241ec8e18c18453384701d7e5
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/checkpoint-400/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6536a67a17be52e1d2b7b314f3abff272bd0f976aca319628b666d64bd161a64
+size 627
diff --git a/user-baichuan2-13b-v2-3.6/checkpoint-400/special_tokens_map.json b/user-baichuan2-13b-v2-3.6/checkpoint-400/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..4835406b0da9f2bed097895a32056ca040085b4f
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/checkpoint-400/special_tokens_map.json
@@ -0,0 +1,30 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true
+ },
+ "pad_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true
+ },
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true
+ }
+}
diff --git a/user-baichuan2-13b-v2-3.6/checkpoint-400/tokenization_baichuan.py b/user-baichuan2-13b-v2-3.6/checkpoint-400/tokenization_baichuan.py
new file mode 100644
index 0000000000000000000000000000000000000000..c529657fd26b20eaff4903faba8b56f2c39c55d1
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/checkpoint-400/tokenization_baichuan.py
@@ -0,0 +1,258 @@
+# Copyright (c) 2023, Baichuan Intelligent Technology. All rights reserved.
+
+import os
+from shutil import copyfile
+from typing import Any, Dict, List, Optional, Tuple
+
+import sentencepiece as spm
+from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
+from transformers.utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+    "vocab_file": {},
+    "tokenizer_file": {},
+}
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {}
+
+
+class BaichuanTokenizer(PreTrainedTokenizer):
+    """
+    Construct a Baichuan tokenizer. Based on byte-level Byte-Pair-Encoding.
+
+    Args:
+        vocab_file (`str`):
+            Path to the vocabulary file.
+    """
+
+    vocab_files_names = VOCAB_FILES_NAMES
+    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+    model_input_names = ["input_ids", "attention_mask"]
+
+    def __init__(
+        self,
+        vocab_file,
+        unk_token="<unk>",
+        bos_token="<s>",
+        eos_token="</s>",
+        pad_token=None,
+        sp_model_kwargs: Optional[Dict[str, Any]] = None,
+        add_bos_token=True,
+        add_eos_token=False,
+        clean_up_tokenization_spaces=False,
+        **kwargs,
+    ):
+        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+        bos_token = (
+            AddedToken(bos_token, lstrip=False, rstrip=False)
+            if isinstance(bos_token, str)
+            else bos_token
+        )
+        eos_token = (
+            AddedToken(eos_token, lstrip=False, rstrip=False)
+            if isinstance(eos_token, str)
+            else eos_token
+        )
+        unk_token = (
+            AddedToken(unk_token, lstrip=False, rstrip=False)
+            if isinstance(unk_token, str)
+            else unk_token
+        )
+        pad_token = (
+            AddedToken(pad_token, lstrip=False, rstrip=False)
+            if isinstance(pad_token, str)
+            else pad_token
+        )
+        self.vocab_file = vocab_file
+        self.add_bos_token = add_bos_token
+        self.add_eos_token = add_eos_token
+        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+        self.sp_model.Load(vocab_file)
+        super().__init__(
+            bos_token=bos_token,
+            eos_token=eos_token,
+            unk_token=unk_token,
+            pad_token=pad_token,
+            add_bos_token=add_bos_token,
+            add_eos_token=add_eos_token,
+            sp_model_kwargs=self.sp_model_kwargs,
+            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+            **kwargs,
+        )
+
+    def __getstate__(self):
+        state = self.__dict__.copy()
+        state["sp_model"] = None
+        return state
+
+    def __setstate__(self, d):
+        self.__dict__ = d
+        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+        self.sp_model.Load(self.vocab_file)
+
+    @property
+    def vocab_size(self):
+        """Returns vocab size"""
+        return self.sp_model.get_piece_size()
+
+    def get_vocab(self):
+        """Returns vocab as a dict"""
+        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+        vocab.update(self.added_tokens_encoder)
+        return vocab
+
+    def _tokenize(self, text):
+        """Returns a tokenized string."""
+        return self.sp_model.encode(text, out_type=str)
+
+    def _convert_token_to_id(self, token):
+        """Converts a token (str) to an id using the vocab."""
+        return self.sp_model.piece_to_id(token)
+
+    def _convert_id_to_token(self, index):
+        """Converts an index (integer) to a token (str) using the vocab."""
+        token = self.sp_model.IdToPiece(index)
+        return token
+
+    def convert_tokens_to_string(self, tokens):
+        """Converts a sequence of tokens (strings) into a single string."""
+        current_sub_tokens = []
+        out_string = ""
+        prev_is_special = False
+        for i, token in enumerate(tokens):
+            # make sure that special tokens are not decoded using sentencepiece model
+            if token in self.all_special_tokens:
+                if not prev_is_special and i != 0:
+                    out_string += " "
+                out_string += self.sp_model.decode(current_sub_tokens) + token
+                prev_is_special = True
+                current_sub_tokens = []
+            else:
+                current_sub_tokens.append(token)
+                prev_is_special = False
+        out_string += self.sp_model.decode(current_sub_tokens)
+        return out_string
+
+    def save_vocabulary(
+        self, save_directory, filename_prefix: Optional[str] = None
+    ) -> Tuple[str]:
+        """
+        Save the vocabulary and special tokens file to a directory.
+
+        Args:
+            save_directory (`str`):
+                The directory in which to save the vocabulary.
+
+        Returns:
+            `Tuple[str]`: Paths to the files saved.
+        """
+        if not os.path.isdir(save_directory):
+            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+            return
+        out_vocab_file = os.path.join(
+            save_directory,
+            (filename_prefix + "-" if filename_prefix else "")
+            + VOCAB_FILES_NAMES["vocab_file"],
+        )
+
+        if os.path.abspath(self.vocab_file) != os.path.abspath(
+            out_vocab_file
+        ) and os.path.isfile(self.vocab_file):
+            copyfile(self.vocab_file, out_vocab_file)
+        elif not os.path.isfile(self.vocab_file):
+            with open(out_vocab_file, "wb") as fi:
+                content_spiece_model = self.sp_model.serialized_model_proto()
+                fi.write(content_spiece_model)
+
+        return (out_vocab_file,)
+
+    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+        eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+        output = bos_token_id + token_ids_0 + eos_token_id
+
+        if token_ids_1 is not None:
+            output = output + bos_token_id + token_ids_1 + eos_token_id
+
+        return output
+
+    def get_special_tokens_mask(
+        self,
+        token_ids_0: List[int],
+        token_ids_1: Optional[List[int]] = None,
+        already_has_special_tokens: bool = False,
+    ) -> List[int]:
+        """
+        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+        special tokens using the tokenizer `prepare_for_model` method.
+
+        Args:
+            token_ids_0 (`List[int]`):
+                List of IDs.
+            token_ids_1 (`List[int]`, *optional*):
+                Optional second list of IDs for sequence pairs.
+            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+                Whether or not the token list is already formatted with special tokens for the model.
+
+        Returns:
+            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+        """
+        if already_has_special_tokens:
+            return super().get_special_tokens_mask(
+                token_ids_0=token_ids_0,
+                token_ids_1=token_ids_1,
+                already_has_special_tokens=True,
+            )
+
+        bos_token_id = [1] if self.add_bos_token else []
+        eos_token_id = [1] if self.add_eos_token else []
+
+        if token_ids_1 is None:
+            return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
+        return (
+            bos_token_id
+            + ([0] * len(token_ids_0))
+            + eos_token_id
+            + bos_token_id
+            + ([0] * len(token_ids_1))
+            + eos_token_id
+        )
+
+    def create_token_type_ids_from_sequences(
+        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+    ) -> List[int]:
+        """
+        Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
+        sequence pair mask has the following format:
+
+        ```
+        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+        | first sequence    | second sequence |
+        ```
+
+        if token_ids_1 is None, only returns the first portion of the mask (0s).
+
+        Args:
+            token_ids_0 (`List[int]`):
+                List of ids.
+            token_ids_1 (`List[int]`, *optional*):
+                Optional second list of IDs for sequence pairs.
+
+        Returns:
+            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+        """
+        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+        eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+        output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
+
+        if token_ids_1 is not None:
+            output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
+
+        return output
diff --git a/user-baichuan2-13b-v2-3.6/checkpoint-400/tokenizer.model b/user-baichuan2-13b-v2-3.6/checkpoint-400/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..b3902c4521d7f34868ac76dd16150ff5ca41b000
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/checkpoint-400/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:79452955be6b419a65984273a9f08af86042e1c2a75ee3ba989cbf620a133cc2
+size 2001107
diff --git a/user-baichuan2-13b-v2-3.6/checkpoint-400/tokenizer_config.json b/user-baichuan2-13b-v2-3.6/checkpoint-400/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..46070f6c3ec83e940bb7cf87705b41b203509aa4
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/checkpoint-400/tokenizer_config.json
@@ -0,0 +1,44 @@
+{
+ "add_bos_token": false,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true,
+ "special": true
+ }
+ },
+ "auto_map": {
+ "AutoTokenizer": [
+ "tokenization_baichuan.BaichuanTokenizer",
+ null
+ ]
+ },
+ "bos_token": "",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "model_max_length": 4096,
+ "pad_token": "",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "BaichuanTokenizer",
+ "unk_token": ""
+}
diff --git a/user-baichuan2-13b-v2-3.6/checkpoint-400/trainer_state.json b/user-baichuan2-13b-v2-3.6/checkpoint-400/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..53893d759ea06629e4c9acb8caafc297d2585c47
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/checkpoint-400/trainer_state.json
@@ -0,0 +1,301 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.8290155440414507,
+ "eval_steps": 500,
+ "global_step": 400,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.02,
+ "grad_norm": 4.99941873550415,
+ "learning_rate": 2e-05,
+ "loss": 9.9329,
+ "step": 10
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 1.741065502166748,
+ "learning_rate": 4e-05,
+ "loss": 11.0746,
+ "step": 20
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 1.4727320671081543,
+ "learning_rate": 6e-05,
+ "loss": 2.7159,
+ "step": 30
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.1335960477590561,
+ "learning_rate": 8e-05,
+ "loss": 0.3969,
+ "step": 40
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.0014472692273557186,
+ "learning_rate": 0.0001,
+ "loss": 0.0032,
+ "step": 50
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.0010780546581372619,
+ "learning_rate": 0.0001,
+ "loss": 0.0002,
+ "step": 60
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 1.03132963180542,
+ "learning_rate": 0.0001,
+ "loss": 0.0002,
+ "step": 70
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.008827299810945988,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 80
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.0002956670359708369,
+ "learning_rate": 0.0001,
+ "loss": 0.0001,
+ "step": 90
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.0003419867134653032,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 100
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.0003681881644297391,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 110
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.0002884200366679579,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 120
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.00011985149467363954,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 130
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.0003195986500941217,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 140
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.00010149635636480525,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 150
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.00010508792183827609,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 160
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.00011793687008321285,
+ "learning_rate": 0.0001,
+ "loss": 0.006,
+ "step": 170
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 8.076676749624312e-05,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 180
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.0007808339432813227,
+ "learning_rate": 0.0001,
+ "loss": 0.006,
+ "step": 190
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.11711683869361877,
+ "learning_rate": 0.0001,
+ "loss": 0.003,
+ "step": 200
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.0002039404644165188,
+ "learning_rate": 0.0001,
+ "loss": 0.0001,
+ "step": 210
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.00873592495918274,
+ "learning_rate": 0.0001,
+ "loss": 0.0209,
+ "step": 220
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 3.0506539344787598,
+ "learning_rate": 0.0001,
+ "loss": 0.0201,
+ "step": 230
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.05903371796011925,
+ "learning_rate": 0.0001,
+ "loss": 0.0026,
+ "step": 240
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.0002484666183590889,
+ "learning_rate": 0.0001,
+ "loss": 0.0001,
+ "step": 250
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.0003493047261144966,
+ "learning_rate": 0.0001,
+ "loss": 0.0001,
+ "step": 260
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.0008058947860263288,
+ "learning_rate": 0.0001,
+ "loss": 0.0001,
+ "step": 270
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.0004198936221655458,
+ "learning_rate": 0.0001,
+ "loss": 0.0001,
+ "step": 280
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.0002983050071634352,
+ "learning_rate": 0.0001,
+ "loss": 0.0001,
+ "step": 290
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.0002279053587699309,
+ "learning_rate": 0.0001,
+ "loss": 0.0001,
+ "step": 300
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.00015332824841607362,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 310
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.00011723622446879745,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 320
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.0001235378731507808,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 330
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.00010625163122313097,
+ "learning_rate": 0.0001,
+ "loss": 0.0001,
+ "step": 340
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 7.50239341869019e-05,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 350
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.00010148331784876063,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 360
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 7.368126534856856e-05,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 370
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.00012744461128022522,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 380
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 5.87971335335169e-05,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 390
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 6.405858584912494e-05,
+ "learning_rate": 0.0001,
+ "loss": 0.0001,
+ "step": 400
+ }
+ ],
+ "logging_steps": 10,
+ "max_steps": 482,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 1,
+ "save_steps": 100,
+ "total_flos": 6.625291354656461e+17,
+ "train_batch_size": 1,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/user-baichuan2-13b-v2-3.6/checkpoint-400/training_args.bin b/user-baichuan2-13b-v2-3.6/checkpoint-400/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..969e0e491f106769e00accc686da59e7e3816367
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/checkpoint-400/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1abab34fc571ab2be46c8abdf765b96b9a09ab4144528e95270f1af465c0f19c
+size 4475
diff --git a/user-baichuan2-13b-v2-3.6/runs/Mar06_16-02-53_u/events.out.tfevents.1709741241.u.349083.0 b/user-baichuan2-13b-v2-3.6/runs/Mar06_16-02-53_u/events.out.tfevents.1709741241.u.349083.0
new file mode 100644
index 0000000000000000000000000000000000000000..ee21cb2bee6f554e5cbbce0679dc0a95bff1538d
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/runs/Mar06_16-02-53_u/events.out.tfevents.1709741241.u.349083.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3014282355673d8fdcaa570d4c5296d240c5dd75bf87a842af9ec4b76a4e9116
+size 5162
diff --git a/user-baichuan2-13b-v2-3.6/runs/Mar06_16-15-19_u/events.out.tfevents.1709741991.u.349593.0 b/user-baichuan2-13b-v2-3.6/runs/Mar06_16-15-19_u/events.out.tfevents.1709741991.u.349593.0
new file mode 100644
index 0000000000000000000000000000000000000000..7c1d809d44e12f1ed9e1b2c28710f0a58a5abae2
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/runs/Mar06_16-15-19_u/events.out.tfevents.1709741991.u.349593.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9058c4ffc24a76a37dcc37c3d6b8fc0c8aa81301fb6b58fe0305a01b7081d8f2
+size 5162
diff --git a/user-baichuan2-13b-v2-3.6/runs/Mar06_16-27-57_u/events.out.tfevents.1709742755.u.350734.0 b/user-baichuan2-13b-v2-3.6/runs/Mar06_16-27-57_u/events.out.tfevents.1709742755.u.350734.0
new file mode 100644
index 0000000000000000000000000000000000000000..8f7c65884dafecf049052341f48107b70a09e710
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/runs/Mar06_16-27-57_u/events.out.tfevents.1709742755.u.350734.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a35a2b472b2864493ef9958235b77f8c4c7af27b4b88b478d703f99b4f10f718
+size 5162
diff --git a/user-baichuan2-13b-v2-3.6/runs/Mar06_16-37-25_u/events.out.tfevents.1709743386.u.351776.0 b/user-baichuan2-13b-v2-3.6/runs/Mar06_16-37-25_u/events.out.tfevents.1709743386.u.351776.0
new file mode 100644
index 0000000000000000000000000000000000000000..23be3ef4080c14d2f60441bcb2cd5101a2f88d0d
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/runs/Mar06_16-37-25_u/events.out.tfevents.1709743386.u.351776.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c388e6f60bbcf55c3e512b8af87941e03d9a7648390ab77bfe714ffdc48c7bac
+size 5162
diff --git a/user-baichuan2-13b-v2-3.6/runs/Mar06_16-46-23_u/events.out.tfevents.1709743925.u.352180.0 b/user-baichuan2-13b-v2-3.6/runs/Mar06_16-46-23_u/events.out.tfevents.1709743925.u.352180.0
new file mode 100644
index 0000000000000000000000000000000000000000..ff9d12c5761e008c80eb66c12b4dc358b46bbd49
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/runs/Mar06_16-46-23_u/events.out.tfevents.1709743925.u.352180.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a087c12a11786c7f7b12c642ee0a6c4098d3859d1e2df9abf3e218f2e3365ec
+size 5162
diff --git a/user-baichuan2-13b-v2-3.6/runs/Mar06_16-55-14_u/events.out.tfevents.1709744402.u.352650.0 b/user-baichuan2-13b-v2-3.6/runs/Mar06_16-55-14_u/events.out.tfevents.1709744402.u.352650.0
new file mode 100644
index 0000000000000000000000000000000000000000..198fa711c4ba0edd82375b24294f4f5c2becfc7a
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/runs/Mar06_16-55-14_u/events.out.tfevents.1709744402.u.352650.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0aca9a5ecc84224df331de22088bfa3fe20a25c6903fd402a0be863a9566699d
+size 5162
diff --git a/user-baichuan2-13b-v2-3.6/runs/Mar06_17-03-22_u/events.out.tfevents.1709744890.u.353116.0 b/user-baichuan2-13b-v2-3.6/runs/Mar06_17-03-22_u/events.out.tfevents.1709744890.u.353116.0
new file mode 100644
index 0000000000000000000000000000000000000000..9b2e8c40e353b6677c183fe21fe4ddffd94fb557
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/runs/Mar06_17-03-22_u/events.out.tfevents.1709744890.u.353116.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dfea604250d4aac14bc3ed7c865337977033589c98558e199805f39e062fa8d8
+size 5162
diff --git a/user-baichuan2-13b-v2-3.6/runs/Mar06_17-13-29_u/events.out.tfevents.1709745516.u.353684.0 b/user-baichuan2-13b-v2-3.6/runs/Mar06_17-13-29_u/events.out.tfevents.1709745516.u.353684.0
new file mode 100644
index 0000000000000000000000000000000000000000..bf0ea7f25594317849d5911979e7e5651751e7ea
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/runs/Mar06_17-13-29_u/events.out.tfevents.1709745516.u.353684.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:94b3e44490b8dd88a5a161c7a6b9a835026ef7a8218a9fb171acaa8737033db7
+size 5162
diff --git a/user-baichuan2-13b-v2-3.6/runs/Mar06_17-30-51_u/events.out.tfevents.1709746552.u.354572.0 b/user-baichuan2-13b-v2-3.6/runs/Mar06_17-30-51_u/events.out.tfevents.1709746552.u.354572.0
new file mode 100644
index 0000000000000000000000000000000000000000..1d62fc94358f67b712d1957737ce8e56461fb68a
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/runs/Mar06_17-30-51_u/events.out.tfevents.1709746552.u.354572.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b2a7c82c63e3cb38c34975b02f2f0e377684b0dfa72aa1dfcfaad954da7c93e2
+size 5162
diff --git a/user-baichuan2-13b-v2-3.6/runs/Mar06_17-42-56_u/events.out.tfevents.1709747302.u.355650.0 b/user-baichuan2-13b-v2-3.6/runs/Mar06_17-42-56_u/events.out.tfevents.1709747302.u.355650.0
new file mode 100644
index 0000000000000000000000000000000000000000..3b885f361146d58e653802c4dc1d4836c7dd286d
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/runs/Mar06_17-42-56_u/events.out.tfevents.1709747302.u.355650.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf7786e2daf0473ba392d547dc007aaa018e30f7c97ce3bca26d196f68798142
+size 5164
diff --git a/user-baichuan2-13b-v2-3.6/runs/Mar06_17-52-18_u/events.out.tfevents.1709747824.u.356294.0 b/user-baichuan2-13b-v2-3.6/runs/Mar06_17-52-18_u/events.out.tfevents.1709747824.u.356294.0
new file mode 100644
index 0000000000000000000000000000000000000000..8b6ae66f8177c946d87bb6e99116fcabd634b981
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/runs/Mar06_17-52-18_u/events.out.tfevents.1709747824.u.356294.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d194e1fd4dc3c2a02a81c8673bf1a3a81620aa83df3b205494d63612c025586d
+size 5164
diff --git a/user-baichuan2-13b-v2-3.6/runs/Mar06_18-08-25_u/events.out.tfevents.1709748801.u.357253.0 b/user-baichuan2-13b-v2-3.6/runs/Mar06_18-08-25_u/events.out.tfevents.1709748801.u.357253.0
new file mode 100644
index 0000000000000000000000000000000000000000..04ffbda4acec95ac7b1c7af1f085435ea33b7555
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/runs/Mar06_18-08-25_u/events.out.tfevents.1709748801.u.357253.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6817600f8d35db7ef49417a02a144ff2a816dafcd07f8f50ce6f3bd94b96f20a
+size 5164
diff --git a/user-baichuan2-13b-v2-3.6/runs/Mar07_03-31-34_u/events.out.tfevents.1709782584.u.381738.0 b/user-baichuan2-13b-v2-3.6/runs/Mar07_03-31-34_u/events.out.tfevents.1709782584.u.381738.0
new file mode 100644
index 0000000000000000000000000000000000000000..f7ccc2f451737540029d52b00e196f0ff288f25c
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/runs/Mar07_03-31-34_u/events.out.tfevents.1709782584.u.381738.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:acd88b21c630e4122dc0b6fb5c8830b898b4c257ce4ce4e569faa88c59877a2a
+size 5162
diff --git a/user-baichuan2-13b-v2-3.6/runs/Mar07_03-49-43_u/events.out.tfevents.1709783729.u.385470.0 b/user-baichuan2-13b-v2-3.6/runs/Mar07_03-49-43_u/events.out.tfevents.1709783729.u.385470.0
new file mode 100644
index 0000000000000000000000000000000000000000..b0691d3a5806d7031e1fb4fe7900ef61a6e1760c
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/runs/Mar07_03-49-43_u/events.out.tfevents.1709783729.u.385470.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df5fac76e20244f743f2a360fbb235655eefefb50e291b34867c718a58cdf411
+size 5161
diff --git a/user-baichuan2-13b-v2-3.6/runs/Mar07_07-52-18_u/events.out.tfevents.1709798258.u.438424.0 b/user-baichuan2-13b-v2-3.6/runs/Mar07_07-52-18_u/events.out.tfevents.1709798258.u.438424.0
new file mode 100644
index 0000000000000000000000000000000000000000..d8ddcb8bbf23279f4010d56041950243bba316db
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/runs/Mar07_07-52-18_u/events.out.tfevents.1709798258.u.438424.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:140b88171455da8dd72598272e994d1bfc9b4ff0d77a0590eeed7ddd64faae25
+size 5161
diff --git a/user-baichuan2-13b-v2-3.6/runs/Mar07_08-49-42_u/events.out.tfevents.1709801673.u.450774.0 b/user-baichuan2-13b-v2-3.6/runs/Mar07_08-49-42_u/events.out.tfevents.1709801673.u.450774.0
new file mode 100644
index 0000000000000000000000000000000000000000..46bdedeadf9f9e267df7f601d9e4e1e7e6deff39
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/runs/Mar07_08-49-42_u/events.out.tfevents.1709801673.u.450774.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:37c8a311584758ec0be5b1241be5384239dd1210fa803c16e731ccb711cdd1e8
+size 5161
diff --git a/user-baichuan2-13b-v2-3.6/runs/Mar07_09-09-20_u/events.out.tfevents.1709802924.u.456437.0 b/user-baichuan2-13b-v2-3.6/runs/Mar07_09-09-20_u/events.out.tfevents.1709802924.u.456437.0
new file mode 100644
index 0000000000000000000000000000000000000000..511ce373f32b0c1a2c0c863f85c98a4760638ce3
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/runs/Mar07_09-09-20_u/events.out.tfevents.1709802924.u.456437.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c0eea59c5062e965d2507abf69d31d39764e08c84df42f2ee54ab7670e2331da
+size 5372
diff --git a/user-baichuan2-13b-v2-3.6/runs/Mar07_10-05-57_u/events.out.tfevents.1709806267.u.459853.0 b/user-baichuan2-13b-v2-3.6/runs/Mar07_10-05-57_u/events.out.tfevents.1709806267.u.459853.0
new file mode 100644
index 0000000000000000000000000000000000000000..dd7fd1a29f268f8e3c3997e87ed8ee98c75e48a3
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/runs/Mar07_10-05-57_u/events.out.tfevents.1709806267.u.459853.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f433667c7bebc237141213bfa01bc012df8d3305589f93f052d45ebc80c617f
+size 5161
diff --git a/user-baichuan2-13b-v2-3.6/runs/Mar07_10-36-26_u/events.out.tfevents.1709808103.u.463209.0 b/user-baichuan2-13b-v2-3.6/runs/Mar07_10-36-26_u/events.out.tfevents.1709808103.u.463209.0
new file mode 100644
index 0000000000000000000000000000000000000000..8eb5c2d566c588e5d7787f179fee31c5cdcbc7d1
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/runs/Mar07_10-36-26_u/events.out.tfevents.1709808103.u.463209.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0f4e93f8fc75f9da0b1ee4b9b18bbb9f13a9456072935f922f2ef83b4c10636f
+size 5162
diff --git a/user-baichuan2-13b-v2-3.6/runs/Mar07_10-46-02_u/events.out.tfevents.1709808674.u.463766.0 b/user-baichuan2-13b-v2-3.6/runs/Mar07_10-46-02_u/events.out.tfevents.1709808674.u.463766.0
new file mode 100644
index 0000000000000000000000000000000000000000..c2c00f14d35d2dfa1e4dc152fb1415d6bc354bf5
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/runs/Mar07_10-46-02_u/events.out.tfevents.1709808674.u.463766.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c490aebdfb1cdebde240692ba7c95e9c036f1b7b8b75f018a185e6a68135eef
+size 5162
diff --git a/user-baichuan2-13b-v2-3.6/runs/Mar07_10-55-29_u/events.out.tfevents.1709809260.u.465229.0 b/user-baichuan2-13b-v2-3.6/runs/Mar07_10-55-29_u/events.out.tfevents.1709809260.u.465229.0
new file mode 100644
index 0000000000000000000000000000000000000000..35d04ada0a6f696f3ddf5081d382c06bfbdaaf86
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/runs/Mar07_10-55-29_u/events.out.tfevents.1709809260.u.465229.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c7e300cd7b1e8c2ab44e28ec0c8e46d0bdab2ea6c88af7c9e39972c0d913ea8c
+size 5373
diff --git a/user-baichuan2-13b-v2-3.6/runs/Mar08_02-30-06_u/events.out.tfevents.1709865296.u.474365.0 b/user-baichuan2-13b-v2-3.6/runs/Mar08_02-30-06_u/events.out.tfevents.1709865296.u.474365.0
new file mode 100644
index 0000000000000000000000000000000000000000..e5453fddcaaff3271866d183dc00be29a20831fc
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/runs/Mar08_02-30-06_u/events.out.tfevents.1709865296.u.474365.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f24c66fe18474454e912e79aa0f0c13eedb3ddf999df9a385654cccdea842eae
+size 5369
diff --git a/user-baichuan2-13b-v2-3.6/runs/Mar08_07-54-06_u/events.out.tfevents.1709884742.u.490887.0 b/user-baichuan2-13b-v2-3.6/runs/Mar08_07-54-06_u/events.out.tfevents.1709884742.u.490887.0
new file mode 100644
index 0000000000000000000000000000000000000000..bedeb6d6dd2585f0420ea9d8fd8ef1ebd4db9363
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/runs/Mar08_07-54-06_u/events.out.tfevents.1709884742.u.490887.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:42986d0b68aeeb28b6f1b253cdf746811be009c4cb7865a9d7a12437a95f3fa4
+size 11020
diff --git a/user-baichuan2-13b-v2-3.6/runs/Mar09_11-53-09_u/events.out.tfevents.1709985476.u.545110.0 b/user-baichuan2-13b-v2-3.6/runs/Mar09_11-53-09_u/events.out.tfevents.1709985476.u.545110.0
new file mode 100644
index 0000000000000000000000000000000000000000..c9abbbb54d660927efe0c5cb917af7eea94910bb
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/runs/Mar09_11-53-09_u/events.out.tfevents.1709985476.u.545110.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d3b9974118151ae003ea50b82c069b04dddcb6c436169031aa9555332bed52a5
+size 11020
diff --git a/user-baichuan2-13b-v2-3.6/runs/Mar10_00-56-39_u/events.out.tfevents.1710032484.u.546891.0 b/user-baichuan2-13b-v2-3.6/runs/Mar10_00-56-39_u/events.out.tfevents.1710032484.u.546891.0
new file mode 100644
index 0000000000000000000000000000000000000000..0753af4ef6b32e15928306a78cf80cabdc5b30ba
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/runs/Mar10_00-56-39_u/events.out.tfevents.1710032484.u.546891.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6f3f4a3746e0083f9a8039d49a7cdef14f9851107aa3a0d1bf702d32dc35be4d
+size 15594
diff --git a/user-baichuan2-13b-v2-3.6/special_tokens_map.json b/user-baichuan2-13b-v2-3.6/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..4835406b0da9f2bed097895a32056ca040085b4f
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/special_tokens_map.json
@@ -0,0 +1,30 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true
+ },
+ "pad_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true
+ },
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true
+ }
+}
diff --git a/user-baichuan2-13b-v2-3.6/tokenization_baichuan.py b/user-baichuan2-13b-v2-3.6/tokenization_baichuan.py
new file mode 100644
index 0000000000000000000000000000000000000000..c529657fd26b20eaff4903faba8b56f2c39c55d1
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/tokenization_baichuan.py
@@ -0,0 +1,258 @@
+# Copyright (c) 2023, Baichuan Intelligent Technology. All rights reserved.
+
+import os
+from shutil import copyfile
+from typing import Any, Dict, List, Optional, Tuple
+
+import sentencepiece as spm
+from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
+from transformers.utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
+
+PRETRAINED_VOCAB_FILES_MAP = {
+ "vocab_file": {},
+ "tokenizer_file": {},
+}
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {}
+
+
+class BaichuanTokenizer(PreTrainedTokenizer):
+ """
+ Construct a Baichuan tokenizer. Based on byte-level Byte-Pair-Encoding.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+ max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ vocab_file,
+ unk_token="",
+ bos_token="",
+ eos_token="",
+ pad_token=None,
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
+ add_bos_token=True,
+ add_eos_token=False,
+ clean_up_tokenization_spaces=False,
+ **kwargs,
+ ):
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+ bos_token = (
+ AddedToken(bos_token, lstrip=False, rstrip=False)
+ if isinstance(bos_token, str)
+ else bos_token
+ )
+ eos_token = (
+ AddedToken(eos_token, lstrip=False, rstrip=False)
+ if isinstance(eos_token, str)
+ else eos_token
+ )
+ unk_token = (
+ AddedToken(unk_token, lstrip=False, rstrip=False)
+ if isinstance(unk_token, str)
+ else unk_token
+ )
+ pad_token = (
+ AddedToken(pad_token, lstrip=False, rstrip=False)
+ if isinstance(pad_token, str)
+ else pad_token
+ )
+ self.vocab_file = vocab_file
+ self.add_bos_token = add_bos_token
+ self.add_eos_token = add_eos_token
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(vocab_file)
+ super().__init__(
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ pad_token=pad_token,
+ add_bos_token=add_bos_token,
+ add_eos_token=add_eos_token,
+ sp_model_kwargs=self.sp_model_kwargs,
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+ **kwargs,
+ )
+
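+ # A brief note on the two methods below: SentencePieceProcessor objects are
+ # not picklable, so __getstate__ drops the processor and __setstate__
+ # rebuilds it from the saved vocab file.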
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ state["sp_model"] = None
+ return state
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(self.vocab_file)
+
+ @property
+ def vocab_size(self):
+ """Returns vocab size"""
+ return self.sp_model.get_piece_size()
+
+ def get_vocab(self):
+ """Returns vocab as a dict"""
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
+ def _tokenize(self, text):
+ """Returns a tokenized string."""
+ return self.sp_model.encode(text, out_type=str)
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.sp_model.piece_to_id(token)
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ token = self.sp_model.IdToPiece(index)
+ return token
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ current_sub_tokens = []
+ out_string = ""
+ prev_is_special = False
+ for i, token in enumerate(tokens):
+ # make sure that special tokens are not decoded using sentencepiece model
+ if token in self.all_special_tokens:
+ if not prev_is_special and i != 0:
+ out_string += " "
+ out_string += self.sp_model.decode(current_sub_tokens) + token
+ prev_is_special = True
+ current_sub_tokens = []
+ else:
+ current_sub_tokens.append(token)
+ prev_is_special = False
+ out_string += self.sp_model.decode(current_sub_tokens)
+ return out_string
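+ # Decoding sketch for the method above: special tokens are emitted verbatim,
+ # while each run of ordinary pieces between them is decoded through the
+ # sentencepiece model.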
+
+ def save_vocabulary(
+ self, save_directory, filename_prefix: Optional[str] = None
+ ) -> Tuple[str]:
+ """
+ Save the vocabulary and special tokens file to a directory.
+
+ Args:
+ save_directory (`str`):
+ The directory in which to save the vocabulary.
+
+ Returns:
+ `Tuple(str)`: Paths to the files saved.
+ """
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ out_vocab_file = os.path.join(
+ save_directory,
+ (filename_prefix + "-" if filename_prefix else "")
+ + VOCAB_FILES_NAMES["vocab_file"],
+ )
+
+ if os.path.abspath(self.vocab_file) != os.path.abspath(
+ out_vocab_file
+ ) and os.path.isfile(self.vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+ elif not os.path.isfile(self.vocab_file):
+ with open(out_vocab_file, "wb") as fi:
+ content_spiece_model = self.sp_model.serialized_model_proto()
+ fi.write(content_spiece_model)
+
+ return (out_vocab_file,)
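+ # Note: when the source vocab file is missing, the elif branch above falls
+ # back to writing the serialized sentencepiece model held in memory.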
+
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+ output = bos_token_id + token_ids_0 + eos_token_id
+
+ if token_ids_1 is not None:
+ output = output + bos_token_id + token_ids_1 + eos_token_id
+
+ return output
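+ # Illustrative result with the defaults (add_bos_token=True,
+ # add_eos_token=False): token_ids_0=[10, 11] yields [bos_token_id, 10, 11].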
+
+ def get_special_tokens_mask(
+ self,
+ token_ids_0: List[int],
+ token_ids_1: Optional[List[int]] = None,
+ already_has_special_tokens: bool = False,
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0,
+ token_ids_1=token_ids_1,
+ already_has_special_tokens=True,
+ )
+
+ bos_token_id = [1] if self.add_bos_token else []
+ eos_token_id = [1] if self.add_eos_token else []
+
+ if token_ids_1 is None:
+ return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
+ return (
+ bos_token_id
+ + ([0] * len(token_ids_0))
+ + eos_token_id
+ + bos_token_id
+ + ([0] * len(token_ids_1))
+ + eos_token_id
+ )
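+ # Illustrative mask with add_bos_token=True, add_eos_token=False and a
+ # two-token sequence: [1, 0, 0], where 1 marks the prepended BOS token.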
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Creates a mask from the two sequences passed, to be used in a sequence-pair classification task. A
+ sequence pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If token_ids_1 is None, only the first portion of the mask (0s) is returned.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of ids.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+ output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
+
+ if token_ids_1 is not None:
+ output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
+
+ return output
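+
+
+ # Minimal usage sketch (illustrative only; the vocab path is an assumption,
+ # not a guarantee about this repo's layout):
+ #
+ #   tok = BaichuanTokenizer(vocab_file="tokenizer.model")
+ #   ids = tok("hello")["input_ids"]   # BOS is prepended by default
+ #   text = tok.decode(ids)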
diff --git a/user-baichuan2-13b-v2-3.6/tokenizer.model b/user-baichuan2-13b-v2-3.6/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..b3902c4521d7f34868ac76dd16150ff5ca41b000
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:79452955be6b419a65984273a9f08af86042e1c2a75ee3ba989cbf620a133cc2
+size 2001107
diff --git a/user-baichuan2-13b-v2-3.6/tokenizer_config.json b/user-baichuan2-13b-v2-3.6/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..46070f6c3ec83e940bb7cf87705b41b203509aa4
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/tokenizer_config.json
@@ -0,0 +1,44 @@
+{
+ "add_bos_token": false,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": true,
+ "special": true
+ }
+ },
+ "auto_map": {
+ "AutoTokenizer": [
+ "tokenization_baichuan.BaichuanTokenizer",
+ null
+ ]
+ },
+ "bos_token": "",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "model_max_length": 4096,
+ "pad_token": "",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "BaichuanTokenizer",
+ "unk_token": ""
+}
diff --git a/user-baichuan2-13b-v2-3.6/train.log b/user-baichuan2-13b-v2-3.6/train.log
new file mode 100644
index 0000000000000000000000000000000000000000..b1d1612f52650b670055517760bb7f09b4f904f1
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/train.log
@@ -0,0 +1,10406 @@
+2024-03-06 12:17:28.140 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=1,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=<HUB_TOKEN>,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=2,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar06_12-17-28_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=100,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=100,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-06 12:17:28.150 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=1,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=<HUB_TOKEN>,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar06_12-17-28_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=100,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=100,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-06 12:17:28.150 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=1,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=<HUB_TOKEN>,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=1,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar06_12-17-28_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=100,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=100,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-06 12:17:28.199 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 12:17:28.199 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 12:17:28.199 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 12:25:27.169 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=1,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=<HUB_TOKEN>,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar06_12-25-27_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=100,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=100,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-06 12:25:27.170 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 12:25:27.173 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=1,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=<HUB_TOKEN>,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=2,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar06_12-25-27_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=100,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=100,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-06 12:25:27.174 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=1,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=<HUB_TOKEN>,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=1,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar06_12-25-27_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=100,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=100,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-06 12:25:27.181 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=1,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=<HUB_TOKEN>,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=3,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar06_12-25-27_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=100,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=100,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-06 12:25:27.183 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 12:25:27.183 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 12:25:27.183 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 12:32:56.084 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=1,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=<HUB_TOKEN>,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar06_12-32-56_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=100,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=100,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-06 12:32:56.086 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 12:32:56.087 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=1,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=<HUB_TOKEN>,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=1,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar06_12-32-56_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=100,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=100,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-06 12:32:56.088 | INFO | __main__:init_components:333 - Initializing components...
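+
+For reference, the dump above can be reproduced in code. The following is a minimal sketch, assuming the stock `transformers.TrainingArguments` API (the training script itself is not part of this diff); every value is copied from the log, and fields not listed keep their library defaults.
+
+```python
+from transformers import TrainingArguments
+
+# Sketch only: the non-default fields from the dump above.
+train_args = TrainingArguments(
+    output_dir="output/user-baichuan2-13b-v2-3.6",
+    num_train_epochs=1,
+    per_device_train_batch_size=1,
+    gradient_accumulation_steps=16,
+    gradient_checkpointing=True,
+    learning_rate=1e-4,
+    lr_scheduler_type="constant_with_warmup",
+    warmup_steps=100,
+    max_grad_norm=0.3,
+    optim="paged_adamw_32bit",
+    fp16=True,
+    logging_steps=100,
+    save_strategy="steps",
+    save_steps=100,
+    save_total_limit=1,
+    seed=42,
+    remove_unused_columns=False,
+    report_to=["tensorboard"],
+)
+```
+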
+2024-03-06 12:32:56.098 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 12:32:56.098 | INFO | __main__:init_components:333 - Initializing components...
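+
+The `Initializing components...` line repeats once per process: ranks 0-3 of a four-GPU data-parallel launch each run the same setup and log the same arguments, differing only in `local_rank`. The `timestamp | LEVEL | module:function:line - message` layout matches loguru's default sink format; below is a minimal sketch that produces lines in this shape (an assumption; the logging setup is not included in this diff).
+
+```python
+from loguru import logger
+
+# loguru's default stderr sink prints
+# "<time> | <LEVEL> | <module>:<function>:<line> - <message>",
+# matching the lines in this log.
+logger.info("Initializing components...")
+```
+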
+2024-03-06 12:45:07.048 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=1,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=2,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar06_12-45-07_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=100,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=100,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-06 12:45:07.050 | INFO | __main__:init_components:333 - Initializing components...
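+
+One consequence of this configuration worth noting: with `per_device_train_batch_size=1`, `gradient_accumulation_steps=16`, and four ranks (inferred from `local_rank` 0-3; the world size is an assumption, as it is not logged directly), the effective global batch size works out to 64.
+
+```python
+# Effective global batch size under the logged configuration.
+per_device_train_batch_size = 1
+gradient_accumulation_steps = 16
+world_size = 4  # assumption: ranks 0-3 appear in the log
+
+print(per_device_train_batch_size * gradient_accumulation_steps * world_size)  # 64
+```
+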
+2024-03-06 12:45:07.057 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 12:45:07.057 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 12:45:07.058 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 13:10:28.241 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=1,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=2,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar06_13-10-28_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=100,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=100,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-06 13:10:28.242 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 13:10:28.245 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 13:10:28.247 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 13:10:28.249 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 13:12:34.796 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=1,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=1,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar06_13-12-34_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=100,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=100,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-06 13:12:34.798 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 13:12:34.811 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 13:12:34.811 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 13:12:34.813 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 13:17:44.571 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=1,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=1,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar06_13-17-44_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=100,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=100,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-06 13:17:44.573 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 13:17:44.581 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 13:19:29.953 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 13:19:30.387 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-06 13:19:30.388 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-06 13:19:30.388 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-06 13:23:03.641 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 13:23:04.430 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-06 13:23:04.430 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-06 13:23:04.431 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-06 13:31:12.768 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 13:31:13.284 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-06 13:31:13.284 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-06 13:31:13.284 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-06 13:50:28.919 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=4,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar06_13-50-28_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=100,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=100,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-06 13:50:28.944 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 13:50:29.442 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-06 13:50:29.442 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-06 13:50:29.442 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-06 13:52:59.674 | INFO | __main__:find_all_linear_names:85 - LoRA target module names: ['up_proj', 'W_pack', 'o_proj', 'gate_proj', 'down_proj']
+2024-03-06 13:55:12.061 | INFO | __main__:load_model:283 - memory footprint of model: 11.499347686767578 GB
+2024-03-06 13:55:12.072 | INFO | __main__:load_model:295 - Total model params: 7815.26M
+2024-03-06 13:55:12.073 | INFO | __main__:init_components:349 - Train model with sft task
+2024-03-06 13:55:12.073 | INFO | __main__:load_sft_dataset:315 - Loading data with UnifiedSFTDataset
+2024-03-06 13:55:12.073 | INFO | component.dataset:__init__:19 - Loading data: ./data/train.jsonl
+2024-03-06 13:55:12.170 | INFO | component.dataset:__init__:22 - Use template "baichuan2" for training
+2024-03-06 13:55:12.170 | INFO | component.dataset:__init__:23 - There are 7720 data in dataset
+2024-03-06 15:36:52.902 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 15:36:53.359 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-06 15:36:53.360 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-06 15:36:53.360 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-06 15:39:10.814 | INFO | __main__:find_all_linear_names:85 - LoRA target module names: ['up_proj', 'o_proj', 'W_pack', 'gate_proj', 'down_proj']
+2024-03-06 15:41:23.526 | INFO | __main__:load_model:283 - memory footprint of model: 11.499347686767578 GB
+2024-03-06 15:41:23.538 | INFO | __main__:load_model:295 - Total model params: 7815.26M
+2024-03-06 15:41:23.538 | INFO | __main__:init_components:349 - Train model with sft task
+2024-03-06 15:41:23.538 | INFO | __main__:load_sft_dataset:315 - Loading data with UnifiedSFTDataset
+2024-03-06 15:41:23.538 | INFO | component.dataset:__init__:19 - Loading data: ./data/train.jsonl
+2024-03-06 15:41:23.635 | INFO | component.dataset:__init__:22 - Use template "baichuan2" for training
+2024-03-06 15:41:23.635 | INFO | component.dataset:__init__:23 - There are 7720 data in dataset
+2024-03-06 16:02:53.895 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=4,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar06_16-02-53_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=100,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=100,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
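+
+A quick sanity check on what this dump implies for one epoch: `per_device_train_batch_size=1` with `gradient_accumulation_steps=16` means 16 samples per optimizer step per process, so the 7720-sample dataset above yields roughly 480 steps on a single process, or about 121 if all four visible GPUs share the batch. A sketch of the arithmetic (how many processes actually participated is not stated in the log):
+
+```python
+import math
+
+samples = 7720        # "There are 7720 data in dataset"
+per_device_bs = 1     # per_device_train_batch_size
+grad_accum = 16       # gradient_accumulation_steps
+
+for world_size in (1, 4):  # single process vs. all four visible GPUs
+    effective_bs = per_device_bs * grad_accum * world_size
+    steps = math.ceil(samples / effective_bs)
+    print(f"world_size={world_size}: effective batch {effective_bs}, ~{steps} steps/epoch")
+
+# world_size=4 gives ~121 steps, so warmup_steps=100 would cover most of the epoch.
+```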
+2024-03-06 16:02:53.910 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 16:02:54.355 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-06 16:02:54.355 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-06 16:02:54.356 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-06 16:05:08.407 | INFO | __main__:find_all_linear_names:85 - LoRA target module names: ['up_proj', 'down_proj', 'gate_proj', 'W_pack', 'o_proj']
+2024-03-06 16:07:21.360 | INFO | __main__:load_model:283 - memory footprint of model: 11.499347686767578 GB
+2024-03-06 16:07:21.372 | INFO | __main__:load_model:295 - Total model params: 7815.26M
+2024-03-06 16:07:21.372 | INFO | __main__:init_components:349 - Train model with sft task
+2024-03-06 16:07:21.372 | INFO | __main__:load_sft_dataset:315 - Loading data with UnifiedSFTDataset
+2024-03-06 16:07:21.372 | INFO | component.dataset:__init__:19 - Loading data: ./data/train.jsonl
+2024-03-06 16:07:21.448 | INFO | component.dataset:__init__:22 - Use template "baichuan2" for training
+2024-03-06 16:07:21.449 | INFO | component.dataset:__init__:23 - There are 7720 data in dataset
+2024-03-06 16:07:21.667 | INFO | __main__:main:387 - *** starting training ***
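+
+Each restart reports the same five LoRA target modules but in a different order, which is what you would expect if the names are gathered into a Python `set` (string hashing is randomized per process). A minimal sketch of such a routine, following the usual QLoRA convention of targeting every 4-bit linear layer except the LM head; the actual `find_all_linear_names` is not included in this diff:
+
+```python
+import bitsandbytes as bnb
+
+
+def find_all_linear_names(model):
+    # Collect the leaf names of every 4-bit linear module; iterating a set
+    # is what makes the logged order differ from run to run.
+    names = set()
+    for name, module in model.named_modules():
+        if isinstance(module, bnb.nn.Linear4bit):
+            names.add(name.split(".")[-1])  # e.g. "W_pack", "o_proj"
+    names.discard("lm_head")  # keep the output head out of the adapter
+    return list(names)
+```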
+2024-03-06 16:15:19.800 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=4,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar06_16-15-19_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=100,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=100,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-06 16:15:19.810 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 16:15:20.359 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-06 16:15:20.360 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-06 16:15:20.360 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-06 16:17:39.499 | INFO | __main__:find_all_linear_names:85 - LoRA target module names: ['down_proj', 'gate_proj', 'W_pack', 'up_proj', 'o_proj']
+2024-03-06 16:19:51.334 | INFO | __main__:load_model:283 - memory footprint of model: 11.499347686767578 GB
+2024-03-06 16:19:51.345 | INFO | __main__:load_model:295 - Total model params: 7815.26M
+2024-03-06 16:19:51.345 | INFO | __main__:init_components:349 - Train model with sft task
+2024-03-06 16:19:51.345 | INFO | __main__:load_sft_dataset:315 - Loading data with UnifiedSFTDataset
+2024-03-06 16:19:51.345 | INFO | component.dataset:__init__:19 - Loading data: ./data/train.jsonl
+2024-03-06 16:19:51.520 | INFO | component.dataset:__init__:22 - Use template "baichuan2" for training
+2024-03-06 16:19:51.521 | INFO | component.dataset:__init__:23 - There are 7720 data in dataset
+2024-03-06 16:19:51.693 | INFO | __main__:main:387 - *** starting training ***
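+
+Every restart re-reports `vocab_size of tokenizer: 125696`, the full Baichuan2 vocabulary. Loading the tokenizer from the local ModelScope snapshot is plain `transformers` usage; a sketch (`trust_remote_code=True` is required because Baichuan2 ships its own tokenizer class, and `use_fast=False` is an assumption here, on the premise that only a slow SentencePiece tokenizer is provided):
+
+```python
+from transformers import AutoTokenizer
+
+path = "/home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat"
+tokenizer = AutoTokenizer.from_pretrained(
+    path,
+    trust_remote_code=True,  # Baichuan2 defines a custom tokenizer class
+    use_fast=False,          # assumption: no fast tokenizer is shipped
+)
+print(tokenizer.vocab_size)  # expected: 125696
+```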
+2024-03-06 16:27:57.389 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=4,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar06_16-27-57_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=100,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=100,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-06 16:27:57.395 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 16:27:57.855 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-06 16:27:57.855 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-06 16:27:57.855 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-06 16:30:23.733 | INFO | __main__:find_all_linear_names:85 - LoRA target module names: ['W_pack', 'up_proj', 'o_proj', 'gate_proj', 'down_proj']
+2024-03-06 16:32:35.436 | INFO | __main__:load_model:283 - memory footprint of model: 11.499347686767578 GB
+2024-03-06 16:32:35.447 | INFO | __main__:load_model:295 - Total model params: 7815.26M
+2024-03-06 16:32:35.448 | INFO | __main__:init_components:349 - Train model with sft task
+2024-03-06 16:32:35.448 | INFO | __main__:load_sft_dataset:315 - Loading data with UnifiedSFTDataset
+2024-03-06 16:32:35.448 | INFO | component.dataset:__init__:19 - Loading data: ./data/train.jsonl
+2024-03-06 16:32:35.524 | INFO | component.dataset:__init__:22 - Use template "baichuan2" for training
+2024-03-06 16:32:35.524 | INFO | component.dataset:__init__:23 - There are 7720 data in dataset
+2024-03-06 16:32:35.559 | INFO | __main__:main:387 - *** starting training ***
+2024-03-06 16:34:24.864 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=1,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=1,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar06_16-34-24_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=100,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=100,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-06 16:34:24.867 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=1,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=2,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar06_16-34-24_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=100,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=100,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-06 16:34:24.869 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=1,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar06_16-34-24_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=100,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=100,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-06 16:34:24.870 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=1,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=3,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar06_16-34-24_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=100,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=100,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-06 16:34:24.876 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 16:34:24.876 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 16:34:24.876 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 16:34:24.876 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 16:34:25.444 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-06 16:34:25.445 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-06 16:34:25.445 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-06 16:34:25.445 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-06 16:34:25.446 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-06 16:34:25.446 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-06 16:34:25.446 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-06 16:34:25.446 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-06 16:34:25.446 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-06 16:34:25.447 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-06 16:34:25.448 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-06 16:34:25.448 | INFO | __main__:load_model:221 - Train model with qlora
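+
+The 16:34 launch behaves differently from the others: four `TrainingArguments` dumps appear at once, each with `_n_gpu=1` and a distinct `local_rank` (0-3), so this was a four-process DDP launch in which every rank logged its own copy of the initialization lines. The timestamp-and-level format matches loguru; a common way to avoid the quadrupled output is to keep a sink only on local rank 0, sketched below (the log file path is hypothetical):
+
+```python
+import os
+import sys
+
+from loguru import logger
+
+# torchrun exports LOCAL_RANK for each worker process.
+local_rank = int(os.environ.get("LOCAL_RANK", "0"))
+
+logger.remove()  # drop the default stderr sink on every rank
+if local_rank == 0:
+    logger.add(sys.stderr, level="INFO")
+    logger.add("output/user-baichuan2-13b-v2-3.6/train.log", level="INFO")  # hypothetical path
+```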
+2024-03-06 16:37:25.850 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=4,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar06_16-37-25_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=100,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=100,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-06 16:37:25.890 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 16:37:26.425 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-06 16:37:26.426 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-06 16:37:26.426 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-06 16:40:56.142 | INFO | __main__:find_all_linear_names:85 - LoRA target module names: ['up_proj', 'gate_proj', 'o_proj', 'down_proj', 'W_pack']
+2024-03-06 16:43:05.675 | INFO | __main__:load_model:283 - memory footprint of model: 10.875873565673828 GB
+2024-03-06 16:43:05.686 | INFO | __main__:load_model:295 - Total model params: 7647.89M
+2024-03-06 16:43:05.687 | INFO | __main__:init_components:349 - Train model with sft task
+2024-03-06 16:43:05.687 | INFO | __main__:load_sft_dataset:315 - Loading data with UnifiedSFTDataset
+2024-03-06 16:43:05.687 | INFO | component.dataset:__init__:19 - Loading data: ./data/train.jsonl
+2024-03-06 16:43:05.879 | INFO | component.dataset:__init__:22 - Use template "baichuan2" for training
+2024-03-06 16:43:05.879 | INFO | component.dataset:__init__:23 - There are 7720 data in dataset
+2024-03-06 16:43:05.938 | INFO | __main__:main:387 - *** starting training ***
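+
+From the 16:37 run onward, the reported memory footprint and parameter count shrink between restarts (11.50 GB / 7815.26M earlier, 10.88 GB / 7647.89M here, then 10.77 GB and finally 10.72 GB / 7606.05M), so the loading configuration was evidently being adjusted, though the log does not record how. The figures themselves come from standard APIs, sketched below; note the count sits far below the nominal 13B because bitsandbytes packs two 4-bit weights per stored element, so `numel()` under-counts quantized layers by half:
+
+```python
+# `model` is the quantized model returned by the load_model() step above.
+GiB = 1024 ** 3
+
+# get_memory_footprint() sums parameter and buffer sizes, so it reflects
+# the packed 4-bit weights rather than the fp16 size of the base model.
+print(f"memory footprint of model: {model.get_memory_footprint() / GiB} GB")
+
+total = sum(p.numel() for p in model.parameters())
+print(f"Total model params: {total / 1e6:.2f}M")
+```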
+2024-03-06 16:46:23.963 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=4,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar06_16-46-23_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=100,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=100,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-06 16:46:23.981 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 16:46:24.473 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-06 16:46:24.473 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-06 16:46:24.473 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-06 16:49:53.376 | INFO | __main__:find_all_linear_names:85 - LoRA target module names: ['up_proj', 'gate_proj', 'down_proj', 'W_pack', 'o_proj']
+2024-03-06 16:52:04.003 | INFO | __main__:load_model:283 - memory footprint of model: 10.771961212158203 GB
+2024-03-06 16:52:04.028 | INFO | __main__:load_model:295 - Total model params: 7620.00M
+2024-03-06 16:52:04.029 | INFO | __main__:init_components:349 - Train model with sft task
+2024-03-06 16:52:04.029 | INFO | __main__:load_sft_dataset:315 - Loading data with UnifiedSFTDataset
+2024-03-06 16:52:04.030 | INFO | component.dataset:__init__:19 - Loading data: ./data/train.jsonl
+2024-03-06 16:52:04.187 | INFO | component.dataset:__init__:22 - Use template "baichuan2" for training
+2024-03-06 16:52:04.188 | INFO | component.dataset:__init__:23 - There are 7720 data in dataset
+2024-03-06 16:52:05.224 | INFO | __main__:main:387 - *** starting training ***
+2024-03-06 16:55:14.931 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=4,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar06_16-55-14_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=100,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=100,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-06 16:55:14.948 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 16:55:15.475 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-06 16:55:15.476 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-06 16:55:15.476 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-06 16:57:51.689 | INFO | __main__:find_all_linear_names:85 - LoRA target module names: ['o_proj', 'gate_proj', 'W_pack', 'down_proj', 'up_proj']
+2024-03-06 17:00:00.799 | INFO | __main__:load_model:283 - memory footprint of model: 10.72000503540039 GB
+2024-03-06 17:00:00.848 | INFO | __main__:load_model:295 - Total model params: 7606.05M
+2024-03-06 17:00:00.848 | INFO | __main__:init_components:349 - Train model with sft task
+2024-03-06 17:00:00.848 | INFO | __main__:load_sft_dataset:315 - Loading data with UnifiedSFTDataset
+2024-03-06 17:00:00.849 | INFO | component.dataset:__init__:19 - Loading data: ./data/train.jsonl
+2024-03-06 17:00:01.112 | INFO | component.dataset:__init__:22 - Use template "baichuan2" for training
+2024-03-06 17:00:01.112 | INFO | component.dataset:__init__:23 - There are 7720 data in dataset
+2024-03-06 17:00:02.163 | INFO | __main__:main:387 - *** starting training ***
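+
+All of these dumps share the same schedule: `lr_scheduler_type=constant_with_warmup` with `warmup_steps=100` and `learning_rate=0.0001`, i.e. the learning rate climbs linearly from 0 to 1e-4 over the first 100 optimizer steps and then stays flat. A self-contained sketch with the matching `transformers` helper (a plain `AdamW` stands in for the run's `paged_adamw_32bit`):
+
+```python
+import torch
+from transformers import get_constant_schedule_with_warmup
+
+params = [torch.nn.Parameter(torch.zeros(1))]  # stand-in parameters
+optimizer = torch.optim.AdamW(params, lr=1e-4)
+scheduler = get_constant_schedule_with_warmup(optimizer, num_warmup_steps=100)
+
+for step in (0, 50, 100, 200):
+    while scheduler.last_epoch < step:  # fast-forward for illustration
+        scheduler.step()
+    print(step, scheduler.get_last_lr()[0])
+# 0 -> 0.0, 50 -> 5e-05, 100 -> 0.0001, 200 -> 0.0001
+```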
+2024-03-06 17:03:22.743 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=4,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar06_17-03-22_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=100,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=100,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-06 17:03:22.757 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 17:03:23.280 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-06 17:03:23.281 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-06 17:03:23.281 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-06 17:05:59.052 | INFO | __main__:find_all_linear_names:85 - LoRA target module names: ['gate_proj', 'down_proj', 'W_pack', 'o_proj', 'up_proj']
+2024-03-06 17:08:08.852 | INFO | __main__:load_model:283 - memory footprint of model: 10.72000503540039 GB
+2024-03-06 17:08:08.864 | INFO | __main__:load_model:295 - Total model params: 7606.05M
+2024-03-06 17:08:08.864 | INFO | __main__:init_components:349 - Train model with sft task
+2024-03-06 17:08:08.864 | INFO | __main__:load_sft_dataset:315 - Loading data with UnifiedSFTDataset
+2024-03-06 17:08:08.864 | INFO | component.dataset:__init__:19 - Loading data: ./data/train.jsonl
+2024-03-06 17:08:08.983 | INFO | component.dataset:__init__:22 - Use template "baichuan2" for training
+2024-03-06 17:08:08.983 | INFO | component.dataset:__init__:23 - There are 7720 data in dataset
+2024-03-06 17:08:09.858 | INFO | __main__:main:387 - *** starting training ***
+2024-03-06 17:13:29.709 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=4,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar06_17-13-29_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=100,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=100,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-06 17:13:29.754 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 17:13:30.211 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-06 17:13:30.212 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-06 17:13:30.212 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-06 17:16:25.025 | INFO | __main__:find_all_linear_names:85 - LoRA target module names: ['W_pack', 'up_proj', 'gate_proj', 'o_proj', 'down_proj']
+2024-03-06 17:18:34.386 | INFO | __main__:load_model:283 - memory footprint of model: 10.72000503540039 GB
+2024-03-06 17:18:34.412 | INFO | __main__:load_model:295 - Total model params: 7606.05M
+2024-03-06 17:18:34.413 | INFO | __main__:init_components:349 - Train model with sft task
+2024-03-06 17:18:34.413 | INFO | __main__:load_sft_dataset:315 - Loading data with UnifiedSFTDataset
+2024-03-06 17:18:34.413 | INFO | component.dataset:__init__:19 - Loading data: ./data/train.jsonl
+2024-03-06 17:18:34.583 | INFO | component.dataset:__init__:22 - Use template "baichuan2" for training
+2024-03-06 17:18:34.584 | INFO | component.dataset:__init__:23 - There are 7720 data in dataset
+2024-03-06 17:18:35.600 | INFO | __main__:main:387 - *** starting training ***
+2024-03-06 17:30:51.904 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=4,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar06_17-30-51_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=200,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=500,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-06 17:30:51.906 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 17:30:52.389 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-06 17:30:52.390 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-06 17:30:52.390 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-06 17:33:41.988 | INFO | __main__:find_all_linear_names:85 - LoRA target module names: ['gate_proj', 'W_pack', 'down_proj', 'o_proj', 'up_proj']
+2024-03-06 17:35:50.901 | INFO | __main__:load_model:283 - memory footprint of model: 10.72000503540039 GB
+2024-03-06 17:35:50.928 | INFO | __main__:load_model:295 - Total model params: 7606.05M
+2024-03-06 17:35:50.928 | INFO | __main__:init_components:349 - Train model with sft task
+2024-03-06 17:35:50.928 | INFO | __main__:load_sft_dataset:315 - Loading data with UnifiedSFTDataset
+2024-03-06 17:35:50.929 | INFO | component.dataset:__init__:19 - Loading data: ./data/train.jsonl
+2024-03-06 17:35:51.127 | INFO | component.dataset:__init__:22 - Use template "baichuan2" for training
+2024-03-06 17:35:51.128 | INFO | component.dataset:__init__:23 - There are 7720 data in dataset
+2024-03-06 17:35:51.999 | INFO | __main__:main:387 - *** starting training ***
+2024-03-06 17:41:19.871 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=4,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=False,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar06_17-41-19_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=200,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=500,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-06 17:42:56.902 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=4,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=True,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=False,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar06_17-42-56_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=200,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=500,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
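+The dump above can be reproduced almost verbatim with transformers.TrainingArguments; the multi-line block is just its repr. A minimal sketch covering the key settings from the dump (values copied from the log, everything else left at its default):
+
+    from transformers import TrainingArguments
+
+    train_args = TrainingArguments(
+        output_dir="output/user-baichuan2-13b-v2-3.6",
+        num_train_epochs=1,
+        per_device_train_batch_size=1,
+        gradient_accumulation_steps=16,
+        learning_rate=1e-4,
+        lr_scheduler_type="constant_with_warmup",
+        warmup_steps=100,
+        max_grad_norm=0.3,          # aggressive clipping, common for QLoRA
+        optim="paged_adamw_32bit",  # paged optimizer from bitsandbytes
+        bf16=True,
+        gradient_checkpointing=True,
+        logging_steps=200,
+        save_strategy="steps",
+        save_steps=500,
+        save_total_limit=1,
+        seed=42,
+        report_to=["tensorboard"],
+        remove_unused_columns=False,
+    )
+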
+2024-03-06 17:42:56.954 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 17:42:57.467 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-06 17:42:57.467 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-06 17:42:57.468 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-06 17:46:11.699 | INFO | __main__:find_all_linear_names:85 - LoRA target module names: ['down_proj', 'W_pack', 'gate_proj', 'up_proj', 'o_proj']
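+find_all_linear_names itself is not shown in this log; a plausible sketch, assuming the usual QLoRA recipe of collecting every bitsandbytes 4-bit linear layer except the output head (a set-based collection like this would also explain why the module order differs from run to run in these entries):
+
+    import bitsandbytes as bnb
+
+    def find_all_linear_names(model):
+        """Return the unique suffixes of all 4-bit linear modules, e.g. 'W_pack', 'o_proj'."""
+        names = set()
+        for name, module in model.named_modules():
+            if isinstance(module, bnb.nn.Linear4bit):
+                names.add(name.split(".")[-1])
+        names.discard("lm_head")  # never adapt the LM head
+        return list(names)
+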
+2024-03-06 17:48:21.583 | INFO | __main__:load_model:283 - memory footprint of model: 10.72000503540039 GB
+2024-03-06 17:48:21.611 | INFO | __main__:load_model:295 - Total model params: 7606.05M
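+Both figures are obtainable from stock transformers/PyTorch calls; a sketch, assuming the script uses them (`model` being the quantized base model loaded above). A ~13B model reporting only ~7.6B parameters is consistent with 4-bit weights being stored packed, two values per byte, so numel() undercounts the quantized layers:
+
+    from loguru import logger
+
+    footprint_gb = model.get_memory_footprint() / 1024**3                # ~10.72 for the 4-bit base
+    total_params_m = sum(p.numel() for p in model.parameters()) / 1e6    # ~7606.05
+    logger.info(f"memory footprint of model: {footprint_gb} GB")
+    logger.info(f"Total model params: {total_params_m:.2f}M")
+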
+2024-03-06 17:48:21.612 | INFO | __main__:init_components:349 - Train model with sft task
+2024-03-06 17:48:21.612 | INFO | __main__:load_sft_dataset:315 - Loading data with UnifiedSFTDataset
+2024-03-06 17:48:21.612 | INFO | component.dataset:__init__:19 - Loading data: ./data/train.jsonl
+2024-03-06 17:48:21.844 | INFO | component.dataset:__init__:22 - Use template "baichuan2" for training
+2024-03-06 17:48:21.845 | INFO | component.dataset:__init__:23 - There are 7720 data in dataset
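+UnifiedSFTDataset is only visible here through its four log lines (component.dataset:__init__:19-23); a minimal sketch consistent with them — the class shape and field names are assumptions, not taken from the actual repository — reading one JSON object per line from train.jsonl:
+
+    import json
+    from loguru import logger
+    from torch.utils.data import Dataset
+
+    class UnifiedSFTDataset(Dataset):
+        """One JSON object per line; the chat template is applied at tokenization time."""
+        def __init__(self, file, template):
+            logger.info(f"Loading data: {file}")
+            with open(file, encoding="utf8") as f:
+                self.data_list = f.readlines()
+            logger.info(f'Use template "{template.template_name}" for training')
+            logger.info(f"There are {len(self.data_list)} data in dataset")
+
+        def __len__(self):
+            return len(self.data_list)
+
+        def __getitem__(self, index):
+            return json.loads(self.data_list[index])
+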
+2024-03-06 17:48:22.555 | INFO | __main__:main:387 - *** starting training ***
+2024-03-06 17:52:18.060 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=4,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=True,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=False,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar06_17-52-18_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=200,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=500,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-06 17:52:18.061 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 17:52:18.503 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-06 17:52:18.503 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-06 17:52:18.504 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-06 17:54:55.504 | INFO | __main__:find_all_linear_names:85 - LoRA target module names: ['down_proj', 'up_proj', 'gate_proj', 'o_proj', 'W_pack']
+2024-03-06 17:57:03.863 | INFO | __main__:load_model:283 - memory footprint of model: 10.72000503540039 GB
+2024-03-06 17:57:03.875 | INFO | __main__:load_model:295 - Total model params: 7606.05M
+2024-03-06 17:57:03.875 | INFO | __main__:init_components:349 - Train model with sft task
+2024-03-06 17:57:03.875 | INFO | __main__:load_sft_dataset:315 - Loading data with UnifiedSFTDataset
+2024-03-06 17:57:03.876 | INFO | component.dataset:__init__:19 - Loading data: ./data/train.jsonl
+2024-03-06 17:57:03.989 | INFO | component.dataset:__init__:22 - Use template "baichuan2" for training
+2024-03-06 17:57:03.989 | INFO | component.dataset:__init__:23 - There are 7720 data in dataset
+2024-03-06 17:57:04.145 | INFO | __main__:main:387 - *** starting training ***
+2024-03-06 18:08:25.434 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=4,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=True,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=False,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar06_18-08-25_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=200,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=500,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-06 18:08:25.435 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 18:08:25.871 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-06 18:08:25.871 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-06 18:08:25.871 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-06 18:11:10.721 | INFO | __main__:find_all_linear_names:85 - LoRA target module names: ['o_proj', 'down_proj', 'up_proj', 'W_pack', 'gate_proj']
+2024-03-06 18:13:19.934 | INFO | __main__:load_model:283 - memory footprint of model: 10.72000503540039 GB
+2024-03-06 18:13:19.963 | INFO | __main__:load_model:295 - Total model params: 7606.05M
+2024-03-06 18:13:19.964 | INFO | __main__:init_components:349 - Train model with sft task
+2024-03-06 18:13:19.964 | INFO | __main__:load_sft_dataset:315 - Loading data with UnifiedSFTDataset
+2024-03-06 18:13:19.964 | INFO | component.dataset:__init__:19 - Loading data: ./data/train.jsonl
+2024-03-06 18:13:20.065 | INFO | component.dataset:__init__:22 - Use template "baichuan2" for training
+2024-03-06 18:13:20.066 | INFO | component.dataset:__init__:23 - There are 7720 data in dataset
+2024-03-06 18:13:20.816 | INFO | __main__:main:387 - *** starting training ***
+2024-03-06 18:21:14.358 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=1,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=True,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=False,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=3,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar06_18-21-14_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=200,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=500,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-06 18:21:14.361 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=1,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=True,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=False,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=1,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar06_18-21-14_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=200,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=500,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-06 18:21:14.361 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=1,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=True,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=False,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar06_18-21-14_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=200,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=500,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-06 18:21:14.364 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=1,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=True,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=False,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=2,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar06_18-21-14_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=200,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=500,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-06 18:21:14.393 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 18:21:14.393 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 18:21:14.393 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 18:21:14.393 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-06 18:21:14.919 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-06 18:21:14.921 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-06 18:21:14.921 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-06 18:21:14.921 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-06 18:21:14.921 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-06 18:21:14.921 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-06 18:21:14.922 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-06 18:21:14.922 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-06 18:21:14.923 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-06 18:21:14.926 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-06 18:21:14.926 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-06 18:21:14.927 | INFO | __main__:load_model:221 - Train model with qlora
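+Unlike the earlier entries (a single dump with _n_gpu=4, local_rank=0), the 18:21:14 group above is four interleaved dumps, one per local_rank with _n_gpu=1 each: the signature of a multi-process DDP launch rather than a single-process multi-GPU run, presumably along the lines of (exact command and script name are assumptions):
+
+    torchrun --nproc_per_node=4 train.py ...
+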
+2024-03-07 02:58:39.260 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=1,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=True,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=False,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar07_02-58-39_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=200,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=500,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-07 02:58:39.265 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=1,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=True,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=False,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=2,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar07_02-58-39_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=200,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=500,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-07 02:58:39.273 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=1,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=True,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=False,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=3,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar07_02-58-39_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=200,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=500,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-07 02:58:39.285 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=1,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=True,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=False,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=1,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar07_02-58-39_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=200,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=500,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-07 02:58:39.309 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-07 02:58:39.309 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-07 02:58:39.309 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-07 02:58:39.309 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-07 02:58:39.812 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-07 02:58:39.813 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-07 02:58:39.814 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-07 02:58:39.818 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-07 02:58:39.819 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-07 02:58:39.819 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-07 02:58:39.820 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-07 02:58:39.820 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-07 02:58:39.821 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-07 02:58:39.821 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-07 02:58:39.821 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-07 02:58:39.821 | INFO | __main__:load_model:221 - Train model with qlora
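+A sketch of the "Train model with qlora" path these entries describe, combining transformers 4-bit loading with a PEFT LoRA wrapper; the nf4/double-quant settings are the usual QLoRA choices, assumed rather than read out of this log, and rank/alpha/dropout are omitted:
+
+    import torch
+    from transformers import AutoModelForCausalLM, BitsAndBytesConfig
+    from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
+
+    bnb_config = BitsAndBytesConfig(
+        load_in_4bit=True,
+        bnb_4bit_quant_type="nf4",
+        bnb_4bit_use_double_quant=True,
+        bnb_4bit_compute_dtype=torch.float16,
+    )
+    model = AutoModelForCausalLM.from_pretrained(
+        "/home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat",
+        quantization_config=bnb_config,
+        trust_remote_code=True,  # Baichuan2 ships custom modeling code
+    )
+    model = prepare_model_for_kbit_training(model)
+    lora_config = LoraConfig(
+        task_type="CAUSAL_LM",
+        target_modules=find_all_linear_names(model),  # ['W_pack', 'o_proj', ...] per the log
+    )
+    model = get_peft_model(model, lora_config)
+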
+2024-03-07 03:31:34.586 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=4,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar07_03-31-34_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=200,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=500,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-07 03:31:34.637 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-07 03:31:35.144 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-07 03:31:35.144 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-07 03:31:35.144 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-07 03:34:13.120 | INFO | __main__:find_all_linear_names:85 - LoRA target module names: ['o_proj', 'down_proj', 'gate_proj', 'up_proj', 'W_pack']
+2024-03-07 03:36:24.122 | INFO | __main__:load_model:283 - memory footprint of model: 10.72000503540039 GB
+2024-03-07 03:36:24.143 | INFO | __main__:load_model:295 - Total model params: 7606.05M
+2024-03-07 03:36:24.143 | INFO | __main__:init_components:349 - Train model with sft task
+2024-03-07 03:36:24.143 | INFO | __main__:load_sft_dataset:315 - Loading data with UnifiedSFTDataset
+2024-03-07 03:36:24.144 | INFO | component.dataset:__init__:19 - Loading data: ./data/train.jsonl
+2024-03-07 03:36:24.250 | INFO | component.dataset:__init__:22 - Use template "baichuan2" for training
+2024-03-07 03:36:24.250 | INFO | component.dataset:__init__:23 - There are 7720 data in dataset
+2024-03-07 03:36:24.475 | INFO | __main__:main:387 - *** starting training ***
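+With the arguments, quantized model and dataset above in hand, the "*** starting training ***" step reduces to standard Trainer wiring; a sketch reusing the names from the earlier sketches (the collator is an assumption — an SFT dataset of this kind needs one that pads batches and masks prompt tokens out of the loss):
+
+    from transformers import Trainer
+
+    trainer = Trainer(
+        model=model,                  # the QLoRA-wrapped Baichuan2-13B-Chat
+        args=train_args,              # the TrainingArguments dumped above
+        train_dataset=train_dataset,  # UnifiedSFTDataset over ./data/train.jsonl
+        data_collator=data_collator,
+    )
+    trainer.train()
+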
+2024-03-07 03:49:43.178 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=4,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=1,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar07_03-49-43_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=200,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=500,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
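+By this run two knobs have moved relative to the Mar06 entries: precision flipped from bf16 to fp16 (bf16=False, fp16=True, first seen in the 03:31:34 dump), and gradient_accumulation_steps dropped from 16 to 1, which shrinks the effective global batch size from 4 GPUs × 1 per device × 16 accumulation steps = 64 down to 4 × 1 × 1 = 4.
+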
+2024-03-07 03:49:43.225 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-07 03:49:43.702 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-07 03:49:43.702 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-07 03:49:43.702 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-07 03:53:15.915 | INFO | __main__:find_all_linear_names:85 - LoRA target module names: ['W_pack', 'o_proj', 'up_proj', 'down_proj', 'gate_proj']
+2024-03-07 03:55:27.542 | INFO | __main__:load_model:283 - memory footprint of model: 10.72000503540039 GB
+2024-03-07 03:55:27.605 | INFO | __main__:load_model:295 - Total model params: 7606.05M
+2024-03-07 03:55:27.606 | INFO | __main__:init_components:349 - Train model with sft task
+2024-03-07 03:55:27.606 | INFO | __main__:load_sft_dataset:315 - Loading data with UnifiedSFTDataset
+2024-03-07 03:55:27.606 | INFO | component.dataset:__init__:19 - Loading data: ./data/train.jsonl
+2024-03-07 03:55:27.831 | INFO | component.dataset:__init__:22 - Use template "baichuan2" for training
+2024-03-07 03:55:27.831 | INFO | component.dataset:__init__:23 - There are 7720 data in dataset
+2024-03-07 03:55:28.564 | INFO | __main__:main:387 - *** starting training ***
+2024-03-07 07:52:18.910 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=4,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=1,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar07_07-52-18_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=200,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=500,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-07 07:52:19.058 | INFO | __main__:init_components:333 - Initializing components...
+2024-03-07 07:52:19.748 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-07 07:52:19.748 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-07 07:52:19.748 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-07 07:55:27.178 | INFO | __main__:find_all_linear_names:85 - LoRA target module names: ['down_proj', 'W_pack', 'o_proj', 'up_proj', 'gate_proj']
+2024-03-07 07:57:37.681 | INFO | __main__:load_model:283 - memory footprint of model: 10.72000503540039 GB
+2024-03-07 07:57:37.694 | INFO | __main__:load_model:295 - Total model params: 7606.05M
+2024-03-07 07:57:37.694 | INFO | __main__:init_components:349 - Train model with sft task
+2024-03-07 07:57:37.694 | INFO | __main__:load_sft_dataset:315 - Loading data with UnifiedSFTDataset
+2024-03-07 07:57:37.694 | INFO | component.dataset:__init__:19 - Loading data: ./data/train.jsonl
+2024-03-07 07:57:37.909 | INFO | component.dataset:__init__:22 - Use template "baichuan2" for training
+2024-03-07 07:57:37.909 | INFO | component.dataset:__init__:23 - There are 7720 data in dataset
+2024-03-07 07:57:38.206 | INFO | __main__:main:387 - *** starting training ***
+2024-03-07 08:49:42.226 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=4,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=1,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar07_08-49-42_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=200,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=500,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-07 08:49:42.257 | INFO | __main__:init_components:334 - Initializing components...
+2024-03-07 08:49:42.738 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-07 08:49:42.738 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-07 08:49:42.738 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-07 08:52:19.758 | INFO | __main__:find_all_linear_names:85 - LoRA target module names: ['o_proj', 'up_proj', 'down_proj', 'W_pack', 'gate_proj']
+2024-03-07 08:54:32.900 | INFO | __main__:load_model:284 - memory footprint of model: 10.875873565673828 GB
+2024-03-07 08:54:32.913 | INFO | __main__:load_model:296 - Total model params: 7647.89M
+2024-03-07 08:54:32.913 | INFO | __main__:init_components:350 - Train model with sft task
+2024-03-07 08:54:32.913 | INFO | __main__:load_sft_dataset:316 - Loading data with UnifiedSFTDataset
+2024-03-07 08:54:32.913 | INFO | component.dataset:__init__:19 - Loading data: ./data/train.jsonl
+2024-03-07 08:54:33.037 | INFO | component.dataset:__init__:22 - Use template "baichuan2" for training
+2024-03-07 08:54:33.037 | INFO | component.dataset:__init__:23 - There are 7720 data in dataset
+2024-03-07 08:54:33.340 | INFO | __main__:main:388 - *** starting training ***
+2024-03-07 09:05:50.446 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=4,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=1,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar07_09-05-50_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=200,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=500,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-07 09:05:50.465 | INFO | __main__:init_components:334 - Initializing components...
+2024-03-07 09:05:50.907 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-07 09:05:50.907 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-07 09:05:50.907 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-07 09:09:20.856 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=4,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=1,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar07_09-09-20_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=200,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=500,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-07 09:09:20.870 | INFO | __main__:init_components:334 - Initializing components...
+2024-03-07 09:09:21.431 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-07 09:09:21.431 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-07 09:09:21.432 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-07 09:13:11.015 | INFO | __main__:find_all_linear_names:85 - LoRA target module names: ['o_proj', 'down_proj', 'W_pack', 'up_proj', 'gate_proj']
+2024-03-07 09:15:23.202 | INFO | __main__:load_model:284 - memory footprint of model: 11.083698272705078 GB
+2024-03-07 09:15:23.213 | INFO | __main__:load_model:296 - Total model params: 7703.68M
+2024-03-07 09:15:23.214 | INFO | __main__:init_components:350 - Train model with sft task
+2024-03-07 09:15:23.214 | INFO | __main__:load_sft_dataset:316 - Loading data with UnifiedSFTDataset
+2024-03-07 09:15:23.214 | INFO | component.dataset:__init__:19 - Loading data: ./data/train.jsonl
+2024-03-07 09:15:23.367 | INFO | component.dataset:__init__:22 - Use template "baichuan2" for training
+2024-03-07 09:15:23.367 | INFO | component.dataset:__init__:23 - There are 7720 data in dataset
+2024-03-07 09:15:23.679 | INFO | __main__:main:388 - *** starting training ***
+2024-03-07 10:05:57.292 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=4,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=1,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar07_10-05-57_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=200,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=500,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-07 10:05:57.302 | INFO | __main__:init_components:334 - Initializing components...
+2024-03-07 10:05:57.768 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-07 10:05:57.769 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-07 10:05:57.770 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-07 10:08:54.041 | INFO | __main__:find_all_linear_names:85 - LoRA target module names: ['down_proj', 'o_proj', 'W_pack', 'gate_proj', 'up_proj']
+2024-03-07 10:11:05.358 | INFO | __main__:load_model:284 - memory footprint of model: 11.083698272705078 GB
+2024-03-07 10:11:05.385 | INFO | __main__:load_model:296 - Total model params: 7703.68M
+2024-03-07 10:11:05.390 | INFO | __main__:init_components:350 - Train model with sft task
+2024-03-07 10:11:05.391 | INFO | __main__:load_sft_dataset:316 - Loading data with UnifiedSFTDataset
+2024-03-07 10:11:05.391 | INFO | component.dataset:__init__:19 - Loading data: ./data/train.jsonl
+2024-03-07 10:11:05.544 | INFO | component.dataset:__init__:22 - Use template "baichuan2" for training
+2024-03-07 10:11:05.545 | INFO | component.dataset:__init__:23 - There are 7720 data in dataset
+2024-03-07 10:11:06.795 | INFO | __main__:main:388 - *** starting training ***
+2024-03-07 10:34:30.258 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=4,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=1,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar07_10-34-30_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=200,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=500,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-07 10:34:30.290 | INFO | __main__:init_components:334 - Initializing components...
+2024-03-07 10:34:30.732 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-07 10:34:30.732 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-07 10:34:30.733 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-07 10:35:46.474 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=4,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=1,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar07_10-35-46_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=200,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=500,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-07 10:35:46.476 | INFO | __main__:init_components:334 - Initializing components...
+2024-03-07 10:35:46.857 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-07 10:35:46.857 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-07 10:35:46.857 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-07 10:36:26.464 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=4,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar07_10-36-26_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=200,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=500,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-07 10:36:26.465 | INFO | __main__:init_components:334 - Initializing components...
+2024-03-07 10:36:26.844 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-07 10:36:26.844 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-07 10:36:26.845 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-07 10:39:05.123 | INFO | __main__:find_all_linear_names:85 - LoRA target module names: ['up_proj', 'W_pack', 'gate_proj', 'o_proj', 'down_proj']
+2024-03-07 10:41:40.778 | INFO | __main__:load_model:284 - memory footprint of model: 11.083698272705078 GB
+2024-03-07 10:41:40.807 | INFO | __main__:load_model:296 - Total model params: 7703.68M
+2024-03-07 10:41:40.807 | INFO | __main__:init_components:350 - Train model with sft task
+2024-03-07 10:41:40.808 | INFO | __main__:load_sft_dataset:316 - Loading data with UnifiedSFTDataset
+2024-03-07 10:41:40.808 | INFO | component.dataset:__init__:19 - Loading data: ./data/train.jsonl
+2024-03-07 10:41:41.037 | INFO | component.dataset:__init__:22 - Use template "baichuan2" for training
+2024-03-07 10:41:41.037 | INFO | component.dataset:__init__:23 - There are 7720 data in dataset
+2024-03-07 10:41:42.979 | INFO | __main__:main:388 - *** starting training ***
+2024-03-07 10:46:02.696 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=4,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar07_10-46-02_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=200,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=2,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=500,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-07 10:46:02.706 | INFO | __main__:init_components:334 - Initializing components...
+2024-03-07 10:46:03.241 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-07 10:46:03.243 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-07 10:46:03.244 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-07 10:48:59.776 | INFO | __main__:find_all_linear_names:85 - LoRA target module names: ['o_proj', 'W_pack', 'up_proj', 'gate_proj', 'down_proj']
+2024-03-07 10:51:13.440 | INFO | __main__:load_model:284 - memory footprint of model: 11.083698272705078 GB
+2024-03-07 10:51:13.467 | INFO | __main__:load_model:296 - Total model params: 7703.68M
+2024-03-07 10:51:13.467 | INFO | __main__:init_components:350 - Train model with sft task
+2024-03-07 10:51:13.468 | INFO | __main__:load_sft_dataset:316 - Loading data with UnifiedSFTDataset
+2024-03-07 10:51:13.468 | INFO | component.dataset:__init__:19 - Loading data: ./data/train.jsonl
+2024-03-07 10:51:13.576 | INFO | component.dataset:__init__:22 - Use template "baichuan2" for training
+2024-03-07 10:51:13.577 | INFO | component.dataset:__init__:23 - There are 7720 data in dataset
+2024-03-07 10:51:14.237 | INFO | __main__:main:388 - *** starting training ***
+2024-03-07 10:55:29.646 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=4,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar07_10-55-29_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=200,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=500,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-07 10:55:29.660 | INFO | __main__:init_components:334 - Initializing components...
+2024-03-07 10:55:30.232 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-07 10:55:30.233 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-07 10:55:30.233 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-07 10:58:47.891 | INFO | __main__:find_all_linear_names:85 - LoRA target module names: ['W_pack', 'down_proj', 'up_proj', 'o_proj', 'gate_proj']
+2024-03-07 11:00:58.999 | INFO | __main__:load_model:284 - memory footprint of model: 11.083698272705078 GB
+2024-03-07 11:00:59.018 | INFO | __main__:load_model:296 - Total model params: 7703.68M
+2024-03-07 11:00:59.019 | INFO | __main__:init_components:350 - Train model with sft task
+2024-03-07 11:00:59.019 | INFO | __main__:load_sft_dataset:316 - Loading data with UnifiedSFTDataset
+2024-03-07 11:00:59.019 | INFO | component.dataset:__init__:19 - Loading data: ./data/train.jsonl
+2024-03-07 11:00:59.169 | INFO | component.dataset:__init__:22 - Use template "baichuan2" for training
+2024-03-07 11:00:59.169 | INFO | component.dataset:__init__:23 - There are 7720 data in dataset
+2024-03-07 11:01:00.013 | INFO | __main__:main:388 - *** starting training ***
+2024-03-08 02:30:06.655 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=4,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar08_02-30-06_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=100,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=100,
+save_strategy=steps,
+save_total_limit=3,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=100,
+weight_decay=0,
+)
+2024-03-08 02:30:06.661 | INFO | __main__:init_components:334 - Initializing components...
+2024-03-08 02:30:07.096 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-08 02:30:07.097 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-08 02:30:07.097 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-08 02:32:45.873 | INFO | __main__:find_all_linear_names:85 - LoRA target module names: ['gate_proj', 'up_proj', 'W_pack', 'down_proj', 'o_proj']
+2024-03-08 02:34:55.868 | INFO | __main__:load_model:284 - memory footprint of model: 11.083698272705078 GB
+2024-03-08 02:34:55.879 | INFO | __main__:load_model:296 - Total model params: 7703.68M
+2024-03-08 02:34:55.880 | INFO | __main__:init_components:350 - Train model with sft task
+2024-03-08 02:34:55.880 | INFO | __main__:load_sft_dataset:316 - Loading data with UnifiedSFTDataset
+2024-03-08 02:34:55.881 | INFO | component.dataset:__init__:19 - Loading data: ./data/train.jsonl
+2024-03-08 02:34:56.008 | INFO | component.dataset:__init__:22 - Use template "baichuan2" for training
+2024-03-08 02:34:56.008 | INFO | component.dataset:__init__:23 - There are 7720 data in dataset
+2024-03-08 02:34:56.038 | INFO | __main__:main:388 - *** starting training ***
+2024-03-08 07:54:06.009 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=4,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar08_07-54-06_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=10,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=100,
+save_strategy=steps,
+save_total_limit=3,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=50,
+weight_decay=0,
+)
+2024-03-08 07:54:06.036 | INFO | __main__:init_components:334 - Initializing components...
+2024-03-08 07:54:06.447 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-08 07:54:06.448 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-08 07:54:06.448 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-08 07:56:49.939 | INFO | __main__:find_all_linear_names:85 - LoRA target module names: ['o_proj', 'down_proj', 'up_proj', 'gate_proj', 'W_pack']
+2024-03-08 07:59:01.455 | INFO | __main__:load_model:284 - memory footprint of model: 11.083698272705078 GB
+2024-03-08 07:59:01.470 | INFO | __main__:load_model:296 - Total model params: 7703.68M
+2024-03-08 07:59:01.470 | INFO | __main__:init_components:350 - Train model with sft task
+2024-03-08 07:59:01.470 | INFO | __main__:load_sft_dataset:316 - Loading data with UnifiedSFTDataset
+2024-03-08 07:59:01.470 | INFO | component.dataset:__init__:19 - Loading data: ./data/train.jsonl
+2024-03-08 07:59:01.614 | INFO | component.dataset:__init__:22 - Use template "baichuan2" for training
+2024-03-08 07:59:01.615 | INFO | component.dataset:__init__:23 - There are 7720 data in dataset
+2024-03-08 07:59:02.224 | INFO | __main__:main:388 - *** starting training ***
+2024-03-09 11:53:09.770 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=4,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar09_11-53-09_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=10,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=100,
+save_strategy=steps,
+save_total_limit=3,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=50,
+weight_decay=0,
+)
+2024-03-09 11:53:09.801 | INFO | __main__:init_components:334 - Initializing components...
+2024-03-09 11:53:10.289 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-09 11:53:10.290 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-09 11:53:10.290 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-09 11:55:46.092 | INFO | __main__:find_all_linear_names:85 - LoRA target module names: ['o_proj', 'down_proj', 'W_pack', 'gate_proj', 'up_proj']
+2024-03-09 11:57:56.297 | INFO | __main__:load_model:284 - memory footprint of model: 11.083698272705078 GB
+2024-03-09 11:57:56.308 | INFO | __main__:load_model:296 - Total model params: 7703.68M
+2024-03-09 11:57:56.309 | INFO | __main__:init_components:350 - Train model with sft task
+2024-03-09 11:57:56.309 | INFO | __main__:load_sft_dataset:316 - Loading data with UnifiedSFTDataset
+2024-03-09 11:57:56.309 | INFO | component.dataset:__init__:19 - Loading data: ./data/train.jsonl
+2024-03-09 11:57:56.424 | INFO | component.dataset:__init__:22 - Use template "baichuan2" for training
+2024-03-09 11:57:56.425 | INFO | component.dataset:__init__:23 - There are 7720 data in dataset
+2024-03-09 11:57:56.469 | INFO | __main__:main:388 - *** starting training ***
+2024-03-10 00:56:39.151 | INFO | __main__:setup_everything:52 - train_args:TrainingArguments(
+_n_gpu=4,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+bf16=False,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=False,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_steps=None,
+evaluation_strategy=no,
+fp16=True,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+gradient_accumulation_steps=16,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs=None,
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+jit_mode_eval=False,
+label_names=None,
+label_smoothing_factor=0.0,
+learning_rate=0.0001,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=passive,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=output/user-baichuan2-13b-v2-3.6/runs/Mar10_00-56-39_u,
+logging_first_step=False,
+logging_nan_inf_filter=True,
+logging_steps=10,
+logging_strategy=steps,
+lr_scheduler_kwargs={},
+lr_scheduler_type=constant_with_warmup,
+max_grad_norm=0.3,
+max_steps=-1,
+metric_for_best_model=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=paged_adamw_32bit,
+optim_args=None,
+output_dir=output/user-baichuan2-13b-v2-3.6,
+overwrite_output_dir=False,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=1,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=False,
+report_to=['tensorboard'],
+resume_from_checkpoint=None,
+run_name=output/user-baichuan2-13b-v2-3.6,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=100,
+save_strategy=steps,
+save_total_limit=3,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=50,
+weight_decay=0,
+)
+2024-03-10 00:56:39.157 | INFO | __main__:init_components:334 - Initializing components...
+2024-03-10 00:56:39.568 | INFO | __main__:load_tokenizer:211 - vocab_size of tokenizer: 125696
+2024-03-10 00:56:39.568 | INFO | __main__:load_model:220 - Loading model from base model: /home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat
+2024-03-10 00:56:39.569 | INFO | __main__:load_model:221 - Train model with qlora
+2024-03-10 00:59:13.672 | INFO | __main__:find_all_linear_names:85 - LoRA target module names: ['o_proj', 'W_pack', 'down_proj', 'up_proj', 'gate_proj']
+2024-03-10 01:01:23.156 | INFO | __main__:load_model:284 - memory footprint of model: 10.875873565673828 GB
+2024-03-10 01:01:23.167 | INFO | __main__:load_model:296 - Total model params: 7647.89M
+2024-03-10 01:01:23.168 | INFO | __main__:init_components:350 - Train model with sft task
+2024-03-10 01:01:23.168 | INFO | __main__:load_sft_dataset:316 - Loading data with UnifiedSFTDataset
+2024-03-10 01:01:23.168 | INFO | component.dataset:__init__:19 - Loading data: ./data/train.jsonl
+2024-03-10 01:01:23.306 | INFO | component.dataset:__init__:22 - Use template "baichuan2" for training
+2024-03-10 01:01:23.307 | INFO | component.dataset:__init__:23 - There are 7720 data in dataset
+2024-03-10 01:01:23.869 | INFO | __main__:main:388 - *** starting training ***
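
> Note: every run above trains a QLoRA adapter on top of Baichuan2-13B-Chat ("Train model with qlora"). A minimal usage sketch for loading the resulting adapter follows; the 4-bit quantization settings are assumptions based on standard QLoRA practice, and the paths mirror the ones in the logs.

```python
# Minimal loading sketch (assumptions: standard QLoRA 4-bit settings; paths
# taken from the training logs). Not an official script for this adapter.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

base = "/home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat"
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                      # assumed: same 4-bit setup as training
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.float16,
)
model = AutoModelForCausalLM.from_pretrained(
    base, quantization_config=bnb_config, trust_remote_code=True, device_map="auto"
)
model = PeftModel.from_pretrained(model, "user-baichuan2-13b-v2-3.6")
tokenizer = AutoTokenizer.from_pretrained(base, trust_remote_code=True)
```
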
diff --git a/user-baichuan2-13b-v2-3.6/train_args.json b/user-baichuan2-13b-v2-3.6/train_args.json
new file mode 100644
index 0000000000000000000000000000000000000000..884833c985247750f8ba279be0b62a4bab035c33
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/train_args.json
@@ -0,0 +1,31 @@
+{
+ "output_dir": "output/user-baichuan2-13b-v2-3.6",
+ "model_name_or_path": "/home/jiakangxiang/.cache/modelscope/hub/baichuan-inc/Baichuan2-13B-Chat",
+ "train_file": "./data/train.jsonl",
+ "template_name": "baichuan2",
+ "num_train_epochs": 1,
+ "per_device_train_batch_size": 1,
+ "gradient_accumulation_steps": 16,
+ "learning_rate": 0.0001,
+ "max_seq_length": 3200,
+ "logging_steps": 10,
+ "save_steps": 100,
+ "save_total_limit": 3,
+ "lr_scheduler_type": "constant_with_warmup",
+ "warmup_steps": 50,
+ "lora_rank": 16,
+ "lora_alpha": 16,
+ "lora_dropout": 0.05,
+ "gradient_checkpointing": true,
+ "disable_tqdm": false,
+ "optim": "paged_adamw_32bit",
+ "seed": 42,
+ "fp16": true,
+ "bf16": false,
+ "report_to": "tensorboard",
+ "dataloader_num_workers": 0,
+ "save_strategy": "steps",
+ "weight_decay": 0,
+ "max_grad_norm": 0.3,
+ "remove_unused_columns": false
+}
\ No newline at end of file
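
> Note: train_args.json mixes standard `TrainingArguments` fields with script-specific ones (`model_name_or_path`, `train_file`, `template_name`, `max_seq_length`, `lora_*`). A minimal sketch of splitting such a file with `HfArgumentParser` is shown below; `CustomArguments` is a hypothetical stand-in for the training script's own argument class.

```python
# Sketch only: CustomArguments is hypothetical; the training script's real
# argument class may differ in names and defaults.
from dataclasses import dataclass
from typing import Optional
from transformers import HfArgumentParser, TrainingArguments


@dataclass
class CustomArguments:
    model_name_or_path: Optional[str] = None
    train_file: Optional[str] = None
    template_name: str = "baichuan2"
    max_seq_length: int = 3200
    lora_rank: int = 16
    lora_alpha: int = 16
    lora_dropout: float = 0.05


parser = HfArgumentParser((CustomArguments, TrainingArguments))
# parse_json_file routes each JSON key to the dataclass that declares it
custom_args, training_args = parser.parse_json_file(
    "user-baichuan2-13b-v2-3.6/train_args.json"
)
print(training_args.learning_rate, custom_args.lora_rank)  # 0.0001 16
```
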
diff --git a/user-baichuan2-13b-v2-3.6/train_results.json b/user-baichuan2-13b-v2-3.6/train_results.json
new file mode 100644
index 0000000000000000000000000000000000000000..66781f3367a3cc573706a06639c9eb0c9e2d9855
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/train_results.json
@@ -0,0 +1,7 @@
+{
+ "epoch": 1.0,
+ "train_loss": 0.5017403132133569,
+ "train_runtime": 75900.5046,
+ "train_samples_per_second": 0.102,
+ "train_steps_per_second": 0.006
+}
\ No newline at end of file
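
> Note: the figures in train_results.json line up with the final run's hyperparameters (per-device batch 1, gradient accumulation 16) and the step count recorded in trainer_state.json below. A quick arithmetic sanity check:

```python
# Sanity check relating train_results.json to the logged hyperparameters.
# The step math implies 16 samples per optimizer step in total, i.e. the
# gradient-accumulation factor alone, with no extra multi-GPU multiplier.
samples = 7720            # "There are 7720 data in dataset"
effective_batch = 1 * 16  # per_device_train_batch_size * gradient_accumulation_steps
runtime = 75900.5046      # train_runtime in seconds (~21.1 hours)
global_step = 482         # from trainer_state.json

steps_per_epoch = samples / effective_batch              # 482.5
print(global_step / steps_per_epoch)                     # 0.99896..., the reported epoch
print(round(global_step * effective_batch / runtime, 3)) # 0.102 train_samples_per_second
print(round(global_step / runtime, 3))                   # 0.006 train_steps_per_second
```
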
diff --git a/user-baichuan2-13b-v2-3.6/trainer_state.json b/user-baichuan2-13b-v2-3.6/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..8137c4c26067a18eb27de77d760fca263c72e629
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/trainer_state.json
@@ -0,0 +1,366 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.9989637305699481,
+ "eval_steps": 500,
+ "global_step": 482,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.02,
+ "grad_norm": 4.99941873550415,
+ "learning_rate": 2e-05,
+ "loss": 9.9329,
+ "step": 10
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 1.741065502166748,
+ "learning_rate": 4e-05,
+ "loss": 11.0746,
+ "step": 20
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 1.4727320671081543,
+ "learning_rate": 6e-05,
+ "loss": 2.7159,
+ "step": 30
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.1335960477590561,
+ "learning_rate": 8e-05,
+ "loss": 0.3969,
+ "step": 40
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.0014472692273557186,
+ "learning_rate": 0.0001,
+ "loss": 0.0032,
+ "step": 50
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.0010780546581372619,
+ "learning_rate": 0.0001,
+ "loss": 0.0002,
+ "step": 60
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 1.03132963180542,
+ "learning_rate": 0.0001,
+ "loss": 0.0002,
+ "step": 70
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.008827299810945988,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 80
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.0002956670359708369,
+ "learning_rate": 0.0001,
+ "loss": 0.0001,
+ "step": 90
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.0003419867134653032,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 100
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.0003681881644297391,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 110
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.0002884200366679579,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 120
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.00011985149467363954,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 130
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.0003195986500941217,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 140
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.00010149635636480525,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 150
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.00010508792183827609,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 160
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.00011793687008321285,
+ "learning_rate": 0.0001,
+ "loss": 0.006,
+ "step": 170
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 8.076676749624312e-05,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 180
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.0007808339432813227,
+ "learning_rate": 0.0001,
+ "loss": 0.006,
+ "step": 190
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.11711683869361877,
+ "learning_rate": 0.0001,
+ "loss": 0.003,
+ "step": 200
+ },
+ {
+ "epoch": 0.44,
+ "grad_norm": 0.0002039404644165188,
+ "learning_rate": 0.0001,
+ "loss": 0.0001,
+ "step": 210
+ },
+ {
+ "epoch": 0.46,
+ "grad_norm": 0.00873592495918274,
+ "learning_rate": 0.0001,
+ "loss": 0.0209,
+ "step": 220
+ },
+ {
+ "epoch": 0.48,
+ "grad_norm": 3.0506539344787598,
+ "learning_rate": 0.0001,
+ "loss": 0.0201,
+ "step": 230
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.05903371796011925,
+ "learning_rate": 0.0001,
+ "loss": 0.0026,
+ "step": 240
+ },
+ {
+ "epoch": 0.52,
+ "grad_norm": 0.0002484666183590889,
+ "learning_rate": 0.0001,
+ "loss": 0.0001,
+ "step": 250
+ },
+ {
+ "epoch": 0.54,
+ "grad_norm": 0.0003493047261144966,
+ "learning_rate": 0.0001,
+ "loss": 0.0001,
+ "step": 260
+ },
+ {
+ "epoch": 0.56,
+ "grad_norm": 0.0008058947860263288,
+ "learning_rate": 0.0001,
+ "loss": 0.0001,
+ "step": 270
+ },
+ {
+ "epoch": 0.58,
+ "grad_norm": 0.0004198936221655458,
+ "learning_rate": 0.0001,
+ "loss": 0.0001,
+ "step": 280
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.0002983050071634352,
+ "learning_rate": 0.0001,
+ "loss": 0.0001,
+ "step": 290
+ },
+ {
+ "epoch": 0.62,
+ "grad_norm": 0.0002279053587699309,
+ "learning_rate": 0.0001,
+ "loss": 0.0001,
+ "step": 300
+ },
+ {
+ "epoch": 0.64,
+ "grad_norm": 0.00015332824841607362,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 310
+ },
+ {
+ "epoch": 0.66,
+ "grad_norm": 0.00011723622446879745,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 320
+ },
+ {
+ "epoch": 0.68,
+ "grad_norm": 0.0001235378731507808,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 330
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.00010625163122313097,
+ "learning_rate": 0.0001,
+ "loss": 0.0001,
+ "step": 340
+ },
+ {
+ "epoch": 0.73,
+ "grad_norm": 7.50239341869019e-05,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 350
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.00010148331784876063,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 360
+ },
+ {
+ "epoch": 0.77,
+ "grad_norm": 7.368126534856856e-05,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 370
+ },
+ {
+ "epoch": 0.79,
+ "grad_norm": 0.00012744461128022522,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 380
+ },
+ {
+ "epoch": 0.81,
+ "grad_norm": 5.87971335335169e-05,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 390
+ },
+ {
+ "epoch": 0.83,
+ "grad_norm": 6.405858584912494e-05,
+ "learning_rate": 0.0001,
+ "loss": 0.0001,
+ "step": 400
+ },
+ {
+ "epoch": 0.85,
+ "grad_norm": 9.752299229148775e-05,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 410
+ },
+ {
+ "epoch": 0.87,
+ "grad_norm": 4.5175151171861216e-05,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 420
+ },
+ {
+ "epoch": 0.89,
+ "grad_norm": 0.00013234459038358182,
+ "learning_rate": 0.0001,
+ "loss": 0.0001,
+ "step": 430
+ },
+ {
+ "epoch": 0.91,
+ "grad_norm": 0.00013048920664004982,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 440
+ },
+ {
+ "epoch": 0.93,
+ "grad_norm": 0.0004233328509144485,
+ "learning_rate": 0.0001,
+ "loss": 0.0001,
+ "step": 450
+ },
+ {
+ "epoch": 0.95,
+ "grad_norm": 0.00019652876653708518,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 460
+ },
+ {
+ "epoch": 0.97,
+ "grad_norm": 0.000157060450874269,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 470
+ },
+ {
+ "epoch": 0.99,
+ "grad_norm": 0.00014773521979805082,
+ "learning_rate": 0.0001,
+ "loss": 0.0,
+ "step": 480
+ },
+ {
+ "epoch": 1.0,
+ "step": 482,
+ "total_flos": 7.979272829443277e+17,
+ "train_loss": 0.5017403132133569,
+ "train_runtime": 75900.5046,
+ "train_samples_per_second": 0.102,
+ "train_steps_per_second": 0.006
+ }
+ ],
+ "logging_steps": 10,
+ "max_steps": 482,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 1,
+ "save_steps": 100,
+ "total_flos": 7.979272829443277e+17,
+ "train_batch_size": 1,
+ "trial_name": null,
+ "trial_params": null
+}
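
The `log_history` above is the most informative artifact in this diff: the loss starts near 10-11, collapses below 0.01 by step 50, and is essentially zero from step 80 onward, so the averaged `train_loss` of 0.5017 is dominated by the first ~30 steps. A curve this flat this early may be worth inspecting (label masking, duplicated samples) before trusting the adapter. A quick way to eyeball it from the JSON; a sketch, with `matplotlib` as an assumption (any plotting library works):

```python
import json
import matplotlib.pyplot as plt

with open("user-baichuan2-13b-v2-3.6/trainer_state.json") as f:
    state = json.load(f)

# Per-step entries carry a "loss" key; the final summary entry carries
# "train_loss" instead, so this filter drops it automatically.
entries = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in entries]
losses = [e["loss"] for e in entries]

plt.plot(steps, losses, marker="o")
plt.xlabel("optimizer step")
plt.ylabel("training loss")
plt.yscale("symlog", linthresh=1e-3)  # logged values span 11.07 down to 0.0
plt.savefig("loss_curve.png")
```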
diff --git a/user-baichuan2-13b-v2-3.6/training_args.bin b/user-baichuan2-13b-v2-3.6/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..969e0e491f106769e00accc686da59e7e3816367
--- /dev/null
+++ b/user-baichuan2-13b-v2-3.6/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1abab34fc571ab2be46c8abdf765b96b9a09ab4144528e95270f1af465c0f19c
+size 4475
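
`training_args.bin` is tracked with Git LFS, so the diff carries only the pointer file above. Once fetched (`git lfs pull`), it is the pickled `TrainingArguments` object that the `transformers` Trainer saves alongside checkpoints, and it can be inspected as below; a sketch, noting that newer `torch` versions default to `weights_only=True` (so the flag must be passed explicitly) and that unpickling needs a compatible `transformers` install:

```python
import torch

# The file is a pickled transformers.TrainingArguments, not a tensor payload.
args = torch.load(
    "user-baichuan2-13b-v2-3.6/training_args.bin",
    weights_only=False,  # required on torch >= 2.6, where True became the default
)
print(type(args).__name__)  # TrainingArguments
print(args.learning_rate)   # 0.0001, matching train_args.json
```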