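# Axolotl QLoRA fine-tuning config for allura-org/TQ2.5-14B-Sugarquill-v1.
# One common way to launch a run with a config like this (filename illustrative):
#   accelerate launch -m axolotl.cli.train control-14b-lora.yaml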
base_model: allura-org/TQ2.5-14B-Sugarquill-v1
strict: false
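# Liger plugin: fused Triton kernels (RoPE, RMSNorm, SwiGLU, fused linear
# cross-entropy) that reduce VRAM use and improve training throughput.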
plugins:
  - axolotl.integrations.liger.LigerPlugin
liger_rope: true
liger_rms_norm: true
liger_swiglu: true
liger_fused_linear_cross_entropy: true
# Output and HuggingFace
hub_model_id: NewEden/control-14b-lora
hf_use_auth_token: true
hub_strategy: "all_checkpoints"
wandb_project: huggingface
wandb_entity:
wandb_name: Control-14B
chat_template: chatml
group_by_length: false
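# All datasets below are ShareGPT-style conversations ("from"/"value" fields)
# rendered with the ChatML template. Loss is computed only on "gpt" turns
# (roles_to_train), and the EOS token closing each trained turn is also
# trained (train_on_eos: turn).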
datasets:
  - path: Nitral-AI/Creative_Writing-ShareGPT
    type: chat_template
    roles_to_train: ["gpt"]
    field_messages: conversations
    message_field_role: from
    message_field_content: value
    train_on_eos: turn
  - path: Nitral-AI/ARES-ShareGPT
    type: chat_template
    chat_template: chatml
    roles_to_train: ["gpt"]
    field_messages: conversations
    message_field_role: from
    message_field_content: value
    train_on_eos: turn
  - path: NewEden/Claude-Instruct-5K
    type: chat_template
    chat_template: chatml
    roles_to_train: ["gpt"]
    field_messages: conversations
    message_field_role: from
    message_field_content: value
    train_on_eos: turn
  - path: NewEden/OpenCAI-ShareGPT
    type: chat_template
    roles_to_train: ["gpt"]
    field_messages: conversations
    message_field_role: from
    message_field_content: value
    train_on_eos: turn
  - path: NewEden/PIPPA-Mega-Filtered
    type: chat_template
    chat_template: chatml
    roles_to_train: ["gpt"]
    field_messages: conversations
    message_field_role: from
    message_field_content: value
    train_on_eos: turn
  - path: NewEden/Roleplay-Logs-Sharegpt-Ngram-cleaned
    type: chat_template
    chat_template: chatml
    roles_to_train: ["gpt"]
    field_messages: conversations
    message_field_role: from
    message_field_content: value
    train_on_eos: turn
  - path: Nitral-AI/Creative_Writing-ShareGPT
    type: chat_template
    chat_template: chatml
    roles_to_train: ["gpt"]
    field_messages: conversations
    message_field_role: from
    message_field_content: value
    train_on_eos: turn
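# Note: Nitral-AI/Creative_Writing-ShareGPT is listed twice above; axolotl
# concatenates dataset entries, so this effectively doubles its sampling
# weight (possibly intentional upsampling).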
#val_set_size: 0.01
#evals_per_epoch: 1
# eval_table_size:
# eval_max_new_tokens: 128
num_epochs: 2
sequence_len: 8192
save_safetensors: true
saves_per_epoch: 2
logging_steps: 1
# Quantization
bf16: auto
fp16:
tf32: false
## For LoRA
load_in_8bit: false
load_in_4bit: true
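# load_in_4bit quantizes the frozen base model via bitsandbytes (typically
# NF4), so only the LoRA adapter weights train in higher precision: QLoRA.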
# LoRA
peft_use_rslora: true
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
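# With rsLoRA enabled above, adapter updates scale as alpha/sqrt(r) =
# 16/sqrt(128) ≈ 1.41, rather than plain LoRA's alpha/r = 16/128 = 0.125.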
lora_dropout: 0.05
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
## if oom
# lora_r: 64
# lora_alpha: 32
# lora_dropout: 0.1
weight_decay: 0.02
max_grad_norm: 1.0
warmup_ratio: 0.05
learning_rate: 0.00002
lr_scheduler: cosine
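# 2e-5 peak LR with cosine decay; warmup_ratio 0.05 spends the first 5% of
# total steps ramping the LR up from zero.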
#lr_scheduler_kwargs:
optimizer: paged_adamw_8bit # usually adamw_torch or paged_adamw_8bit
## Batch Size
gradient_accumulation_steps: 8
micro_batch_size: 1
eval_batch_size: 1
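# Effective train batch = micro_batch_size × gradient_accumulation_steps
# (× n_gpus) = 1 × 8 = 8 sequences per optimizer step on a single GPU.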
# Optimizations
pad_to_sequence_len: true
sample_packing: true
eval_sample_packing: false
flash_attention: true
xformers_attention:
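# "unsloth" selects Unsloth-style offloaded gradient checkpointing for extra
# VRAM savings (assumes this axolotl build ships the integration); it is
# paired with reentrant checkpointing here.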
gradient_checkpointing: "unsloth"
gradient_checkpointing_kwargs:
  use_reentrant: true
local_rank:
early_stopping_patience:
debug:
special_tokens:
  pad_token: <|endoftext|>
  eos_token: <|im_end|>