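# Axolotl config: full-parameter SFT of Delta-Vector/Holland-4B on
# NewEden/CivitAI-SD-Prompts (Stable Diffusion prompt generation;
# `adapter` is left empty below, so no LoRA is applied).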
base_model: Delta-Vector/Holland-4B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer

trust_remote_code: true

load_in_8bit: false
load_in_4bit: false
strict: false
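# Full-precision load (no 8-/4-bit quantization); strict: false tolerates
# non-exact state_dict key matches when loading the checkpoint.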

datasets:
  - path: NewEden/CivitAI-SD-Prompts
    type: alpaca
    conversation: mpt-30b-instruct
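# Note: `conversation:` configures sharegpt-style dataset types; with
# `type: alpaca` it is presumably ignored (kept as-is from the original run).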
chat_template: alpaca

dataset_prepared_path:
val_set_size: 0.02
output_dir: ./outputs/out2
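# Sample packing concatenates multiple short examples into each 8192-token
# training sequence for throughput; eval batches stay unpacked, and inputs
# are padded to the full sequence length so packed bins are uniform.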
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

plugins:
  - axolotl.integrations.liger.LigerPlugin
liger_rope: true
liger_rms_norm: true
liger_swiglu: true
liger_fused_linear_cross_entropy: true
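# The liger_* flags swap in Liger's fused Triton kernels (RoPE, RMSNorm,
# SwiGLU, fused linear + cross-entropy) to cut activation memory and raise
# throughput.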

adapter:
lora_model_dir:
lora_r:
lora_alpha:
lora_dropout:
lora_target_linear: true
lora_fan_in_fan_out:
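# `adapter` is empty, so this run is a full fine-tune; the lora_* fields
# above are inert here.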

wandb_project: SDprompterV2
wandb_entity:
wandb_watch:
wandb_name: SDprompterV2
wandb_log_model:

gradient_accumulation_steps: 32
micro_batch_size: 1
num_epochs: 2
optimizer: adamw_torch
lr_scheduler: cosine
learning_rate: 0.00002
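# Effective batch size = 32 accumulation steps x micro batch 1 = 32 packed
# sequences per optimizer step, per data-parallel rank.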

train_on_inputs: false
group_by_length: false
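# train_on_inputs: false masks prompt tokens out of the loss, so the model is
# trained only on completions.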
bf16: auto
fp16:
tf32: true

gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: false
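# use_reentrant: false selects PyTorch's recommended non-reentrant
# activation-checkpointing implementation.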
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_ratio: 0.05
evals_per_epoch: 4
saves_per_epoch: 1
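# ~5% of total steps are spent on LR warmup; evaluate four times and
# checkpoint once per epoch.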
debug:
weight_decay: 0.05
deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json
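# DeepSpeed ZeRO stage 2 shards optimizer states and gradients across ranks.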
special_tokens:
  pad_token: <|finetune_right_pad_id|>
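# <|finetune_right_pad_id|> is the reserved padding token from the Llama 3.1
# tokenizer family (assumption: Holland-4B inherits a Llama-3.1-derived
# tokenizer).
#
# Typical axolotl launch (the filename here is illustrative):
#   accelerate launch -m axolotl.cli.train sdprompter-v2.yaml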