Delta-Vector committed (verified)
Commit 9bc5ea7 · Parent: 8d7283e

Upload 4Bcook.yml

Files changed (1): 4Bcook.yml (+111 -0)
4Bcook.yml ADDED
@@ -0,0 +1,111 @@
+ base_model: IntervitensInc/Llama-3.1-Minitron-4B-Width-Base-chatml
+ model_type: AutoModelForCausalLM
+ tokenizer_type: AutoTokenizer
+
+ plugins:
+   - axolotl.integrations.liger.LigerPlugin
+ liger_rope: true
+ liger_rms_norm: true
+ liger_swiglu: true
+ liger_fused_linear_cross_entropy: true
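The liger_* switches correspond to Liger-Kernel's model-patching API. A rough standalone sketch of what they enable (assumes the liger-kernel package is installed and that this Minitron base reuses the Llama architecture):

from liger_kernel.transformers import apply_liger_kernel_to_llama

# Patch HF's Llama modules in place with Liger's fused Triton kernels,
# mirroring the liger_rope / liger_rms_norm / liger_swiglu /
# liger_fused_linear_cross_entropy flags above. Must be called before
# the model is instantiated.
apply_liger_kernel_to_llama(
    rope=True,
    rms_norm=True,
    swiglu=True,
    fused_linear_cross_entropy=True,
)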
+
+ load_in_8bit: false
+ load_in_4bit: false
+ strict: false
+
+ datasets:
+   # - path: anthracite-core/c2_logs_32k_mistral-v3_v1.2
+   #   type: sharegpt
+   #   conversation: chatml
+   - path: ./datasets/c2_deduped_32k_mistral-v3_tok_deanon_dsclean_1.2.jsonl
+     type: sharegpt
+     conversation: chatml
+   # - path: anthracite-org/kalo-opus-instruct-22k-no-refusal
+   #   type: sharegpt
+   #   conversation: chatml
+   - path: ./datasets/opus-instruct-22k-no_refusals.jsonl
+     type: sharegpt
+     conversation: chatml
+   # - path: lodrick-the-lafted/kalo-opus-instruct-3k-filtered
+   #   type: sharegpt
+   #   conversation: chatml
+   - path: ./datasets/kalo-3k-filtered.jsonl
+     type: sharegpt
+     conversation: chatml
+   # - path: anthracite-org/nopm_claude_writing_fixed
+   #   type: sharegpt
+   #   conversation: chatml
+   - path: ./datasets/claudewritingNopm.jsonl
+     type: sharegpt
+     conversation: chatml
+   # - path: anthracite-org/kalo_opus_misc_240827
+   #   type: sharegpt
+   #   conversation: chatml
+   - path: ./datasets/kalo_opus_misc_240827.jsonl
+     type: sharegpt
+     conversation: chatml
+   # - path: anthracite-org/kalo_misc_part2
+   #   type: sharegpt
+   #   conversation: chatml
+   - path: ./datasets/kalo_misc_part2.jsonl
+     type: sharegpt
+     conversation: chatml
+   # - path: NewEden/Claude-Instruct-5K
+   #   type: sharegpt
+   #   conversation: chatml
+   - path: ./datasets/5k.jsonl
+     type: sharegpt
+     conversation: chatml
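Each local .jsonl mirrors the commented-out Hub dataset directly above it. With type: sharegpt, every line is expected to hold one conversation in the ShareGPT shape, roughly as below (a minimal sketch; the file name is illustrative):

import json

# One ShareGPT-style conversation: a list of turns, each with a speaker
# ("system", "human", or "gpt") and the turn text.
record = {
    "conversations": [
        {"from": "system", "value": "You are a helpful assistant."},
        {"from": "human", "value": "Summarize ZeRO-2 in one sentence."},
        {"from": "gpt", "value": "ZeRO-2 shards optimizer states and gradients across data-parallel ranks."},
    ]
}

with open("example.jsonl", "a") as f:  # illustrative path
    f.write(json.dumps(record) + "\n")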
+
+ #chat_template: chatml
+ shuffle_merged_datasets: true
+ #default_system_message: "You are an assistant that responds to the user."
+ dataset_prepared_path: ./magnum-22b-data
+ val_set_size: 0.0
+ output_dir: ./22b-fft-out
+
+ sequence_len: 16000
+ sample_packing: true
+ pad_to_sequence_len: true
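sample_packing fills each 16000-token window with several shorter conversations instead of padding each one separately; pad_to_sequence_len then pads only the remainder. A toy illustration of the packing idea (greedy first-fit; axolotl's actual packer differs in detail):

# Pack token sequences into bins of at most `seq_len` total tokens.
def pack(lengths, seq_len=16000):
    bins = []  # each bin is a list of sample indices
    free = []  # remaining token capacity per bin
    for i, n in enumerate(lengths):
        for b, cap in enumerate(free):
            if n <= cap:
                bins[b].append(i)
                free[b] -= n
                break
        else:  # no existing bin fits this sample; open a new one
            bins.append([i])
            free.append(seq_len - n)
    return bins

print(pack([9000, 6000, 4000, 1000]))
# [[0, 1, 3], [2]] -> two packed windows instead of four padded ones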
+
+
+ wandb_project: 4bmagnum
+ wandb_entity:
+ wandb_watch:
+ wandb_name: 4magnum
+ wandb_log_model:
+
+ gradient_accumulation_steps: 32
+ micro_batch_size: 1
+ num_epochs: 2
+ optimizer: adamw_bnb_8bit
+ lr_scheduler: cosine
+ learning_rate: 0.000005
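At micro_batch_size: 1 with 32 accumulation steps, each optimizer step sees 32 packed sequences per GPU, multiplied by the data-parallel world size. A quick sketch of the arithmetic and of the cosine schedule (the 8-GPU world size is an assumption, not part of the config; warmup_steps: 40 is set further down):

import math

GRAD_ACCUM, MICRO_BS, WORLD_SIZE = 32, 1, 8  # world size assumed here
effective_batch = GRAD_ACCUM * MICRO_BS * WORLD_SIZE
print(effective_batch)  # 256 packed 16k-token sequences per optimizer step

def cosine_lr(step, total_steps, warmup_steps=40, peak_lr=5e-6):
    """Linear warmup then cosine decay, matching lr_scheduler: cosine."""
    if step < warmup_steps:
        return peak_lr * step / warmup_steps
    t = (step - warmup_steps) / max(1, total_steps - warmup_steps)
    return 0.5 * peak_lr * (1 + math.cos(math.pi * t))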
+
+ train_on_inputs: false
+ group_by_length: false
+ bf16: auto
+ fp16:
+ tf32: false
+
+ gradient_checkpointing: true
+ early_stopping_patience:
+ resume_from_checkpoint:
+ local_rank:
+ logging_steps: 1
+ xformers_attention:
+ flash_attention: true
+
+ warmup_steps: 40
+ evals_per_epoch:
+ eval_table_size:
+ eval_max_new_tokens:
+ saves_per_epoch: 2
+ debug:
+ #deepspeed: /workspace/axolotl/deepspeed_configs/zero3_bf16.json
+ deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json
+ weight_decay: 0.01
+ fsdp:
+ fsdp_config:
+ special_tokens:
+   pad_token: <|finetune_right_pad_id|>
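<|finetune_right_pad_id|> is one of the reserved special tokens already present in the Llama 3.1 vocabulary, so no new embedding rows are needed. A quick sanity check that the tokenizer resolves it (downloads the tokenizer from the Hub):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(
    "IntervitensInc/Llama-3.1-Minitron-4B-Width-Base-chatml"
)
tok.pad_token = "<|finetune_right_pad_id|>"
print(tok.pad_token_id)  # should be an existing vocab id, not None

With the DeepSpeed ZeRO-2 config referenced above, a run would typically be launched through axolotl's CLI, e.g. accelerate launch -m axolotl.cli.train 4Bcook.yml.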