PEFT · PyTorch · Safetensors · llama · Generated from Trainer
mtasic85 committed on
Commit 1b46671 · 1 Parent(s): eb046b2

initial version of axolotl-config.yml

Files changed (1):
  1. axolotl-config.yml +72 -0
axolotl-config.yml ADDED
@@ -0,0 +1,72 @@
+ base_model: pints-ai/1.5-Pints-16K-v0.1
+ model_type: AutoModelForCausalLM
+ tokenizer_type: AutoTokenizer
+
+ load_in_8bit: false
+ load_in_4bit: true
+ strict: false
+
+ datasets:
+   - path: tangledgroup/tangled-llama-pints-1.5b-v0.2-dataset
+     type: sharegpt
+     conversation: chatml
+ chat_template: chatml
+ dataset_prepared_path:
+ val_set_size: 0.05
+ output_dir: ./outputs/qlora-out
+
+ adapter: qlora
+ lora_model_dir:
+
+ sequence_len: 16384
+ sample_packing: true
+ pad_to_sequence_len: true
+
+ lora_r: 32
+ lora_alpha: 16
+ lora_dropout: 0.05
+ lora_target_modules:
+ lora_target_linear: true
+ lora_fan_in_fan_out:
+
+ wandb_project:
+ wandb_entity:
+ wandb_watch:
+ wandb_name:
+ wandb_log_model:
+
+ gradient_accumulation_steps: 4
+ micro_batch_size: 2
+ num_epochs: 3
+ optimizer: paged_adamw_32bit
+ # optimizer: adamw_torch_fused
+ lr_scheduler: cosine
+ learning_rate: 0.0002
+
+ train_on_inputs: false
+ group_by_length: false
+ bf16: auto
+ fp16:
+ tf32: false
+
+ gradient_checkpointing: true
+ early_stopping_patience:
+ resume_from_checkpoint:
+ local_rank:
+ logging_steps: 1
+ xformers_attention:
+ flash_attention: true
+
+ loss_watchdog_threshold: 15.0
+ loss_watchdog_patience: 3
+
+ warmup_steps: 10
+ evals_per_epoch: 3
+ eval_table_size:
+ saves_per_epoch: 1
+ debug:
+ deepspeed:
+ weight_decay: 0.0
+ fsdp:
+ fsdp_config:
+ special_tokens:
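
For reference, a config like this is normally consumed by the axolotl CLI. A minimal usage sketch, assuming axolotl and accelerate are installed and the file is saved as axolotl-config.yml (not part of this commit; adjust to your installed axolotl version):

  # optional: tokenize and pack the dataset ahead of training
  python -m axolotl.cli.preprocess axolotl-config.yml

  # launch QLoRA fine-tuning with the settings in this config
  accelerate launch -m axolotl.cli.train axolotl-config.yml

With load_in_4bit: true and adapter: qlora, training produces a PEFT adapter in ./outputs/qlora-out rather than full model weights.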