sedrickkeh committed
Commit f1a4b8c • 1 Parent(s): 8a62cdf
Upload configs.yaml with huggingface_hub
configs.yaml +47 -0
configs.yaml ADDED
@@ -0,0 +1,47 @@
+adam_beta1: '0.9'
+adam_beta2: '0.999'
+assistant_tag: gpt
+bf16: 'True'
+content_tag: value
+cutoff_len: '2048'
+dataset: mlfoundations-dev/oh-dcft-v1.2_no-curation_gpt-4o-mini
+dataset_dir: ONLINE
+ddp_timeout: '180000000'
+deepspeed: /opt/ml/code/zero3.json
+do_train: 'True'
+enable_liger_kernel: 'False'
+eval_strategy: epoch
+finetuning_type: full
+formatting: sharegpt
+global_batch_size: '512'
+gradient_accumulation_steps: '8'
+gradient_checkpointing: 'True'
+hub_model_id: mlfoundations-dev/hp_ablations_mistral_epoch2_dcftv1.2
+learning_rate: 5e-06
+logging_steps: '10'
+lr_scheduler_type: constant
+max_grad_norm: '1'
+messages: conversations
+model_name_or_path: mistralai/Mistral-7B-v0.1
+neat_packing: 'True'
+num_train_epochs: '2.0'
+output_dir: /opt/ml/model
+overwrite_cache: 'True'
+overwrite_output_dir: 'True'
+packing: 'True'
+per_device_train_batch_size: '8'
+plot_loss: 'True'
+preprocessing_num_workers: '16'
+push_to_db: 'True'
+push_to_hub: 'True'
+report_to: wandb
+role_tag: from
+run_name: hp_ablations_mistral_epoch2_dcftv1.2
+save_strategy: epoch
+stage: sft
+template: mistral
+user_tag: human
+val_size: '0.05'
+warmup_ratio: '0.1'
+warmup_steps: '1738'
+weight_decay: '0.1'
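
These keys resemble a LLaMA-Factory-style SFT configuration. A minimal sketch of reading the file and checking the batch-size arithmetic, assuming the usual composition global_batch_size = per_device_train_batch_size × gradient_accumulation_steps × data-parallel world size (an assumption about how these fields relate, not stated in the file):

import yaml

# Load the uploaded training config (values are YAML strings, e.g. '8').
with open("configs.yaml") as f:
    cfg = yaml.safe_load(f)

per_device = int(cfg["per_device_train_batch_size"])  # 8
grad_accum = int(cfg["gradient_accumulation_steps"])  # 8
global_bs = int(cfg["global_batch_size"])             # 512

# Under the assumed relation, 512 / (8 * 8) = 8, implying an
# 8-GPU data-parallel run.
print(global_bs // (per_device * grad_accum))  # -> 8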
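
And a sketch of the upload itself, matching the commit message. The repo_id is borrowed from hub_model_id above for illustration; whether the config was pushed to that same repo is an assumption, since the target repo of this commit is not shown here.

from huggingface_hub import HfApi

# Sketch of "Upload configs.yaml with huggingface_hub".
api = HfApi()
api.upload_file(
    path_or_fileobj="configs.yaml",   # local file to push
    path_in_repo="configs.yaml",      # destination path in the repo
    repo_id="mlfoundations-dev/hp_ablations_mistral_epoch2_dcftv1.2",
    commit_message="Upload configs.yaml with huggingface_hub",
)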