dreamerdeo committed
Commit · 7abf113 · 1 Parent(s): a1f410b
Upload folder using huggingface_hub
- run_train_test.sh +33 -0
run_train_test.sh
ADDED
@@ -0,0 +1,33 @@
+
+LOG_ARGS="--log_interval 1 --save_interval 2500 --eval_interval 2500 --eval_iters 10"
+
+TRAIN_ARGS="--train_iters 50000 --lr_decay_style cosine
+--lr_warmup_iters 50 --lr 1e-5 --min_lr 1e-6 --use_flash_attn
+--attention_dropout 0.0 --adam_beta1 0.9 --adam_beta2 0.95 --adam_eps 1e-5"
+
+DISTRIBUTED_ARGS="--nproc_per_node 8 --nnodes 1 --node_rank 0 --master_addr localhost --master_port 8000"
+# DISTRIBUTED_ARGS="--nproc_per_node 16 --nnodes 2 --node_rank $RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT"
+
+LLAMA2_ARGS=" --no_bias_gelu_fusion --no_bias_dropout_fusion
+--seq_length 4096 --max_position_embeddings 4096
+--hidden_dropout 0.0 --rope_scaling_factor 1.0"
+
+CUDA_DEVICE_MAX_CONNECTIONS=1 \
+torchrun $DISTRIBUTED_ARGS finetune.py \
+    --tensor_model_parallel_size 8 \
+    --pipeline_model_parallel_size 1 \
+    --load megatron_test/llama2-megatron-t8p1 \
+    --save megatron_test/model_test \
+    --tensorboard_dir megatron_test/model_test/logs/ \
+    --data_path megatron_test/indonesian_mix_data/indonesian_mix_data_text_document \
+    --model_name llama2 \
+    --tokenizer_type SentencePieceTokenizer \
+    --vocab_file megatron_test/llama2_tokenizer.model \
+    --bf16 \
+    --micro_batch_size 4 \
+    --global_batch_size 16 \
+    --sequence_parallel \
+    --recompute_granularity selective \
+    --use_checkpoint_args \
+    $COMMON_ARGS $LOG_ARGS $TRAIN_ARGS $LLAMA2_ARGS
+
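
With --nproc_per_node 8, --tensor_model_parallel_size 8, and --pipeline_model_parallel_size 1, every GPU belongs to one tensor-parallel group, leaving a single data-parallel replica; --global_batch_size 16 with --micro_batch_size 4 therefore implies 4 gradient-accumulation steps per optimizer step. A minimal sanity-check sketch of that arithmetic (the variable names below are mine, not part of the script):

    # Parallelism arithmetic implied by the flags above (illustrative names)
    NPROC=8; TP=8; PP=1
    MICRO=4; GLOBAL=16
    DP=$(( NPROC / (TP * PP) ))          # data-parallel size: 8 / (8 * 1) = 1
    ACCUM=$(( GLOBAL / (MICRO * DP) ))   # grad-accumulation steps: 16 / (4 * 1) = 4
    echo "data_parallel=$DP grad_accum_steps=$ACCUM"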
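Note that $COMMON_ARGS is expanded on the final torchrun line but never defined in the script, so it either expands to nothing or must be supplied by the calling environment. A hedged invocation sketch; the exported value is a placeholder, not taken from the commit:

    # $COMMON_ARGS is not set anywhere in run_train_test.sh; if it is meant to
    # carry extra flags, the caller would export it first (placeholder below).
    export COMMON_ARGS=""
    bash run_train_test.sh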
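The commented-out DISTRIBUTED_ARGS line is the two-node variant (16 processes per node across 2 nodes) and expects $RANK, $MASTER_ADDR, and $MASTER_PORT to come from the launcher. A sketch of what each node would set before running; the address is a placeholder:

    # Per-node environment for the commented two-node launch (placeholder values)
    export MASTER_ADDR=10.0.0.1   # reachable address of the rank-0 node
    export MASTER_PORT=8000
    export RANK=0                 # node rank: 0 on the first node, 1 on the second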