#!/bin/bash

# Logging/checkpointing cadence: log every step; save and eval every 2,500 steps.
LOG_ARGS="--log_interval 1 --save_interval 2500 --eval_interval 2500 --eval_iters 10"

# Optimizer and schedule: 50k steps, cosine LR decay with a 50-step warmup,
# FlashAttention enabled, no attention dropout.
TRAIN_ARGS="--train_iters 50000 --lr_decay_style cosine \
--lr_warmup_iters 50 --lr 1e-5 --min_lr 1e-6 --use_flash_attn \
--attention_dropout 0.0 --adam_beta1 0.9 --adam_beta2 0.95 --adam_eps 1e-5"
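
# For reference, a sketch of the standard cosine-annealing rule these flags
# select (the exact Megatron implementation may differ in details):
#   lr(t) = min_lr + 0.5 * (lr - min_lr) * (1 + cos(pi * (t - 50) / (50000 - 50)))
# after a linear warmup from 0 to --lr over the first 50 iterations.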

### For a single node with 8 GPUs
DISTRIBUTED_ARGS="--nproc_per_node 8 --nnodes 1 --node_rank 0 --master_addr localhost --master_port 8000"

# ### For multiple nodes with 8 GPUs each
# DISTRIBUTED_ARGS="--nproc_per_node 8 --nnodes 2 --node_rank $RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT"
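#
# A hypothetical two-node launch (script name and address below are
# placeholders, not taken from this repo): run the same script on every node,
# exporting the rendezvous variables first, e.g.
#   node 0:  RANK=0 MASTER_ADDR=10.0.0.1 MASTER_PORT=8000 bash finetune_llama2.sh
#   node 1:  RANK=1 MASTER_ADDR=10.0.0.1 MASTER_PORT=8000 bash finetune_llama2.sh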

# LLaMA-2 model flags: disable fused bias ops, 4,096-token context, no dropout.
LLAMA2_ARGS="--no_bias_gelu_fusion --no_bias_dropout_fusion \
--seq_length 4096 --max_position_embeddings 4096 \
--hidden_dropout 0.0 --rope_scaling_factor 1.0"

CUDA_DEVICE_MAX_CONNECTIONS=1 \
torchrun $DISTRIBUTED_ARGS finetune.py \
	--tensor_model_parallel_size 8 \
	--pipeline_model_parallel_size 1 \
	--load megatron_test/llama2-megatron-t8p1 \
	--save megatron_test/model_test \
	--tensorboard_dir megatron_test/model_test/logs/ \
	--data_path megatron_test/indonesian_mix_data/indonesian_mix_data_text_document \
	--model_name llama2 \
	--tokenizer_type SentencePieceTokenizer \
	--vocab_file megatron_test/llama2_tokenizer.model \
	--bf16 \
	--micro_batch_size 4 \
	--global_batch_size 128 \
	--sequence_parallel \
	--recompute_granularity selective \
	--use_checkpoint_args \
	$COMMON_ARGS $LOG_ARGS $TRAIN_ARGS $LLAMA2_ARGS
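
# With --tensor_model_parallel_size 8 and --pipeline_model_parallel_size 1,
# each group of 8 GPUs holds one model replica; on the single-node setup above
# the data-parallel size is therefore 8 / (8 * 1) = 1.
# $COMMON_ARGS is not set in this snippet; define it (or leave it empty) before running.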

### Increase micro_batch_size if you have 80 GB GPUs.
### Decrease micro_batch_size if you need to train a larger model.
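
# Batch-size arithmetic (a sketch, assuming the single-node settings above):
# with data-parallel size 1, Megatron reaches the global batch via gradient
# accumulation over global_batch_size / (micro_batch_size * DP)
#   = 128 / (4 * 1) = 32 micro-steps per optimizer update.
# Doubling micro_batch_size to 8 would halve that to 16 accumulation steps
# while keeping the effective batch (and thus the LR schedule) unchanged.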