dreamerdeo committed
Commit e1bc658 · verified · 1 parent: ba6090d

Update run_train_test.sh

Files changed (1): run_train_test.sh (+6 -1)
run_train_test.sh CHANGED
@@ -5,7 +5,10 @@ TRAIN_ARGS="--train_iters 50000 --lr_decay_style cosine
 --lr_warmup_iters 50 --lr 1e-5 --min_lr 1e-6 --use_flash_attn
 --attention_dropout 0.0 --adam_beta1 0.9 --adam_beta2 0.95 --adam_eps 1e-5"
 
+### for one node, 8 GPUs
 DISTRIBUTED_ARGS="--nproc_per_node 8 --nnodes 1 --node_rank 0 --master_addr localhost --master_port 8000"
+
+### for multiple nodes, 8 GPUs per node
 # DISTRIBUTED_ARGS="--nproc_per_node 8 --nnodes 2 --node_rank $RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT"
 
 LLAMA2_ARGS=" --no_bias_gelu_fusion --no_bias_dropout_fusion
@@ -25,9 +28,11 @@ torchrun $DISTRIBUTED_ARGS finetune.py \
 --vocab_file megatron_test/llama2_tokenizer.model \
 --bf16 \
 --micro_batch_size 4 \
---global_batch_size 16 \
+--global_batch_size 128 \
 --sequence_parallel \
 --recompute_granularity selective \
 --use_checkpoint_args \
 $COMMON_ARGS $LOG_ARGS $TRAIN_ARGS $LLAMA2_ARGS
 
+### Increase the micro_batch_size if you have 80G GPUs
+### Decrease the micro_batch_size if you need to train a larger model
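
For reference on the change from --global_batch_size 16 to 128: Megatron-style training derives the number of gradient-accumulation steps from the relation global_batch_size = micro_batch_size * data-parallel size * accumulation steps. The lines below are only a rough sanity-check sketch, assuming pure data parallelism across the 8 GPUs configured above (i.e. no tensor or pipeline parallelism hidden in $COMMON_ARGS):

# Sanity check of the batch-size relation (assumed data-parallel size: the 8 GPUs of one node)
MICRO_BATCH_SIZE=4
DATA_PARALLEL_SIZE=8
GLOBAL_BATCH_SIZE=128
GRAD_ACC_STEPS=$(( GLOBAL_BATCH_SIZE / (MICRO_BATCH_SIZE * DATA_PARALLEL_SIZE) ))
echo "gradient accumulation steps per iteration: ${GRAD_ACC_STEPS}"   # prints 4

This is also why the two new comments pair up: raising micro_batch_size on 80 GB GPUs (or lowering it for a larger model) changes per-GPU memory use, while global_batch_size should stay a multiple of micro_batch_size * data-parallel size.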
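
If the commented-out multi-node DISTRIBUTED_ARGS line is enabled instead, $RANK, $MASTER_ADDR and $MASTER_PORT must be set on every node before the script runs; a job scheduler or launcher normally provides them. A minimal manual sketch for the two-node case, with a placeholder master address and the port used above:

# Node 0 (10.0.0.1 is a placeholder; use the real address of the master node)
export MASTER_ADDR=10.0.0.1
export MASTER_PORT=8000
export RANK=0
bash run_train_test.sh

# Node 1
export MASTER_ADDR=10.0.0.1
export MASTER_PORT=8000
export RANK=1
bash run_train_test.sh

Note that RANK here is the node rank handed to torchrun's --node_rank, not a per-process rank.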