|
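# Logging and checkpointing cadence (assumed reading of the flags): log every iteration,
# save a checkpoint and run evaluation (over 10 eval iterations) every 2500 training iterations.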
LOG_ARGS="--log_interval 1 --save_interval 2500 --eval_interval 2500 --eval_iters 10" |
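
# Optimizer and schedule: 50k iterations, cosine decay from lr 1e-5 down to 1e-6 after 50
# warmup steps; FlashAttention enabled, Adam beta1/beta2/eps set to 0.9/0.95/1e-5.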
TRAIN_ARGS="--train_iters 50000 --lr_decay_style cosine
--lr_warmup_iters 50 --lr 1e-5 --min_lr 1e-6 --use_flash_attn
--attention_dropout 0.0 --adam_beta1 0.9 --adam_beta2 0.95 --adam_eps 1e-5" |
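
# torchrun launcher settings: a single node with 8 GPU processes.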
DISTRIBUTED_ARGS="--nproc_per_node 8 --nnodes 1 --node_rank 0 --master_addr localhost --master_port 8000" |
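
# Llama 2 model settings: 4096-token context, dropout disabled, and the RoPE scaling
# factor left at 1.0 (i.e. no context-length extension).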
LLAMA2_ARGS="--no_bias_gelu_fusion --no_bias_dropout_fusion
--seq_length 4096 --max_position_embeddings 4096
--hidden_dropout 0.0 --rope_scaling_factor 1.0" |
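
# Launch fine-tuning. Tensor parallel 8 / pipeline parallel 1 is assumed to match the sharding
# of the converted checkpoint loaded from llama2-megatron-t8p1. With 8 GPUs this leaves a
# data-parallel size of 1, so global_batch_size 128 / micro_batch_size 4 = 32 gradient
# accumulation steps per optimizer step. --use_checkpoint_args takes model arguments from the
# loaded checkpoint, and CUDA_DEVICE_MAX_CONNECTIONS=1 is expected by Megatron when
# --sequence_parallel is enabled.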
CUDA_DEVICE_MAX_CONNECTIONS=1 \
torchrun $DISTRIBUTED_ARGS finetune.py \
    --tensor_model_parallel_size 8 \
    --pipeline_model_parallel_size 1 \
    --load megatron_test/llama2-megatron-t8p1 \
    --save megatron_test/model_test \
    --tensorboard_dir megatron_test/model_test/logs/ \
    --data_path megatron_test/indonesian_mix_data/indonesian_mix_data_text_document \
    --model_name llama2 \
    --tokenizer_type SentencePieceTokenizer \
    --vocab_file megatron_test/llama2_tokenizer.model \
    --bf16 \
    --micro_batch_size 4 \
    --global_batch_size 128 \
    --sequence_parallel \
    --recompute_granularity selective \
    --use_checkpoint_args \
    $COMMON_ARGS $LOG_ARGS $TRAIN_ARGS $LLAMA2_ARGS