Merge branch 'main' of https://huggingface.co/khoicrtp/cog-llama-test
Files changed:
- train_model.sh (+0 -2)
train_model.sh CHANGED

@@ -4,7 +4,6 @@ torchrun --nproc_per_node=1 --master_port=9292 train.py \
     --tokenizer_name_or_path /src/weights/tokenizer \
     --data_path ./alpaca_data.json \
     --model_name_or_path /src/weights/llama-7b \
-    --bf16 True \
     --output_dir alpaca_out \
     --num_train_epochs 3 \
     --per_device_train_batch_size 4 \
@@ -20,4 +19,3 @@ torchrun --nproc_per_node=1 --master_port=9292 train.py \
     --logging_steps 1 \
     --fsdp "full_shard auto_wrap" \
     --fsdp_transformer_layer_cls_to_wrap 'LLaMADecoderLayer' \
-    --tf32 True \
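For reference, the two hunks above imply that the post-merge torchrun invocation looks roughly like the sketch below. It is a reconstruction from the visible diff context only: script lines 11-19, and anything after line 23, fall outside the hunks, so any flags there are omitted.

# Sketch of the post-merge command, reassembled from the diff hunks only.
# Flags on script lines 11-19, and any that follow the tf32 line (its
# trailing backslash suggests the script continues), are not visible in
# the diff and are elided here.
torchrun --nproc_per_node=1 --master_port=9292 train.py \
    --tokenizer_name_or_path /src/weights/tokenizer \
    --data_path ./alpaca_data.json \
    --model_name_or_path /src/weights/llama-7b \
    --output_dir alpaca_out \
    --num_train_epochs 3 \
    --per_device_train_batch_size 4 \
    --logging_steps 1 \
    --fsdp "full_shard auto_wrap" \
    --fsdp_transformer_layer_cls_to_wrap 'LLaMADecoderLayer'

With --bf16 True and --tf32 True removed, those settings fall back to whatever defaults train.py's argument parser defines; if it parses them via Hugging Face's TrainingArguments (an assumption, since train.py is not part of this diff), both default off and training runs in full fp32.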