alpaca-7b-sft / arguments.json
{
  "model_name_or_path": "models/alpaca-7b-reproduced",
  "max_length": 1024,
  "trust_remote_code": true,
  "train_datasets": [
    [
      "alpaca",
      {
        "proportion": 1.0
      }
    ]
  ],
  "eval_datasets": null,
  "epochs": 3,
  "per_device_train_batch_size": 8,
  "per_device_eval_batch_size": 8,
  "gradient_accumulation_steps": 8,
  "gradient_checkpointing": true,
  "lr": 2e-05,
  "lr_scheduler_type": "cosine",
  "lr_warmup_ratio": 0.03,
  "weight_decay": 0.0,
  "seed": 42,
  "fp16": false,
  "bf16": true,
  "tf32": true,
  "eval_strategy": "epoch",
  "eval_interval": 1000000,
  "need_eval": false,
  "eval_split_ratio": null,
  "output_dir": "/home/juntao/Projects/roo/models/alpaca-7b-sft",
  "log_type": "wandb",
  "log_dir": "/home/juntao/Projects/roo/models/alpaca-7b-sft",
  "log_project": "SFT-alpaca",
  "log_group": null,
  "log_run_name": "sft-2024-09-04-12-52-34",
  "save_16bit": false,
  "save_interval": 1000000,
  "local_rank": 0,
  "zero_stage": 3,
  "offload": "none",
  "deepspeed": false,
  "deepspeed_config": null,
  "deepscale": false,
  "deepscale_config": null,
  "global_rank": 0,
  "device": {
    "type": "torch.device",
    "repr": "device(type='cuda', index=0)"
  },
  "num_update_steps_per_epoch": 102,
  "total_training_steps": 306
}
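
The two derived values at the bottom of the file follow from the batch-size settings above. A minimal sketch of that arithmetic is shown below; the dataset size (~52,002 examples, the standard Stanford Alpaca release) and the number of GPUs (8) are assumptions, as neither is recorded in this file, but they are consistent with 102 optimizer steps per epoch and 306 total steps over 3 epochs.

import math

# Values copied from arguments.json above.
per_device_train_batch_size = 8
gradient_accumulation_steps = 8
epochs = 3

# Assumptions (not recorded in the file): standard Alpaca dataset size and an 8-GPU run.
dataset_size = 52_002
num_gpus = 8

# Effective (global) batch size per optimizer update.
effective_batch_size = per_device_train_batch_size * gradient_accumulation_steps * num_gpus  # 512

num_update_steps_per_epoch = math.ceil(dataset_size / effective_batch_size)  # 102
total_training_steps = num_update_steps_per_epoch * epochs                   # 306

print(effective_batch_size, num_update_steps_per_epoch, total_training_steps)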