{
  "model_name_or_path": "output/sft_opt_1b",
  "max_length": 512,
  "trust_remote_code": true,
  "train_datasets": [
    [
      "PKU-SafeRLHF/train",
      {
        "proportion": 1.0
      }
    ]
  ],
  "eval_datasets": [
    [
      "PKU-SafeRLHF/test",
      {
        "proportion": 1.0
      }
    ]
  ],
  "scale_coeff": 0.1,
  "epochs": 2,
  "per_device_train_batch_size": 16,
  "per_device_eval_batch_size": 16,
  "gradient_accumulation_steps": 1,
  "gradient_checkpointing": true,
  "lr": 1e-06,
  "lr_scheduler_type": "cosine",
  "lr_warmup_ratio": 0.03,
  "weight_decay": 0.05,
  "seed": 42,
  "fp16": true,
  "bf16": false,
  "tf32": null,
  "eval_strategy": "epoch",
  "eval_interval": 1000000,
  "need_eval": true,
  "eval_split_ratio": null,
  "output_dir": "/home/rwliang/Reinforcement-Learning/FINAL_PROJECT/rl-final/safe-rlhf/output/dpo_opt_1b_non_helpful",
  "log_type": "wandb",
  "log_dir": "/home/rwliang/Reinforcement-Learning/FINAL_PROJECT/rl-final/safe-rlhf/output/dpo_opt_1b_non_helpful",
  "log_project": "Safe-RLHF-DPO",
  "log_run_name": "dpo-2024-11-30-21-20-04",
  "save_16bit": false,
  "save_interval": 1000000,
  "local_rank": 0,
  "zero_stage": 1,
  "offload": "none",
  "deepspeed": false,
  "deepspeed_config": null,
  "deepscale": false,
  "deepscale_config": null,
  "global_rank": 0,
  "device": {
    "type": "torch.device",
    "repr": "device(type='cuda', index=0)"
  },
  "num_update_steps_per_epoch": 4620,
  "total_training_steps": 9240
}
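
For reference, the two derived fields at the end of the config follow from the batch settings: with per_device_train_batch_size 16, gradient_accumulation_steps 1, and a single cuda:0 device, 4,620 updates per epoch implies a training split of about 4,620 * 16 = 73,920 preference pairs, and 2 epochs give the 9,240 total steps. The sketch below is a minimal, hypothetical illustration of how scale_coeff (the beta temperature of the standard DPO objective) and these settings enter the loss and the step count; the dataset size is inferred rather than read from the config, and this is not the safe-rlhf implementation itself.

import math

import torch
import torch.nn.functional as F

# Values taken from the config above; TRAIN_PAIRS is an assumption
# back-solved from 4620 steps/epoch * batch size 16.
SCALE_COEFF = 0.1    # "scale_coeff": the DPO beta temperature
EPOCHS = 2
BATCH_SIZE = 16      # "per_device_train_batch_size"
GRAD_ACCUM = 1       # "gradient_accumulation_steps"
WORLD_SIZE = 1       # single device, per "global_rank": 0
TRAIN_PAIRS = 73_920  # assumed size of PKU-SafeRLHF/train here

def dpo_loss(policy_chosen_logp: torch.Tensor,
             policy_rejected_logp: torch.Tensor,
             ref_chosen_logp: torch.Tensor,
             ref_rejected_logp: torch.Tensor,
             beta: float = SCALE_COEFF) -> torch.Tensor:
    """Standard DPO objective: -log sigmoid(beta * log-ratio margin)."""
    chosen_ratio = policy_chosen_logp - ref_chosen_logp
    rejected_ratio = policy_rejected_logp - ref_rejected_logp
    return -F.logsigmoid(beta * (chosen_ratio - rejected_ratio)).mean()

# Toy batch: the chosen response gained likelihood relative to the
# reference model, the rejected response lost likelihood.
loss = dpo_loss(torch.tensor([-10.0]), torch.tensor([-12.0]),
                torch.tensor([-11.0]), torch.tensor([-11.0]))

# Derived step counts, mirroring the last two fields of the config.
updates_per_epoch = math.ceil(TRAIN_PAIRS / (BATCH_SIZE * GRAD_ACCUM * WORLD_SIZE))
total_steps = EPOCHS * updates_per_epoch
assert (updates_per_epoch, total_steps) == (4620, 9240)

Smaller scale_coeff values penalize deviation from the SFT reference model less, letting the policy drift further from it; 0.1 is a common choice for DPO.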