{ "actor_model_name_or_path": "output/sft_opt_1b", "reward_model_name_or_path": "output/cm_opt", "reward_critic_model_name_or_path": "output/cm_opt", "max_length": 512, "trust_remote_code": true, "train_datasets": [ [ "PKU-SafeRLHF/train", { "proportion": 1.0 } ] ], "ptx_datasets": [ [ "alpaca", { "proportion": 1.0 } ] ], "eval_datasets": null, "kl_coeff": 0.02, "clip_range_ratio": 0.2, "clip_range_score": 50.0, "clip_range_value": 5.0, "ptx_coeff": 16.0, "epochs": 1, "update_iters": 1, "per_device_prompt_batch_size": 16, "per_device_train_batch_size": 16, "per_device_eval_batch_size": 16, "gradient_accumulation_steps": 2, "actor_lr": 1e-05, "actor_weight_decay": 0.01, "actor_lr_scheduler_type": "cosine", "actor_lr_warmup_ratio": 0.03, "actor_gradient_checkpointing": true, "critic_lr": 5e-06, "critic_weight_decay": 0.0, "critic_lr_scheduler_type": "constant", "critic_lr_warmup_ratio": 0.03, "critic_gradient_checkpointing": true, "normalize_reward": false, "seed": 42, "fp16": true, "bf16": false, "tf32": null, "temperature": 1.0, "top_p": 1.0, "num_return_sequences": 1, "repetition_penalty": 1.0, "eval_strategy": "epoch", "eval_interval": 1000000, "need_eval": false, "eval_split_ratio": null, "output_dir": "/home/rwliang/Reinforcement-Learning/FINAL_PROJECT/rl-final/safe-rlhf/output/ppo_cm_opt", "log_type": "wandb", "log_dir": "/home/rwliang/Reinforcement-Learning/FINAL_PROJECT/rl-final/safe-rlhf/output/ppo_cm_opt", "log_project": "Safe-RLHF-PPO", "log_run_name": "ppo-2024-11-29-22-20-30", "save_16bit": false, "save_interval": 1000000, "local_rank": 0, "zero_stage": 1, "offload": "none", "deepspeed": false, "deepspeed_config": null, "deepscale": false, "deepscale_config": null, "global_rank": 0, "device": { "type": "torch.device", "repr": "device(type='cuda', index=0)" }, "total_training_steps": 1208 }