{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9874476987447699,
  "eval_steps": 500,
  "global_step": 59,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.016736401673640166,
      "grad_norm": 7.7730547112344635,
      "learning_rate": 8.333333333333333e-08,
      "logits/chosen": -2.807276487350464,
      "logits/rejected": -2.7759768962860107,
      "logps/chosen": -315.42626953125,
      "logps/rejected": -227.5915985107422,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.16736401673640167,
      "grad_norm": 7.459164668463289,
      "learning_rate": 4.930057285201027e-07,
      "logits/chosen": -2.7555456161499023,
      "logits/rejected": -2.7461628913879395,
      "logps/chosen": -271.9652099609375,
      "logps/rejected": -260.6877136230469,
      "loss": 0.6915,
      "rewards/accuracies": 0.4965277910232544,
      "rewards/chosen": 0.005047248676419258,
      "rewards/margins": 0.0033153239637613297,
      "rewards/rejected": 0.0017319255275651813,
      "step": 10
    },
    {
      "epoch": 0.33472803347280333,
      "grad_norm": 6.712012764608614,
      "learning_rate": 4.187457503795526e-07,
      "logits/chosen": -2.787020206451416,
      "logits/rejected": -2.768185615539551,
      "logps/chosen": -261.4574890136719,
      "logps/rejected": -250.9368896484375,
      "loss": 0.6745,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": 0.0322418287396431,
      "rewards/margins": 0.041185710579156876,
      "rewards/rejected": -0.008943881839513779,
      "step": 20
    },
    {
      "epoch": 0.502092050209205,
      "grad_norm": 7.374097934015646,
      "learning_rate": 2.8691164100062034e-07,
      "logits/chosen": -2.7877423763275146,
      "logits/rejected": -2.7687172889709473,
      "logps/chosen": -293.801025390625,
      "logps/rejected": -254.2049102783203,
      "loss": 0.6486,
      "rewards/accuracies": 0.6781250238418579,
      "rewards/chosen": -0.023449189960956573,
      "rewards/margins": 0.12701918184757233,
      "rewards/rejected": -0.1504683643579483,
      "step": 30
    },
    {
      "epoch": 0.6694560669456067,
      "grad_norm": 8.143971290047324,
      "learning_rate": 1.4248369943086995e-07,
      "logits/chosen": -2.760338544845581,
      "logits/rejected": -2.737778663635254,
      "logps/chosen": -264.13775634765625,
      "logps/rejected": -255.5776824951172,
      "loss": 0.6353,
      "rewards/accuracies": 0.684374988079071,
      "rewards/chosen": -0.11218307912349701,
      "rewards/margins": 0.1604871302843094,
      "rewards/rejected": -0.2726702094078064,
      "step": 40
    },
    {
      "epoch": 0.8368200836820083,
      "grad_norm": 8.540975248834071,
      "learning_rate": 3.473909705816111e-08,
      "logits/chosen": -2.7702414989471436,
      "logits/rejected": -2.750225305557251,
      "logps/chosen": -279.04254150390625,
      "logps/rejected": -287.20135498046875,
      "loss": 0.6202,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.22033901512622833,
      "rewards/margins": 0.13654986023902893,
      "rewards/rejected": -0.35688892006874084,
      "step": 50
    },
    {
      "epoch": 0.9874476987447699,
      "step": 59,
      "total_flos": 0.0,
      "train_loss": 0.6490067789110087,
      "train_runtime": 1651.119,
      "train_samples_per_second": 9.256,
      "train_steps_per_second": 0.036
    }
  ],
  "logging_steps": 10,
  "max_steps": 59,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
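
The log above appears to come from a DPO-style preference-tuning run: the rewards/chosen, rewards/rejected, rewards/margins and logps/* keys match what trl's DPOTrainer logs, and the step-1 loss of 0.6931 is ln 2, the value a sigmoid preference loss reports while the reward margin is still zero. Below is a minimal Python sketch for inspecting the per-step entries; it assumes the file has been saved locally as trainer_state.json (the filename transformers' Trainer writes) and skips the final summary entry, which carries run totals rather than a per-step loss.

    import json

    # Minimal sketch: load the trainer state shown above and print the
    # per-step preference-training metrics. Assumes the JSON is saved as
    # trainer_state.json in the working directory.
    with open("trainer_state.json") as f:
        state = json.load(f)

    for entry in state["log_history"]:
        if "loss" not in entry:
            # The last entry is the run summary (train_loss, train_runtime, ...),
            # not a per-step log, so skip it.
            continue
        print(
            f"step {entry['step']:>3}  "
            f"loss {entry['loss']:.4f}  "
            f"reward margin {entry['rewards/margins']:+.4f}  "
            f"accuracy {entry['rewards/accuracies']:.3f}"
        )

On this run that prints a loss falling from 0.6931 to 0.6202 while the reward margin climbs from 0 to roughly 0.14 to 0.16.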