{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9874476987447699,
"eval_steps": 500,
"global_step": 59,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.016736401673640166,
"grad_norm": 7.772415691005638,
"learning_rate": 8.333333333333333e-08,
"logits/chosen": -2.807276487350464,
"logits/rejected": -2.7759768962860107,
"logps/chosen": -315.42626953125,
"logps/rejected": -227.5915985107422,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.16736401673640167,
"grad_norm": 7.463806215651064,
"learning_rate": 4.930057285201027e-07,
"logits/chosen": -2.7555131912231445,
"logits/rejected": -2.746166706085205,
"logps/chosen": -271.9668884277344,
"logps/rejected": -260.6821594238281,
"loss": 0.6915,
"rewards/accuracies": 0.5173611044883728,
"rewards/chosen": 0.0050306967459619045,
"rewards/margins": 0.003242988372221589,
"rewards/rejected": 0.0017877084901556373,
"step": 10
},
{
"epoch": 0.33472803347280333,
"grad_norm": 6.7249970911142904,
"learning_rate": 4.187457503795526e-07,
"logits/chosen": -2.7868175506591797,
"logits/rejected": -2.7679882049560547,
"logps/chosen": -261.43414306640625,
"logps/rejected": -250.8629150390625,
"loss": 0.6745,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": 0.03247567266225815,
"rewards/margins": 0.04067990183830261,
"rewards/rejected": -0.008204231038689613,
"step": 20
},
{
"epoch": 0.502092050209205,
"grad_norm": 7.390766950490043,
"learning_rate": 2.8691164100062034e-07,
"logits/chosen": -2.7876508235931396,
"logits/rejected": -2.768584966659546,
"logps/chosen": -293.8196716308594,
"logps/rejected": -254.1870574951172,
"loss": 0.6487,
"rewards/accuracies": 0.6812499761581421,
"rewards/chosen": -0.023635780438780785,
"rewards/margins": 0.1266542375087738,
"rewards/rejected": -0.15028999745845795,
"step": 30
},
{
"epoch": 0.6694560669456067,
"grad_norm": 8.096328388494332,
"learning_rate": 1.4248369943086995e-07,
"logits/chosen": -2.7604479789733887,
"logits/rejected": -2.7379133701324463,
"logps/chosen": -264.1959228515625,
"logps/rejected": -255.66079711914062,
"loss": 0.6353,
"rewards/accuracies": 0.684374988079071,
"rewards/chosen": -0.11276473850011826,
"rewards/margins": 0.16073670983314514,
"rewards/rejected": -0.2735014259815216,
"step": 40
},
{
"epoch": 0.8368200836820083,
"grad_norm": 8.569402237485592,
"learning_rate": 3.473909705816111e-08,
"logits/chosen": -2.7702622413635254,
"logits/rejected": -2.7503457069396973,
"logps/chosen": -279.10943603515625,
"logps/rejected": -287.271728515625,
"loss": 0.6201,
"rewards/accuracies": 0.628125011920929,
"rewards/chosen": -0.22100774943828583,
"rewards/margins": 0.13658460974693298,
"rewards/rejected": -0.3575924038887024,
"step": 50
},
{
"epoch": 0.9874476987447699,
"step": 59,
"total_flos": 0.0,
"train_loss": 0.6489952216714115,
"train_runtime": 1643.7514,
"train_samples_per_second": 9.298,
"train_steps_per_second": 0.036
}
],
"logging_steps": 10,
"max_steps": 59,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}