{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9874476987447699,
  "eval_steps": 500,
  "global_step": 59,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.016736401673640166,
      "grad_norm": 56.26818620363976,
      "learning_rate": 5e-08,
      "logits/chosen": -2.545705795288086,
      "logits/rejected": -2.5019917488098145,
      "logps/chosen": -274.9046630859375,
      "logps/pi_response": -169.24205017089844,
      "logps/ref_response": -169.24205017089844,
      "logps/rejected": -458.52459716796875,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.16736401673640167,
      "grad_norm": 54.427899684240565,
      "learning_rate": 2.9580343711206163e-07,
      "logits/chosen": -2.5388073921203613,
      "logits/rejected": -2.4851911067962646,
      "logps/chosen": -286.4378662109375,
      "logps/pi_response": -156.18502807617188,
      "logps/ref_response": -150.80105590820312,
      "logps/rejected": -439.7905578613281,
      "loss": 0.6708,
      "rewards/accuracies": 0.5729166865348816,
      "rewards/chosen": -0.10314265638589859,
      "rewards/margins": 0.07712497562170029,
      "rewards/rejected": -0.18026763200759888,
      "step": 10
    },
    {
      "epoch": 0.33472803347280333,
      "grad_norm": 54.980640270066274,
      "learning_rate": 2.512474502277316e-07,
      "logits/chosen": -2.4677674770355225,
      "logits/rejected": -2.4007956981658936,
      "logps/chosen": -322.31732177734375,
      "logps/pi_response": -178.76998901367188,
      "logps/ref_response": -160.28256225585938,
      "logps/rejected": -552.8850708007812,
      "loss": 0.5572,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -0.5668123364448547,
      "rewards/margins": 0.6728711128234863,
      "rewards/rejected": -1.2396835088729858,
      "step": 20
    },
    {
      "epoch": 0.502092050209205,
      "grad_norm": 36.46322338041782,
      "learning_rate": 1.721469846003722e-07,
      "logits/chosen": -2.331505060195923,
      "logits/rejected": -2.259660243988037,
      "logps/chosen": -406.34173583984375,
      "logps/pi_response": -196.53720092773438,
      "logps/ref_response": -152.65841674804688,
      "logps/rejected": -605.3541259765625,
      "loss": 0.5347,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -1.1331523656845093,
      "rewards/margins": 0.8151086568832397,
      "rewards/rejected": -1.9482609033584595,
      "step": 30
    },
    {
      "epoch": 0.6694560669456067,
      "grad_norm": 36.52867903824387,
      "learning_rate": 8.549021965852197e-08,
      "logits/chosen": -2.3737633228302,
      "logits/rejected": -2.285168170928955,
      "logps/chosen": -365.64825439453125,
      "logps/pi_response": -197.98513793945312,
      "logps/ref_response": -156.18771362304688,
      "logps/rejected": -638.6150512695312,
      "loss": 0.5065,
      "rewards/accuracies": 0.7593749761581421,
      "rewards/chosen": -0.9340522885322571,
      "rewards/margins": 0.991562008857727,
      "rewards/rejected": -1.9256141185760498,
      "step": 40
    },
    {
      "epoch": 0.8368200836820083,
      "grad_norm": 34.881990414936325,
      "learning_rate": 2.0843458234896666e-08,
      "logits/chosen": -2.3647232055664062,
      "logits/rejected": -2.3200135231018066,
      "logps/chosen": -372.10552978515625,
      "logps/pi_response": -202.2058868408203,
      "logps/ref_response": -155.8397674560547,
      "logps/rejected": -605.3728637695312,
      "loss": 0.5094,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.969250500202179,
      "rewards/margins": 0.7957654595375061,
      "rewards/rejected": -1.765015959739685,
      "step": 50
    },
    {
      "epoch": 0.9874476987447699,
      "step": 59,
      "total_flos": 0.0,
      "train_loss": 0.5495932223433155,
      "train_runtime": 2619.5522,
      "train_samples_per_second": 5.834,
      "train_steps_per_second": 0.023
    }
  ],
  "logging_steps": 10,
  "max_steps": 59,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}