{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 100,
  "global_step": 61,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "grad_norm": 5.823829876222625,
      "learning_rate": 7.142857142857142e-08,
      "logits/chosen": -1.0549558401107788,
      "logits/rejected": -0.9334831833839417,
      "logps/chosen": -681.2974853515625,
      "logps/rejected": -846.9592895507812,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.16,
      "grad_norm": 5.674915170284527,
      "learning_rate": 4.96201938253052e-07,
      "logits/chosen": -1.1824066638946533,
      "logits/rejected": -0.937175989151001,
      "logps/chosen": -563.275634765625,
      "logps/rejected": -894.625732421875,
      "loss": 0.6921,
      "rewards/accuracies": 0.5138888955116272,
      "rewards/chosen": 0.00011738391185645014,
      "rewards/margins": 0.0022102664224803448,
      "rewards/rejected": -0.0020928820595145226,
      "step": 10
    },
    {
      "epoch": 0.33,
      "grad_norm": 6.471345406032975,
      "learning_rate": 4.3184341039326217e-07,
      "logits/chosen": -1.1980640888214111,
      "logits/rejected": -0.9605765342712402,
      "logps/chosen": -577.985595703125,
      "logps/rejected": -858.2009887695312,
      "loss": 0.675,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": -0.0019469285616651177,
      "rewards/margins": 0.03744823858141899,
      "rewards/rejected": -0.03939516842365265,
      "step": 20
    },
    {
      "epoch": 0.49,
      "grad_norm": 7.131737915304761,
      "learning_rate": 3.0765396768561004e-07,
      "logits/chosen": -1.18826162815094,
      "logits/rejected": -0.9675145149230957,
      "logps/chosen": -519.7886962890625,
      "logps/rejected": -910.9063720703125,
      "loss": 0.6241,
      "rewards/accuracies": 0.856249988079071,
      "rewards/chosen": -0.014793062582612038,
      "rewards/margins": 0.13844963908195496,
      "rewards/rejected": -0.15324273705482483,
      "step": 30
    },
    {
      "epoch": 0.66,
      "grad_norm": 7.706616708059976,
      "learning_rate": 1.6449496416858282e-07,
      "logits/chosen": -1.187648892402649,
      "logits/rejected": -0.9329156875610352,
      "logps/chosen": -602.1197509765625,
      "logps/rejected": -954.8004150390625,
      "loss": 0.543,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -0.06608068197965622,
      "rewards/margins": 0.4169215261936188,
      "rewards/rejected": -0.483002245426178,
      "step": 40
    },
    {
      "epoch": 0.82,
      "grad_norm": 7.642531846302412,
      "learning_rate": 4.9469201811239035e-08,
      "logits/chosen": -1.1323978900909424,
      "logits/rejected": -0.9658657312393188,
      "logps/chosen": -556.5906982421875,
      "logps/rejected": -980.1487426757812,
      "loss": 0.5093,
      "rewards/accuracies": 0.84375,
      "rewards/chosen": -0.13172821700572968,
      "rewards/margins": 0.5675451755523682,
      "rewards/rejected": -0.6992733478546143,
      "step": 50
    },
    {
      "epoch": 0.98,
      "grad_norm": 7.37383781369435,
      "learning_rate": 4.2296043218295606e-10,
      "logits/chosen": -1.1500413417816162,
      "logits/rejected": -0.9949632883071899,
      "logps/chosen": -577.3805541992188,
      "logps/rejected": -966.5217895507812,
      "loss": 0.4811,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -0.1981235146522522,
      "rewards/margins": 0.5871651768684387,
      "rewards/rejected": -0.7852886915206909,
      "step": 60
    },
    {
      "epoch": 1.0,
      "step": 61,
      "total_flos": 0.0,
      "train_loss": 0.5870200258786561,
      "train_runtime": 812.4392,
      "train_samples_per_second": 4.783,
      "train_steps_per_second": 0.075
    }
  ],
  "logging_steps": 10,
  "max_steps": 61,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}