{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9874476987447699,
  "eval_steps": 500,
  "global_step": 59,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 8.333333333333333e-08,
      "logits/chosen": -2.330733299255371,
      "logits/rejected": -2.316999912261963,
      "logps/chosen": -411.3883972167969,
      "logps/rejected": -413.6264343261719,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.17,
      "learning_rate": 4.930057285201027e-07,
      "logits/chosen": -2.288774251937866,
      "logits/rejected": -2.2626779079437256,
      "logps/chosen": -393.1912841796875,
      "logps/rejected": -471.7352294921875,
      "loss": 0.6677,
      "rewards/accuracies": 0.6180555820465088,
      "rewards/chosen": -0.07132196426391602,
      "rewards/margins": 0.05536883696913719,
      "rewards/rejected": -0.1266908049583435,
      "step": 10
    },
    {
      "epoch": 0.33,
      "learning_rate": 4.187457503795526e-07,
      "logits/chosen": -1.9292032718658447,
      "logits/rejected": -1.8881103992462158,
      "logps/chosen": -504.60784912109375,
      "logps/rejected": -635.0429077148438,
      "loss": 0.5686,
      "rewards/accuracies": 0.753125011920929,
      "rewards/chosen": -1.2000805139541626,
      "rewards/margins": 0.6865085363388062,
      "rewards/rejected": -1.8865890502929688,
      "step": 20
    },
    {
      "epoch": 0.5,
      "learning_rate": 2.8691164100062034e-07,
      "logits/chosen": -1.9375982284545898,
      "logits/rejected": -1.797930359840393,
      "logps/chosen": -505.9959411621094,
      "logps/rejected": -627.4420776367188,
      "loss": 0.5084,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -1.128210425376892,
      "rewards/margins": 0.88212651014328,
      "rewards/rejected": -2.0103371143341064,
      "step": 30
    },
    {
      "epoch": 0.67,
      "learning_rate": 1.4248369943086995e-07,
      "logits/chosen": -1.9036223888397217,
      "logits/rejected": -1.7595268487930298,
      "logps/chosen": -421.82745361328125,
      "logps/rejected": -559.4238891601562,
      "loss": 0.5093,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.7564447522163391,
      "rewards/margins": 0.6973710060119629,
      "rewards/rejected": -1.4538156986236572,
      "step": 40
    },
    {
      "epoch": 0.84,
      "learning_rate": 3.473909705816111e-08,
      "logits/chosen": -1.8024383783340454,
      "logits/rejected": -1.680045485496521,
      "logps/chosen": -437.82403564453125,
      "logps/rejected": -581.6019287109375,
      "loss": 0.4927,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -0.7874447107315063,
      "rewards/margins": 0.6873693466186523,
      "rewards/rejected": -1.4748141765594482,
      "step": 50
    },
    {
      "epoch": 0.99,
      "step": 59,
      "total_flos": 0.0,
      "train_loss": 0.5465719497809975,
      "train_runtime": 1938.7154,
      "train_samples_per_second": 7.883,
      "train_steps_per_second": 0.03
    }
  ],
  "logging_steps": 10,
  "max_steps": 59,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}