{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9984,
  "eval_steps": 100,
  "global_step": 156,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 3.125e-07,
      "logits/chosen": -2.62762188911438,
      "logits/rejected": -2.586808681488037,
      "logps/chosen": -233.10275268554688,
      "logps/rejected": -220.01724243164062,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.06,
      "learning_rate": 3.125e-06,
      "logits/chosen": -2.640223741531372,
      "logits/rejected": -2.6421992778778076,
      "logps/chosen": -172.71710205078125,
      "logps/rejected": -174.91177368164062,
      "loss": 0.6932,
      "rewards/accuracies": 0.4270833432674408,
      "rewards/chosen": -0.0005670329555869102,
      "rewards/margins": -0.00034490120015107095,
      "rewards/rejected": -0.00022213184274733067,
      "step": 10
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.989935734988098e-06,
      "logits/chosen": -2.6506519317626953,
      "logits/rejected": -2.6606290340423584,
      "logps/chosen": -163.25784301757812,
      "logps/rejected": -166.31251525878906,
      "loss": 0.6923,
      "rewards/accuracies": 0.546875,
      "rewards/chosen": -0.005096795037388802,
      "rewards/margins": 0.002602009102702141,
      "rewards/rejected": -0.007698804140090942,
      "step": 20
    },
    {
      "epoch": 0.19,
      "learning_rate": 4.8776412907378845e-06,
      "logits/chosen": -2.6732003688812256,
      "logits/rejected": -2.6337249279022217,
      "logps/chosen": -179.1455535888672,
      "logps/rejected": -177.36630249023438,
      "loss": 0.6928,
      "rewards/accuracies": 0.5218750238418579,
      "rewards/chosen": -0.07040824741125107,
      "rewards/margins": 0.0008022676920518279,
      "rewards/rejected": -0.07121052592992783,
      "step": 30
    },
    {
      "epoch": 0.26,
      "learning_rate": 4.646121984004666e-06,
      "logits/chosen": -2.592510223388672,
      "logits/rejected": -2.569596767425537,
      "logps/chosen": -176.0773162841797,
      "logps/rejected": -177.98220825195312,
      "loss": 0.6939,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.06681038439273834,
      "rewards/margins": -0.002174162771552801,
      "rewards/rejected": -0.0646362155675888,
      "step": 40
    },
    {
      "epoch": 0.32,
      "learning_rate": 4.3069871595684795e-06,
      "logits/chosen": -2.6304595470428467,
      "logits/rejected": -2.6132376194000244,
      "logps/chosen": -170.5052947998047,
      "logps/rejected": -170.62728881835938,
      "loss": 0.6925,
      "rewards/accuracies": 0.5218750238418579,
      "rewards/chosen": -0.03540392965078354,
      "rewards/margins": 0.004562810063362122,
      "rewards/rejected": -0.03996673971414566,
      "step": 50
    },
    {
      "epoch": 0.38,
      "learning_rate": 3.8772424536302565e-06,
      "logits/chosen": -2.5987067222595215,
      "logits/rejected": -2.573859691619873,
      "logps/chosen": -180.9357452392578,
      "logps/rejected": -177.1747589111328,
      "loss": 0.6923,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.09346936643123627,
      "rewards/margins": 0.0016549427527934313,
      "rewards/rejected": -0.09512430429458618,
      "step": 60
    },
    {
      "epoch": 0.45,
      "learning_rate": 3.3784370602033572e-06,
      "logits/chosen": -2.5544121265411377,
      "logits/rejected": -2.570075750350952,
      "logps/chosen": -172.70663452148438,
      "logps/rejected": -173.13766479492188,
      "loss": 0.692,
      "rewards/accuracies": 0.49687498807907104,
      "rewards/chosen": -0.15676796436309814,
      "rewards/margins": -0.002442942699417472,
      "rewards/rejected": -0.15432503819465637,
      "step": 70
    },
    {
      "epoch": 0.51,
      "learning_rate": 2.835583164544139e-06,
      "logits/chosen": -2.604128122329712,
      "logits/rejected": -2.5767688751220703,
      "logps/chosen": -184.4633331298828,
      "logps/rejected": -183.540283203125,
      "loss": 0.6936,
      "rewards/accuracies": 0.5562499761581421,
      "rewards/chosen": -0.18969281017780304,
      "rewards/margins": -0.0006604200461879373,
      "rewards/rejected": -0.18903236091136932,
      "step": 80
    },
    {
      "epoch": 0.58,
      "learning_rate": 2.2759017277414165e-06,
      "logits/chosen": -2.661609172821045,
      "logits/rejected": -2.5941507816314697,
      "logps/chosen": -191.8031463623047,
      "logps/rejected": -189.98135375976562,
      "loss": 0.6914,
      "rewards/accuracies": 0.565625011920929,
      "rewards/chosen": -0.08525966852903366,
      "rewards/margins": 0.013879667036235332,
      "rewards/rejected": -0.09913934022188187,
      "step": 90
    },
    {
      "epoch": 0.64,
      "learning_rate": 1.7274575140626318e-06,
      "logits/chosen": -2.6250574588775635,
      "logits/rejected": -2.6189353466033936,
      "logps/chosen": -178.07730102539062,
      "logps/rejected": -179.85862731933594,
      "loss": 0.6902,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.038252077996730804,
      "rewards/margins": 0.017853444442152977,
      "rewards/rejected": -0.05610552430152893,
      "step": 100
    },
    {
      "epoch": 0.64,
      "eval_logits/chosen": -2.555354595184326,
      "eval_logits/rejected": -2.4580423831939697,
      "eval_logps/chosen": -310.5243835449219,
      "eval_logps/rejected": -302.46649169921875,
      "eval_loss": 0.6929731369018555,
      "eval_rewards/accuracies": 0.5139999985694885,
      "eval_rewards/chosen": -0.12465862929821014,
      "eval_rewards/margins": 0.00494543369859457,
      "eval_rewards/rejected": -0.12960407137870789,
      "eval_runtime": 383.981,
      "eval_samples_per_second": 5.209,
      "eval_steps_per_second": 0.651,
      "step": 100
    },
    {
      "epoch": 0.7,
      "learning_rate": 1.217751806485235e-06,
      "logits/chosen": -2.651780128479004,
      "logits/rejected": -2.581507921218872,
      "logps/chosen": -194.07461547851562,
      "logps/rejected": -183.43362426757812,
      "loss": 0.6898,
      "rewards/accuracies": 0.4781250059604645,
      "rewards/chosen": -0.11336056143045425,
      "rewards/margins": 0.0036790375597774982,
      "rewards/rejected": -0.11703959852457047,
      "step": 110
    },
    {
      "epoch": 0.77,
      "learning_rate": 7.723433775328385e-07,
      "logits/chosen": -2.6290206909179688,
      "logits/rejected": -2.6049842834472656,
      "logps/chosen": -186.02259826660156,
      "logps/rejected": -193.3793487548828,
      "loss": 0.6902,
      "rewards/accuracies": 0.534375011920929,
      "rewards/chosen": -0.11839202791452408,
      "rewards/margins": 0.004132995847612619,
      "rewards/rejected": -0.12252502143383026,
      "step": 120
    },
    {
      "epoch": 0.83,
      "learning_rate": 4.1356686569674344e-07,
      "logits/chosen": -2.5702383518218994,
      "logits/rejected": -2.5472493171691895,
      "logps/chosen": -169.70834350585938,
      "logps/rejected": -174.14761352539062,
      "loss": 0.6891,
      "rewards/accuracies": 0.5562499761581421,
      "rewards/chosen": -0.09281644970178604,
      "rewards/margins": 0.012840485200285912,
      "rewards/rejected": -0.1056569367647171,
      "step": 130
    },
    {
      "epoch": 0.9,
      "learning_rate": 1.59412823400657e-07,
      "logits/chosen": -2.5276107788085938,
      "logits/rejected": -2.5228161811828613,
      "logps/chosen": -174.78802490234375,
      "logps/rejected": -176.2909698486328,
      "loss": 0.6878,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.05636407807469368,
      "rewards/margins": 0.011297663673758507,
      "rewards/rejected": -0.06766173988580704,
      "step": 140
    },
    {
      "epoch": 0.96,
      "learning_rate": 2.262559558016325e-08,
      "logits/chosen": -2.568333864212036,
      "logits/rejected": -2.586660623550415,
      "logps/chosen": -166.882080078125,
      "logps/rejected": -171.17140197753906,
      "loss": 0.6892,
      "rewards/accuracies": 0.565625011920929,
      "rewards/chosen": -0.061885736882686615,
      "rewards/margins": 0.011388251557946205,
      "rewards/rejected": -0.07327400147914886,
      "step": 150
    },
    {
      "epoch": 1.0,
      "step": 156,
      "total_flos": 0.0,
      "train_loss": 0.6914212252849188,
      "train_runtime": 7065.3773,
      "train_samples_per_second": 2.831,
      "train_steps_per_second": 0.022
    }
  ],
  "logging_steps": 10,
  "max_steps": 156,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}