{
"best_metric": 1.3267595767974854,
"best_model_checkpoint": "data/Gemma-2-2B_task-1_120-samples_config-2/checkpoint-38",
"epoch": 14.0,
"eval_steps": 500,
"global_step": 77,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.18181818181818182,
"grad_norm": 2.5245699882507324,
"learning_rate": 4.000000000000001e-06,
"loss": 2.5052,
"step": 1
},
{
"epoch": 0.36363636363636365,
"grad_norm": 2.7616875171661377,
"learning_rate": 8.000000000000001e-06,
"loss": 2.4313,
"step": 2
},
{
"epoch": 0.7272727272727273,
"grad_norm": 2.373145341873169,
"learning_rate": 1.6000000000000003e-05,
"loss": 2.4033,
"step": 4
},
{
"epoch": 0.9090909090909091,
"eval_loss": 2.363821029663086,
"eval_runtime": 2.2328,
"eval_samples_per_second": 10.749,
"eval_steps_per_second": 10.749,
"step": 5
},
{
"epoch": 1.0909090909090908,
"grad_norm": 2.6781208515167236,
"learning_rate": 2.4e-05,
"loss": 2.3471,
"step": 6
},
{
"epoch": 1.4545454545454546,
"grad_norm": 2.3583812713623047,
"learning_rate": 3.2000000000000005e-05,
"loss": 2.3318,
"step": 8
},
{
"epoch": 1.8181818181818183,
"grad_norm": 2.3059582710266113,
"learning_rate": 4e-05,
"loss": 2.1644,
"step": 10
},
{
"epoch": 2.0,
"eval_loss": 1.9085012674331665,
"eval_runtime": 2.2159,
"eval_samples_per_second": 10.831,
"eval_steps_per_second": 10.831,
"step": 11
},
{
"epoch": 2.1818181818181817,
"grad_norm": 1.878598690032959,
"learning_rate": 4.8e-05,
"loss": 1.9701,
"step": 12
},
{
"epoch": 2.5454545454545454,
"grad_norm": 1.6238771677017212,
"learning_rate": 5.6000000000000006e-05,
"loss": 1.7284,
"step": 14
},
{
"epoch": 2.909090909090909,
"grad_norm": 1.4010099172592163,
"learning_rate": 6.400000000000001e-05,
"loss": 1.5867,
"step": 16
},
{
"epoch": 2.909090909090909,
"eval_loss": 1.6096315383911133,
"eval_runtime": 2.2189,
"eval_samples_per_second": 10.816,
"eval_steps_per_second": 10.816,
"step": 16
},
{
"epoch": 3.2727272727272725,
"grad_norm": 0.9943261742591858,
"learning_rate": 7.2e-05,
"loss": 1.5703,
"step": 18
},
{
"epoch": 3.6363636363636362,
"grad_norm": 1.0258581638336182,
"learning_rate": 8e-05,
"loss": 1.4806,
"step": 20
},
{
"epoch": 4.0,
"grad_norm": 0.9659519791603088,
"learning_rate": 8.800000000000001e-05,
"loss": 1.3937,
"step": 22
},
{
"epoch": 4.0,
"eval_loss": 1.4499346017837524,
"eval_runtime": 2.191,
"eval_samples_per_second": 10.954,
"eval_steps_per_second": 10.954,
"step": 22
},
{
"epoch": 4.363636363636363,
"grad_norm": 0.8161854147911072,
"learning_rate": 9.6e-05,
"loss": 1.3292,
"step": 24
},
{
"epoch": 4.7272727272727275,
"grad_norm": 0.7501208782196045,
"learning_rate": 9.999512620046522e-05,
"loss": 1.2999,
"step": 26
},
{
"epoch": 4.909090909090909,
"eval_loss": 1.3671011924743652,
"eval_runtime": 2.2834,
"eval_samples_per_second": 10.511,
"eval_steps_per_second": 10.511,
"step": 27
},
{
"epoch": 5.090909090909091,
"grad_norm": 0.7938846349716187,
"learning_rate": 9.995614150494293e-05,
"loss": 1.2026,
"step": 28
},
{
"epoch": 5.454545454545454,
"grad_norm": 0.758736252784729,
"learning_rate": 9.987820251299122e-05,
"loss": 1.1153,
"step": 30
},
{
"epoch": 5.818181818181818,
"grad_norm": 0.8454522490501404,
"learning_rate": 9.976136999909156e-05,
"loss": 1.1454,
"step": 32
},
{
"epoch": 6.0,
"eval_loss": 1.329568862915039,
"eval_runtime": 2.1499,
"eval_samples_per_second": 11.163,
"eval_steps_per_second": 11.163,
"step": 33
},
{
"epoch": 6.181818181818182,
"grad_norm": 0.806365966796875,
"learning_rate": 9.96057350657239e-05,
"loss": 1.0849,
"step": 34
},
{
"epoch": 6.545454545454545,
"grad_norm": 0.8713198900222778,
"learning_rate": 9.941141907232765e-05,
"loss": 0.9951,
"step": 36
},
{
"epoch": 6.909090909090909,
"grad_norm": 0.9592146277427673,
"learning_rate": 9.917857354066931e-05,
"loss": 0.9351,
"step": 38
},
{
"epoch": 6.909090909090909,
"eval_loss": 1.3267595767974854,
"eval_runtime": 2.298,
"eval_samples_per_second": 10.444,
"eval_steps_per_second": 10.444,
"step": 38
},
{
"epoch": 7.2727272727272725,
"grad_norm": 0.8650667667388916,
"learning_rate": 9.890738003669029e-05,
"loss": 0.8645,
"step": 40
},
{
"epoch": 7.636363636363637,
"grad_norm": 0.9249936938285828,
"learning_rate": 9.859805002892732e-05,
"loss": 0.8049,
"step": 42
},
{
"epoch": 8.0,
"grad_norm": 1.106773853302002,
"learning_rate": 9.825082472361557e-05,
"loss": 0.8108,
"step": 44
},
{
"epoch": 8.0,
"eval_loss": 1.355177879333496,
"eval_runtime": 2.2733,
"eval_samples_per_second": 10.557,
"eval_steps_per_second": 10.557,
"step": 44
},
{
"epoch": 8.363636363636363,
"grad_norm": 1.0761725902557373,
"learning_rate": 9.786597487660337e-05,
"loss": 0.7095,
"step": 46
},
{
"epoch": 8.727272727272727,
"grad_norm": 1.1069787740707397,
"learning_rate": 9.744380058222483e-05,
"loss": 0.6191,
"step": 48
},
{
"epoch": 8.909090909090908,
"eval_loss": 1.4658174514770508,
"eval_runtime": 2.2892,
"eval_samples_per_second": 10.484,
"eval_steps_per_second": 10.484,
"step": 49
},
{
"epoch": 9.090909090909092,
"grad_norm": 1.5594687461853027,
"learning_rate": 9.698463103929542e-05,
"loss": 0.6243,
"step": 50
},
{
"epoch": 9.454545454545455,
"grad_norm": 1.2610054016113281,
"learning_rate": 9.648882429441257e-05,
"loss": 0.5089,
"step": 52
},
{
"epoch": 9.818181818181818,
"grad_norm": 1.41365647315979,
"learning_rate": 9.595676696276172e-05,
"loss": 0.4271,
"step": 54
},
{
"epoch": 10.0,
"eval_loss": 1.7157014608383179,
"eval_runtime": 2.2343,
"eval_samples_per_second": 10.741,
"eval_steps_per_second": 10.741,
"step": 55
},
{
"epoch": 10.181818181818182,
"grad_norm": 1.3679523468017578,
"learning_rate": 9.538887392664544e-05,
"loss": 0.4122,
"step": 56
},
{
"epoch": 10.545454545454545,
"grad_norm": 1.6108626127243042,
"learning_rate": 9.478558801197065e-05,
"loss": 0.3251,
"step": 58
},
{
"epoch": 10.909090909090908,
"grad_norm": 1.9596925973892212,
"learning_rate": 9.414737964294636e-05,
"loss": 0.3163,
"step": 60
},
{
"epoch": 10.909090909090908,
"eval_loss": 2.0603339672088623,
"eval_runtime": 2.2678,
"eval_samples_per_second": 10.583,
"eval_steps_per_second": 10.583,
"step": 60
},
{
"epoch": 11.272727272727273,
"grad_norm": 1.2331981658935547,
"learning_rate": 9.347474647526095e-05,
"loss": 0.1779,
"step": 62
},
{
"epoch": 11.636363636363637,
"grad_norm": 1.564605474472046,
"learning_rate": 9.276821300802534e-05,
"loss": 0.1847,
"step": 64
},
{
"epoch": 12.0,
"grad_norm": 2.0967092514038086,
"learning_rate": 9.202833017478422e-05,
"loss": 0.1817,
"step": 66
},
{
"epoch": 12.0,
"eval_loss": 2.5517404079437256,
"eval_runtime": 2.2226,
"eval_samples_per_second": 10.798,
"eval_steps_per_second": 10.798,
"step": 66
},
{
"epoch": 12.363636363636363,
"grad_norm": 1.0268821716308594,
"learning_rate": 9.125567491391476e-05,
"loss": 0.0953,
"step": 68
},
{
"epoch": 12.727272727272727,
"grad_norm": 1.8212908506393433,
"learning_rate": 9.045084971874738e-05,
"loss": 0.104,
"step": 70
},
{
"epoch": 12.909090909090908,
"eval_loss": 2.906930923461914,
"eval_runtime": 2.268,
"eval_samples_per_second": 10.582,
"eval_steps_per_second": 10.582,
"step": 71
},
{
"epoch": 13.090909090909092,
"grad_norm": 1.7785996198654175,
"learning_rate": 8.961448216775954e-05,
"loss": 0.076,
"step": 72
},
{
"epoch": 13.454545454545455,
"grad_norm": 1.1120202541351318,
"learning_rate": 8.874722443520899e-05,
"loss": 0.0483,
"step": 74
},
{
"epoch": 13.818181818181818,
"grad_norm": 1.672891616821289,
"learning_rate": 8.784975278258783e-05,
"loss": 0.0403,
"step": 76
},
{
"epoch": 14.0,
"eval_loss": 3.1679420471191406,
"eval_runtime": 2.2313,
"eval_samples_per_second": 10.756,
"eval_steps_per_second": 10.756,
"step": 77
},
{
"epoch": 14.0,
"step": 77,
"total_flos": 4919776673529856.0,
"train_loss": 0.9845529866295976,
"train_runtime": 544.3113,
"train_samples_per_second": 8.084,
"train_steps_per_second": 0.459
}
],
"logging_steps": 2,
"max_steps": 250,
"num_input_tokens_seen": 0,
"num_train_epochs": 50,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 7,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4919776673529856.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}