{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 330,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00909090909090909,
"grad_norm": 2.954815626144409,
"learning_rate": 0.0004984848484848485,
"loss": 3.3369,
"step": 1
},
{
"epoch": 0.01818181818181818,
"grad_norm": 3.4107534885406494,
"learning_rate": 0.000496969696969697,
"loss": 3.1991,
"step": 2
},
{
"epoch": 0.02727272727272727,
"grad_norm": 3.3636677265167236,
"learning_rate": 0.0004954545454545455,
"loss": 3.1677,
"step": 3
},
{
"epoch": 0.03636363636363636,
"grad_norm": 3.0105910301208496,
"learning_rate": 0.000493939393939394,
"loss": 2.856,
"step": 4
},
{
"epoch": 0.045454545454545456,
"grad_norm": 4.34075927734375,
"learning_rate": 0.0004924242424242425,
"loss": 3.1653,
"step": 5
},
{
"epoch": 0.05454545454545454,
"grad_norm": 2.3602547645568848,
"learning_rate": 0.0004909090909090909,
"loss": 3.0124,
"step": 6
},
{
"epoch": 0.06363636363636363,
"grad_norm": 2.8893001079559326,
"learning_rate": 0.0004893939393939393,
"loss": 2.2204,
"step": 7
},
{
"epoch": 0.07272727272727272,
"grad_norm": 2.5019915103912354,
"learning_rate": 0.00048787878787878784,
"loss": 3.2338,
"step": 8
},
{
"epoch": 0.08181818181818182,
"grad_norm": 2.3345084190368652,
"learning_rate": 0.0004863636363636364,
"loss": 2.8001,
"step": 9
},
{
"epoch": 0.09090909090909091,
"grad_norm": 3.4385030269622803,
"learning_rate": 0.0004848484848484849,
"loss": 2.3952,
"step": 10
},
{
"epoch": 0.1,
"grad_norm": 2.6727066040039062,
"learning_rate": 0.00048333333333333334,
"loss": 2.5821,
"step": 11
},
{
"epoch": 0.10909090909090909,
"grad_norm": 3.0186147689819336,
"learning_rate": 0.00048181818181818184,
"loss": 2.891,
"step": 12
},
{
"epoch": 0.11818181818181818,
"grad_norm": 3.460306406021118,
"learning_rate": 0.0004803030303030303,
"loss": 2.4936,
"step": 13
},
{
"epoch": 0.12727272727272726,
"grad_norm": 2.279752016067505,
"learning_rate": 0.0004787878787878788,
"loss": 2.7331,
"step": 14
},
{
"epoch": 0.13636363636363635,
"grad_norm": 2.352194309234619,
"learning_rate": 0.0004772727272727273,
"loss": 2.7177,
"step": 15
},
{
"epoch": 0.14545454545454545,
"grad_norm": 3.305408000946045,
"learning_rate": 0.0004757575757575758,
"loss": 2.3941,
"step": 16
},
{
"epoch": 0.15454545454545454,
"grad_norm": 2.333533525466919,
"learning_rate": 0.0004742424242424243,
"loss": 2.4421,
"step": 17
},
{
"epoch": 0.16363636363636364,
"grad_norm": 2.0298824310302734,
"learning_rate": 0.0004727272727272727,
"loss": 2.6125,
"step": 18
},
{
"epoch": 0.17272727272727273,
"grad_norm": 2.4428365230560303,
"learning_rate": 0.0004712121212121212,
"loss": 2.8149,
"step": 19
},
{
"epoch": 0.18181818181818182,
"grad_norm": 2.272669792175293,
"learning_rate": 0.0004696969696969697,
"loss": 2.5141,
"step": 20
},
{
"epoch": 0.19090909090909092,
"grad_norm": 2.508845329284668,
"learning_rate": 0.0004681818181818182,
"loss": 2.3062,
"step": 21
},
{
"epoch": 0.2,
"grad_norm": 2.711777925491333,
"learning_rate": 0.00046666666666666666,
"loss": 2.5206,
"step": 22
},
{
"epoch": 0.20909090909090908,
"grad_norm": 2.747939348220825,
"learning_rate": 0.00046515151515151516,
"loss": 2.5002,
"step": 23
},
{
"epoch": 0.21818181818181817,
"grad_norm": 2.085765838623047,
"learning_rate": 0.00046363636363636366,
"loss": 1.9021,
"step": 24
},
{
"epoch": 0.22727272727272727,
"grad_norm": 2.289708137512207,
"learning_rate": 0.0004621212121212121,
"loss": 2.3299,
"step": 25
},
{
"epoch": 0.23636363636363636,
"grad_norm": 1.7168821096420288,
"learning_rate": 0.00046060606060606066,
"loss": 2.1719,
"step": 26
},
{
"epoch": 0.24545454545454545,
"grad_norm": 2.610055446624756,
"learning_rate": 0.0004590909090909091,
"loss": 2.8064,
"step": 27
},
{
"epoch": 0.2545454545454545,
"grad_norm": 2.271897315979004,
"learning_rate": 0.0004575757575757576,
"loss": 2.9024,
"step": 28
},
{
"epoch": 0.2636363636363636,
"grad_norm": 2.4727253913879395,
"learning_rate": 0.00045606060606060605,
"loss": 2.6959,
"step": 29
},
{
"epoch": 0.2727272727272727,
"grad_norm": 2.28879451751709,
"learning_rate": 0.00045454545454545455,
"loss": 2.3795,
"step": 30
},
{
"epoch": 0.2818181818181818,
"grad_norm": 2.359353542327881,
"learning_rate": 0.000453030303030303,
"loss": 2.3459,
"step": 31
},
{
"epoch": 0.2909090909090909,
"grad_norm": 2.1187634468078613,
"learning_rate": 0.00045151515151515154,
"loss": 2.6984,
"step": 32
},
{
"epoch": 0.3,
"grad_norm": 2.420835018157959,
"learning_rate": 0.00045000000000000004,
"loss": 2.3945,
"step": 33
},
{
"epoch": 0.3090909090909091,
"grad_norm": 1.8960967063903809,
"learning_rate": 0.0004484848484848485,
"loss": 2.4853,
"step": 34
},
{
"epoch": 0.3181818181818182,
"grad_norm": 2.2166941165924072,
"learning_rate": 0.000446969696969697,
"loss": 2.5212,
"step": 35
},
{
"epoch": 0.32727272727272727,
"grad_norm": 2.4022252559661865,
"learning_rate": 0.00044545454545454543,
"loss": 2.0325,
"step": 36
},
{
"epoch": 0.33636363636363636,
"grad_norm": 2.11049747467041,
"learning_rate": 0.000443939393939394,
"loss": 2.5604,
"step": 37
},
{
"epoch": 0.34545454545454546,
"grad_norm": 1.9729962348937988,
"learning_rate": 0.00044242424242424243,
"loss": 2.7087,
"step": 38
},
{
"epoch": 0.35454545454545455,
"grad_norm": 2.079552412033081,
"learning_rate": 0.00044090909090909093,
"loss": 2.1064,
"step": 39
},
{
"epoch": 0.36363636363636365,
"grad_norm": 2.260704755783081,
"learning_rate": 0.0004393939393939394,
"loss": 2.0357,
"step": 40
},
{
"epoch": 0.37272727272727274,
"grad_norm": 3.1471402645111084,
"learning_rate": 0.00043787878787878787,
"loss": 2.6559,
"step": 41
},
{
"epoch": 0.38181818181818183,
"grad_norm": 2.2759957313537598,
"learning_rate": 0.00043636363636363637,
"loss": 2.8783,
"step": 42
},
{
"epoch": 0.39090909090909093,
"grad_norm": 2.507331132888794,
"learning_rate": 0.00043484848484848487,
"loss": 2.5133,
"step": 43
},
{
"epoch": 0.4,
"grad_norm": 3.8067524433135986,
"learning_rate": 0.00043333333333333337,
"loss": 1.8867,
"step": 44
},
{
"epoch": 0.4090909090909091,
"grad_norm": 1.9787497520446777,
"learning_rate": 0.0004318181818181818,
"loss": 1.9868,
"step": 45
},
{
"epoch": 0.41818181818181815,
"grad_norm": 2.349233388900757,
"learning_rate": 0.0004303030303030303,
"loss": 2.0075,
"step": 46
},
{
"epoch": 0.42727272727272725,
"grad_norm": 4.589637756347656,
"learning_rate": 0.00042878787878787876,
"loss": 2.6546,
"step": 47
},
{
"epoch": 0.43636363636363634,
"grad_norm": 2.19441819190979,
"learning_rate": 0.00042727272727272726,
"loss": 2.1989,
"step": 48
},
{
"epoch": 0.44545454545454544,
"grad_norm": 2.0757346153259277,
"learning_rate": 0.0004257575757575758,
"loss": 2.0471,
"step": 49
},
{
"epoch": 0.45454545454545453,
"grad_norm": 1.9916266202926636,
"learning_rate": 0.00042424242424242425,
"loss": 2.5848,
"step": 50
},
{
"epoch": 0.4636363636363636,
"grad_norm": 2.115003824234009,
"learning_rate": 0.00042272727272727275,
"loss": 2.711,
"step": 51
},
{
"epoch": 0.4727272727272727,
"grad_norm": 2.3350541591644287,
"learning_rate": 0.0004212121212121212,
"loss": 2.625,
"step": 52
},
{
"epoch": 0.4818181818181818,
"grad_norm": 2.430081605911255,
"learning_rate": 0.0004196969696969697,
"loss": 2.4819,
"step": 53
},
{
"epoch": 0.4909090909090909,
"grad_norm": 2.343895196914673,
"learning_rate": 0.00041818181818181814,
"loss": 2.5562,
"step": 54
},
{
"epoch": 0.5,
"grad_norm": 2.400465488433838,
"learning_rate": 0.0004166666666666667,
"loss": 2.0998,
"step": 55
},
{
"epoch": 0.509090909090909,
"grad_norm": 1.9675588607788086,
"learning_rate": 0.0004151515151515152,
"loss": 1.8595,
"step": 56
},
{
"epoch": 0.5181818181818182,
"grad_norm": 2.2525646686553955,
"learning_rate": 0.00041363636363636364,
"loss": 2.5453,
"step": 57
},
{
"epoch": 0.5272727272727272,
"grad_norm": 5.85708475112915,
"learning_rate": 0.00041212121212121214,
"loss": 2.2228,
"step": 58
},
{
"epoch": 0.5363636363636364,
"grad_norm": 1.9743397235870361,
"learning_rate": 0.0004106060606060606,
"loss": 2.3834,
"step": 59
},
{
"epoch": 0.5454545454545454,
"grad_norm": 2.1979870796203613,
"learning_rate": 0.00040909090909090913,
"loss": 2.0418,
"step": 60
},
{
"epoch": 0.5545454545454546,
"grad_norm": 2.1017322540283203,
"learning_rate": 0.0004075757575757576,
"loss": 2.4981,
"step": 61
},
{
"epoch": 0.5636363636363636,
"grad_norm": 2.2446625232696533,
"learning_rate": 0.0004060606060606061,
"loss": 2.1461,
"step": 62
},
{
"epoch": 0.5727272727272728,
"grad_norm": 3.56213116645813,
"learning_rate": 0.0004045454545454546,
"loss": 2.1976,
"step": 63
},
{
"epoch": 0.5818181818181818,
"grad_norm": 2.326568603515625,
"learning_rate": 0.000403030303030303,
"loss": 1.9419,
"step": 64
},
{
"epoch": 0.5909090909090909,
"grad_norm": 2.318174362182617,
"learning_rate": 0.0004015151515151515,
"loss": 2.1222,
"step": 65
},
{
"epoch": 0.6,
"grad_norm": 2.3187263011932373,
"learning_rate": 0.0004,
"loss": 2.1773,
"step": 66
},
{
"epoch": 0.6090909090909091,
"grad_norm": 2.2764594554901123,
"learning_rate": 0.0003984848484848485,
"loss": 3.0253,
"step": 67
},
{
"epoch": 0.6181818181818182,
"grad_norm": 2.485689878463745,
"learning_rate": 0.00039696969696969696,
"loss": 2.3372,
"step": 68
},
{
"epoch": 0.6272727272727273,
"grad_norm": 2.727031946182251,
"learning_rate": 0.00039545454545454546,
"loss": 2.3496,
"step": 69
},
{
"epoch": 0.6363636363636364,
"grad_norm": 1.9232404232025146,
"learning_rate": 0.0003939393939393939,
"loss": 1.7793,
"step": 70
},
{
"epoch": 0.6454545454545455,
"grad_norm": 1.9665184020996094,
"learning_rate": 0.0003924242424242424,
"loss": 2.0813,
"step": 71
},
{
"epoch": 0.6545454545454545,
"grad_norm": 1.599181056022644,
"learning_rate": 0.00039090909090909096,
"loss": 2.3862,
"step": 72
},
{
"epoch": 0.6636363636363637,
"grad_norm": 2.0285756587982178,
"learning_rate": 0.0003893939393939394,
"loss": 2.1869,
"step": 73
},
{
"epoch": 0.6727272727272727,
"grad_norm": 2.3004918098449707,
"learning_rate": 0.0003878787878787879,
"loss": 2.335,
"step": 74
},
{
"epoch": 0.6818181818181818,
"grad_norm": 2.390582799911499,
"learning_rate": 0.00038636363636363635,
"loss": 2.2488,
"step": 75
},
{
"epoch": 0.6909090909090909,
"grad_norm": 2.247906446456909,
"learning_rate": 0.00038484848484848485,
"loss": 2.1187,
"step": 76
},
{
"epoch": 0.7,
"grad_norm": 1.8525216579437256,
"learning_rate": 0.00038333333333333334,
"loss": 2.0979,
"step": 77
},
{
"epoch": 0.7090909090909091,
"grad_norm": 4.209506034851074,
"learning_rate": 0.00038181818181818184,
"loss": 2.1619,
"step": 78
},
{
"epoch": 0.7181818181818181,
"grad_norm": 3.623314142227173,
"learning_rate": 0.00038030303030303034,
"loss": 2.0937,
"step": 79
},
{
"epoch": 0.7272727272727273,
"grad_norm": 1.9494578838348389,
"learning_rate": 0.0003787878787878788,
"loss": 1.8908,
"step": 80
},
{
"epoch": 0.7363636363636363,
"grad_norm": 2.460625410079956,
"learning_rate": 0.0003772727272727273,
"loss": 2.2712,
"step": 81
},
{
"epoch": 0.7454545454545455,
"grad_norm": 1.8331961631774902,
"learning_rate": 0.00037575757575757573,
"loss": 2.1659,
"step": 82
},
{
"epoch": 0.7545454545454545,
"grad_norm": 2.1352946758270264,
"learning_rate": 0.0003742424242424243,
"loss": 2.4457,
"step": 83
},
{
"epoch": 0.7636363636363637,
"grad_norm": 2.0929653644561768,
"learning_rate": 0.00037272727272727273,
"loss": 2.0674,
"step": 84
},
{
"epoch": 0.7727272727272727,
"grad_norm": 1.8029730319976807,
"learning_rate": 0.00037121212121212123,
"loss": 1.8433,
"step": 85
},
{
"epoch": 0.7818181818181819,
"grad_norm": 2.4226315021514893,
"learning_rate": 0.00036969696969696967,
"loss": 2.5526,
"step": 86
},
{
"epoch": 0.7909090909090909,
"grad_norm": 1.864897608757019,
"learning_rate": 0.00036818181818181817,
"loss": 2.153,
"step": 87
},
{
"epoch": 0.8,
"grad_norm": 3.126366376876831,
"learning_rate": 0.00036666666666666667,
"loss": 3.0295,
"step": 88
},
{
"epoch": 0.8090909090909091,
"grad_norm": 2.0774729251861572,
"learning_rate": 0.00036515151515151517,
"loss": 2.4506,
"step": 89
},
{
"epoch": 0.8181818181818182,
"grad_norm": 2.350816011428833,
"learning_rate": 0.00036363636363636367,
"loss": 2.2185,
"step": 90
},
{
"epoch": 0.8272727272727273,
"grad_norm": 4.5536112785339355,
"learning_rate": 0.0003621212121212121,
"loss": 2.294,
"step": 91
},
{
"epoch": 0.8363636363636363,
"grad_norm": 2.0846188068389893,
"learning_rate": 0.0003606060606060606,
"loss": 2.0574,
"step": 92
},
{
"epoch": 0.8454545454545455,
"grad_norm": 2.7833001613616943,
"learning_rate": 0.00035909090909090906,
"loss": 2.2196,
"step": 93
},
{
"epoch": 0.8545454545454545,
"grad_norm": 2.067406415939331,
"learning_rate": 0.0003575757575757576,
"loss": 2.0928,
"step": 94
},
{
"epoch": 0.8636363636363636,
"grad_norm": 2.421410083770752,
"learning_rate": 0.0003560606060606061,
"loss": 1.777,
"step": 95
},
{
"epoch": 0.8727272727272727,
"grad_norm": null,
"learning_rate": 0.0003560606060606061,
"loss": 1.9642,
"step": 96
},
{
"epoch": 0.8818181818181818,
"grad_norm": 2.236882448196411,
"learning_rate": 0.00035454545454545455,
"loss": 1.8629,
"step": 97
},
{
"epoch": 0.8909090909090909,
"grad_norm": 1.8265429735183716,
"learning_rate": 0.00035303030303030305,
"loss": 2.2389,
"step": 98
},
{
"epoch": 0.9,
"grad_norm": 6.896456241607666,
"learning_rate": 0.0003515151515151515,
"loss": 2.0973,
"step": 99
},
{
"epoch": 0.9090909090909091,
"grad_norm": 1.7049111127853394,
"learning_rate": 0.00035,
"loss": 2.0509,
"step": 100
},
{
"epoch": 0.9181818181818182,
"grad_norm": 2.2542057037353516,
"learning_rate": 0.0003484848484848485,
"loss": 2.1774,
"step": 101
},
{
"epoch": 0.9272727272727272,
"grad_norm": 1.882677435874939,
"learning_rate": 0.000346969696969697,
"loss": 2.3698,
"step": 102
},
{
"epoch": 0.9363636363636364,
"grad_norm": 7.113732814788818,
"learning_rate": 0.00034545454545454544,
"loss": 2.0712,
"step": 103
},
{
"epoch": 0.9454545454545454,
"grad_norm": 2.472871780395508,
"learning_rate": 0.00034393939393939394,
"loss": 2.2019,
"step": 104
},
{
"epoch": 0.9545454545454546,
"grad_norm": 2.08988356590271,
"learning_rate": 0.00034242424242424244,
"loss": 1.9712,
"step": 105
},
{
"epoch": 0.9636363636363636,
"grad_norm": 2.2430801391601562,
"learning_rate": 0.0003409090909090909,
"loss": 2.0638,
"step": 106
},
{
"epoch": 0.9727272727272728,
"grad_norm": 1.8587884902954102,
"learning_rate": 0.00033939393939393943,
"loss": 1.8007,
"step": 107
},
{
"epoch": 0.9818181818181818,
"grad_norm": 1.7524383068084717,
"learning_rate": 0.0003378787878787879,
"loss": 1.8158,
"step": 108
},
{
"epoch": 0.990909090909091,
"grad_norm": 2.1730449199676514,
"learning_rate": 0.0003363636363636364,
"loss": 2.5347,
"step": 109
},
{
"epoch": 1.0,
"grad_norm": 2.3493597507476807,
"learning_rate": 0.0003348484848484848,
"loss": 2.3321,
"step": 110
},
{
"epoch": 1.0,
"eval_f1": 0.8916,
"eval_gen_len": 49.4818,
"eval_loss": 2.07224702835083,
"eval_precision": 0.8905,
"eval_recall": 0.893,
"eval_rouge1": 0.462,
"eval_rouge2": 0.2119,
"eval_rougeL": 0.3832,
"eval_rougeLsum": 0.429,
"eval_runtime": 20.6752,
"eval_samples_per_second": 5.32,
"eval_steps_per_second": 0.677,
"step": 110
},
{
"epoch": 1.009090909090909,
"grad_norm": 2.315150499343872,
"learning_rate": 0.0003333333333333333,
"loss": 2.1843,
"step": 111
},
{
"epoch": 1.018181818181818,
"grad_norm": 1.9279595613479614,
"learning_rate": 0.0003318181818181819,
"loss": 1.672,
"step": 112
},
{
"epoch": 1.0272727272727273,
"grad_norm": 2.603682518005371,
"learning_rate": 0.0003303030303030303,
"loss": 2.0274,
"step": 113
},
{
"epoch": 1.0363636363636364,
"grad_norm": 2.4382553100585938,
"learning_rate": 0.0003287878787878788,
"loss": 2.567,
"step": 114
},
{
"epoch": 1.0454545454545454,
"grad_norm": 1.9396408796310425,
"learning_rate": 0.00032727272727272726,
"loss": 1.7557,
"step": 115
},
{
"epoch": 1.0545454545454545,
"grad_norm": 2.012943744659424,
"learning_rate": 0.00032575757575757576,
"loss": 2.0774,
"step": 116
},
{
"epoch": 1.0636363636363637,
"grad_norm": 3.9621338844299316,
"learning_rate": 0.0003242424242424242,
"loss": 1.575,
"step": 117
},
{
"epoch": 1.0727272727272728,
"grad_norm": 1.8810157775878906,
"learning_rate": 0.00032272727272727276,
"loss": 1.8527,
"step": 118
},
{
"epoch": 1.0818181818181818,
"grad_norm": 2.341228723526001,
"learning_rate": 0.00032121212121212126,
"loss": 2.174,
"step": 119
},
{
"epoch": 1.0909090909090908,
"grad_norm": 2.27140736579895,
"learning_rate": 0.0003196969696969697,
"loss": 2.416,
"step": 120
},
{
"epoch": 1.1,
"grad_norm": 1.680935263633728,
"learning_rate": 0.0003181818181818182,
"loss": 1.8411,
"step": 121
},
{
"epoch": 1.1090909090909091,
"grad_norm": 2.060490369796753,
"learning_rate": 0.00031666666666666665,
"loss": 1.8284,
"step": 122
},
{
"epoch": 1.1181818181818182,
"grad_norm": 2.117558479309082,
"learning_rate": 0.00031515151515151515,
"loss": 2.2432,
"step": 123
},
{
"epoch": 1.1272727272727272,
"grad_norm": 2.2386701107025146,
"learning_rate": 0.00031363636363636365,
"loss": 2.0944,
"step": 124
},
{
"epoch": 1.1363636363636362,
"grad_norm": 1.9912633895874023,
"learning_rate": 0.00031212121212121214,
"loss": 1.9484,
"step": 125
},
{
"epoch": 1.1454545454545455,
"grad_norm": 2.0469374656677246,
"learning_rate": 0.0003106060606060606,
"loss": 1.8707,
"step": 126
},
{
"epoch": 1.1545454545454545,
"grad_norm": 10.590553283691406,
"learning_rate": 0.0003090909090909091,
"loss": 2.0068,
"step": 127
},
{
"epoch": 1.1636363636363636,
"grad_norm": 1.8103890419006348,
"learning_rate": 0.0003075757575757576,
"loss": 1.9611,
"step": 128
},
{
"epoch": 1.1727272727272728,
"grad_norm": 2.6478331089019775,
"learning_rate": 0.00030606060606060603,
"loss": 2.1548,
"step": 129
},
{
"epoch": 1.1818181818181819,
"grad_norm": 2.130384922027588,
"learning_rate": 0.0003045454545454546,
"loss": 2.3239,
"step": 130
},
{
"epoch": 1.190909090909091,
"grad_norm": 2.1625640392303467,
"learning_rate": 0.00030303030303030303,
"loss": 1.9455,
"step": 131
},
{
"epoch": 1.2,
"grad_norm": 1.9097338914871216,
"learning_rate": 0.00030151515151515153,
"loss": 2.0601,
"step": 132
},
{
"epoch": 1.209090909090909,
"grad_norm": 4.467080593109131,
"learning_rate": 0.0003,
"loss": 1.7412,
"step": 133
},
{
"epoch": 1.2181818181818183,
"grad_norm": 2.4197402000427246,
"learning_rate": 0.00029848484848484847,
"loss": 1.9152,
"step": 134
},
{
"epoch": 1.2272727272727273,
"grad_norm": 2.641479253768921,
"learning_rate": 0.000296969696969697,
"loss": 2.2888,
"step": 135
},
{
"epoch": 1.2363636363636363,
"grad_norm": 2.374185562133789,
"learning_rate": 0.00029545454545454547,
"loss": 2.1405,
"step": 136
},
{
"epoch": 1.2454545454545454,
"grad_norm": 1.7663893699645996,
"learning_rate": 0.00029393939393939397,
"loss": 1.4913,
"step": 137
},
{
"epoch": 1.2545454545454544,
"grad_norm": 1.7372636795043945,
"learning_rate": 0.0002924242424242424,
"loss": 1.7553,
"step": 138
},
{
"epoch": 1.2636363636363637,
"grad_norm": 1.9381648302078247,
"learning_rate": 0.0002909090909090909,
"loss": 1.9566,
"step": 139
},
{
"epoch": 1.2727272727272727,
"grad_norm": 3.2428243160247803,
"learning_rate": 0.00028939393939393936,
"loss": 2.2607,
"step": 140
},
{
"epoch": 1.2818181818181817,
"grad_norm": 2.1966135501861572,
"learning_rate": 0.0002878787878787879,
"loss": 2.1417,
"step": 141
},
{
"epoch": 1.290909090909091,
"grad_norm": 1.775622844696045,
"learning_rate": 0.00028636363636363636,
"loss": 1.7522,
"step": 142
},
{
"epoch": 1.3,
"grad_norm": 1.8064031600952148,
"learning_rate": 0.00028484848484848485,
"loss": 2.2365,
"step": 143
},
{
"epoch": 1.309090909090909,
"grad_norm": 1.6624155044555664,
"learning_rate": 0.00028333333333333335,
"loss": 1.6523,
"step": 144
},
{
"epoch": 1.3181818181818181,
"grad_norm": 1.8865635395050049,
"learning_rate": 0.0002818181818181818,
"loss": 2.2222,
"step": 145
},
{
"epoch": 1.3272727272727272,
"grad_norm": 2.075778007507324,
"learning_rate": 0.0002803030303030303,
"loss": 1.9718,
"step": 146
},
{
"epoch": 1.3363636363636364,
"grad_norm": 2.7333056926727295,
"learning_rate": 0.0002787878787878788,
"loss": 1.8671,
"step": 147
},
{
"epoch": 1.3454545454545455,
"grad_norm": 2.3341150283813477,
"learning_rate": 0.0002772727272727273,
"loss": 2.296,
"step": 148
},
{
"epoch": 1.3545454545454545,
"grad_norm": 2.298750877380371,
"learning_rate": 0.00027575757575757574,
"loss": 1.8589,
"step": 149
},
{
"epoch": 1.3636363636363638,
"grad_norm": 2.2849225997924805,
"learning_rate": 0.00027424242424242424,
"loss": 1.7672,
"step": 150
},
{
"epoch": 1.3727272727272728,
"grad_norm": 2.4873552322387695,
"learning_rate": 0.00027272727272727274,
"loss": 2.0295,
"step": 151
},
{
"epoch": 1.3818181818181818,
"grad_norm": 1.8006199598312378,
"learning_rate": 0.00027121212121212124,
"loss": 2.0144,
"step": 152
},
{
"epoch": 1.3909090909090909,
"grad_norm": 1.8441232442855835,
"learning_rate": 0.00026969696969696974,
"loss": 1.8709,
"step": 153
},
{
"epoch": 1.4,
"grad_norm": 1.7351133823394775,
"learning_rate": 0.0002681818181818182,
"loss": 1.978,
"step": 154
},
{
"epoch": 1.4090909090909092,
"grad_norm": 2.0104682445526123,
"learning_rate": 0.0002666666666666667,
"loss": 2.0207,
"step": 155
},
{
"epoch": 1.4181818181818182,
"grad_norm": 1.9547767639160156,
"learning_rate": 0.0002651515151515151,
"loss": 1.9761,
"step": 156
},
{
"epoch": 1.4272727272727272,
"grad_norm": 1.9457013607025146,
"learning_rate": 0.0002636363636363636,
"loss": 1.5643,
"step": 157
},
{
"epoch": 1.4363636363636363,
"grad_norm": 1.9068812131881714,
"learning_rate": 0.0002621212121212122,
"loss": 2.0376,
"step": 158
},
{
"epoch": 1.4454545454545453,
"grad_norm": 2.1232669353485107,
"learning_rate": 0.0002606060606060606,
"loss": 2.0019,
"step": 159
},
{
"epoch": 1.4545454545454546,
"grad_norm": 2.8518733978271484,
"learning_rate": 0.0002590909090909091,
"loss": 2.1732,
"step": 160
},
{
"epoch": 1.4636363636363636,
"grad_norm": 2.644965171813965,
"learning_rate": 0.00025757575757575756,
"loss": 1.9197,
"step": 161
},
{
"epoch": 1.4727272727272727,
"grad_norm": 1.7258949279785156,
"learning_rate": 0.00025606060606060606,
"loss": 1.5056,
"step": 162
},
{
"epoch": 1.481818181818182,
"grad_norm": 2.1850738525390625,
"learning_rate": 0.0002545454545454545,
"loss": 2.3507,
"step": 163
},
{
"epoch": 1.490909090909091,
"grad_norm": 2.1031181812286377,
"learning_rate": 0.00025303030303030306,
"loss": 1.7586,
"step": 164
},
{
"epoch": 1.5,
"grad_norm": 1.7486854791641235,
"learning_rate": 0.0002515151515151515,
"loss": 1.5661,
"step": 165
},
{
"epoch": 1.509090909090909,
"grad_norm": 2.525303602218628,
"learning_rate": 0.00025,
"loss": 2.2275,
"step": 166
},
{
"epoch": 1.518181818181818,
"grad_norm": 2.07893967628479,
"learning_rate": 0.0002484848484848485,
"loss": 1.9536,
"step": 167
},
{
"epoch": 1.5272727272727273,
"grad_norm": 2.0040087699890137,
"learning_rate": 0.000246969696969697,
"loss": 1.6692,
"step": 168
},
{
"epoch": 1.5363636363636364,
"grad_norm": 2.1064059734344482,
"learning_rate": 0.00024545454545454545,
"loss": 2.0401,
"step": 169
},
{
"epoch": 1.5454545454545454,
"grad_norm": 2.258216619491577,
"learning_rate": 0.00024393939393939392,
"loss": 1.6331,
"step": 170
},
{
"epoch": 1.5545454545454547,
"grad_norm": 2.363294839859009,
"learning_rate": 0.00024242424242424245,
"loss": 2.0516,
"step": 171
},
{
"epoch": 1.5636363636363635,
"grad_norm": 2.9346563816070557,
"learning_rate": 0.00024090909090909092,
"loss": 1.8948,
"step": 172
},
{
"epoch": 1.5727272727272728,
"grad_norm": 1.9507861137390137,
"learning_rate": 0.0002393939393939394,
"loss": 1.6701,
"step": 173
},
{
"epoch": 1.5818181818181818,
"grad_norm": 1.9509398937225342,
"learning_rate": 0.0002378787878787879,
"loss": 2.0272,
"step": 174
},
{
"epoch": 1.5909090909090908,
"grad_norm": 2.035423517227173,
"learning_rate": 0.00023636363636363636,
"loss": 1.9668,
"step": 175
},
{
"epoch": 1.6,
"grad_norm": 2.2366080284118652,
"learning_rate": 0.00023484848484848486,
"loss": 2.2121,
"step": 176
},
{
"epoch": 1.6090909090909091,
"grad_norm": 2.229384660720825,
"learning_rate": 0.00023333333333333333,
"loss": 1.8095,
"step": 177
},
{
"epoch": 1.6181818181818182,
"grad_norm": 2.0196542739868164,
"learning_rate": 0.00023181818181818183,
"loss": 2.0187,
"step": 178
},
{
"epoch": 1.6272727272727274,
"grad_norm": 1.5661934614181519,
"learning_rate": 0.00023030303030303033,
"loss": 1.9151,
"step": 179
},
{
"epoch": 1.6363636363636362,
"grad_norm": 1.9817898273468018,
"learning_rate": 0.0002287878787878788,
"loss": 2.0098,
"step": 180
},
{
"epoch": 1.6454545454545455,
"grad_norm": 2.218705892562866,
"learning_rate": 0.00022727272727272727,
"loss": 1.7758,
"step": 181
},
{
"epoch": 1.6545454545454545,
"grad_norm": 1.8907921314239502,
"learning_rate": 0.00022575757575757577,
"loss": 1.5012,
"step": 182
},
{
"epoch": 1.6636363636363636,
"grad_norm": 1.8486360311508179,
"learning_rate": 0.00022424242424242424,
"loss": 1.7079,
"step": 183
},
{
"epoch": 1.6727272727272728,
"grad_norm": 2.231969118118286,
"learning_rate": 0.00022272727272727272,
"loss": 2.3828,
"step": 184
},
{
"epoch": 1.6818181818181817,
"grad_norm": 2.2819623947143555,
"learning_rate": 0.00022121212121212121,
"loss": 2.1301,
"step": 185
},
{
"epoch": 1.690909090909091,
"grad_norm": 2.0879971981048584,
"learning_rate": 0.0002196969696969697,
"loss": 1.9639,
"step": 186
},
{
"epoch": 1.7,
"grad_norm": 3.0272915363311768,
"learning_rate": 0.00021818181818181818,
"loss": 1.7447,
"step": 187
},
{
"epoch": 1.709090909090909,
"grad_norm": 5.990971565246582,
"learning_rate": 0.00021666666666666668,
"loss": 2.0313,
"step": 188
},
{
"epoch": 1.7181818181818183,
"grad_norm": 1.5562996864318848,
"learning_rate": 0.00021515151515151516,
"loss": 1.5394,
"step": 189
},
{
"epoch": 1.7272727272727273,
"grad_norm": 2.520314931869507,
"learning_rate": 0.00021363636363636363,
"loss": 1.9998,
"step": 190
},
{
"epoch": 1.7363636363636363,
"grad_norm": 1.9438166618347168,
"learning_rate": 0.00021212121212121213,
"loss": 1.8079,
"step": 191
},
{
"epoch": 1.7454545454545456,
"grad_norm": 2.46296763420105,
"learning_rate": 0.0002106060606060606,
"loss": 1.9225,
"step": 192
},
{
"epoch": 1.7545454545454544,
"grad_norm": 1.603652834892273,
"learning_rate": 0.00020909090909090907,
"loss": 1.8334,
"step": 193
},
{
"epoch": 1.7636363636363637,
"grad_norm": 1.796787142753601,
"learning_rate": 0.0002075757575757576,
"loss": 2.2219,
"step": 194
},
{
"epoch": 1.7727272727272727,
"grad_norm": 2.157109498977661,
"learning_rate": 0.00020606060606060607,
"loss": 1.9068,
"step": 195
},
{
"epoch": 1.7818181818181817,
"grad_norm": 2.1227638721466064,
"learning_rate": 0.00020454545454545457,
"loss": 2.0476,
"step": 196
},
{
"epoch": 1.790909090909091,
"grad_norm": 2.4162797927856445,
"learning_rate": 0.00020303030303030304,
"loss": 1.4491,
"step": 197
},
{
"epoch": 1.8,
"grad_norm": 2.017754316329956,
"learning_rate": 0.0002015151515151515,
"loss": 1.9828,
"step": 198
},
{
"epoch": 1.809090909090909,
"grad_norm": 2.5105152130126953,
"learning_rate": 0.0002,
"loss": 2.237,
"step": 199
},
{
"epoch": 1.8181818181818183,
"grad_norm": 1.9045398235321045,
"learning_rate": 0.00019848484848484848,
"loss": 2.0976,
"step": 200
},
{
"epoch": 1.8272727272727272,
"grad_norm": 1.7948741912841797,
"learning_rate": 0.00019696969696969695,
"loss": 1.932,
"step": 201
},
{
"epoch": 1.8363636363636364,
"grad_norm": 2.290318012237549,
"learning_rate": 0.00019545454545454548,
"loss": 1.8835,
"step": 202
},
{
"epoch": 1.8454545454545455,
"grad_norm": 1.9325324296951294,
"learning_rate": 0.00019393939393939395,
"loss": 1.7144,
"step": 203
},
{
"epoch": 1.8545454545454545,
"grad_norm": 3.7647125720977783,
"learning_rate": 0.00019242424242424242,
"loss": 1.7836,
"step": 204
},
{
"epoch": 1.8636363636363638,
"grad_norm": 2.117750644683838,
"learning_rate": 0.00019090909090909092,
"loss": 1.7905,
"step": 205
},
{
"epoch": 1.8727272727272726,
"grad_norm": 8.982443809509277,
"learning_rate": 0.0001893939393939394,
"loss": 1.8886,
"step": 206
},
{
"epoch": 1.8818181818181818,
"grad_norm": 2.2815496921539307,
"learning_rate": 0.00018787878787878787,
"loss": 1.7791,
"step": 207
},
{
"epoch": 1.8909090909090909,
"grad_norm": 2.4422738552093506,
"learning_rate": 0.00018636363636363636,
"loss": 1.8431,
"step": 208
},
{
"epoch": 1.9,
"grad_norm": 2.5410265922546387,
"learning_rate": 0.00018484848484848484,
"loss": 1.9531,
"step": 209
},
{
"epoch": 1.9090909090909092,
"grad_norm": 3.0006237030029297,
"learning_rate": 0.00018333333333333334,
"loss": 1.8623,
"step": 210
},
{
"epoch": 1.9181818181818182,
"grad_norm": 1.8805768489837646,
"learning_rate": 0.00018181818181818183,
"loss": 1.7165,
"step": 211
},
{
"epoch": 1.9272727272727272,
"grad_norm": 2.3523027896881104,
"learning_rate": 0.0001803030303030303,
"loss": 2.0356,
"step": 212
},
{
"epoch": 1.9363636363636365,
"grad_norm": 1.89750075340271,
"learning_rate": 0.0001787878787878788,
"loss": 1.7039,
"step": 213
},
{
"epoch": 1.9454545454545453,
"grad_norm": 2.013911724090576,
"learning_rate": 0.00017727272727272728,
"loss": 1.9103,
"step": 214
},
{
"epoch": 1.9545454545454546,
"grad_norm": 2.6688437461853027,
"learning_rate": 0.00017575757575757575,
"loss": 2.0033,
"step": 215
},
{
"epoch": 1.9636363636363636,
"grad_norm": 2.563509225845337,
"learning_rate": 0.00017424242424242425,
"loss": 2.1378,
"step": 216
},
{
"epoch": 1.9727272727272727,
"grad_norm": 2.0422611236572266,
"learning_rate": 0.00017272727272727272,
"loss": 1.864,
"step": 217
},
{
"epoch": 1.981818181818182,
"grad_norm": 1.87980318069458,
"learning_rate": 0.00017121212121212122,
"loss": 1.6409,
"step": 218
},
{
"epoch": 1.990909090909091,
"grad_norm": 1.75395667552948,
"learning_rate": 0.00016969696969696972,
"loss": 2.324,
"step": 219
},
{
"epoch": 2.0,
"grad_norm": 2.494816780090332,
"learning_rate": 0.0001681818181818182,
"loss": 2.0488,
"step": 220
},
{
"epoch": 2.0,
"eval_f1": 0.8912,
"eval_gen_len": 49.5727,
"eval_loss": 2.0051822662353516,
"eval_precision": 0.8889,
"eval_recall": 0.8938,
"eval_rouge1": 0.453,
"eval_rouge2": 0.2025,
"eval_rougeL": 0.3721,
"eval_rougeLsum": 0.4167,
"eval_runtime": 12.2116,
"eval_samples_per_second": 9.008,
"eval_steps_per_second": 1.146,
"step": 220
},
{
"epoch": 2.0090909090909093,
"grad_norm": 1.901001214981079,
"learning_rate": 0.00016666666666666666,
"loss": 1.4219,
"step": 221
},
{
"epoch": 2.018181818181818,
"grad_norm": 1.9974595308303833,
"learning_rate": 0.00016515151515151516,
"loss": 1.7321,
"step": 222
},
{
"epoch": 2.0272727272727273,
"grad_norm": 1.9482543468475342,
"learning_rate": 0.00016363636363636363,
"loss": 1.8221,
"step": 223
},
{
"epoch": 2.036363636363636,
"grad_norm": 2.6568679809570312,
"learning_rate": 0.0001621212121212121,
"loss": 1.8601,
"step": 224
},
{
"epoch": 2.0454545454545454,
"grad_norm": 2.2491767406463623,
"learning_rate": 0.00016060606060606063,
"loss": 1.9924,
"step": 225
},
{
"epoch": 2.0545454545454547,
"grad_norm": 2.297182559967041,
"learning_rate": 0.0001590909090909091,
"loss": 2.4264,
"step": 226
},
{
"epoch": 2.0636363636363635,
"grad_norm": 1.9736173152923584,
"learning_rate": 0.00015757575757575757,
"loss": 1.6936,
"step": 227
},
{
"epoch": 2.0727272727272728,
"grad_norm": 2.579871416091919,
"learning_rate": 0.00015606060606060607,
"loss": 1.7578,
"step": 228
},
{
"epoch": 2.081818181818182,
"grad_norm": 1.4503185749053955,
"learning_rate": 0.00015454545454545454,
"loss": 1.4641,
"step": 229
},
{
"epoch": 2.090909090909091,
"grad_norm": 1.8198928833007812,
"learning_rate": 0.00015303030303030302,
"loss": 1.817,
"step": 230
},
{
"epoch": 2.1,
"grad_norm": 1.9079821109771729,
"learning_rate": 0.00015151515151515152,
"loss": 1.4318,
"step": 231
},
{
"epoch": 2.109090909090909,
"grad_norm": 1.8897184133529663,
"learning_rate": 0.00015,
"loss": 1.4288,
"step": 232
},
{
"epoch": 2.118181818181818,
"grad_norm": 2.2773196697235107,
"learning_rate": 0.0001484848484848485,
"loss": 1.6063,
"step": 233
},
{
"epoch": 2.1272727272727274,
"grad_norm": 2.318948745727539,
"learning_rate": 0.00014696969696969698,
"loss": 1.7259,
"step": 234
},
{
"epoch": 2.1363636363636362,
"grad_norm": 1.6040109395980835,
"learning_rate": 0.00014545454545454546,
"loss": 1.6096,
"step": 235
},
{
"epoch": 2.1454545454545455,
"grad_norm": 2.085674524307251,
"learning_rate": 0.00014393939393939396,
"loss": 2.0896,
"step": 236
},
{
"epoch": 2.1545454545454543,
"grad_norm": 1.7010502815246582,
"learning_rate": 0.00014242424242424243,
"loss": 1.5766,
"step": 237
},
{
"epoch": 2.1636363636363636,
"grad_norm": 2.531755208969116,
"learning_rate": 0.0001409090909090909,
"loss": 1.7934,
"step": 238
},
{
"epoch": 2.172727272727273,
"grad_norm": 1.9884897470474243,
"learning_rate": 0.0001393939393939394,
"loss": 1.9227,
"step": 239
},
{
"epoch": 2.1818181818181817,
"grad_norm": 2.264225482940674,
"learning_rate": 0.00013787878787878787,
"loss": 2.2716,
"step": 240
},
{
"epoch": 2.190909090909091,
"grad_norm": 2.0064761638641357,
"learning_rate": 0.00013636363636363637,
"loss": 1.607,
"step": 241
},
{
"epoch": 2.2,
"grad_norm": 1.8198459148406982,
"learning_rate": 0.00013484848484848487,
"loss": 1.2289,
"step": 242
},
{
"epoch": 2.209090909090909,
"grad_norm": 2.089911460876465,
"learning_rate": 0.00013333333333333334,
"loss": 2.3063,
"step": 243
},
{
"epoch": 2.2181818181818183,
"grad_norm": 2.63271164894104,
"learning_rate": 0.0001318181818181818,
"loss": 1.9975,
"step": 244
},
{
"epoch": 2.227272727272727,
"grad_norm": 2.545379400253296,
"learning_rate": 0.0001303030303030303,
"loss": 1.8202,
"step": 245
},
{
"epoch": 2.2363636363636363,
"grad_norm": 3.404466390609741,
"learning_rate": 0.00012878787878787878,
"loss": 2.0287,
"step": 246
},
{
"epoch": 2.2454545454545456,
"grad_norm": 2.354210376739502,
"learning_rate": 0.00012727272727272725,
"loss": 1.8104,
"step": 247
},
{
"epoch": 2.2545454545454544,
"grad_norm": 2.218528985977173,
"learning_rate": 0.00012575757575757575,
"loss": 1.935,
"step": 248
},
{
"epoch": 2.2636363636363637,
"grad_norm": 2.672149658203125,
"learning_rate": 0.00012424242424242425,
"loss": 2.0121,
"step": 249
},
{
"epoch": 2.2727272727272725,
"grad_norm": 2.4421417713165283,
"learning_rate": 0.00012272727272727272,
"loss": 2.1933,
"step": 250
},
{
"epoch": 2.2818181818181817,
"grad_norm": 2.0689334869384766,
"learning_rate": 0.00012121212121212122,
"loss": 1.4094,
"step": 251
},
{
"epoch": 2.290909090909091,
"grad_norm": 1.5508095026016235,
"learning_rate": 0.0001196969696969697,
"loss": 1.3085,
"step": 252
},
{
"epoch": 2.3,
"grad_norm": 2.1737687587738037,
"learning_rate": 0.00011818181818181818,
"loss": 1.8556,
"step": 253
},
{
"epoch": 2.309090909090909,
"grad_norm": 1.8110220432281494,
"learning_rate": 0.00011666666666666667,
"loss": 2.0457,
"step": 254
},
{
"epoch": 2.3181818181818183,
"grad_norm": 2.595646619796753,
"learning_rate": 0.00011515151515151516,
"loss": 1.678,
"step": 255
},
{
"epoch": 2.327272727272727,
"grad_norm": 2.1089718341827393,
"learning_rate": 0.00011363636363636364,
"loss": 1.8065,
"step": 256
},
{
"epoch": 2.3363636363636364,
"grad_norm": 2.368802785873413,
"learning_rate": 0.00011212121212121212,
"loss": 2.2071,
"step": 257
},
{
"epoch": 2.3454545454545457,
"grad_norm": 1.8251020908355713,
"learning_rate": 0.00011060606060606061,
"loss": 1.655,
"step": 258
},
{
"epoch": 2.3545454545454545,
"grad_norm": 2.265756607055664,
"learning_rate": 0.00010909090909090909,
"loss": 1.8728,
"step": 259
},
{
"epoch": 2.3636363636363638,
"grad_norm": 2.234041929244995,
"learning_rate": 0.00010757575757575758,
"loss": 1.9121,
"step": 260
},
{
"epoch": 2.3727272727272726,
"grad_norm": 2.3294894695281982,
"learning_rate": 0.00010606060606060606,
"loss": 1.6389,
"step": 261
},
{
"epoch": 2.381818181818182,
"grad_norm": 2.2377607822418213,
"learning_rate": 0.00010454545454545454,
"loss": 1.6644,
"step": 262
},
{
"epoch": 2.390909090909091,
"grad_norm": 1.9315197467803955,
"learning_rate": 0.00010303030303030303,
"loss": 1.2706,
"step": 263
},
{
"epoch": 2.4,
"grad_norm": 2.108189105987549,
"learning_rate": 0.00010151515151515152,
"loss": 2.0511,
"step": 264
},
{
"epoch": 2.409090909090909,
"grad_norm": 1.7083442211151123,
"learning_rate": 0.0001,
"loss": 1.6857,
"step": 265
},
{
"epoch": 2.418181818181818,
"grad_norm": 1.7851577997207642,
"learning_rate": 9.848484848484848e-05,
"loss": 1.6011,
"step": 266
},
{
"epoch": 2.4272727272727272,
"grad_norm": 1.8458926677703857,
"learning_rate": 9.696969696969698e-05,
"loss": 1.9975,
"step": 267
},
{
"epoch": 2.4363636363636365,
"grad_norm": 1.6345833539962769,
"learning_rate": 9.545454545454546e-05,
"loss": 1.6547,
"step": 268
},
{
"epoch": 2.4454545454545453,
"grad_norm": 1.972408652305603,
"learning_rate": 9.393939393939393e-05,
"loss": 1.8668,
"step": 269
},
{
"epoch": 2.4545454545454546,
"grad_norm": 2.2467167377471924,
"learning_rate": 9.242424242424242e-05,
"loss": 1.7917,
"step": 270
},
{
"epoch": 2.463636363636364,
"grad_norm": 2.024461507797241,
"learning_rate": 9.090909090909092e-05,
"loss": 1.7753,
"step": 271
},
{
"epoch": 2.4727272727272727,
"grad_norm": 1.6495355367660522,
"learning_rate": 8.93939393939394e-05,
"loss": 1.4943,
"step": 272
},
{
"epoch": 2.481818181818182,
"grad_norm": 2.026360273361206,
"learning_rate": 8.787878787878787e-05,
"loss": 1.6371,
"step": 273
},
{
"epoch": 2.4909090909090907,
"grad_norm": 1.9277071952819824,
"learning_rate": 8.636363636363636e-05,
"loss": 1.9051,
"step": 274
},
{
"epoch": 2.5,
"grad_norm": 2.0100483894348145,
"learning_rate": 8.484848484848486e-05,
"loss": 1.8797,
"step": 275
},
{
"epoch": 2.509090909090909,
"grad_norm": 2.0191221237182617,
"learning_rate": 8.333333333333333e-05,
"loss": 1.6841,
"step": 276
},
{
"epoch": 2.518181818181818,
"grad_norm": 1.6300185918807983,
"learning_rate": 8.181818181818182e-05,
"loss": 1.6628,
"step": 277
},
{
"epoch": 2.5272727272727273,
"grad_norm": 2.5483522415161133,
"learning_rate": 8.030303030303031e-05,
"loss": 1.848,
"step": 278
},
{
"epoch": 2.536363636363636,
"grad_norm": 2.5308337211608887,
"learning_rate": 7.878787878787879e-05,
"loss": 2.0427,
"step": 279
},
{
"epoch": 2.5454545454545454,
"grad_norm": 2.097010612487793,
"learning_rate": 7.727272727272727e-05,
"loss": 1.6295,
"step": 280
},
{
"epoch": 2.5545454545454547,
"grad_norm": 2.452202081680298,
"learning_rate": 7.575757575757576e-05,
"loss": 1.6162,
"step": 281
},
{
"epoch": 2.5636363636363635,
"grad_norm": 2.082160234451294,
"learning_rate": 7.424242424242426e-05,
"loss": 1.6048,
"step": 282
},
{
"epoch": 2.5727272727272728,
"grad_norm": 1.9822708368301392,
"learning_rate": 7.272727272727273e-05,
"loss": 1.5951,
"step": 283
},
{
"epoch": 2.581818181818182,
"grad_norm": 5.015679836273193,
"learning_rate": 7.121212121212121e-05,
"loss": 1.7139,
"step": 284
},
{
"epoch": 2.590909090909091,
"grad_norm": 2.2298402786254883,
"learning_rate": 6.96969696969697e-05,
"loss": 1.8234,
"step": 285
},
{
"epoch": 2.6,
"grad_norm": 1.9142459630966187,
"learning_rate": 6.818181818181818e-05,
"loss": 1.4472,
"step": 286
},
{
"epoch": 2.6090909090909093,
"grad_norm": 1.7407569885253906,
"learning_rate": 6.666666666666667e-05,
"loss": 1.6284,
"step": 287
},
{
"epoch": 2.618181818181818,
"grad_norm": 1.9365426301956177,
"learning_rate": 6.515151515151516e-05,
"loss": 1.8865,
"step": 288
},
{
"epoch": 2.6272727272727274,
"grad_norm": 2.4145357608795166,
"learning_rate": 6.363636363636363e-05,
"loss": 1.5228,
"step": 289
},
{
"epoch": 2.6363636363636362,
"grad_norm": 1.9109646081924438,
"learning_rate": 6.212121212121213e-05,
"loss": 1.8798,
"step": 290
},
{
"epoch": 2.6454545454545455,
"grad_norm": 1.9515796899795532,
"learning_rate": 6.060606060606061e-05,
"loss": 1.8353,
"step": 291
},
{
"epoch": 2.6545454545454543,
"grad_norm": 2.2350668907165527,
"learning_rate": 5.909090909090909e-05,
"loss": 2.3428,
"step": 292
},
{
"epoch": 2.6636363636363636,
"grad_norm": 1.7412108182907104,
"learning_rate": 5.757575757575758e-05,
"loss": 1.7247,
"step": 293
},
{
"epoch": 2.672727272727273,
"grad_norm": 2.316909074783325,
"learning_rate": 5.606060606060606e-05,
"loss": 1.6445,
"step": 294
},
{
"epoch": 2.6818181818181817,
"grad_norm": 2.045905113220215,
"learning_rate": 5.4545454545454546e-05,
"loss": 2.2573,
"step": 295
},
{
"epoch": 2.690909090909091,
"grad_norm": 1.7575581073760986,
"learning_rate": 5.303030303030303e-05,
"loss": 1.9093,
"step": 296
},
{
"epoch": 2.7,
"grad_norm": 10.453113555908203,
"learning_rate": 5.151515151515152e-05,
"loss": 1.8715,
"step": 297
},
{
"epoch": 2.709090909090909,
"grad_norm": 2.135576009750366,
"learning_rate": 5e-05,
"loss": 1.5546,
"step": 298
},
{
"epoch": 2.7181818181818183,
"grad_norm": 2.8170363903045654,
"learning_rate": 4.848484848484849e-05,
"loss": 1.5048,
"step": 299
},
{
"epoch": 2.7272727272727275,
"grad_norm": 2.0693070888519287,
"learning_rate": 4.6969696969696966e-05,
"loss": 2.3778,
"step": 300
},
{
"epoch": 2.7363636363636363,
"grad_norm": 1.7992305755615234,
"learning_rate": 4.545454545454546e-05,
"loss": 1.4883,
"step": 301
},
{
"epoch": 2.7454545454545456,
"grad_norm": 2.1758553981781006,
"learning_rate": 4.393939393939394e-05,
"loss": 1.632,
"step": 302
},
{
"epoch": 2.7545454545454544,
"grad_norm": 2.3659770488739014,
"learning_rate": 4.242424242424243e-05,
"loss": 1.6417,
"step": 303
},
{
"epoch": 2.7636363636363637,
"grad_norm": 2.3604612350463867,
"learning_rate": 4.090909090909091e-05,
"loss": 1.499,
"step": 304
},
{
"epoch": 2.7727272727272725,
"grad_norm": 2.1640219688415527,
"learning_rate": 3.939393939393939e-05,
"loss": 1.3101,
"step": 305
},
{
"epoch": 2.7818181818181817,
"grad_norm": 1.9443458318710327,
"learning_rate": 3.787878787878788e-05,
"loss": 1.7234,
"step": 306
},
{
"epoch": 2.790909090909091,
"grad_norm": 3.2635550498962402,
"learning_rate": 3.6363636363636364e-05,
"loss": 2.1717,
"step": 307
},
{
"epoch": 2.8,
"grad_norm": 1.711483120918274,
"learning_rate": 3.484848484848485e-05,
"loss": 1.6226,
"step": 308
},
{
"epoch": 2.809090909090909,
"grad_norm": 2.323063850402832,
"learning_rate": 3.3333333333333335e-05,
"loss": 2.1511,
"step": 309
},
{
"epoch": 2.8181818181818183,
"grad_norm": 2.0467774868011475,
"learning_rate": 3.1818181818181814e-05,
"loss": 1.5749,
"step": 310
},
{
"epoch": 2.827272727272727,
"grad_norm": 1.854019045829773,
"learning_rate": 3.0303030303030306e-05,
"loss": 2.0141,
"step": 311
},
{
"epoch": 2.8363636363636364,
"grad_norm": 1.8301273584365845,
"learning_rate": 2.878787878787879e-05,
"loss": 1.7546,
"step": 312
},
{
"epoch": 2.8454545454545457,
"grad_norm": 2.283935308456421,
"learning_rate": 2.7272727272727273e-05,
"loss": 1.9298,
"step": 313
},
{
"epoch": 2.8545454545454545,
"grad_norm": 1.9070757627487183,
"learning_rate": 2.575757575757576e-05,
"loss": 1.7298,
"step": 314
},
{
"epoch": 2.8636363636363638,
"grad_norm": 2.1912167072296143,
"learning_rate": 2.4242424242424244e-05,
"loss": 1.564,
"step": 315
},
{
"epoch": 2.8727272727272726,
"grad_norm": 2.081099271774292,
"learning_rate": 2.272727272727273e-05,
"loss": 1.8931,
"step": 316
},
{
"epoch": 2.881818181818182,
"grad_norm": 2.1651575565338135,
"learning_rate": 2.1212121212121215e-05,
"loss": 1.7705,
"step": 317
},
{
"epoch": 2.8909090909090907,
"grad_norm": 2.102203369140625,
"learning_rate": 1.9696969696969697e-05,
"loss": 2.2126,
"step": 318
},
{
"epoch": 2.9,
"grad_norm": 1.824823021888733,
"learning_rate": 1.8181818181818182e-05,
"loss": 2.0321,
"step": 319
},
{
"epoch": 2.909090909090909,
"grad_norm": 2.1086361408233643,
"learning_rate": 1.6666666666666667e-05,
"loss": 1.535,
"step": 320
},
{
"epoch": 2.918181818181818,
"grad_norm": 1.7079758644104004,
"learning_rate": 1.5151515151515153e-05,
"loss": 1.9123,
"step": 321
},
{
"epoch": 2.9272727272727272,
"grad_norm": 1.6254668235778809,
"learning_rate": 1.3636363636363637e-05,
"loss": 1.3286,
"step": 322
},
{
"epoch": 2.9363636363636365,
"grad_norm": 1.7518110275268555,
"learning_rate": 1.2121212121212122e-05,
"loss": 1.4937,
"step": 323
},
{
"epoch": 2.9454545454545453,
"grad_norm": 2.101076364517212,
"learning_rate": 1.0606060606060607e-05,
"loss": 2.0158,
"step": 324
},
{
"epoch": 2.9545454545454546,
"grad_norm": 2.2095401287078857,
"learning_rate": 9.090909090909091e-06,
"loss": 1.827,
"step": 325
},
{
"epoch": 2.963636363636364,
"grad_norm": 1.988400936126709,
"learning_rate": 7.5757575757575764e-06,
"loss": 2.1063,
"step": 326
},
{
"epoch": 2.9727272727272727,
"grad_norm": 1.926684021949768,
"learning_rate": 6.060606060606061e-06,
"loss": 1.5571,
"step": 327
},
{
"epoch": 2.981818181818182,
"grad_norm": 1.7507318258285522,
"learning_rate": 4.5454545454545455e-06,
"loss": 1.5938,
"step": 328
},
{
"epoch": 2.990909090909091,
"grad_norm": 2.0858609676361084,
"learning_rate": 3.0303030303030305e-06,
"loss": 1.5035,
"step": 329
},
{
"epoch": 3.0,
"grad_norm": 1.854127049446106,
"learning_rate": 1.5151515151515152e-06,
"loss": 1.7205,
"step": 330
},
{
"epoch": 3.0,
"eval_f1": 0.8917,
"eval_gen_len": 49.4636,
"eval_loss": 1.9953831434249878,
"eval_precision": 0.8901,
"eval_recall": 0.8936,
"eval_rouge1": 0.4576,
"eval_rouge2": 0.2129,
"eval_rougeL": 0.3814,
"eval_rougeLsum": 0.4246,
"eval_runtime": 12.2484,
"eval_samples_per_second": 8.981,
"eval_steps_per_second": 1.143,
"step": 330
},
{
"epoch": 3.0,
"step": 330,
"total_flos": 356896330481664.0,
"train_loss": 2.027634771303697,
"train_runtime": 130.1895,
"train_samples_per_second": 20.255,
"train_steps_per_second": 2.535
}
],
"logging_steps": 1,
"max_steps": 330,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 356896330481664.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}