{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9939045038943446,
"eval_steps": 500,
"global_step": 368,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005418218760582459,
"grad_norm": 11.441351738340396,
"learning_rate": 5.405405405405406e-07,
"loss": 2.4665,
"step": 1
},
{
"epoch": 0.027091093802912292,
"grad_norm": 11.567929424436132,
"learning_rate": 2.702702702702703e-06,
"loss": 2.4607,
"step": 5
},
{
"epoch": 0.054182187605824585,
"grad_norm": 2.1647507226825287,
"learning_rate": 5.405405405405406e-06,
"loss": 2.3924,
"step": 10
},
{
"epoch": 0.08127328140873688,
"grad_norm": 1.0652926230951467,
"learning_rate": 8.108108108108109e-06,
"loss": 2.2869,
"step": 15
},
{
"epoch": 0.10836437521164917,
"grad_norm": 0.6956953231915263,
"learning_rate": 1.0810810810810812e-05,
"loss": 2.2241,
"step": 20
},
{
"epoch": 0.13545546901456146,
"grad_norm": 0.6368662485384925,
"learning_rate": 1.3513513513513515e-05,
"loss": 2.1943,
"step": 25
},
{
"epoch": 0.16254656281747376,
"grad_norm": 0.52626831061368,
"learning_rate": 1.6216216216216218e-05,
"loss": 2.1796,
"step": 30
},
{
"epoch": 0.18963765662038604,
"grad_norm": 0.5041342654725786,
"learning_rate": 1.891891891891892e-05,
"loss": 2.1739,
"step": 35
},
{
"epoch": 0.21672875042329834,
"grad_norm": 0.531083346249148,
"learning_rate": 1.9995946530314384e-05,
"loss": 2.1573,
"step": 40
},
{
"epoch": 0.24381984422621064,
"grad_norm": 0.4970950800288572,
"learning_rate": 1.9971187226043746e-05,
"loss": 2.1556,
"step": 45
},
{
"epoch": 0.2709109380291229,
"grad_norm": 0.4930331306794567,
"learning_rate": 1.9923976226947417e-05,
"loss": 2.1516,
"step": 50
},
{
"epoch": 0.2980020318320352,
"grad_norm": 0.4792958893497388,
"learning_rate": 1.985441983600819e-05,
"loss": 2.1416,
"step": 55
},
{
"epoch": 0.3250931256349475,
"grad_norm": 0.4704908677461612,
"learning_rate": 1.9762674670369757e-05,
"loss": 2.1375,
"step": 60
},
{
"epoch": 0.3521842194378598,
"grad_norm": 0.46530825034717316,
"learning_rate": 1.9648947308688594e-05,
"loss": 2.1328,
"step": 65
},
{
"epoch": 0.37927531324077207,
"grad_norm": 0.47535573444890883,
"learning_rate": 1.9513493825989664e-05,
"loss": 2.1402,
"step": 70
},
{
"epoch": 0.4063664070436844,
"grad_norm": 0.4759377502233077,
"learning_rate": 1.9356619217073252e-05,
"loss": 2.1332,
"step": 75
},
{
"epoch": 0.4334575008465967,
"grad_norm": 0.4662210394921992,
"learning_rate": 1.917867670977126e-05,
"loss": 2.1335,
"step": 80
},
{
"epoch": 0.460548594649509,
"grad_norm": 0.4757611076987449,
"learning_rate": 1.8980066969599216e-05,
"loss": 2.1258,
"step": 85
},
{
"epoch": 0.4876396884524213,
"grad_norm": 0.45382312904649785,
"learning_rate": 1.8761237197594945e-05,
"loss": 2.1293,
"step": 90
},
{
"epoch": 0.5147307822553335,
"grad_norm": 0.47488848221887603,
"learning_rate": 1.852268012337514e-05,
"loss": 2.1279,
"step": 95
},
{
"epoch": 0.5418218760582458,
"grad_norm": 0.47098691848945873,
"learning_rate": 1.8264932895677195e-05,
"loss": 2.126,
"step": 100
},
{
"epoch": 0.5689129698611581,
"grad_norm": 0.47815599863300057,
"learning_rate": 1.798857587288445e-05,
"loss": 2.1249,
"step": 105
},
{
"epoch": 0.5960040636640704,
"grad_norm": 0.45607203288859494,
"learning_rate": 1.769423131625808e-05,
"loss": 2.1182,
"step": 110
},
{
"epoch": 0.6230951574669827,
"grad_norm": 0.4566297150188363,
"learning_rate": 1.738256198881809e-05,
"loss": 2.1182,
"step": 115
},
{
"epoch": 0.650186251269895,
"grad_norm": 0.4563332108229212,
"learning_rate": 1.7054269663028232e-05,
"loss": 2.1158,
"step": 120
},
{
"epoch": 0.6772773450728073,
"grad_norm": 0.4719747539449419,
"learning_rate": 1.6710093540645056e-05,
"loss": 2.1124,
"step": 125
},
{
"epoch": 0.7043684388757196,
"grad_norm": 0.45233465694057096,
"learning_rate": 1.6350808588288964e-05,
"loss": 2.1173,
"step": 130
},
{
"epoch": 0.731459532678632,
"grad_norm": 0.4494520104287378,
"learning_rate": 1.597722379248512e-05,
"loss": 2.1147,
"step": 135
},
{
"epoch": 0.7585506264815441,
"grad_norm": 0.44217741153395734,
"learning_rate": 1.559018033810316e-05,
"loss": 2.1158,
"step": 140
},
{
"epoch": 0.7856417202844564,
"grad_norm": 0.43568954148921973,
"learning_rate": 1.5190549714297303e-05,
"loss": 2.1155,
"step": 145
},
{
"epoch": 0.8127328140873687,
"grad_norm": 0.45615630991925854,
"learning_rate": 1.4779231752211546e-05,
"loss": 2.1162,
"step": 150
},
{
"epoch": 0.839823907890281,
"grad_norm": 0.4607530478729013,
"learning_rate": 1.4357152598868478e-05,
"loss": 2.117,
"step": 155
},
{
"epoch": 0.8669150016931934,
"grad_norm": 0.43874353467346544,
"learning_rate": 1.3925262631803722e-05,
"loss": 2.1047,
"step": 160
},
{
"epoch": 0.8940060954961057,
"grad_norm": 0.4577418545526205,
"learning_rate": 1.3484534319141592e-05,
"loss": 2.1103,
"step": 165
},
{
"epoch": 0.921097189299018,
"grad_norm": 0.44202574956917495,
"learning_rate": 1.303596002993028e-05,
"loss": 2.1117,
"step": 170
},
{
"epoch": 0.9481882831019303,
"grad_norm": 0.4461244150173344,
"learning_rate": 1.2580549799667034e-05,
"loss": 2.1141,
"step": 175
},
{
"epoch": 0.9752793769048426,
"grad_norm": 0.45529168414311305,
"learning_rate": 1.2119329056044533e-05,
"loss": 2.1046,
"step": 180
},
{
"epoch": 0.9969522519471724,
"eval_loss": 2.1050913333892822,
"eval_runtime": 76.5237,
"eval_samples_per_second": 17.054,
"eval_steps_per_second": 8.533,
"step": 184
},
{
"epoch": 1.0023704707077548,
"grad_norm": 0.4809974366908029,
"learning_rate": 1.165333631003928e-05,
"loss": 2.0998,
"step": 185
},
{
"epoch": 1.029461564510667,
"grad_norm": 0.48705866202596343,
"learning_rate": 1.1183620817540985e-05,
"loss": 2.0462,
"step": 190
},
{
"epoch": 1.0565526583135794,
"grad_norm": 0.4868339560578216,
"learning_rate": 1.0711240216788036e-05,
"loss": 2.0529,
"step": 195
},
{
"epoch": 1.0836437521164917,
"grad_norm": 0.4620568487887428,
"learning_rate": 1.0237258146928849e-05,
"loss": 2.0494,
"step": 200
},
{
"epoch": 1.110734845919404,
"grad_norm": 0.4641633596897171,
"learning_rate": 9.762741853071153e-06,
"loss": 2.0467,
"step": 205
},
{
"epoch": 1.1378259397223163,
"grad_norm": 0.48677368374631885,
"learning_rate": 9.288759783211967e-06,
"loss": 2.0482,
"step": 210
},
{
"epoch": 1.1649170335252286,
"grad_norm": 0.46534164607337153,
"learning_rate": 8.81637918245902e-06,
"loss": 2.0469,
"step": 215
},
{
"epoch": 1.1920081273281409,
"grad_norm": 0.46946874812594946,
"learning_rate": 8.346663689960724e-06,
"loss": 2.0445,
"step": 220
},
{
"epoch": 1.2190992211310532,
"grad_norm": 0.4663999382035462,
"learning_rate": 7.880670943955467e-06,
"loss": 2.0481,
"step": 225
},
{
"epoch": 1.2461903149339655,
"grad_norm": 0.4457099195715021,
"learning_rate": 7.419450200332965e-06,
"loss": 2.0529,
"step": 230
},
{
"epoch": 1.2732814087368778,
"grad_norm": 0.46304032166442355,
"learning_rate": 6.964039970069722e-06,
"loss": 2.0467,
"step": 235
},
{
"epoch": 1.30037250253979,
"grad_norm": 0.4449406613479281,
"learning_rate": 6.515465680858412e-06,
"loss": 2.0486,
"step": 240
},
{
"epoch": 1.3274635963427024,
"grad_norm": 0.4389522489189322,
"learning_rate": 6.074737368196279e-06,
"loss": 2.0516,
"step": 245
},
{
"epoch": 1.3545546901456147,
"grad_norm": 0.43429376447812934,
"learning_rate": 5.642847401131526e-06,
"loss": 2.0398,
"step": 250
},
{
"epoch": 1.381645783948527,
"grad_norm": 0.42907764023330297,
"learning_rate": 5.220768247788458e-06,
"loss": 2.0469,
"step": 255
},
{
"epoch": 1.4087368777514393,
"grad_norm": 0.43261082208262897,
"learning_rate": 4.809450285702697e-06,
"loss": 2.049,
"step": 260
},
{
"epoch": 1.4358279715543514,
"grad_norm": 0.4495153435008709,
"learning_rate": 4.409819661896839e-06,
"loss": 2.0453,
"step": 265
},
{
"epoch": 1.462919065357264,
"grad_norm": 0.44801407055899056,
"learning_rate": 4.022776207514885e-06,
"loss": 2.0401,
"step": 270
},
{
"epoch": 1.490010159160176,
"grad_norm": 0.43760507616111505,
"learning_rate": 3.6491914117110405e-06,
"loss": 2.047,
"step": 275
},
{
"epoch": 1.5171012529630885,
"grad_norm": 0.4323485633364769,
"learning_rate": 3.2899064593549477e-06,
"loss": 2.0445,
"step": 280
},
{
"epoch": 1.5441923467660006,
"grad_norm": 0.44598125572196134,
"learning_rate": 2.945730336971767e-06,
"loss": 2.0405,
"step": 285
},
{
"epoch": 1.5712834405689131,
"grad_norm": 0.44045190794321376,
"learning_rate": 2.6174380111819144e-06,
"loss": 2.0449,
"step": 290
},
{
"epoch": 1.5983745343718252,
"grad_norm": 0.4315376791223261,
"learning_rate": 2.3057686837419246e-06,
"loss": 2.0455,
"step": 295
},
{
"epoch": 1.6254656281747377,
"grad_norm": 0.4141056168787056,
"learning_rate": 2.011424127115552e-06,
"loss": 2.0429,
"step": 300
},
{
"epoch": 1.6525567219776498,
"grad_norm": 0.42561322896204196,
"learning_rate": 1.7350671043228072e-06,
"loss": 2.0473,
"step": 305
},
{
"epoch": 1.679647815780562,
"grad_norm": 0.4318633569152662,
"learning_rate": 1.4773198766248642e-06,
"loss": 2.0471,
"step": 310
},
{
"epoch": 1.7067389095834744,
"grad_norm": 0.42654823899186717,
"learning_rate": 1.2387628024050557e-06,
"loss": 2.0502,
"step": 315
},
{
"epoch": 1.7338300033863867,
"grad_norm": 0.4318585206830447,
"learning_rate": 1.0199330304007858e-06,
"loss": 2.0374,
"step": 320
},
{
"epoch": 1.760921097189299,
"grad_norm": 0.42587188797302283,
"learning_rate": 8.213232902287438e-07,
"loss": 2.0508,
"step": 325
},
{
"epoch": 1.7880121909922113,
"grad_norm": 0.42658756639019496,
"learning_rate": 6.433807829267491e-07,
"loss": 2.0458,
"step": 330
},
{
"epoch": 1.8151032847951236,
"grad_norm": 0.4254891411016103,
"learning_rate": 4.865061740103361e-07,
"loss": 2.0538,
"step": 335
},
{
"epoch": 1.842194378598036,
"grad_norm": 0.43121146874552274,
"learning_rate": 3.510526913114065e-07,
"loss": 2.0437,
"step": 340
},
{
"epoch": 1.8692854724009482,
"grad_norm": 0.4148670481064235,
"learning_rate": 2.3732532963024468e-07,
"loss": 2.0366,
"step": 345
},
{
"epoch": 1.8963765662038605,
"grad_norm": 0.4240449705158572,
"learning_rate": 1.4558016399181086e-07,
"loss": 2.0462,
"step": 350
},
{
"epoch": 1.9234676600067728,
"grad_norm": 0.41343102256412484,
"learning_rate": 7.602377305258479e-08,
"loss": 2.0485,
"step": 355
},
{
"epoch": 1.950558753809685,
"grad_norm": 0.4219092082642883,
"learning_rate": 2.8812773956256034e-08,
"loss": 2.0398,
"step": 360
},
{
"epoch": 1.9776498476125974,
"grad_norm": 0.4203661391482514,
"learning_rate": 4.053469685617595e-09,
"loss": 2.0438,
"step": 365
},
{
"epoch": 1.9939045038943446,
"eval_loss": 2.097979784011841,
"eval_runtime": 77.4795,
"eval_samples_per_second": 16.843,
"eval_steps_per_second": 8.428,
"step": 368
},
{
"epoch": 1.9939045038943446,
"step": 368,
"total_flos": 53334434119680.0,
"train_loss": 2.0994615062423376,
"train_runtime": 7293.2016,
"train_samples_per_second": 6.477,
"train_steps_per_second": 0.05
}
],
"logging_steps": 5,
"max_steps": 368,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 53334434119680.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}