{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.12977265992858178,
"eval_steps": 500,
"global_step": 60000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.002162877665476363,
"grad_norm": 2.268080472946167,
"learning_rate": 4.9945869519559514e-05,
"loss": 7.7847,
"step": 1000
},
{
"epoch": 0.004325755330952726,
"grad_norm": 2.551193952560425,
"learning_rate": 4.983760855867853e-05,
"loss": 5.9343,
"step": 2000
},
{
"epoch": 0.006488632996429089,
"grad_norm": 2.584500551223755,
"learning_rate": 4.972934759779754e-05,
"loss": 5.3418,
"step": 3000
},
{
"epoch": 0.008651510661905452,
"grad_norm": 2.7399814128875732,
"learning_rate": 4.962108663691656e-05,
"loss": 4.9453,
"step": 4000
},
{
"epoch": 0.010814388327381816,
"grad_norm": 2.7650134563446045,
"learning_rate": 4.9512825676035575e-05,
"loss": 4.6277,
"step": 5000
},
{
"epoch": 0.012977265992858178,
"grad_norm": 2.8655128479003906,
"learning_rate": 4.9404564715154586e-05,
"loss": 4.3648,
"step": 6000
},
{
"epoch": 0.015140143658334541,
"grad_norm": 2.8312480449676514,
"learning_rate": 4.92963037542736e-05,
"loss": 4.1441,
"step": 7000
},
{
"epoch": 0.017303021323810903,
"grad_norm": 2.842963218688965,
"learning_rate": 4.918804279339262e-05,
"loss": 3.9787,
"step": 8000
},
{
"epoch": 0.019465898989287266,
"grad_norm": 2.6685407161712646,
"learning_rate": 4.907978183251164e-05,
"loss": 3.8343,
"step": 9000
},
{
"epoch": 0.02162877665476363,
"grad_norm": 2.7094473838806152,
"learning_rate": 4.897152087163065e-05,
"loss": 3.7211,
"step": 10000
},
{
"epoch": 0.023791654320239993,
"grad_norm": 2.7870073318481445,
"learning_rate": 4.886336817171055e-05,
"loss": 3.6358,
"step": 11000
},
{
"epoch": 0.025954531985716355,
"grad_norm": 2.6066818237304688,
"learning_rate": 4.8755107210829566e-05,
"loss": 3.5526,
"step": 12000
},
{
"epoch": 0.028117409651192717,
"grad_norm": 2.8119983673095703,
"learning_rate": 4.864695451090946e-05,
"loss": 3.4758,
"step": 13000
},
{
"epoch": 0.030280287316669083,
"grad_norm": 2.7353262901306152,
"learning_rate": 4.853869355002847e-05,
"loss": 3.4134,
"step": 14000
},
{
"epoch": 0.032443164982145445,
"grad_norm": 2.6471846103668213,
"learning_rate": 4.843054085010837e-05,
"loss": 3.3547,
"step": 15000
},
{
"epoch": 0.03460604264762181,
"grad_norm": 2.67105770111084,
"learning_rate": 4.832227988922739e-05,
"loss": 3.3033,
"step": 16000
},
{
"epoch": 0.03676892031309817,
"grad_norm": 2.667982339859009,
"learning_rate": 4.8214127189307287e-05,
"loss": 3.2608,
"step": 17000
},
{
"epoch": 0.03893179797857453,
"grad_norm": 2.588027238845825,
"learning_rate": 4.81058662284263e-05,
"loss": 3.2199,
"step": 18000
},
{
"epoch": 0.0410946756440509,
"grad_norm": 2.650073289871216,
"learning_rate": 4.799771352850619e-05,
"loss": 3.1817,
"step": 19000
},
{
"epoch": 0.04325755330952726,
"grad_norm": 2.4354302883148193,
"learning_rate": 4.788945256762521e-05,
"loss": 3.1464,
"step": 20000
},
{
"epoch": 0.045420430975003624,
"grad_norm": 2.6227738857269287,
"learning_rate": 4.778129986770511e-05,
"loss": 3.1134,
"step": 21000
},
{
"epoch": 0.047583308640479986,
"grad_norm": 2.5212135314941406,
"learning_rate": 4.767303890682412e-05,
"loss": 3.08,
"step": 22000
},
{
"epoch": 0.04974618630595635,
"grad_norm": 2.4768059253692627,
"learning_rate": 4.756488620690402e-05,
"loss": 3.0511,
"step": 23000
},
{
"epoch": 0.05190906397143271,
"grad_norm": 2.5772910118103027,
"learning_rate": 4.7456625246023034e-05,
"loss": 3.0225,
"step": 24000
},
{
"epoch": 0.05407194163690907,
"grad_norm": 2.4655566215515137,
"learning_rate": 4.7348472546102937e-05,
"loss": 3.0053,
"step": 25000
},
{
"epoch": 0.056234819302385435,
"grad_norm": 2.432565212249756,
"learning_rate": 4.724021158522195e-05,
"loss": 2.9726,
"step": 26000
},
{
"epoch": 0.058397696967861804,
"grad_norm": 2.437964677810669,
"learning_rate": 4.7131950624340964e-05,
"loss": 2.9542,
"step": 27000
},
{
"epoch": 0.060560574633338166,
"grad_norm": 2.498307466506958,
"learning_rate": 4.702379792442086e-05,
"loss": 2.9373,
"step": 28000
},
{
"epoch": 0.06272345229881453,
"grad_norm": 2.3338165283203125,
"learning_rate": 4.6915536963539876e-05,
"loss": 2.9143,
"step": 29000
},
{
"epoch": 0.06488632996429089,
"grad_norm": 2.2597150802612305,
"learning_rate": 4.680738426361977e-05,
"loss": 2.8947,
"step": 30000
},
{
"epoch": 0.06704920762976725,
"grad_norm": 2.3500025272369385,
"learning_rate": 4.669912330273879e-05,
"loss": 2.8729,
"step": 31000
},
{
"epoch": 0.06921208529524361,
"grad_norm": 2.592963457107544,
"learning_rate": 4.6590970602818684e-05,
"loss": 2.8638,
"step": 32000
},
{
"epoch": 0.07137496296071998,
"grad_norm": 2.2916111946105957,
"learning_rate": 4.64827096419377e-05,
"loss": 2.8423,
"step": 33000
},
{
"epoch": 0.07353784062619634,
"grad_norm": 2.298884153366089,
"learning_rate": 4.63745569420176e-05,
"loss": 2.8258,
"step": 34000
},
{
"epoch": 0.0757007182916727,
"grad_norm": 2.2187180519104004,
"learning_rate": 4.6266295981136614e-05,
"loss": 2.8085,
"step": 35000
},
{
"epoch": 0.07786359595714906,
"grad_norm": 2.3649799823760986,
"learning_rate": 4.615814328121651e-05,
"loss": 2.7946,
"step": 36000
},
{
"epoch": 0.08002647362262542,
"grad_norm": 2.189671754837036,
"learning_rate": 4.6049882320335526e-05,
"loss": 2.7828,
"step": 37000
},
{
"epoch": 0.0821893512881018,
"grad_norm": 2.341826915740967,
"learning_rate": 4.594172962041542e-05,
"loss": 2.7706,
"step": 38000
},
{
"epoch": 0.08435222895357816,
"grad_norm": 2.3907382488250732,
"learning_rate": 4.583346865953443e-05,
"loss": 2.7548,
"step": 39000
},
{
"epoch": 0.08651510661905452,
"grad_norm": 2.2284059524536133,
"learning_rate": 4.5725315959614334e-05,
"loss": 2.7453,
"step": 40000
},
{
"epoch": 0.08867798428453089,
"grad_norm": 2.254753589630127,
"learning_rate": 4.561705499873335e-05,
"loss": 2.7335,
"step": 41000
},
{
"epoch": 0.09084086195000725,
"grad_norm": 2.2830395698547363,
"learning_rate": 4.550890229881325e-05,
"loss": 2.7188,
"step": 42000
},
{
"epoch": 0.09300373961548361,
"grad_norm": 2.2295284271240234,
"learning_rate": 4.540064133793226e-05,
"loss": 2.7066,
"step": 43000
},
{
"epoch": 0.09516661728095997,
"grad_norm": 2.177199602127075,
"learning_rate": 4.529248863801216e-05,
"loss": 2.6916,
"step": 44000
},
{
"epoch": 0.09732949494643633,
"grad_norm": 2.0382652282714844,
"learning_rate": 4.5184227677131176e-05,
"loss": 2.6852,
"step": 45000
},
{
"epoch": 0.0994923726119127,
"grad_norm": 2.240755319595337,
"learning_rate": 4.507607497721107e-05,
"loss": 2.6793,
"step": 46000
},
{
"epoch": 0.10165525027738906,
"grad_norm": 2.1940410137176514,
"learning_rate": 4.496781401633008e-05,
"loss": 2.6659,
"step": 47000
},
{
"epoch": 0.10381812794286542,
"grad_norm": 2.203796148300171,
"learning_rate": 4.4859661316409984e-05,
"loss": 2.6526,
"step": 48000
},
{
"epoch": 0.10598100560834178,
"grad_norm": 2.1186819076538086,
"learning_rate": 4.4751400355529e-05,
"loss": 2.6463,
"step": 49000
},
{
"epoch": 0.10814388327381814,
"grad_norm": 2.13010573387146,
"learning_rate": 4.46432476556089e-05,
"loss": 2.6403,
"step": 50000
},
{
"epoch": 0.11030676093929451,
"grad_norm": 2.1970157623291016,
"learning_rate": 4.4535094955688785e-05,
"loss": 2.6276,
"step": 51000
},
{
"epoch": 0.11246963860477087,
"grad_norm": 2.1742944717407227,
"learning_rate": 4.44268339948078e-05,
"loss": 2.6178,
"step": 52000
},
{
"epoch": 0.11463251627024724,
"grad_norm": 2.195935010910034,
"learning_rate": 4.4318681294887705e-05,
"loss": 2.6095,
"step": 53000
},
{
"epoch": 0.11679539393572361,
"grad_norm": 2.0977721214294434,
"learning_rate": 4.421042033400672e-05,
"loss": 2.6037,
"step": 54000
},
{
"epoch": 0.11895827160119997,
"grad_norm": 2.1214547157287598,
"learning_rate": 4.410215937312573e-05,
"loss": 2.5962,
"step": 55000
},
{
"epoch": 0.12112114926667633,
"grad_norm": 2.216897964477539,
"learning_rate": 4.399400667320563e-05,
"loss": 2.5881,
"step": 56000
},
{
"epoch": 0.1232840269321527,
"grad_norm": 2.1751599311828613,
"learning_rate": 4.3885745712324644e-05,
"loss": 2.5817,
"step": 57000
},
{
"epoch": 0.12544690459762906,
"grad_norm": 2.1866142749786377,
"learning_rate": 4.377748475144366e-05,
"loss": 2.5691,
"step": 58000
},
{
"epoch": 0.1276097822631054,
"grad_norm": 2.1192381381988525,
"learning_rate": 4.366933205152356e-05,
"loss": 2.5649,
"step": 59000
},
{
"epoch": 0.12977265992858178,
"grad_norm": 1.9748364686965942,
"learning_rate": 4.3561071090642574e-05,
"loss": 2.5535,
"step": 60000
}
],
"logging_steps": 1000,
"max_steps": 462347,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 10000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.30908615081984e+18,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}