{
  "best_metric": 0.6546083688735962,
  "best_model_checkpoint": "models_gitignored/distilbert-base-uncased-finetuned-sentence-classification/checkpoint-12626",
  "epoch": 4.0,
  "global_step": 50504,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 0.04, "learning_rate": 1.984159670521147e-05, "loss": 1.177, "step": 500 },
    { "epoch": 0.08, "learning_rate": 1.968319341042294e-05, "loss": 0.9186, "step": 1000 },
    { "epoch": 0.12, "learning_rate": 1.9524790115634406e-05, "loss": 0.8261, "step": 1500 },
    { "epoch": 0.16, "learning_rate": 1.9366386820845877e-05, "loss": 0.8177, "step": 2000 },
    { "epoch": 0.2, "learning_rate": 1.9207983526057344e-05, "loss": 0.7788, "step": 2500 },
    { "epoch": 0.24, "learning_rate": 1.904958023126881e-05, "loss": 0.7519, "step": 3000 },
    { "epoch": 0.28, "learning_rate": 1.889117693648028e-05, "loss": 0.7243, "step": 3500 },
    { "epoch": 0.32, "learning_rate": 1.8732773641691748e-05, "loss": 0.7431, "step": 4000 },
    { "epoch": 0.36, "learning_rate": 1.8574370346903215e-05, "loss": 0.7297, "step": 4500 },
    { "epoch": 0.4, "learning_rate": 1.8415967052114686e-05, "loss": 0.7061, "step": 5000 },
    { "epoch": 0.44, "learning_rate": 1.8257563757326153e-05, "loss": 0.698, "step": 5500 },
    { "epoch": 0.48, "learning_rate": 1.8099160462537623e-05, "loss": 0.7074, "step": 6000 },
    { "epoch": 0.51, "learning_rate": 1.794075716774909e-05, "loss": 0.6975, "step": 6500 },
    { "epoch": 0.55, "learning_rate": 1.7782353872960557e-05, "loss": 0.6943, "step": 7000 },
    { "epoch": 0.59, "learning_rate": 1.7623950578172028e-05, "loss": 0.7045, "step": 7500 },
    { "epoch": 0.63, "learning_rate": 1.7465547283383495e-05, "loss": 0.6764, "step": 8000 },
    { "epoch": 0.67, "learning_rate": 1.730714398859496e-05, "loss": 0.6722, "step": 8500 },
    { "epoch": 0.71, "learning_rate": 1.7148740693806432e-05, "loss": 0.6884, "step": 9000 },
    { "epoch": 0.75, "learning_rate": 1.69903373990179e-05, "loss": 0.6903, "step": 9500 },
    { "epoch": 0.79, "learning_rate": 1.683193410422937e-05, "loss": 0.6854, "step": 10000 },
    { "epoch": 0.83, "learning_rate": 1.667353080944084e-05, "loss": 0.6861, "step": 10500 },
    { "epoch": 0.87, "learning_rate": 1.6515127514652307e-05, "loss": 0.6894, "step": 11000 },
    { "epoch": 0.91, "learning_rate": 1.6356724219863774e-05, "loss": 0.6754, "step": 11500 },
    { "epoch": 0.95, "learning_rate": 1.6198320925075244e-05, "loss": 0.6918, "step": 12000 },
    { "epoch": 0.99, "learning_rate": 1.603991763028671e-05, "loss": 0.6746, "step": 12500 },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.778547431856752,
      "eval_f1": 0.7757136063142371,
      "eval_kappa": 0.7014269317598121,
      "eval_loss": 0.6546083688735962,
      "eval_precision": 0.7762465189891471,
      "eval_recall": 0.778547431856752,
      "eval_runtime": 150.616,
      "eval_samples_per_second": 191.699,
      "eval_steps_per_second": 23.968,
      "step": 12626
    },
    { "epoch": 1.03, "learning_rate": 1.5881514335498182e-05, "loss": 0.5694, "step": 13000 },
    { "epoch": 1.07, "learning_rate": 1.572311104070965e-05, "loss": 0.5509, "step": 13500 },
    { "epoch": 1.11, "learning_rate": 1.5564707745921116e-05, "loss": 0.5466, "step": 14000 },
    { "epoch": 1.15, "learning_rate": 1.5406304451132586e-05, "loss": 0.5544, "step": 14500 },
    { "epoch": 1.19, "learning_rate": 1.5247901156344053e-05, "loss": 0.566, "step": 15000 },
    { "epoch": 1.23, "learning_rate": 1.5089497861555522e-05, "loss": 0.5773, "step": 15500 },
    { "epoch": 1.27, "learning_rate": 1.4931094566766991e-05, "loss": 0.6056, "step": 16000 },
    { "epoch": 1.31, "learning_rate": 1.4772691271978458e-05, "loss": 0.5532, "step": 16500 },
    { "epoch": 1.35, "learning_rate": 1.4614287977189927e-05, "loss": 0.5764, "step": 17000 },
    { "epoch": 1.39, "learning_rate": 1.4455884682401395e-05, "loss": 0.5499, "step": 17500 },
    { "epoch": 1.43, "learning_rate": 1.4297481387612864e-05, "loss": 0.5695, "step": 18000 },
    { "epoch": 1.47, "learning_rate": 1.4139078092824333e-05, "loss": 0.5823, "step": 18500 },
    { "epoch": 1.5, "learning_rate": 1.39806747980358e-05, "loss": 0.5329, "step": 19000 },
    { "epoch": 1.54, "learning_rate": 1.3822271503247269e-05, "loss": 0.5661, "step": 19500 },
    { "epoch": 1.58, "learning_rate": 1.3663868208458737e-05, "loss": 0.5851, "step": 20000 },
    { "epoch": 1.62, "learning_rate": 1.3505464913670206e-05, "loss": 0.5859, "step": 20500 },
    { "epoch": 1.66, "learning_rate": 1.3347061618881673e-05, "loss": 0.5569, "step": 21000 },
    { "epoch": 1.7, "learning_rate": 1.3188658324093142e-05, "loss": 0.5473, "step": 21500 },
    { "epoch": 1.74, "learning_rate": 1.303025502930461e-05, "loss": 0.5635, "step": 22000 },
    { "epoch": 1.78, "learning_rate": 1.287185173451608e-05, "loss": 0.5848, "step": 22500 },
    { "epoch": 1.82, "learning_rate": 1.2713448439727546e-05, "loss": 0.5885, "step": 23000 },
    { "epoch": 1.86, "learning_rate": 1.2555045144939015e-05, "loss": 0.5502, "step": 23500 },
    { "epoch": 1.9, "learning_rate": 1.2396641850150484e-05, "loss": 0.5999, "step": 24000 },
    { "epoch": 1.94, "learning_rate": 1.2238238555361953e-05, "loss": 0.5639, "step": 24500 },
    { "epoch": 1.98, "learning_rate": 1.207983526057342e-05, "loss": 0.5664, "step": 25000 },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.7842274789595817,
      "eval_f1": 0.7824161012799614,
      "eval_kappa": 0.7102691347246439,
      "eval_loss": 0.7084277868270874,
      "eval_precision": 0.7870631351341026,
      "eval_recall": 0.7842274789595817,
      "eval_runtime": 152.0087,
      "eval_samples_per_second": 189.943,
      "eval_steps_per_second": 23.749,
      "step": 25252
    },
    { "epoch": 2.02, "learning_rate": 1.1921431965784888e-05, "loss": 0.4751, "step": 25500 },
    { "epoch": 2.06, "learning_rate": 1.1763028670996357e-05, "loss": 0.4208, "step": 26000 },
    { "epoch": 2.1, "learning_rate": 1.1604625376207826e-05, "loss": 0.4563, "step": 26500 },
    { "epoch": 2.14, "learning_rate": 1.1446222081419293e-05, "loss": 0.4513, "step": 27000 },
    { "epoch": 2.18, "learning_rate": 1.1287818786630762e-05, "loss": 0.4385, "step": 27500 },
    { "epoch": 2.22, "learning_rate": 1.112941549184223e-05, "loss": 0.4493, "step": 28000 },
    { "epoch": 2.26, "learning_rate": 1.0971012197053699e-05, "loss": 0.4533, "step": 28500 },
    { "epoch": 2.3, "learning_rate": 1.0812608902265168e-05, "loss": 0.4365, "step": 29000 },
    { "epoch": 2.34, "learning_rate": 1.0654205607476635e-05, "loss": 0.4205, "step": 29500 },
    { "epoch": 2.38, "learning_rate": 1.0495802312688104e-05, "loss": 0.4538, "step": 30000 },
    { "epoch": 2.42, "learning_rate": 1.0337399017899574e-05, "loss": 0.4653, "step": 30500 },
    { "epoch": 2.46, "learning_rate": 1.0178995723111043e-05, "loss": 0.436, "step": 31000 },
    { "epoch": 2.49, "learning_rate": 1.0020592428322511e-05, "loss": 0.4249, "step": 31500 },
    { "epoch": 2.53, "learning_rate": 9.862189133533979e-06, "loss": 0.4441, "step": 32000 },
    { "epoch": 2.57, "learning_rate": 9.703785838745447e-06, "loss": 0.4544, "step": 32500 },
    { "epoch": 2.61, "learning_rate": 9.545382543956914e-06, "loss": 0.446, "step": 33000 },
    { "epoch": 2.65, "learning_rate": 9.386979249168383e-06, "loss": 0.4457, "step": 33500 },
    { "epoch": 2.69, "learning_rate": 9.228575954379852e-06, "loss": 0.4472, "step": 34000 },
    { "epoch": 2.73, "learning_rate": 9.07017265959132e-06, "loss": 0.4728, "step": 34500 },
    { "epoch": 2.77, "learning_rate": 8.911769364802788e-06, "loss": 0.4455, "step": 35000 },
    { "epoch": 2.81, "learning_rate": 8.753366070014258e-06, "loss": 0.4622, "step": 35500 },
    { "epoch": 2.85, "learning_rate": 8.594962775225727e-06, "loss": 0.4566, "step": 36000 },
    { "epoch": 2.89, "learning_rate": 8.436559480437194e-06, "loss": 0.4185, "step": 36500 },
    { "epoch": 2.93, "learning_rate": 8.278156185648662e-06, "loss": 0.4351, "step": 37000 },
    { "epoch": 2.97, "learning_rate": 8.119752890860131e-06, "loss": 0.4566, "step": 37500 },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.7792747549613827,
      "eval_f1": 0.7780530572128844,
      "eval_kappa": 0.7039084509242337,
      "eval_loss": 0.7550320029258728,
      "eval_precision": 0.7813618395098462,
      "eval_recall": 0.7792747549613827,
      "eval_runtime": 151.5948,
      "eval_samples_per_second": 190.462,
      "eval_steps_per_second": 23.813,
      "step": 37878
    },
    { "epoch": 3.01, "learning_rate": 7.9613495960716e-06, "loss": 0.4023, "step": 38000 },
    { "epoch": 3.05, "learning_rate": 7.802946301283067e-06, "loss": 0.3119, "step": 38500 },
    { "epoch": 3.09, "learning_rate": 7.644543006494536e-06, "loss": 0.3205, "step": 39000 },
    { "epoch": 3.13, "learning_rate": 7.4861397117060044e-06, "loss": 0.35, "step": 39500 },
    { "epoch": 3.17, "learning_rate": 7.327736416917472e-06, "loss": 0.3333, "step": 40000 },
    { "epoch": 3.21, "learning_rate": 7.169333122128941e-06, "loss": 0.3563, "step": 40500 },
    { "epoch": 3.25, "learning_rate": 7.010929827340409e-06, "loss": 0.3388, "step": 41000 },
    { "epoch": 3.29, "learning_rate": 6.852526532551878e-06, "loss": 0.3422, "step": 41500 },
    { "epoch": 3.33, "learning_rate": 6.6941232377633456e-06, "loss": 0.3336, "step": 42000 },
    { "epoch": 3.37, "learning_rate": 6.535719942974814e-06, "loss": 0.3433, "step": 42500 },
    { "epoch": 3.41, "learning_rate": 6.377316648186282e-06, "loss": 0.3574, "step": 43000 },
    { "epoch": 3.45, "learning_rate": 6.218913353397751e-06, "loss": 0.3105, "step": 43500 },
    { "epoch": 3.48, "learning_rate": 6.060510058609219e-06, "loss": 0.3545, "step": 44000 },
    { "epoch": 3.52, "learning_rate": 5.9021067638206875e-06, "loss": 0.3469, "step": 44500 },
    { "epoch": 3.56, "learning_rate": 5.7437034690321554e-06, "loss": 0.35, "step": 45000 },
    { "epoch": 3.6, "learning_rate": 5.585300174243625e-06, "loss": 0.3425, "step": 45500 },
    { "epoch": 3.64, "learning_rate": 5.426896879455094e-06, "loss": 0.3203, "step": 46000 },
    { "epoch": 3.68, "learning_rate": 5.268493584666562e-06, "loss": 0.3549, "step": 46500 },
    { "epoch": 3.72, "learning_rate": 5.11009028987803e-06, "loss": 0.3326, "step": 47000 },
    { "epoch": 3.76, "learning_rate": 4.951686995089498e-06, "loss": 0.3328, "step": 47500 },
    { "epoch": 3.8, "learning_rate": 4.793283700300967e-06, "loss": 0.3376, "step": 48000 },
    { "epoch": 3.84, "learning_rate": 4.634880405512435e-06, "loss": 0.3285, "step": 48500 },
    { "epoch": 3.88, "learning_rate": 4.476477110723904e-06, "loss": 0.361, "step": 49000 },
    { "epoch": 3.92, "learning_rate": 4.3180738159353715e-06, "loss": 0.3487, "step": 49500 },
    { "epoch": 3.96, "learning_rate": 4.15967052114684e-06, "loss": 0.3511, "step": 50000 },
    { "epoch": 4.0, "learning_rate": 4.001267226358309e-06, "loss": 0.3528, "step": 50500 },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.7778201087521214,
      "eval_f1": 0.7760540283760836,
      "eval_kappa": 0.7007853270915267,
      "eval_loss": 0.9983726739883423,
      "eval_precision": 0.7770574865770817,
      "eval_recall": 0.7778201087521214,
      "eval_runtime": 151.818,
      "eval_samples_per_second": 190.182,
      "eval_steps_per_second": 23.778,
      "step": 50504
    }
  ],
  "max_steps": 63130,
  "num_train_epochs": 5,
  "total_flos": 5.330919857118713e+16,
  "trial_name": null,
  "trial_params": null
}