{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 27741,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05,
"learning_rate": 4.909880682022999e-05,
"loss": 1.5855,
"step": 500
},
{
"epoch": 0.11,
"learning_rate": 4.819761364045997e-05,
"loss": 0.9642,
"step": 1000
},
{
"epoch": 0.16,
"learning_rate": 4.7296420460689956e-05,
"loss": 0.8671,
"step": 1500
},
{
"epoch": 0.22,
"learning_rate": 4.639522728091994e-05,
"loss": 0.8052,
"step": 2000
},
{
"epoch": 0.27,
"learning_rate": 4.5494034101149925e-05,
"loss": 0.765,
"step": 2500
},
{
"epoch": 0.32,
"learning_rate": 4.459284092137991e-05,
"loss": 0.7121,
"step": 3000
},
{
"epoch": 0.38,
"learning_rate": 4.3691647741609894e-05,
"loss": 0.6643,
"step": 3500
},
{
"epoch": 0.43,
"learning_rate": 4.279045456183988e-05,
"loss": 0.6489,
"step": 4000
},
{
"epoch": 0.49,
"learning_rate": 4.188926138206986e-05,
"loss": 0.6167,
"step": 4500
},
{
"epoch": 0.54,
"learning_rate": 4.098806820229985e-05,
"loss": 0.5922,
"step": 5000
},
{
"epoch": 0.59,
"learning_rate": 4.008687502252983e-05,
"loss": 0.5763,
"step": 5500
},
{
"epoch": 0.65,
"learning_rate": 3.9185681842759816e-05,
"loss": 0.5449,
"step": 6000
},
{
"epoch": 0.7,
"learning_rate": 3.82844886629898e-05,
"loss": 0.5321,
"step": 6500
},
{
"epoch": 0.76,
"learning_rate": 3.7383295483219785e-05,
"loss": 0.5141,
"step": 7000
},
{
"epoch": 0.81,
"learning_rate": 3.648210230344977e-05,
"loss": 0.4918,
"step": 7500
},
{
"epoch": 0.87,
"learning_rate": 3.558090912367975e-05,
"loss": 0.4764,
"step": 8000
},
{
"epoch": 0.92,
"learning_rate": 3.467971594390974e-05,
"loss": 0.4581,
"step": 8500
},
{
"epoch": 0.97,
"learning_rate": 3.3778522764139723e-05,
"loss": 0.4505,
"step": 9000
},
{
"epoch": 1.03,
"learning_rate": 3.287732958436971e-05,
"loss": 0.4065,
"step": 9500
},
{
"epoch": 1.08,
"learning_rate": 3.197613640459969e-05,
"loss": 0.3699,
"step": 10000
},
{
"epoch": 1.14,
"learning_rate": 3.107494322482968e-05,
"loss": 0.3601,
"step": 10500
},
{
"epoch": 1.19,
"learning_rate": 3.017375004505966e-05,
"loss": 0.3493,
"step": 11000
},
{
"epoch": 1.24,
"learning_rate": 2.9272556865289646e-05,
"loss": 0.3467,
"step": 11500
},
{
"epoch": 1.3,
"learning_rate": 2.837136368551963e-05,
"loss": 0.3401,
"step": 12000
},
{
"epoch": 1.35,
"learning_rate": 2.7470170505749615e-05,
"loss": 0.3317,
"step": 12500
},
{
"epoch": 1.41,
"learning_rate": 2.65689773259796e-05,
"loss": 0.3183,
"step": 13000
},
{
"epoch": 1.46,
"learning_rate": 2.5667784146209584e-05,
"loss": 0.3242,
"step": 13500
},
{
"epoch": 1.51,
"learning_rate": 2.476659096643957e-05,
"loss": 0.3167,
"step": 14000
},
{
"epoch": 1.57,
"learning_rate": 2.3865397786669553e-05,
"loss": 0.2991,
"step": 14500
},
{
"epoch": 1.62,
"learning_rate": 2.2964204606899537e-05,
"loss": 0.3037,
"step": 15000
},
{
"epoch": 1.68,
"learning_rate": 2.2063011427129522e-05,
"loss": 0.2938,
"step": 15500
},
{
"epoch": 1.73,
"learning_rate": 2.1161818247359506e-05,
"loss": 0.2826,
"step": 16000
},
{
"epoch": 1.78,
"learning_rate": 2.026062506758949e-05,
"loss": 0.2768,
"step": 16500
},
{
"epoch": 1.84,
"learning_rate": 1.9359431887819472e-05,
"loss": 0.2733,
"step": 17000
},
{
"epoch": 1.89,
"learning_rate": 1.8458238708049457e-05,
"loss": 0.273,
"step": 17500
},
{
"epoch": 1.95,
"learning_rate": 1.755704552827944e-05,
"loss": 0.2639,
"step": 18000
},
{
"epoch": 2.0,
"learning_rate": 1.6655852348509426e-05,
"loss": 0.2563,
"step": 18500
},
{
"epoch": 2.05,
"learning_rate": 1.575465916873941e-05,
"loss": 0.202,
"step": 19000
},
{
"epoch": 2.11,
"learning_rate": 1.4853465988969395e-05,
"loss": 0.1995,
"step": 19500
},
{
"epoch": 2.16,
"learning_rate": 1.3952272809199379e-05,
"loss": 0.1923,
"step": 20000
},
{
"epoch": 2.22,
"learning_rate": 1.3051079629429367e-05,
"loss": 0.1955,
"step": 20500
},
{
"epoch": 2.27,
"learning_rate": 1.214988644965935e-05,
"loss": 0.1916,
"step": 21000
},
{
"epoch": 2.33,
"learning_rate": 1.1248693269889334e-05,
"loss": 0.1826,
"step": 21500
},
{
"epoch": 2.38,
"learning_rate": 1.0347500090119317e-05,
"loss": 0.1863,
"step": 22000
},
{
"epoch": 2.43,
"learning_rate": 9.446306910349303e-06,
"loss": 0.1797,
"step": 22500
},
{
"epoch": 2.49,
"learning_rate": 8.545113730579288e-06,
"loss": 0.1755,
"step": 23000
},
{
"epoch": 2.54,
"learning_rate": 7.643920550809272e-06,
"loss": 0.1744,
"step": 23500
},
{
"epoch": 2.6,
"learning_rate": 6.742727371039257e-06,
"loss": 0.1737,
"step": 24000
},
{
"epoch": 2.65,
"learning_rate": 5.84153419126924e-06,
"loss": 0.1631,
"step": 24500
},
{
"epoch": 2.7,
"learning_rate": 4.940341011499225e-06,
"loss": 0.165,
"step": 25000
},
{
"epoch": 2.76,
"learning_rate": 4.03914783172921e-06,
"loss": 0.1648,
"step": 25500
},
{
"epoch": 2.81,
"learning_rate": 3.1379546519591943e-06,
"loss": 0.1626,
"step": 26000
},
{
"epoch": 2.87,
"learning_rate": 2.2367614721891784e-06,
"loss": 0.1551,
"step": 26500
},
{
"epoch": 2.92,
"learning_rate": 1.335568292419163e-06,
"loss": 0.1577,
"step": 27000
},
{
"epoch": 2.97,
"learning_rate": 4.3437511264914753e-07,
"loss": 0.1535,
"step": 27500
},
{
"epoch": 3.0,
"step": 27741,
"total_flos": 2.3702775297552e+17,
"train_loss": 0.38746264250645196,
"train_runtime": 16164.8093,
"train_samples_per_second": 109.822,
"train_steps_per_second": 1.716
}
],
"logging_steps": 500,
"max_steps": 27741,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"total_flos": 2.3702775297552e+17,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}