VLA8B-V1 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.999690962871396,
"eval_steps": 500,
"global_step": 70780,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.04,
"learning_rate": 9.416195856873823e-06,
"loss": 0.7169,
"step": 500
},
{
"epoch": 0.07,
"learning_rate": 1.8832391713747646e-05,
"loss": 0.7282,
"step": 1000
},
{
"epoch": 0.11,
"learning_rate": 2.8248587570621472e-05,
"loss": 0.7519,
"step": 1500
},
{
"epoch": 0.14,
"learning_rate": 3.766478342749529e-05,
"loss": 0.7757,
"step": 2000
},
{
"epoch": 0.18,
"learning_rate": 3.9997039890834935e-05,
"loss": 0.8085,
"step": 2500
},
{
"epoch": 0.21,
"learning_rate": 3.998393455306432e-05,
"loss": 0.8176,
"step": 3000
},
{
"epoch": 0.25,
"learning_rate": 3.9960368899072094e-05,
"loss": 0.8158,
"step": 3500
},
{
"epoch": 0.28,
"learning_rate": 3.992635526397635e-05,
"loss": 0.8239,
"step": 4000
},
{
"epoch": 0.32,
"learning_rate": 3.9881911451747484e-05,
"loss": 0.8059,
"step": 4500
},
{
"epoch": 0.35,
"learning_rate": 3.9827060725888914e-05,
"loss": 0.806,
"step": 5000
},
{
"epoch": 0.39,
"learning_rate": 3.9761831797260154e-05,
"loss": 0.8018,
"step": 5500
},
{
"epoch": 0.42,
"learning_rate": 3.96862588090485e-05,
"loss": 0.7967,
"step": 6000
},
{
"epoch": 0.46,
"learning_rate": 3.960038131889723e-05,
"loss": 0.8037,
"step": 6500
},
{
"epoch": 0.49,
"learning_rate": 3.9504244278199726e-05,
"loss": 0.7933,
"step": 7000
},
{
"epoch": 0.53,
"learning_rate": 3.9397898008570265e-05,
"loss": 0.7962,
"step": 7500
},
{
"epoch": 0.57,
"learning_rate": 3.9281398175503866e-05,
"loss": 0.7918,
"step": 8000
},
{
"epoch": 0.6,
"learning_rate": 3.915480575923895e-05,
"loss": 0.7868,
"step": 8500
},
{
"epoch": 0.64,
"learning_rate": 3.901818702283807e-05,
"loss": 0.7887,
"step": 9000
},
{
"epoch": 0.67,
"learning_rate": 3.8871613477503424e-05,
"loss": 0.7835,
"step": 9500
},
{
"epoch": 0.71,
"learning_rate": 3.87151618451453e-05,
"loss": 0.7867,
"step": 10000
},
{
"epoch": 0.74,
"learning_rate": 3.854891401822304e-05,
"loss": 0.7844,
"step": 10500
},
{
"epoch": 0.78,
"learning_rate": 3.837295701687955e-05,
"loss": 0.7791,
"step": 11000
},
{
"epoch": 0.81,
"learning_rate": 3.818738294339182e-05,
"loss": 0.785,
"step": 11500
},
{
"epoch": 0.85,
"learning_rate": 3.799228893396123e-05,
"loss": 0.7791,
"step": 12000
},
{
"epoch": 0.88,
"learning_rate": 3.778777710786896e-05,
"loss": 0.7804,
"step": 12500
},
{
"epoch": 0.92,
"learning_rate": 3.757395451402304e-05,
"loss": 0.7794,
"step": 13000
},
{
"epoch": 0.95,
"learning_rate": 3.735093307492506e-05,
"loss": 0.7736,
"step": 13500
},
{
"epoch": 0.99,
"learning_rate": 3.7118829528085897e-05,
"loss": 0.7837,
"step": 14000
},
{
"epoch": 1.02,
"learning_rate": 3.687776536492105e-05,
"loss": 0.7527,
"step": 14500
},
{
"epoch": 1.06,
"learning_rate": 3.662786676715763e-05,
"loss": 0.7403,
"step": 15000
},
{
"epoch": 1.09,
"learning_rate": 3.636926454078625e-05,
"loss": 0.7547,
"step": 15500
},
{
"epoch": 1.13,
"learning_rate": 3.610209404759251e-05,
"loss": 0.7476,
"step": 16000
},
{
"epoch": 1.17,
"learning_rate": 3.5826495134303565e-05,
"loss": 0.7474,
"step": 16500
},
{
"epoch": 1.2,
"learning_rate": 3.5542612059387445e-05,
"loss": 0.747,
"step": 17000
},
{
"epoch": 1.24,
"learning_rate": 3.5250593417542837e-05,
"loss": 0.7441,
"step": 17500
},
{
"epoch": 1.27,
"learning_rate": 3.495059206191926e-05,
"loss": 0.7463,
"step": 18000
},
{
"epoch": 1.31,
"learning_rate": 3.464276502410819e-05,
"loss": 0.7452,
"step": 18500
},
{
"epoch": 1.34,
"learning_rate": 3.432727343194701e-05,
"loss": 0.7439,
"step": 19000
},
{
"epoch": 1.38,
"learning_rate": 3.400428242517889e-05,
"loss": 0.7438,
"step": 19500
},
{
"epoch": 1.41,
"learning_rate": 3.367396106901259e-05,
"loss": 0.7427,
"step": 20000
},
{
"epoch": 1.45,
"learning_rate": 3.3336482265627675e-05,
"loss": 0.7471,
"step": 20500
},
{
"epoch": 1.48,
"learning_rate": 3.299202266367119e-05,
"loss": 0.745,
"step": 21000
},
{
"epoch": 1.52,
"learning_rate": 3.2640762565793374e-05,
"loss": 0.7418,
"step": 21500
},
{
"epoch": 1.55,
"learning_rate": 3.2282885834270696e-05,
"loss": 0.7465,
"step": 22000
},
{
"epoch": 1.59,
"learning_rate": 3.191857979476569e-05,
"loss": 0.7416,
"step": 22500
},
{
"epoch": 1.62,
"learning_rate": 3.154803513827388e-05,
"loss": 0.7421,
"step": 23000
},
{
"epoch": 1.66,
"learning_rate": 3.117144582130925e-05,
"loss": 0.7396,
"step": 23500
},
{
"epoch": 1.7,
"learning_rate": 3.078900896438028e-05,
"loss": 0.7371,
"step": 24000
},
{
"epoch": 1.73,
"learning_rate": 3.040092474881003e-05,
"loss": 0.7393,
"step": 24500
},
{
"epoch": 1.77,
"learning_rate": 3.0007396311953882e-05,
"loss": 0.7366,
"step": 25000
},
{
"epoch": 1.8,
"learning_rate": 2.9608629640870138e-05,
"loss": 0.7455,
"step": 25500
},
{
"epoch": 1.84,
"learning_rate": 2.9204833464498878e-05,
"loss": 0.7387,
"step": 26000
},
{
"epoch": 1.87,
"learning_rate": 2.8796219144405713e-05,
"loss": 0.7363,
"step": 26500
},
{
"epoch": 1.91,
"learning_rate": 2.838300056414743e-05,
"loss": 0.7387,
"step": 27000
},
{
"epoch": 1.94,
"learning_rate": 2.7965394017317587e-05,
"loss": 0.7317,
"step": 27500
},
{
"epoch": 1.98,
"learning_rate": 2.754361809433063e-05,
"loss": 0.7331,
"step": 28000
},
{
"epoch": 2.01,
"learning_rate": 2.711789356800372e-05,
"loss": 0.7049,
"step": 28500
},
{
"epoch": 2.05,
"learning_rate": 2.6688443277996225e-05,
"loss": 0.6657,
"step": 29000
},
{
"epoch": 2.08,
"learning_rate": 2.6255492014167356e-05,
"loss": 0.6651,
"step": 29500
},
{
"epoch": 2.12,
"learning_rate": 2.581926639891304e-05,
"loss": 0.6717,
"step": 30000
},
{
"epoch": 2.15,
"learning_rate": 2.537999476854349e-05,
"loss": 0.6688,
"step": 30500
},
{
"epoch": 2.19,
"learning_rate": 2.4937907053763732e-05,
"loss": 0.6726,
"step": 31000
},
{
"epoch": 2.23,
"learning_rate": 2.4493234659319507e-05,
"loss": 0.6681,
"step": 31500
},
{
"epoch": 2.26,
"learning_rate": 2.404621034287166e-05,
"loss": 0.6656,
"step": 32000
},
{
"epoch": 2.3,
"learning_rate": 2.359706809316231e-05,
"loss": 0.6633,
"step": 32500
},
{
"epoch": 2.33,
"learning_rate": 2.314604300753667e-05,
"loss": 0.6647,
"step": 33000
},
{
"epoch": 2.37,
"learning_rate": 2.2693371168884593e-05,
"loss": 0.6632,
"step": 33500
},
{
"epoch": 2.4,
"learning_rate": 2.2239289522066157e-05,
"loss": 0.6642,
"step": 34000
},
{
"epoch": 2.44,
"learning_rate": 2.178403574988621e-05,
"loss": 0.6626,
"step": 34500
},
{
"epoch": 2.47,
"learning_rate": 2.1327848148682503e-05,
"loss": 0.659,
"step": 35000
},
{
"epoch": 2.51,
"learning_rate": 2.0870965503592795e-05,
"loss": 0.6626,
"step": 35500
},
{
"epoch": 2.54,
"learning_rate": 2.0413626963566004e-05,
"loss": 0.6605,
"step": 36000
},
{
"epoch": 2.58,
"learning_rate": 1.9956071916183e-05,
"loss": 0.6583,
"step": 36500
},
{
"epoch": 2.61,
"learning_rate": 1.9498539862352476e-05,
"loss": 0.6554,
"step": 37000
},
{
"epoch": 2.65,
"learning_rate": 1.904127029094744e-05,
"loss": 0.6588,
"step": 37500
},
{
"epoch": 2.68,
"learning_rate": 1.8584502553448085e-05,
"loss": 0.6561,
"step": 38000
},
{
"epoch": 2.72,
"learning_rate": 1.812847573865655e-05,
"loss": 0.6529,
"step": 38500
},
{
"epoch": 2.75,
"learning_rate": 1.7673428547549134e-05,
"loss": 0.6499,
"step": 39000
},
{
"epoch": 2.79,
"learning_rate": 1.721959916833157e-05,
"loss": 0.648,
"step": 39500
},
{
"epoch": 2.83,
"learning_rate": 1.6767225151762676e-05,
"loss": 0.6479,
"step": 40000
},
{
"epoch": 2.86,
"learning_rate": 1.631654328681168e-05,
"loss": 0.6482,
"step": 40500
},
{
"epoch": 2.9,
"learning_rate": 1.586778947671426e-05,
"loss": 0.6446,
"step": 41000
},
{
"epoch": 2.93,
"learning_rate": 1.5421198615492244e-05,
"loss": 0.6432,
"step": 41500
},
{
"epoch": 2.97,
"learning_rate": 1.4977004465001586e-05,
"loss": 0.6425,
"step": 42000
},
{
"epoch": 3.0,
"learning_rate": 1.4535439532572877e-05,
"loss": 0.6361,
"step": 42500
},
{
"epoch": 3.04,
"learning_rate": 1.4096734949308623e-05,
"loss": 0.5411,
"step": 43000
},
{
"epoch": 3.07,
"learning_rate": 1.3661120349100823e-05,
"loss": 0.5375,
"step": 43500
},
{
"epoch": 3.11,
"learning_rate": 1.3228823748432258e-05,
"loss": 0.5394,
"step": 44000
},
{
"epoch": 3.14,
"learning_rate": 1.2800071427024391e-05,
"loss": 0.5369,
"step": 44500
},
{
"epoch": 3.18,
"learning_rate": 1.2375087809394368e-05,
"loss": 0.5378,
"step": 45000
},
{
"epoch": 3.21,
"learning_rate": 1.1954095347383076e-05,
"loss": 0.5365,
"step": 45500
},
{
"epoch": 3.25,
"learning_rate": 1.153731440371577e-05,
"loss": 0.5358,
"step": 46000
},
{
"epoch": 3.28,
"learning_rate": 1.1124963136656253e-05,
"loss": 0.5375,
"step": 46500
},
{
"epoch": 3.32,
"learning_rate": 1.0717257385814897e-05,
"loss": 0.537,
"step": 47000
},
{
"epoch": 3.36,
"learning_rate": 1.0314410559170397e-05,
"loss": 0.5348,
"step": 47500
},
{
"epoch": 3.39,
"learning_rate": 9.916633521364266e-06,
"loss": 0.5342,
"step": 48000
},
{
"epoch": 3.43,
"learning_rate": 9.524134483326633e-06,
"loss": 0.5321,
"step": 48500
},
{
"epoch": 3.46,
"learning_rate": 9.137118893291118e-06,
"loss": 0.5311,
"step": 49000
},
{
"epoch": 3.5,
"learning_rate": 8.755789329255755e-06,
"loss": 0.5303,
"step": 49500
},
{
"epoch": 3.53,
"learning_rate": 8.3803453929463e-06,
"loss": 0.5296,
"step": 50000
},
{
"epoch": 3.57,
"learning_rate": 8.01098360533749e-06,
"loss": 0.5288,
"step": 50500
},
{
"epoch": 3.6,
"learning_rate": 7.647897303786813e-06,
"loss": 0.5266,
"step": 51000
},
{
"epoch": 3.64,
"learning_rate": 7.291276540834699e-06,
"loss": 0.5263,
"step": 51500
},
{
"epoch": 3.67,
"learning_rate": 6.941307984724182e-06,
"loss": 0.5259,
"step": 52000
},
{
"epoch": 3.71,
"learning_rate": 6.598174821691929e-06,
"loss": 0.5231,
"step": 52500
},
{
"epoch": 3.74,
"learning_rate": 6.262056660081919e-06,
"loss": 0.5255,
"step": 53000
},
{
"epoch": 3.78,
"learning_rate": 5.933129436331942e-06,
"loss": 0.5237,
"step": 53500
},
{
"epoch": 3.81,
"learning_rate": 5.611565322882084e-06,
"loss": 0.5213,
"step": 54000
},
{
"epoch": 3.85,
"learning_rate": 5.297532638053395e-06,
"loss": 0.5203,
"step": 54500
},
{
"epoch": 3.89,
"learning_rate": 4.991195757944023e-06,
"loss": 0.5191,
"step": 55000
},
{
"epoch": 3.92,
"learning_rate": 4.6927150303887505e-06,
"loss": 0.5177,
"step": 55500
},
{
"epoch": 3.96,
"learning_rate": 4.402246691027168e-06,
"loss": 0.5161,
"step": 56000
},
{
"epoch": 3.99,
"learning_rate": 4.119942781524248e-06,
"loss": 0.516,
"step": 56500
},
{
"epoch": 4.03,
"learning_rate": 3.845951069986216e-06,
"loss": 0.4345,
"step": 57000
},
{
"epoch": 4.06,
"learning_rate": 3.5804149736133887e-06,
"loss": 0.4057,
"step": 57500
},
{
"epoch": 4.1,
"learning_rate": 3.3234734836303883e-06,
"loss": 0.4048,
"step": 58000
},
{
"epoch": 4.13,
"learning_rate": 3.075261092533097e-06,
"loss": 0.4048,
"step": 58500
},
{
"epoch": 4.17,
"learning_rate": 2.8359077236904165e-06,
"loss": 0.4041,
"step": 59000
},
{
"epoch": 4.2,
"learning_rate": 2.6055386633376613e-06,
"loss": 0.4026,
"step": 59500
},
{
"epoch": 4.24,
"learning_rate": 2.3842744949971765e-06,
"loss": 0.4039,
"step": 60000
},
{
"epoch": 4.27,
"learning_rate": 2.172231036360588e-06,
"loss": 0.403,
"step": 60500
},
{
"epoch": 4.31,
"learning_rate": 1.9695192786655902e-06,
"loss": 0.4038,
"step": 61000
},
{
"epoch": 4.34,
"learning_rate": 1.776245328599111e-06,
"loss": 0.4023,
"step": 61500
},
{
"epoch": 4.38,
"learning_rate": 1.5925103527572395e-06,
"loss": 0.4018,
"step": 62000
},
{
"epoch": 4.41,
"learning_rate": 1.4184105246909429e-06,
"loss": 0.4017,
"step": 62500
},
{
"epoch": 4.45,
"learning_rate": 1.2540369745653446e-06,
"loss": 0.4022,
"step": 63000
},
{
"epoch": 4.49,
"learning_rate": 1.099475741458904e-06,
"loss": 0.4016,
"step": 63500
},
{
"epoch": 4.52,
"learning_rate": 9.548077283274115e-07,
"loss": 0.4021,
"step": 64000
},
{
"epoch": 4.56,
"learning_rate": 8.201086596564867e-07,
"loss": 0.4003,
"step": 64500
},
{
"epoch": 4.59,
"learning_rate": 6.954490418246052e-07,
"loss": 0.4001,
"step": 65000
},
{
"epoch": 4.63,
"learning_rate": 5.808941261975087e-07,
"loss": 0.4004,
"step": 65500
},
{
"epoch": 4.66,
"learning_rate": 4.765038749732864e-07,
"loss": 0.4003,
"step": 66000
},
{
"epoch": 4.7,
"learning_rate": 3.823329297959766e-07,
"loss": 0.4002,
"step": 66500
},
{
"epoch": 4.73,
"learning_rate": 2.98430583154139e-07,
"loss": 0.4002,
"step": 67000
},
{
"epoch": 4.77,
"learning_rate": 2.2484075257939165e-07,
"loss": 0.4006,
"step": 67500
},
{
"epoch": 4.8,
"learning_rate": 1.6160195765838605e-07,
"loss": 0.3993,
"step": 68000
},
{
"epoch": 4.84,
"learning_rate": 1.0874729987023547e-07,
"loss": 0.3989,
"step": 68500
},
{
"epoch": 4.87,
"learning_rate": 6.630444526002367e-08,
"loss": 0.3991,
"step": 69000
},
{
"epoch": 4.91,
"learning_rate": 3.4295609957382126e-08,
"loss": 0.3996,
"step": 69500
},
{
"epoch": 4.94,
"learning_rate": 1.2737548547760991e-08,
"loss": 0.3992,
"step": 70000
},
{
"epoch": 4.98,
"learning_rate": 1.6415453024820617e-09,
"loss": 0.3995,
"step": 70500
},
{
"epoch": 5.0,
"step": 70780,
"total_flos": 1.007604716180962e+24,
"train_loss": 0.6235090117226821,
"train_runtime": 442317.6902,
"train_samples_per_second": 40.968,
"train_steps_per_second": 0.16
}
],
"logging_steps": 500,
"max_steps": 70780,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 3000,
"total_flos": 1.007604716180962e+24,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
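
Note (not part of the uploaded file): the JSON above is the Hugging Face Trainer state saved with the VLA8B-V1 checkpoint; "log_history" holds one record per logging event (every 500 steps here) plus a final summary record with "train_loss" and runtime totals. A minimal sketch of how one might read it and pull out the logged loss curve, assuming the file is stored locally as "trainer_state.json" (the file name and the printed fields are assumptions for illustration):

    # Sketch: load trainer_state.json and extract the per-step loss curve.
    import json

    with open("trainer_state.json") as f:
        state = json.load(f)

    # Per-step logging records carry "loss"; the final summary record does not,
    # so filtering on that key separates the two.
    records = [r for r in state["log_history"] if "loss" in r]

    steps = [r["step"] for r in records]
    losses = [r["loss"] for r in records]

    print(f"{len(records)} logging events, last logged loss {losses[-1]:.4f}")
    # The last log_history entry is the summary with the averaged train_loss.
    print(f"reported train_loss: {state['log_history'][-1]['train_loss']:.4f}")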