RC-GPT/user-baichuan2-13b-v2-3.6/trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9989637305699481,
"eval_steps": 500,
"global_step": 482,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"grad_norm": 4.99941873550415,
"learning_rate": 2e-05,
"loss": 9.9329,
"step": 10
},
{
"epoch": 0.04,
"grad_norm": 1.741065502166748,
"learning_rate": 4e-05,
"loss": 11.0746,
"step": 20
},
{
"epoch": 0.06,
"grad_norm": 1.4727320671081543,
"learning_rate": 6e-05,
"loss": 2.7159,
"step": 30
},
{
"epoch": 0.08,
"grad_norm": 0.1335960477590561,
"learning_rate": 8e-05,
"loss": 0.3969,
"step": 40
},
{
"epoch": 0.1,
"grad_norm": 0.0014472692273557186,
"learning_rate": 0.0001,
"loss": 0.0032,
"step": 50
},
{
"epoch": 0.12,
"grad_norm": 0.0010780546581372619,
"learning_rate": 0.0001,
"loss": 0.0002,
"step": 60
},
{
"epoch": 0.15,
"grad_norm": 1.03132963180542,
"learning_rate": 0.0001,
"loss": 0.0002,
"step": 70
},
{
"epoch": 0.17,
"grad_norm": 0.008827299810945988,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 80
},
{
"epoch": 0.19,
"grad_norm": 0.0002956670359708369,
"learning_rate": 0.0001,
"loss": 0.0001,
"step": 90
},
{
"epoch": 0.21,
"grad_norm": 0.0003419867134653032,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 100
},
{
"epoch": 0.23,
"grad_norm": 0.0003681881644297391,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 110
},
{
"epoch": 0.25,
"grad_norm": 0.0002884200366679579,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 120
},
{
"epoch": 0.27,
"grad_norm": 0.00011985149467363954,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 130
},
{
"epoch": 0.29,
"grad_norm": 0.0003195986500941217,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 140
},
{
"epoch": 0.31,
"grad_norm": 0.00010149635636480525,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 150
},
{
"epoch": 0.33,
"grad_norm": 0.00010508792183827609,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 160
},
{
"epoch": 0.35,
"grad_norm": 0.00011793687008321285,
"learning_rate": 0.0001,
"loss": 0.006,
"step": 170
},
{
"epoch": 0.37,
"grad_norm": 8.076676749624312e-05,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 180
},
{
"epoch": 0.39,
"grad_norm": 0.0007808339432813227,
"learning_rate": 0.0001,
"loss": 0.006,
"step": 190
},
{
"epoch": 0.41,
"grad_norm": 0.11711683869361877,
"learning_rate": 0.0001,
"loss": 0.003,
"step": 200
},
{
"epoch": 0.44,
"grad_norm": 0.0002039404644165188,
"learning_rate": 0.0001,
"loss": 0.0001,
"step": 210
},
{
"epoch": 0.46,
"grad_norm": 0.00873592495918274,
"learning_rate": 0.0001,
"loss": 0.0209,
"step": 220
},
{
"epoch": 0.48,
"grad_norm": 3.0506539344787598,
"learning_rate": 0.0001,
"loss": 0.0201,
"step": 230
},
{
"epoch": 0.5,
"grad_norm": 0.05903371796011925,
"learning_rate": 0.0001,
"loss": 0.0026,
"step": 240
},
{
"epoch": 0.52,
"grad_norm": 0.0002484666183590889,
"learning_rate": 0.0001,
"loss": 0.0001,
"step": 250
},
{
"epoch": 0.54,
"grad_norm": 0.0003493047261144966,
"learning_rate": 0.0001,
"loss": 0.0001,
"step": 260
},
{
"epoch": 0.56,
"grad_norm": 0.0008058947860263288,
"learning_rate": 0.0001,
"loss": 0.0001,
"step": 270
},
{
"epoch": 0.58,
"grad_norm": 0.0004198936221655458,
"learning_rate": 0.0001,
"loss": 0.0001,
"step": 280
},
{
"epoch": 0.6,
"grad_norm": 0.0002983050071634352,
"learning_rate": 0.0001,
"loss": 0.0001,
"step": 290
},
{
"epoch": 0.62,
"grad_norm": 0.0002279053587699309,
"learning_rate": 0.0001,
"loss": 0.0001,
"step": 300
},
{
"epoch": 0.64,
"grad_norm": 0.00015332824841607362,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 310
},
{
"epoch": 0.66,
"grad_norm": 0.00011723622446879745,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 320
},
{
"epoch": 0.68,
"grad_norm": 0.0001235378731507808,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 330
},
{
"epoch": 0.7,
"grad_norm": 0.00010625163122313097,
"learning_rate": 0.0001,
"loss": 0.0001,
"step": 340
},
{
"epoch": 0.73,
"grad_norm": 7.50239341869019e-05,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 350
},
{
"epoch": 0.75,
"grad_norm": 0.00010148331784876063,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 360
},
{
"epoch": 0.77,
"grad_norm": 7.368126534856856e-05,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 370
},
{
"epoch": 0.79,
"grad_norm": 0.00012744461128022522,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 380
},
{
"epoch": 0.81,
"grad_norm": 5.87971335335169e-05,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 390
},
{
"epoch": 0.83,
"grad_norm": 6.405858584912494e-05,
"learning_rate": 0.0001,
"loss": 0.0001,
"step": 400
},
{
"epoch": 0.85,
"grad_norm": 9.752299229148775e-05,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 410
},
{
"epoch": 0.87,
"grad_norm": 4.5175151171861216e-05,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 420
},
{
"epoch": 0.89,
"grad_norm": 0.00013234459038358182,
"learning_rate": 0.0001,
"loss": 0.0001,
"step": 430
},
{
"epoch": 0.91,
"grad_norm": 0.00013048920664004982,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 440
},
{
"epoch": 0.93,
"grad_norm": 0.0004233328509144485,
"learning_rate": 0.0001,
"loss": 0.0001,
"step": 450
},
{
"epoch": 0.95,
"grad_norm": 0.00019652876653708518,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 460
},
{
"epoch": 0.97,
"grad_norm": 0.000157060450874269,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 470
},
{
"epoch": 0.99,
"grad_norm": 0.00014773521979805082,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 480
},
{
"epoch": 1.0,
"step": 482,
"total_flos": 7.979272829443277e+17,
"train_loss": 0.5017403132133569,
"train_runtime": 75900.5046,
"train_samples_per_second": 0.102,
"train_steps_per_second": 0.006
}
],
"logging_steps": 10,
"max_steps": 482,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 7.979272829443277e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}