{
  "best_metric": 0.8547194600105286,
  "best_model_checkpoint": "/mnt/bn/qingyi-bn-lq/llama/lora-alpaca/checkpoint-600",
  "epoch": 1.5374759769378603,
  "global_step": 600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05,
      "learning_rate": 5.9999999999999995e-05,
      "loss": 1.9913,
      "step": 20
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00011999999999999999,
      "loss": 1.5044,
      "step": 40
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00017999999999999998,
      "loss": 0.98,
      "step": 60
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00023999999999999998,
      "loss": 0.9036,
      "step": 80
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.0003,
      "loss": 0.8966,
      "step": 100
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00029439252336448596,
      "loss": 0.8743,
      "step": 120
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00028878504672897194,
      "loss": 0.8711,
      "step": 140
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.0002831775700934579,
      "loss": 0.8505,
      "step": 160
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.0002775700934579439,
      "loss": 0.8546,
      "step": 180
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.0002719626168224299,
      "loss": 0.8521,
      "step": 200
    },
    {
      "epoch": 0.51,
      "eval_loss": 0.8707928657531738,
      "eval_runtime": 19.2615,
      "eval_samples_per_second": 103.834,
      "eval_steps_per_second": 3.271,
      "step": 200
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.00026635514018691586,
      "loss": 0.8523,
      "step": 220
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.00026074766355140184,
      "loss": 0.8463,
      "step": 240
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.0002551401869158878,
      "loss": 0.8443,
      "step": 260
    },
    {
      "epoch": 0.72,
      "learning_rate": 0.0002495327102803738,
      "loss": 0.8557,
      "step": 280
    },
    {
      "epoch": 0.77,
      "learning_rate": 0.0002439252336448598,
      "loss": 0.8607,
      "step": 300
    },
    {
      "epoch": 0.82,
      "learning_rate": 0.00023831775700934577,
      "loss": 0.8479,
      "step": 320
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.00023271028037383175,
      "loss": 0.8416,
      "step": 340
    },
    {
      "epoch": 0.92,
      "learning_rate": 0.00022710280373831773,
      "loss": 0.8494,
      "step": 360
    },
    {
      "epoch": 0.97,
      "learning_rate": 0.0002214953271028037,
      "loss": 0.8414,
      "step": 380
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.0002158878504672897,
      "loss": 0.8476,
      "step": 400
    },
    {
      "epoch": 1.02,
      "eval_loss": 0.8598244190216064,
      "eval_runtime": 74.9831,
      "eval_samples_per_second": 26.673,
      "eval_steps_per_second": 0.84,
      "step": 400
    },
    {
      "epoch": 1.08,
      "learning_rate": 0.00021028037383177567,
      "loss": 0.8443,
      "step": 420
    },
    {
      "epoch": 1.13,
      "learning_rate": 0.00020467289719626166,
      "loss": 0.8382,
      "step": 440
    },
    {
      "epoch": 1.18,
      "learning_rate": 0.00019906542056074764,
      "loss": 0.8467,
      "step": 460
    },
    {
      "epoch": 1.23,
      "learning_rate": 0.00019345794392523362,
      "loss": 0.8384,
      "step": 480
    },
    {
      "epoch": 1.28,
      "learning_rate": 0.0001878504672897196,
      "loss": 0.8346,
      "step": 500
    },
    {
      "epoch": 1.33,
      "learning_rate": 0.00018224299065420558,
      "loss": 0.8347,
      "step": 520
    },
    {
      "epoch": 1.38,
      "learning_rate": 0.00017663551401869156,
      "loss": 0.8363,
      "step": 540
    },
    {
      "epoch": 1.43,
      "learning_rate": 0.00017102803738317754,
      "loss": 0.8284,
      "step": 560
    },
    {
      "epoch": 1.49,
      "learning_rate": 0.00016542056074766352,
      "loss": 0.837,
      "step": 580
    },
    {
      "epoch": 1.54,
      "learning_rate": 0.0001598130841121495,
      "loss": 0.8385,
      "step": 600
    },
    {
      "epoch": 1.54,
      "eval_loss": 0.8547194600105286,
      "eval_runtime": 19.3656,
      "eval_samples_per_second": 103.276,
      "eval_steps_per_second": 3.253,
      "step": 600
    }
  ],
  "max_steps": 1170,
  "num_train_epochs": 3,
  "total_flos": 7.798475370360996e+17,
  "trial_name": null,
  "trial_params": null
}