flytech committed on
Commit ff82bd9
1 Parent(s): 248b81b

Training in progress, step 1200, checkpoint

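This commit looks like an automatic checkpoint push from a transformers Trainer run (save_steps is 200 in trainer_state.json below). As a minimal sketch, not part of this commit, the checkpoint directory can be fetched from the Hub with huggingface_hub; the repo id below is a placeholder, since this page does not show which model repo the commit belongs to.

from huggingface_hub import snapshot_download

# Minimal sketch; the repo id is hypothetical, the revision is the commit id shown above.
local_dir = snapshot_download(
    repo_id="flytech/some-model",           # placeholder, substitute the real repo id
    revision="ff82bd9",                     # commit shown on this page (a full hash also works)
    allow_patterns=["last-checkpoint/*"],   # fetch only the checkpoint directory
)
print(local_dir)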
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:37a81150462de1f5724816fc86eeb72708fdd41c2b9120a6c0fb0893e25e26a2
+oid sha256:b57af93b93b031de6e632de092d31a96e7b6a4bc72353591acdc5c6475f51e76
 size 250422888
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0b9b872563bae5226150d141d2458c477cc4944f5e1ef6209dfe23ed474e91ae
+oid sha256:f046e63e10418a9dac2b2d0622e3354a7d5efbbc4b83e3cf9a3c254b0725a0b9
 size 126034975
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:104cbb095e211a3aa51312000d657ebbe612e28bb6e9115038fb06d9d13e1bb5
+oid sha256:5776b72341fe8dcc62fa355a6219987d7315883c515cce58c8413060cb570a18
 size 14575
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3bb7339d1d2da342a44f3ba5e076fb6e5d93aee86f890c002703d2b3a0750820
+oid sha256:3d8d6be7898f87772ccbc5c732e900fe63a643c4595ce6af3d6bc6f811ba4b65
 size 627
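The four files above are stored through Git LFS, so the diff only shows the pointer files (version / oid sha256 / size), not the binary contents. A minimal sketch, assuming the real adapter_model.safetensors has already been downloaded to the path below, of recomputing the two values a pointer records:

import hashlib, os

path = "last-checkpoint/adapter_model.safetensors"   # assumed local path to the downloaded file

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)

print("oid sha256:" + sha.hexdigest())   # should match the new oid line in the diff above
print("size", os.path.getsize(path))     # should match the size line (250422888)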
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 2.688172043010753,
+  "epoch": 3.225806451612903,
   "eval_steps": 200,
-  "global_step": 1000,
+  "global_step": 1200,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -72,13 +72,26 @@
       "eval_samples_per_second": 2.752,
       "eval_steps_per_second": 0.176,
       "step": 1000
+    },
+    {
+      "epoch": 3.23,
+      "learning_rate": 0.0002,
+      "loss": 0.0396,
+      "step": 1200
+    },
+    {
+      "epoch": 3.23,
+      "eval_runtime": 113.7472,
+      "eval_samples_per_second": 2.752,
+      "eval_steps_per_second": 0.176,
+      "step": 1200
     }
   ],
   "logging_steps": 200,
   "max_steps": 2232,
   "num_train_epochs": 6,
   "save_steps": 200,
-  "total_flos": 4.764597717970944e+17,
+  "total_flos": 5.716265459337216e+17,
   "trial_name": null,
   "trial_params": null
 }
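The epoch values in this diff are consistent with the step counts: with max_steps = 2232 over num_train_epochs = 6 there are 372 optimizer steps per epoch, so step 1000 sits at 1000 / 372 ≈ 2.688 epochs (the old value) and step 1200 at 1200 / 372 ≈ 3.226 (the new one). A short check, purely arithmetic:

steps_per_epoch = 2232 / 6            # max_steps / num_train_epochs = 372.0
print(1000 / steps_per_epoch)         # ≈ 2.688172..., the old "epoch"
print(1200 / steps_per_epoch)         # ≈ 3.225806..., the new "epoch"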