willtensora committed (verified)
Commit da82b6c · 1 Parent(s): 6126eff

Training in progress, step 9, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3ce7ffbf41919a09cb7f41ecfc691d2777eeea4759dea90abcc883b1c2ca7cdc
+oid sha256:92c6adf8f4340fa05c4cff561b05ac7aa6717ed5aad94be8bc13ae5db386177f
 size 83945296
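
The adapter_model.safetensors entry (and every file below) is stored through Git LFS, so the diff only changes the three-line pointer (spec version, sha256 oid, and byte size), not the binary itself. A minimal verification sketch, assuming Python with only the standard library and hypothetical local paths:

import hashlib
import os

# Hypothetical paths: the LFS pointer text and the actual downloaded binary.
pointer_path = "last-checkpoint/adapter_model.safetensors"   # pointer file (3 lines)
blob_path = "downloads/adapter_model.safetensors"            # real weights blob

# Parse "version ...", "oid sha256:<hex>", "size <bytes>" from the pointer.
with open(pointer_path) as f:
    fields = dict(line.split(" ", 1) for line in f.read().splitlines())
expected_oid = fields["oid"].split(":", 1)[1].strip()
expected_size = int(fields["size"])

# Hash the blob in chunks and compare against the pointer.
digest = hashlib.sha256()
with open(blob_path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert os.path.getsize(blob_path) == expected_size, "size mismatch"
assert digest.hexdigest() == expected_oid, "sha256 mismatch"
print("blob matches its LFS pointer")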
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:387f7310bd0c74de2c53e93c66957511b8f4b9a57baba58e1d75956a19a21adf
+oid sha256:55b354e2c22ea89e6399c1e15f99a845b4c91668f457d64756465efbbd682d2e
 size 43122580
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4efb600f1c3500add79f72139e980a8df1a4b60b899741720596608757ec0b80
+oid sha256:ef12b6dfbbd8e9ffb3ee559ac88ba28ccd4ecbca575ae9e6bba9374f75ce34ca
 size 14512
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a2c328ca304e4a83b705e1c382163cbaf9cdf3f6a2a09f6efddc567c79906d4b
+oid sha256:6771eaea8d119793794214c4189a1e1af4912c9df24e2598e943f186f276b24a
 size 14512
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0099cb7287625b29b67c4fcf42ff20fae623b429bfb10f5ac695bc54f2be54fd
+oid sha256:9c8e6b04902f17ae368c3e6cfd97a31ad4de2f025d673daea8c033ce0e260946
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.006191950464396285,
+  "epoch": 0.009287925696594427,
   "eval_steps": 3,
-  "global_step": 6,
+  "global_step": 9,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -73,6 +73,35 @@
       "eval_samples_per_second": 63.718,
       "eval_steps_per_second": 15.988,
       "step": 6
+    },
+    {
+      "epoch": 0.007223942208462332,
+      "grad_norm": 11.56080150604248,
+      "learning_rate": 0.00014,
+      "loss": 7.9041,
+      "step": 7
+    },
+    {
+      "epoch": 0.008255933952528379,
+      "grad_norm": 11.11003303527832,
+      "learning_rate": 0.00016,
+      "loss": 7.3022,
+      "step": 8
+    },
+    {
+      "epoch": 0.009287925696594427,
+      "grad_norm": 12.207099914550781,
+      "learning_rate": 0.00018,
+      "loss": 5.3686,
+      "step": 9
+    },
+    {
+      "epoch": 0.009287925696594427,
+      "eval_loss": 4.850965976715088,
+      "eval_runtime": 12.8178,
+      "eval_samples_per_second": 63.739,
+      "eval_steps_per_second": 15.993,
+      "step": 9
     }
   ],
   "logging_steps": 1,
@@ -92,7 +121,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2219477097775104.0,
+  "total_flos": 3329215646662656.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null