khyat committed
Commit 5a3a5d4
1 Parent(s): ab9f659

Training in progress, step 23000

adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7ccb45568bd4c7d3511e45041d493c661b174070436e88dce1bd20710cc6d9db
+ oid sha256:d17ba7f0bda6ad7bdf8a551f0a9de22ec3e35ffc7f7de852db391e779af37bf8
  size 5544932128
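
The binary files in this commit are stored through Git LFS, so the diff above only touches the three-line pointer: the sha256 oid changes while the recorded size (5544932128 bytes) stays the same. A minimal sketch of how such a pointer can be read and checked against a downloaded blob, assuming local paths and helper names that are not part of this repo:

import hashlib

def parse_lfs_pointer(path):
    """Read a Git LFS pointer (version / oid / size) into its parts."""
    fields = {}
    with open(path, "r", encoding="utf-8") as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    algo, _, digest = fields["oid"].partition(":")  # e.g. "sha256:d17ba7f0..."
    return algo, digest, int(fields["size"])

def blob_matches_pointer(pointer_path, blob_path):
    """True if the local blob has the digest and size the pointer records."""
    algo, digest, size = parse_lfs_pointer(pointer_path)
    h = hashlib.new(algo)
    nbytes = 0
    with open(blob_path, "rb") as fh:
        for chunk in iter(lambda: fh.read(1 << 20), b""):
            h.update(chunk)
            nbytes += len(chunk)
    return h.hexdigest() == digest and nbytes == size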
last-checkpoint/adapter_config.json CHANGED
@@ -23,13 +23,13 @@
  "rank_pattern": {},
  "revision": "unsloth",
  "target_modules": [
- "v_proj",
- "up_proj",
- "k_proj",
  "o_proj",
+ "v_proj",
+ "q_proj",
  "down_proj",
  "gate_proj",
- "q_proj"
+ "k_proj",
+ "up_proj"
  ],
  "task_type": "CAUSAL_LM",
  "use_dora": false,
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cd21db90406acf9716c653cef333b46bc65f2d3afa87981f50c21efc9b5dc1a5
+ oid sha256:71cd57c0a39c9efff0f951a19c93f1b94133eba860a04cd29141f1a192bb0770
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.9067781667968061,
+ "epoch": 0.42316314450517617,
  "eval_steps": 500,
- "global_step": 22500,
+ "global_step": 10500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -154,174 +154,6 @@
  "learning_rate": 3.204737808427746e-05,
  "loss": 0.5001,
  "step": 10500
- },
- {
- "epoch": 0.4433137704339941,
- "grad_norm": 0.5442752242088318,
- "learning_rate": 3.092785813443196e-05,
- "loss": 0.5053,
- "step": 11000
- },
- {
- "epoch": 0.463464396362812,
- "grad_norm": 0.5094788074493408,
- "learning_rate": 2.9808338184586448e-05,
- "loss": 0.502,
- "step": 11500
- },
- {
- "epoch": 0.48361502229162995,
- "grad_norm": 0.5177408456802368,
- "learning_rate": 2.8688818234740942e-05,
- "loss": 0.4972,
- "step": 12000
- },
- {
- "epoch": 0.5037656482204479,
- "grad_norm": 0.5165345072746277,
- "learning_rate": 2.756929828489544e-05,
- "loss": 0.495,
- "step": 12500
- },
- {
- "epoch": 0.5239162741492658,
- "grad_norm": 0.6729832291603088,
- "learning_rate": 2.644977833504993e-05,
- "loss": 0.4942,
- "step": 13000
- },
- {
- "epoch": 0.5440669000780837,
- "grad_norm": 0.5211122035980225,
- "learning_rate": 2.5330258385204423e-05,
- "loss": 0.4928,
- "step": 13500
- },
- {
- "epoch": 0.5642175260069016,
- "grad_norm": 0.6845284104347229,
- "learning_rate": 2.421073843535892e-05,
- "loss": 0.4935,
- "step": 14000
- },
- {
- "epoch": 0.5843681519357194,
- "grad_norm": 0.8934305906295776,
- "learning_rate": 2.3091218485513413e-05,
- "loss": 0.4889,
- "step": 14500
- },
- {
- "epoch": 0.6045187778645374,
- "grad_norm": 0.5098538994789124,
- "learning_rate": 2.1971698535667904e-05,
- "loss": 0.4863,
- "step": 15000
- },
- {
- "epoch": 0.6246694037933553,
- "grad_norm": 0.7277902364730835,
- "learning_rate": 2.0852178585822403e-05,
- "loss": 0.4866,
- "step": 15500
- },
- {
- "epoch": 0.6448200297221732,
- "grad_norm": 0.5824995040893555,
- "learning_rate": 1.9732658635976894e-05,
- "loss": 0.4872,
- "step": 16000
- },
- {
- "epoch": 0.6649706556509911,
- "grad_norm": 0.5100158452987671,
- "learning_rate": 1.861313868613139e-05,
- "loss": 0.4891,
- "step": 16500
- },
- {
- "epoch": 0.6851212815798091,
- "grad_norm": 0.7346712946891785,
- "learning_rate": 1.7493618736285884e-05,
- "loss": 0.4819,
- "step": 17000
- },
- {
- "epoch": 0.705271907508627,
- "grad_norm": 0.805208146572113,
- "learning_rate": 1.6374098786440375e-05,
- "loss": 0.4805,
- "step": 17500
- },
- {
- "epoch": 0.7254225334374449,
- "grad_norm": 0.6093766093254089,
- "learning_rate": 1.5254578836594868e-05,
- "loss": 0.4816,
- "step": 18000
- },
- {
- "epoch": 0.7455731593662628,
- "grad_norm": 0.5469486117362976,
- "learning_rate": 1.4135058886749365e-05,
- "loss": 0.4815,
- "step": 18500
- },
- {
- "epoch": 0.7657237852950808,
- "grad_norm": 0.6185249090194702,
- "learning_rate": 1.3015538936903856e-05,
- "loss": 0.4823,
- "step": 19000
- },
- {
- "epoch": 0.7858744112238987,
- "grad_norm": 0.6320446133613586,
- "learning_rate": 1.189601898705835e-05,
- "loss": 0.48,
- "step": 19500
- },
- {
- "epoch": 0.8060250371527166,
- "grad_norm": 0.7233290672302246,
- "learning_rate": 1.0776499037212844e-05,
- "loss": 0.4776,
- "step": 20000
- },
- {
- "epoch": 0.8261756630815345,
- "grad_norm": 0.5041834115982056,
- "learning_rate": 9.656979087367337e-06,
- "loss": 0.4758,
- "step": 20500
- },
- {
- "epoch": 0.8463262890103523,
- "grad_norm": 0.5525909066200256,
- "learning_rate": 8.53745913752183e-06,
- "loss": 0.4748,
- "step": 21000
- },
- {
- "epoch": 0.8664769149391703,
- "grad_norm": 0.493874192237854,
- "learning_rate": 7.417939187676325e-06,
- "loss": 0.4789,
- "step": 21500
- },
- {
- "epoch": 0.8866275408679882,
- "grad_norm": 0.4965188205242157,
- "learning_rate": 6.298419237830819e-06,
- "loss": 0.4736,
- "step": 22000
- },
- {
- "epoch": 0.9067781667968061,
- "grad_norm": 0.5210591554641724,
- "learning_rate": 5.178899287985312e-06,
- "loss": 0.4782,
- "step": 22500
  }
  ],
  "logging_steps": 500,
@@ -341,7 +173,7 @@
  "attributes": {}
  }
  },
- "total_flos": 3.5951230851136782e+19,
+ "total_flos": 1.677250108543338e+19,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:fc31d462bd82ad45ffcdb48c070e8e8357be08cdb71164ba74143674f9a54e04
- size 5112
+ oid sha256:4bfe1550ddce5e600f8af695537ba4b034ec5ea1be78e55ddc34720ae0669092
+ size 5176