Hanzalwi committed on
Commit fbfebaf
1 Parent(s): aaec66a

Training in progress, step 2100, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fc72c15dfcf5711fb70f2cb68d673c7895270b34b117d6132862ad3fb8844720
+oid sha256:c62ca606aaf3a5e391ab79efbdbd6f136ff872fffc76bb44f3f93cc16aa7ee48
 size 9444296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ae63a50b8ef2796c2e42054bfd7b04cb18331479058e64cec3b3f51f2b924764
+oid sha256:94c906645f5fa7e35290dbfc313513f00acb313f5a0c14568377f5e8a35a792e
 size 18902665
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:581654e7247b86e5cf537a4f388a8b452ef6efcbea66ae5b2507bf4e41fdff67
+oid sha256:0770ff6204965e70605ce6b3d87c7de79ab315ff8e5a70f192ba4c9b0362a1ca
 size 14575
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:720bafe9a34b3041027dccef7d9c03bb23064e1fcdd80ae5f27e717d41b7cf31
+oid sha256:b1238b3bfa75e49a19396161e9e7b72ab89cdd1a3f63b51c0ab4d6e8d216c5a5
 size 627
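
The four binary files above are stored through Git LFS, so each diff only touches the pointer file: a spec version line, the SHA-256 of the object's contents (the `oid`), and its size in bytes. As a small, non-authoritative sketch, the snippet below shows one way to check that a locally downloaded file matches the `oid` recorded in its pointer; the path is illustrative and assumes the real binary (not the pointer text) has already been pulled from LFS.

```python
import hashlib

def lfs_sha256(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream a file and return its SHA-256 hex digest (the value after 'oid sha256:')."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Illustrative check against the adapter pointer shown above.
expected = "c62ca606aaf3a5e391ab79efbdbd6f136ff872fffc76bb44f3f93cc16aa7ee48"
assert lfs_sha256("last-checkpoint/adapter_model.safetensors") == expected
```
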
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 1.2992783784866333,
-  "best_model_checkpoint": "./outputs/checkpoint-2000",
-  "epoch": 2.6666666666666665,
+  "best_metric": 1.2948063611984253,
+  "best_model_checkpoint": "./outputs/checkpoint-2100",
+  "epoch": 2.8,
   "eval_steps": 100,
-  "global_step": 2000,
+  "global_step": 2100,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -287,6 +287,20 @@
       "eval_samples_per_second": 30.674,
       "eval_steps_per_second": 3.847,
       "step": 2000
+    },
+    {
+      "epoch": 2.8,
+      "learning_rate": 0.0002,
+      "loss": 1.3869,
+      "step": 2100
+    },
+    {
+      "epoch": 2.8,
+      "eval_loss": 1.2948063611984253,
+      "eval_runtime": 47.587,
+      "eval_samples_per_second": 30.492,
+      "eval_steps_per_second": 3.825,
+      "step": 2100
     }
   ],
   "logging_steps": 100,
@@ -294,7 +308,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 3,
   "save_steps": 100,
-  "total_flos": 6.706239746605056e+16,
+  "total_flos": 7.041614616723456e+16,
   "trial_name": null,
   "trial_params": null
 }
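
The trainer_state.json diff records the new best metric (eval_loss 1.2948063611984253 at step 2100, improving on 1.2992783784866333 at step 2000) and appends a train and an eval record for step 2100. As a rough sketch only, the snippet below reads those fields back from the checkpoint directory; it assumes the file layout written by the Hugging Face Transformers Trainer (including the standard `log_history` key), which the keys shown here suggest but the commit itself does not state.

```python
import json

# Illustrative local path to the checkpoint files shown in this commit.
with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print("global_step:", state["global_step"])                 # 2100
print("best_metric:", state["best_metric"])                 # 1.2948063611984253
print("best_checkpoint:", state["best_model_checkpoint"])   # ./outputs/checkpoint-2100

# The last two entries are the step-2100 train and eval records added above.
for record in state["log_history"][-2:]:
    print(record)
```

Resuming from this state would normally go through `Trainer.train(resume_from_checkpoint=...)`, which reads the same file; that call is standard Transformers API but is not part of this commit.
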