rizkyjun committed
Commit 9b8c780 · 1 Parent(s): afc7ff6

Training in progress, step 300, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fb703e356b523ee0a02fdfd156266ef8f9dd795b1aabb08dc8ed3032bff95ae9
+oid sha256:63032741a7fe23ca45903e22d7c0da503ce18374c09ebc8573f580e40714603a
 size 9444296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9af1efe58bb0a4d3ef86aa8f892e8a1e220d1f9a4bd6f443bafc7dae9c5f4d6b
+oid sha256:3c0a2da5e1e8be9d887b1b3abae0480246ee61517416e19119c12ed2e43920a8
 size 18902665
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4ae75103ef104e9777a5ae5b55a7cb90c71c3a63aa7b14a23492610705b9c048
+oid sha256:e24ee25a852e4bccfaf30d5bb4faf1e6a8bbd9e10f131bfb0436af716b822b9c
 size 14575
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7aa2c8b84e17817e6a4dcba5955fca913e266fdcd47f5594a29933ebd4972a01
+oid sha256:c3e02e4b8a74e841ee5a1ce8c453a9becb15f5be6cbcdd100b814e93235dab36
 size 627
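
The four files above are stored through Git LFS, so each diff only rewrites a pointer file: the "oid sha256:" line is the SHA-256 digest of the actual file contents and "size" is its byte count. A minimal sketch for checking a downloaded checkpoint file against its pointer (the local path is an assumption, not something defined by this commit):

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream the file in chunks and return its hex SHA-256 digest,
    # which should equal the oid recorded in the LFS pointer.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected digest copied from the scheduler.pt pointer in this commit.
expected = "c3e02e4b8a74e841ee5a1ce8c453a9becb15f5be6cbcdd100b814e93235dab36"
print(sha256_of("last-checkpoint/scheduler.pt") == expected)
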
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 2.6725997924804688,
-  "best_model_checkpoint": "./outputs/checkpoint-200",
-  "epoch": 0.14571948998178508,
+  "best_metric": 2.6377696990966797,
+  "best_model_checkpoint": "./outputs/checkpoint-300",
+  "epoch": 0.2185792349726776,
   "eval_steps": 100,
-  "global_step": 200,
+  "global_step": 300,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -35,13 +35,27 @@
       "eval_samples_per_second": 30.624,
       "eval_steps_per_second": 3.832,
       "step": 200
+    },
+    {
+      "epoch": 0.22,
+      "learning_rate": 0.0002,
+      "loss": 2.6509,
+      "step": 300
+    },
+    {
+      "epoch": 0.22,
+      "eval_loss": 2.6377696990966797,
+      "eval_runtime": 204.8779,
+      "eval_samples_per_second": 30.623,
+      "eval_steps_per_second": 3.832,
+      "step": 300
     }
   ],
   "logging_steps": 100,
   "max_steps": 4116,
   "num_train_epochs": 3,
   "save_steps": 100,
-  "total_flos": 5679101804544000.0,
+  "total_flos": 8537730427699200.0,
   "trial_name": null,
   "trial_params": null
 }
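
The updated trainer_state.json records the run's schedule: evaluation, checkpointing, and logging every 100 steps, 4116 optimizer steps over 3 epochs, a learning rate of 2e-4 at step 300, and the best eval_loss (2.6378) now held by checkpoint-300. A minimal sketch of TrainingArguments consistent with that schedule (an assumption for illustration, not the author's actual training script; batch size, model, and dataset are unknown):

from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="./outputs",             # matches "best_model_checkpoint": "./outputs/checkpoint-300"
    num_train_epochs=3,                 # trainer_state reports max_steps=4116 for 3 epochs
    learning_rate=2e-4,                 # "learning_rate": 0.0002 logged at step 300
    evaluation_strategy="steps",
    eval_steps=100,
    save_strategy="steps",
    save_steps=100,
    logging_steps=100,
    metric_for_best_model="eval_loss",  # assumption: would explain the best_metric tracking
    greater_is_better=False,
    load_best_model_at_end=True,
)
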