dontjandra committed
Commit 3498658
1 Parent(s): 20e131f

Training in progress, step 100, checkpoint

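The files changed below are the standard artifacts the Hugging Face Trainer writes when it saves a mid-run checkpoint and pushes it to the Hub: adapter weights, optimizer and scheduler state, the RNG state, and trainer_state.json. A run that produces commits like this one is typically configured along the following lines; every concrete value in this sketch is an assumption read off trainer_state.json (batch size 2, logging every 10 steps, eval every 1000 steps) or inferred from the step-50 parent and this step-100 commit (save every 50 steps), not something recorded in the commit itself.

from transformers import TrainingArguments

# Sketch only: values are inferred from trainer_state.json and the commit
# history (step-50 parent, step-100 commit), not taken from the repository.
args = TrainingArguments(
    output_dir="outputs",            # hypothetical path
    per_device_train_batch_size=2,   # "train_batch_size": 2
    logging_steps=10,                # "logging_steps": 10
    eval_steps=1000,                 # "eval_steps": 1000
    save_strategy="steps",
    save_steps=50,                   # checkpoints appear at steps 50 and 100
    learning_rate=2e-05,             # one reading of the warmup that reaches 2e-05 at step 100
    warmup_steps=100,                # ditto; lr rises 2e-07 per step between logged points
    push_to_hub=True,                # each saved checkpoint becomes a commit like this one
)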
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5b747b902cf488c338b42395ca734abd9b9cbad0638c2424bff16e17dbf15acd
+oid sha256:1f2452201b1a1c07a46260c67f6c5cb715cce3500579c5ec73919b798ace8d03
 size 83945296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2b1207ad6bcf1979dbaa993490890a53c0ebc800589da600b6b1282aaca71968
+oid sha256:4c5827c92eda7d133f5f2e02378b82df19bba09950052d529a9f45087b2691f4
 size 42545748
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:051ef68e76ed7d1f7e722937ec594a046926e550a4052c7ce2c65763a149f537
+oid sha256:3f4827f3907c884124cf5304ad6de566f4005bfd0b47fc207be524313cec4714
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6ced1fab70f37dfdd161d53ad45356c3e3789a4ff9624d1b399d33b98b7f6e47
+oid sha256:30b806c9629c135739ae955bdbf613e7c7323c34a5bcf2bbc2116e176d24f227
 size 1064
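Each of the binary checkpoint files above is tracked through Git LFS, so the repository only stores a small pointer file (spec version, sha256 oid, size); this commit swaps the oid while the size stays the same, meaning the payload was rewritten in place. To confirm that a downloaded payload matches the pointer recorded here, a sketch along these lines works (the local path is a placeholder, not part of the commit):

import hashlib

def sha256_of(path: str) -> str:
    """Stream the file and return its hex sha256, the value used in the LFS 'oid' field."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

# Hypothetical local copy of the checkpoint file; the expected oid is the one
# added by this commit for last-checkpoint/adapter_model.safetensors.
expected = "1f2452201b1a1c07a46260c67f6c5cb715cce3500579c5ec73919b798ace8d03"
actual = sha256_of("last-checkpoint/adapter_model.safetensors")
print("match:", actual == expected)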
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.12345679012345678,
+  "epoch": 0.24691358024691357,
   "eval_steps": 1000,
-  "global_step": 50,
+  "global_step": 100,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -49,6 +49,41 @@
       "learning_rate": 1e-05,
       "loss": 1.3828,
       "step": 50
+    },
+    {
+      "epoch": 0.14814814814814814,
+      "grad_norm": 111082.546875,
+      "learning_rate": 1.2e-05,
+      "loss": 1.3397,
+      "step": 60
+    },
+    {
+      "epoch": 0.1728395061728395,
+      "grad_norm": 121515.2578125,
+      "learning_rate": 1.4000000000000001e-05,
+      "loss": 1.2831,
+      "step": 70
+    },
+    {
+      "epoch": 0.19753086419753085,
+      "grad_norm": 108325.3984375,
+      "learning_rate": 1.6000000000000003e-05,
+      "loss": 1.3085,
+      "step": 80
+    },
+    {
+      "epoch": 0.2222222222222222,
+      "grad_norm": 175475.78125,
+      "learning_rate": 1.8e-05,
+      "loss": 1.2624,
+      "step": 90
+    },
+    {
+      "epoch": 0.24691358024691357,
+      "grad_norm": 116793.203125,
+      "learning_rate": 2e-05,
+      "loss": 1.2683,
+      "step": 100
     }
   ],
   "logging_steps": 10,
@@ -68,7 +103,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 8759069950771200.0,
+  "total_flos": 1.75181399015424e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null