Kyrmasch committed
Commit c7ae2e6
1 Parent(s): dc2421d
optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ad765f47af71011be3d640489246c7085d198b046f8e2dd772864e81ca97a408
+ oid sha256:b7f616dd892d65e0d0839627fbdb98aa327b78ef38f72e08bffe2ebe0a7e0ef2
  size 2219797189
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:560bdb3380b22af02e00cdcafea5a445304ab6cf80042fac6513548eeb49fece
+ oid sha256:f969327ada3bd8dbee21381bf85e5eab0012f56c8b38e3b91b8598b80c158c67
  size 1109883945
rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bd275d096cd6cf629c69200ea47d62fc9cd3b36b7e62400520fe4bf673338149
+ oid sha256:556647febb788da6dd49a88a8ed603748321637cc7055b1bb12850e2d4f60da4
  size 13553
scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:65a6cbff39d4fbfc6aec126e1a969fa70287244e7814cf7317a9aaf9b3c0921e
+ oid sha256:05a70eda53deab8173d8fbbc2a7e82544de63b2afac213bd0479ab675d199b57
  size 627
tokenizer.json CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2bae0289b9bb66bfbf85d834f6313ebd8e40f484df2bd0351539e74a0e4ddeb4
- size 17098081
+ oid sha256:167066c03829058ec15ff5404e54585316251e89eec3f758bced173717de5322
+ size 17098346
trainer_state.json CHANGED
@@ -1,85 +1,25 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 2.248569092395748,
+ "epoch": 0.5780346820809249,
  "eval_steps": 500,
- "global_step": 5500,
+ "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
  {
- "epoch": 0.2,
- "learning_rate": 2.795584627964023e-05,
- "loss": 2.2019,
+ "epoch": 0.58,
+ "learning_rate": 1.2658959537572255e-05,
+ "loss": 1.7414,
  "step": 500
- },
- {
- "epoch": 0.41,
- "learning_rate": 2.591169255928046e-05,
- "loss": 1.9636,
- "step": 1000
- },
- {
- "epoch": 0.61,
- "learning_rate": 2.3867538838920688e-05,
- "loss": 1.9246,
- "step": 1500
- },
- {
- "epoch": 0.82,
- "learning_rate": 2.1823385118560917e-05,
- "loss": 1.8572,
- "step": 2000
- },
- {
- "epoch": 1.02,
- "learning_rate": 1.9779231398201146e-05,
- "loss": 1.7804,
- "step": 2500
- },
- {
- "epoch": 1.23,
- "learning_rate": 1.7735077677841375e-05,
- "loss": 1.5332,
- "step": 3000
- },
- {
- "epoch": 1.43,
- "learning_rate": 1.56909239574816e-05,
- "loss": 1.5243,
- "step": 3500
- },
- {
- "epoch": 1.64,
- "learning_rate": 1.3646770237121831e-05,
- "loss": 1.5248,
- "step": 4000
- },
- {
- "epoch": 1.84,
- "learning_rate": 1.160261651676206e-05,
- "loss": 1.5064,
- "step": 4500
- },
- {
- "epoch": 2.04,
- "learning_rate": 9.55846279640229e-06,
- "loss": 1.4177,
- "step": 5000
- },
- {
- "epoch": 2.25,
- "learning_rate": 7.514309076042518e-06,
- "loss": 1.2049,
- "step": 5500
  }
  ],
  "logging_steps": 500,
- "max_steps": 7338,
- "num_train_epochs": 3,
+ "max_steps": 865,
+ "num_train_epochs": 1,
  "save_steps": 500,
- "total_flos": 1.2931445842486272e+16,
+ "total_flos": 1175835405312000.0,
  "trial_name": null,
  "trial_params": null
  }
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:61f4841925514c34c11b4abf14bd89c4a66c79f5e0b25faec4459849fb0e99b2
+ oid sha256:2a448ce07274e9228c49e2d8399ef7896ad395414437462eb8541947ca7e188f
  size 4027
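
The binaries in this commit (optimizer.pt, pytorch_model.bin, rng_state.pth, scheduler.pt, tokenizer.json, training_args.bin) are tracked with Git LFS, so each hunk above only swaps the sha256 oid and byte size recorded in the pointer file. As a minimal sketch of checking a locally downloaded file against the updated pointer values — the helper name and local path are illustrative, and the oid/size are taken from the optimizer.pt hunk above:

```python
import hashlib
import os

def matches_lfs_pointer(path, expected_oid, expected_size):
    """Compare a local file's size and sha256 digest to a Git LFS pointer's oid/size."""
    if os.path.getsize(path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        # Hash in 1 MiB chunks to avoid loading multi-GB checkpoints into memory.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Illustrative local path; oid and size come from the new optimizer.pt pointer.
print(matches_lfs_pointer(
    "optimizer.pt",
    "b7f616dd892d65e0d0839627fbdb98aa327b78ef38f72e08bffe2ebe0a7e0ef2",
    2219797189,
))
```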