Training in progress, step 1400, checkpoint
last-checkpoint/optimizer.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:f2a303e416b8e87e82b0a9e76a5d7a78b1fc5109468b48fe4e63f33e9c3bd855
 size 2751040864
last-checkpoint/pytorch_model-00001-of-00002.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:ac654860a07162e4c0f37a59df869b9b1b80e34174d1a714bd957e4f32abd2d1
 size 5000078781
last-checkpoint/pytorch_model-00002-of-00002.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:354c2aa6152650ba3dfa40e3f219ee3e964aac74b46fa672914384b3af535e9b
 size 482838574
last-checkpoint/rng_state.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:a364d71973980816c47dc3b9dabc9ae54ad71e3b4a66da0300314d8bb9a5a804
 size 14244
last-checkpoint/scheduler.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:1a051f2b7b66e1d1bb2f8dad5db282e6521639d1f5915c20f67b57d63e76b058
 size 1064
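For context (not part of the commit): each of the files above is stored through Git LFS, so the repository only tracks a small pointer file holding the object's sha256 oid and byte size, and this commit swaps in the hashes of the newly saved step-1400 tensors. Below is a minimal sketch for checking a downloaded file against its pointer; the local path is hypothetical, and Git LFS already verifies these oids when it fetches objects, so this only matters for files obtained some other way.

# Sketch (assumption: checkpoint file already downloaded to a local path)
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream the file so multi-GB checkpoints never need to fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# oid taken from the optimizer.pt pointer above; the path is hypothetical.
expected = "f2a303e416b8e87e82b0a9e76a5d7a78b1fc5109468b48fe4e63f33e9c3bd855"
actual = sha256_of("last-checkpoint/optimizer.pt")
print("match" if actual == expected else "mismatch")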
last-checkpoint/trainer_state.json
CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.37990298867225647,
   "best_model_checkpoint": "../KevinKibe/nllb-200-distilled-1.3B-finetuned-finetuned-finetuned/checkpoint-600",
-  "epoch":
+  "epoch": 1400.0,
   "eval_steps": 200,
-  "global_step":
+  "global_step": 1400,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -92,6 +92,40 @@
       "eval_samples_per_second": 0.19,
       "eval_steps_per_second": 0.095,
       "step": 1000
+    },
+    {
+      "epoch": 1200.0,
+      "grad_norm": 0.02252182736992836,
+      "learning_rate": 8.000000000000001e-06,
+      "loss": 0.0013,
+      "step": 1200
+    },
+    {
+      "epoch": 1200.0,
+      "eval_gen_len": 13.5,
+      "eval_loss": 0.39042866230010986,
+      "eval_rouge": 0.1765,
+      "eval_runtime": 10.4367,
+      "eval_samples_per_second": 0.192,
+      "eval_steps_per_second": 0.096,
+      "step": 1200
+    },
+    {
+      "epoch": 1400.0,
+      "grad_norm": 0.019408540800213814,
+      "learning_rate": 6e-06,
+      "loss": 0.0011,
+      "step": 1400
+    },
+    {
+      "epoch": 1400.0,
+      "eval_gen_len": 13.5,
+      "eval_loss": 0.3919622004032135,
+      "eval_rouge": 0.1765,
+      "eval_runtime": 9.8993,
+      "eval_samples_per_second": 0.202,
+      "eval_steps_per_second": 0.101,
+      "step": 1400
     }
   ],
   "logging_steps": 200,
@@ -99,7 +133,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2000,
   "save_steps": 200,
-  "total_flos":
+  "total_flos": 4096215416832000.0,
   "train_batch_size": 16,
   "trial_name": null,
   "trial_params": null
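For context (not part of the commit): trainer_state.json is the Hugging Face Trainer's running state, and the lines added above extend its log_history with the step-1200 and step-1400 training and eval entries. A minimal sketch for reading that eval history back out, assuming the checkpoint directory has been downloaded locally (the path is hypothetical):

# Sketch (assumption: trainer_state.json available at a local path)
import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

# Print only the eval entries; training-loss entries have no "eval_loss" key.
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(f'step {entry["step"]}: eval_loss={entry["eval_loss"]}, '
              f'eval_rouge={entry.get("eval_rouge")}')

Note that best_model_checkpoint still points at checkpoint-600: the new eval_loss values (0.3904 at step 1200, 0.3920 at step 1400) sit above the recorded best_metric of 0.3799, so the best checkpoint is unchanged in this save.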