Training in progress, step 4770
- model.safetensors +1 -1
- run-3/checkpoint-4770/model.safetensors +1 -1
- run-3/checkpoint-4770/optimizer.pt +1 -1
- run-3/checkpoint-4770/scheduler.pt +1 -1
- run-3/checkpoint-4770/trainer_state.json +107 -107
- run-3/checkpoint-4770/training_args.bin +1 -1
- runs/Oct20_13-24-54_87443764e281/events.out.tfevents.1729433136.87443764e281.307.3 +3 -0
- training_args.bin +1 -1
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:eb3d9d9b7904e5c4b2b4e5f8e2e04d0d88e148d3c050c56c1c175fa004a5e190
 size 268290900
run-3/checkpoint-4770/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:eb3d9d9b7904e5c4b2b4e5f8e2e04d0d88e148d3c050c56c1c175fa004a5e190
 size 268290900
run-3/checkpoint-4770/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:d273212006c666110dba1b4525d9539d8db16079acce1aa3213059046e396c29
 size 536643898
run-3/checkpoint-4770/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:797c8d7d26e9180dee2526f838ce1bd0f7cdff0bf714d2114e0bcc548438283e
 size 1064
run-3/checkpoint-4770/trainer_state.json CHANGED
@@ -10,233 +10,233 @@
   "log_history": [
     {
       "epoch": 0.9968553459119497,
-      "grad_norm": 0.
-      "learning_rate": 0.
-      "loss": 0.
+      "grad_norm": 0.007859878242015839,
+      "learning_rate": 0.0007934093547399718,
+      "loss": 0.5931,
       "step": 317
     },
     {
       "epoch": 1.0,
-      "eval_accuracy": 0.
-      "eval_loss": 0.
-      "eval_runtime": 5.
-      "eval_samples_per_second":
-      "eval_steps_per_second":
+      "eval_accuracy": 0.03225806451612903,
+      "eval_loss": 0.5877403020858765,
+      "eval_runtime": 5.3692,
+      "eval_samples_per_second": 577.366,
+      "eval_steps_per_second": 12.106,
       "step": 318
     },
     {
       "epoch": 1.9937106918238994,
-      "grad_norm": 0.
-      "learning_rate": 0.
-      "loss": 0.
+      "grad_norm": 0.013005654327571392,
+      "learning_rate": 0.0007676597858899992,
+      "loss": 0.5933,
       "step": 634
     },
     {
       "epoch": 2.0,
       "eval_accuracy": 0.03225806451612903,
-      "eval_loss": 0.
-      "eval_runtime": 5.
-      "eval_samples_per_second":
-      "eval_steps_per_second":
+      "eval_loss": 0.5876849293708801,
+      "eval_runtime": 5.3979,
+      "eval_samples_per_second": 574.293,
+      "eval_steps_per_second": 12.042,
       "step": 636
     },
     {
       "epoch": 2.990566037735849,
-      "grad_norm": 0.
-      "learning_rate": 0.
-      "loss": 0.
+      "grad_norm": 0.011686289682984352,
+      "learning_rate": 0.0007259882616863973,
+      "loss": 0.5936,
       "step": 951
     },
     {
       "epoch": 3.0,
       "eval_accuracy": 0.03225806451612903,
-      "eval_loss": 0.
-      "eval_runtime": 5.
-      "eval_samples_per_second":
-      "eval_steps_per_second":
+      "eval_loss": 0.5876944661140442,
+      "eval_runtime": 5.3376,
+      "eval_samples_per_second": 580.786,
+      "eval_steps_per_second": 12.178,
       "step": 954
     },
     {
       "epoch": 3.9874213836477987,
-      "grad_norm": 0.
-      "learning_rate": 0.
-      "loss": 0.
+      "grad_norm": 0.010250881314277649,
+      "learning_rate": 0.0006702046329072582,
+      "loss": 0.5932,
       "step": 1268
     },
     {
       "epoch": 4.0,
       "eval_accuracy": 0.03225806451612903,
-      "eval_loss": 0.
-      "eval_runtime": 5.
-      "eval_samples_per_second":
-      "eval_steps_per_second": 12.
+      "eval_loss": 0.5876566171646118,
+      "eval_runtime": 5.3623,
+      "eval_samples_per_second": 578.11,
+      "eval_steps_per_second": 12.122,
       "step": 1272
     },
     {
       "epoch": 4.984276729559748,
-      "grad_norm": 0.
-      "learning_rate": 0.
-      "loss": 0.
+      "grad_norm": 0.013469184748828411,
+      "learning_rate": 0.0006027316581600536,
+      "loss": 0.594,
       "step": 1585
     },
     {
       "epoch": 5.0,
       "eval_accuracy": 0.03225806451612903,
-      "eval_loss": 0.
-      "eval_runtime": 5.
-      "eval_samples_per_second":
-      "eval_steps_per_second":
+      "eval_loss": 0.5876731276512146,
+      "eval_runtime": 5.3565,
+      "eval_samples_per_second": 578.738,
+      "eval_steps_per_second": 12.135,
       "step": 1590
     },
     {
       "epoch": 5.981132075471698,
-      "grad_norm": 0.
-      "learning_rate": 0.
-      "loss": 0.
+      "grad_norm": 0.015176467597484589,
+      "learning_rate": 0.0005264997801914848,
+      "loss": 0.5936,
       "step": 1902
     },
     {
       "epoch": 6.0,
       "eval_accuracy": 0.03225806451612903,
-      "eval_loss": 0.
-      "eval_runtime": 5.
-      "eval_samples_per_second":
-      "eval_steps_per_second":
+      "eval_loss": 0.5876610279083252,
+      "eval_runtime": 5.3967,
+      "eval_samples_per_second": 574.42,
+      "eval_steps_per_second": 12.044,
       "step": 1908
     },
     {
       "epoch": 6.977987421383648,
-      "grad_norm": 0.
-      "learning_rate": 0.
-      "loss": 0.
+      "grad_norm": 0.01225706934928894,
+      "learning_rate": 0.0004448198527870465,
+      "loss": 0.593,
       "step": 2219
     },
     {
       "epoch": 7.0,
       "eval_accuracy": 0.03225806451612903,
-      "eval_loss": 0.
-      "eval_runtime": 5.
-      "eval_samples_per_second":
-      "eval_steps_per_second": 12.
+      "eval_loss": 0.5876200795173645,
+      "eval_runtime": 5.386,
+      "eval_samples_per_second": 575.564,
+      "eval_steps_per_second": 12.068,
       "step": 2226
     },
     {
       "epoch": 7.9748427672955975,
-      "grad_norm": 0.
-      "learning_rate": 0.
-      "loss": 0.
+      "grad_norm": 0.014281037263572216,
+      "learning_rate": 0.00036123934590356535,
+      "loss": 0.5938,
       "step": 2536
     },
     {
       "epoch": 8.0,
       "eval_accuracy": 0.03225806451612903,
-      "eval_loss": 0.
-      "eval_runtime": 5.
-      "eval_samples_per_second":
-      "eval_steps_per_second": 12.
+      "eval_loss": 0.5876643061637878,
+      "eval_runtime": 5.34,
+      "eval_samples_per_second": 580.525,
+      "eval_steps_per_second": 12.172,
       "step": 2544
     },
     {
       "epoch": 8.971698113207546,
-      "grad_norm": 0.
-      "learning_rate": 0.
-      "loss": 0.
+      "grad_norm": 0.020413335412740707,
+      "learning_rate": 0.0002793882742407039,
+      "loss": 0.5934,
       "step": 2853
     },
     {
       "epoch": 9.0,
       "eval_accuracy": 0.03225806451612903,
-      "eval_loss": 0.
-      "eval_runtime": 5.
-      "eval_samples_per_second":
-      "eval_steps_per_second":
+      "eval_loss": 0.5876378417015076,
+      "eval_runtime": 5.3343,
+      "eval_samples_per_second": 581.141,
+      "eval_steps_per_second": 12.185,
       "step": 2862
     },
     {
       "epoch": 9.968553459119496,
-      "grad_norm": 0.
-      "learning_rate": 0.
-      "loss": 0.
+      "grad_norm": 0.010601122863590717,
+      "learning_rate": 0.00020282154078240177,
+      "loss": 0.5935,
       "step": 3170
     },
     {
       "epoch": 10.0,
       "eval_accuracy": 0.03225806451612903,
-      "eval_loss": 0.
-      "eval_runtime": 5.
-      "eval_samples_per_second":
-      "eval_steps_per_second":
+      "eval_loss": 0.5876396298408508,
+      "eval_runtime": 5.3546,
+      "eval_samples_per_second": 578.942,
+      "eval_steps_per_second": 12.139,
       "step": 3180
     },
     {
       "epoch": 10.965408805031446,
-      "grad_norm": 0.
-      "learning_rate": 0.
-      "loss": 0.
+      "grad_norm": 0.015482204966247082,
+      "learning_rate": 0.00013486454254193946,
+      "loss": 0.5936,
       "step": 3487
     },
     {
       "epoch": 11.0,
       "eval_accuracy": 0.03225806451612903,
-      "eval_loss": 0.
-      "eval_runtime": 5.
-      "eval_samples_per_second":
-      "eval_steps_per_second": 12.
+      "eval_loss": 0.5875952839851379,
+      "eval_runtime": 5.3966,
+      "eval_samples_per_second": 574.436,
+      "eval_steps_per_second": 12.045,
       "step": 3498
     },
     {
       "epoch": 11.962264150943396,
-      "grad_norm": 0.
-      "learning_rate":
-      "loss": 0.
+      "grad_norm": 0.007544202264398336,
+      "learning_rate": 7.846874406237966e-05,
+      "loss": 0.5932,
       "step": 3804
     },
     {
       "epoch": 12.0,
       "eval_accuracy": 0.03225806451612903,
-      "eval_loss": 0.
-      "eval_runtime": 5.
-      "eval_samples_per_second":
-      "eval_steps_per_second": 12.
+      "eval_loss": 0.587660014629364,
+      "eval_runtime": 5.388,
+      "eval_samples_per_second": 575.35,
+      "eval_steps_per_second": 12.064,
       "step": 3816
     },
     {
       "epoch": 12.959119496855346,
-      "grad_norm": 0.
-      "learning_rate":
-      "loss": 0.
+      "grad_norm": 0.015037346631288528,
+      "learning_rate": 3.608349131102299e-05,
+      "loss": 0.594,
       "step": 4121
     },
     {
       "epoch": 13.0,
       "eval_accuracy": 0.03225806451612903,
-      "eval_loss": 0.
-      "eval_runtime": 5.
-      "eval_samples_per_second":
-      "eval_steps_per_second":
+      "eval_loss": 0.5875839591026306,
+      "eval_runtime": 5.3407,
+      "eval_samples_per_second": 580.45,
+      "eval_steps_per_second": 12.171,
       "step": 4134
     },
     {
       "epoch": 13.955974842767295,
-      "grad_norm": 0.
-      "learning_rate":
-      "loss": 0.
+      "grad_norm": 0.013815987855196,
+      "learning_rate": 9.549633264184268e-06,
+      "loss": 0.593,
       "step": 4438
     },
     {
       "epoch": 14.0,
       "eval_accuracy": 0.03225806451612903,
-      "eval_loss": 0.
-      "eval_runtime": 5.
-      "eval_samples_per_second":
-      "eval_steps_per_second":
+      "eval_loss": 0.5875993371009827,
+      "eval_runtime": 5.3398,
+      "eval_samples_per_second": 580.541,
+      "eval_steps_per_second": 12.173,
       "step": 4452
     },
     {
       "epoch": 14.952830188679245,
-      "grad_norm": 0.
-      "learning_rate":
-      "loss": 0.
+      "grad_norm": 0.010695732198655605,
+      "learning_rate": 1.9571341049241364e-08,
+      "loss": 0.5934,
       "step": 4755
     }
   ],
@@ -261,11 +261,11 @@
   "train_batch_size": 48,
   "trial_name": null,
   "trial_params": {
-    "alpha": 0.
-    "learning_rate": 0.
-    "lr_scheduler_type": "
+    "alpha": 0.5457565605433671,
+    "learning_rate": 0.0008021186295599815,
+    "lr_scheduler_type": "cosine",
     "num_train_epochs": 15,
-    "temperature":
-    "weight_decay": 0.
+    "temperature": 13.83793993486481,
+    "weight_decay": 0.09136269626429569
   }
 }
run-3/checkpoint-4770/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:897f112baf000af50343a07b648e18d94170f7867db99d9989dad76b8d4ae6a7
 size 5240
runs/Oct20_13-24-54_87443764e281/events.out.tfevents.1729433136.87443764e281.307.3 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:742b6d0d21ad38e949126f20c1628a16142201b4bae170fb91e5943c5cd936f2
+size 20825
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:897f112baf000af50343a07b648e18d94170f7867db99d9989dad76b8d4ae6a7
 size 5240