vovadevico committed on
Commit e8dbe09 · verified · 1 Parent(s): 35856d1

End of training

Files changed (5)
  1. README.md +7 -5
  2. all_results.json +13 -0
  3. eval_results.json +8 -0
  4. train_results.json +8 -0
  5. trainer_state.json +325 -0
README.md CHANGED
@@ -3,6 +3,8 @@ library_name: transformers
 license: apache-2.0
 base_model: google/vit-large-patch16-384
 tags:
+- image-classification
+- vision
 - generated_from_trainer
 datasets:
 - imagefolder
@@ -15,7 +17,7 @@ model-index:
       name: Image Classification
       type: image-classification
     dataset:
-      name: imagefolder
+      name: touchtech/fashion-images-perspectives-v2
       type: imagefolder
       config: default
       split: train
@@ -23,7 +25,7 @@ model-index:
     metrics:
     - name: Accuracy
      type: accuracy
-      value: 0.9641970198675497
+      value: 0.9544701986754967
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -31,10 +33,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # fashion-images-perspectives-vit-large-patch16-384-v1
 
-This model is a fine-tuned version of [google/vit-large-patch16-384](https://huggingface.co/google/vit-large-patch16-384) on the imagefolder dataset.
+This model is a fine-tuned version of [google/vit-large-patch16-384](https://huggingface.co/google/vit-large-patch16-384) on the touchtech/fashion-images-perspectives-v2 dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.2290
-- Accuracy: 0.9642
+- Loss: 0.1572
+- Accuracy: 0.9545
 
 ## Model description
 
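For anyone picking up the fine-tuned checkpoint described in this model card, a minimal inference sketch with the transformers image-classification pipeline is shown below. The repository id is an assumption pieced together from the committer and model name on this page, not something confirmed by the commit itself; substitute the actual Hub path.

```python
# Minimal sketch: run the fine-tuned ViT classifier on a single image.
# The repo id below is assumed from this page's committer and model name.
from transformers import pipeline

classifier = pipeline(
    "image-classification",
    model="vovadevico/fashion-images-perspectives-vit-large-patch16-384-v1",  # assumed repo id
)

# Accepts a local path, URL, or PIL.Image; returns a list of {"label", "score"} dicts.
predictions = classifier("example_fashion_image.jpg")
print(predictions)
```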
all_results.json ADDED
@@ -0,0 +1,13 @@
+{
+    "epoch": 5.0,
+    "eval_accuracy": 0.9544701986754967,
+    "eval_loss": 0.15720506012439728,
+    "eval_runtime": 447.4514,
+    "eval_samples_per_second": 10.799,
+    "eval_steps_per_second": 1.35,
+    "total_flos": 1.1034252095877513e+20,
+    "train_loss": 0.08469340640865429,
+    "train_runtime": 22735.4231,
+    "train_samples_per_second": 6.021,
+    "train_steps_per_second": 0.753
+}
eval_results.json ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 5.0,
+    "eval_accuracy": 0.9544701986754967,
+    "eval_loss": 0.15720506012439728,
+    "eval_runtime": 447.4514,
+    "eval_samples_per_second": 10.799,
+    "eval_steps_per_second": 1.35
+}
train_results.json ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 5.0,
+    "total_flos": 1.1034252095877513e+20,
+    "train_loss": 0.08469340640865429,
+    "train_runtime": 22735.4231,
+    "train_samples_per_second": 6.021,
+    "train_steps_per_second": 0.753
+}
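As a rough consistency check (not part of the commit), the throughput numbers in these result files line up with the step count and batch size recorded in trainer_state.json below, assuming single-device training so the effective batch size equals train_batch_size:

```python
# Back-of-the-envelope check using values copied from the JSON files in this commit.
train_runtime = 22735.4231        # seconds, from train_results.json
samples_per_second = 6.021        # from train_results.json
global_step = 17115               # from trainer_state.json
num_train_epochs = 5
train_batch_size = 8              # per-device batch size, from trainer_state.json

total_images = train_runtime * samples_per_second                      # ~136,900 images seen in training
images_per_epoch = global_step / num_train_epochs * train_batch_size   # ~27,384 images per epoch
print(round(total_images / num_train_epochs), round(images_per_epoch))  # the two estimates agree closely
```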
trainer_state.json ADDED
@@ -0,0 +1,325 @@
+{
+  "best_metric": 0.15720506012439728,
+  "best_model_checkpoint": "/workspace/training_output/perspectives-vit-large-patch16-384-v1/checkpoint-3423",
+  "epoch": 5.0,
+  "eval_steps": 500,
+  "global_step": 17115,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.14607069821793747,
+      "grad_norm": 2.9760661125183105,
+      "learning_rate": 1.941571720712825e-05,
+      "loss": 0.4878,
+      "step": 500
+    },
+    {
+      "epoch": 0.29214139643587494,
+      "grad_norm": 0.5538421869277954,
+      "learning_rate": 1.8831434414256503e-05,
+      "loss": 0.2583,
+      "step": 1000
+    },
+    {
+      "epoch": 0.43821209465381245,
+      "grad_norm": 8.382941246032715,
+      "learning_rate": 1.8247151621384752e-05,
+      "loss": 0.2558,
+      "step": 1500
+    },
+    {
+      "epoch": 0.5842827928717499,
+      "grad_norm": 2.5761232376098633,
+      "learning_rate": 1.7662868828513e-05,
+      "loss": 0.2137,
+      "step": 2000
+    },
+    {
+      "epoch": 0.7303534910896874,
+      "grad_norm": 8.756667137145996,
+      "learning_rate": 1.707858603564125e-05,
+      "loss": 0.1906,
+      "step": 2500
+    },
+    {
+      "epoch": 0.8764241893076249,
+      "grad_norm": 0.7560040354728699,
+      "learning_rate": 1.6494303242769503e-05,
+      "loss": 0.1766,
+      "step": 3000
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.9544701986754967,
+      "eval_loss": 0.15720506012439728,
+      "eval_runtime": 454.0694,
+      "eval_samples_per_second": 10.642,
+      "eval_steps_per_second": 1.33,
+      "step": 3423
+    },
+    {
+      "epoch": 1.0224948875255624,
+      "grad_norm": 0.2756825089454651,
+      "learning_rate": 1.591002044989775e-05,
+      "loss": 0.1593,
+      "step": 3500
+    },
+    {
+      "epoch": 1.1685655857434998,
+      "grad_norm": 14.493992805480957,
+      "learning_rate": 1.5325737657026e-05,
+      "loss": 0.0946,
+      "step": 4000
+    },
+    {
+      "epoch": 1.3146362839614374,
+      "grad_norm": 1.4798998832702637,
+      "learning_rate": 1.4741454864154251e-05,
+      "loss": 0.096,
+      "step": 4500
+    },
+    {
+      "epoch": 1.4607069821793748,
+      "grad_norm": 0.03895518556237221,
+      "learning_rate": 1.41571720712825e-05,
+      "loss": 0.1097,
+      "step": 5000
+    },
+    {
+      "epoch": 1.6067776803973124,
+      "grad_norm": 0.019592493772506714,
+      "learning_rate": 1.3572889278410753e-05,
+      "loss": 0.1052,
+      "step": 5500
+    },
+    {
+      "epoch": 1.7528483786152498,
+      "grad_norm": 0.019370147958397865,
+      "learning_rate": 1.2988606485539002e-05,
+      "loss": 0.115,
+      "step": 6000
+    },
+    {
+      "epoch": 1.8989190768331872,
+      "grad_norm": 2.880488634109497,
+      "learning_rate": 1.2404323692667253e-05,
+      "loss": 0.1108,
+      "step": 6500
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.9579884105960265,
+      "eval_loss": 0.1967967003583908,
+      "eval_runtime": 457.9224,
+      "eval_samples_per_second": 10.552,
+      "eval_steps_per_second": 1.319,
+      "step": 6846
+    },
+    {
+      "epoch": 2.044989775051125,
+      "grad_norm": 0.12547972798347473,
+      "learning_rate": 1.1820040899795502e-05,
+      "loss": 0.0913,
+      "step": 7000
+    },
+    {
+      "epoch": 2.1910604732690624,
+      "grad_norm": 0.01506296917796135,
+      "learning_rate": 1.123575810692375e-05,
+      "loss": 0.0423,
+      "step": 7500
+    },
+    {
+      "epoch": 2.3371311714869996,
+      "grad_norm": 19.676223754882812,
+      "learning_rate": 1.0651475314052001e-05,
+      "loss": 0.048,
+      "step": 8000
+    },
+    {
+      "epoch": 2.483201869704937,
+      "grad_norm": 0.19931885600090027,
+      "learning_rate": 1.006719252118025e-05,
+      "loss": 0.0416,
+      "step": 8500
+    },
+    {
+      "epoch": 2.629272567922875,
+      "grad_norm": 0.01677100360393524,
+      "learning_rate": 9.482909728308503e-06,
+      "loss": 0.0493,
+      "step": 9000
+    },
+    {
+      "epoch": 2.775343266140812,
+      "grad_norm": 0.008517206646502018,
+      "learning_rate": 8.898626935436752e-06,
+      "loss": 0.044,
+      "step": 9500
+    },
+    {
+      "epoch": 2.9214139643587496,
+      "grad_norm": 0.006620448548346758,
+      "learning_rate": 8.314344142565001e-06,
+      "loss": 0.0456,
+      "step": 10000
+    },
+    {
+      "epoch": 3.0,
+      "eval_accuracy": 0.9606788079470199,
+      "eval_loss": 0.2058100551366806,
+      "eval_runtime": 454.6414,
+      "eval_samples_per_second": 10.628,
+      "eval_steps_per_second": 1.329,
+      "step": 10269
+    },
+    {
+      "epoch": 3.067484662576687,
+      "grad_norm": 0.0024903384037315845,
+      "learning_rate": 7.730061349693252e-06,
+      "loss": 0.0234,
+      "step": 10500
+    },
+    {
+      "epoch": 3.213555360794625,
+      "grad_norm": 0.015993278473615646,
+      "learning_rate": 7.1457785568215025e-06,
+      "loss": 0.0075,
+      "step": 11000
+    },
+    {
+      "epoch": 3.359626059012562,
+      "grad_norm": 0.027190232649445534,
+      "learning_rate": 6.561495763949752e-06,
+      "loss": 0.0112,
+      "step": 11500
+    },
+    {
+      "epoch": 3.5056967572304996,
+      "grad_norm": 0.010302845388650894,
+      "learning_rate": 5.977212971078002e-06,
+      "loss": 0.0188,
+      "step": 12000
+    },
+    {
+      "epoch": 3.651767455448437,
+      "grad_norm": 0.0008801922085694969,
+      "learning_rate": 5.392930178206253e-06,
+      "loss": 0.0189,
+      "step": 12500
+    },
+    {
+      "epoch": 3.7978381536663743,
+      "grad_norm": 12.479740142822266,
+      "learning_rate": 4.808647385334502e-06,
+      "loss": 0.025,
+      "step": 13000
+    },
+    {
+      "epoch": 3.943908851884312,
+      "grad_norm": 0.0013448131503537297,
+      "learning_rate": 4.224364592462753e-06,
+      "loss": 0.0118,
+      "step": 13500
+    },
+    {
+      "epoch": 4.0,
+      "eval_accuracy": 0.9633692052980133,
+      "eval_loss": 0.2165926992893219,
+      "eval_runtime": 447.8313,
+      "eval_samples_per_second": 10.79,
+      "eval_steps_per_second": 1.349,
+      "step": 13692
+    },
+    {
+      "epoch": 4.08997955010225,
+      "grad_norm": 0.0009766396833583713,
+      "learning_rate": 3.6400817995910027e-06,
+      "loss": 0.012,
+      "step": 14000
+    },
+    {
+      "epoch": 4.236050248320187,
+      "grad_norm": 0.0005415708874352276,
+      "learning_rate": 3.055799006719252e-06,
+      "loss": 0.0037,
+      "step": 14500
+    },
+    {
+      "epoch": 4.382120946538125,
+      "grad_norm": 0.0017334007425233722,
+      "learning_rate": 2.4715162138475024e-06,
+      "loss": 0.007,
+      "step": 15000
+    },
+    {
+      "epoch": 4.5281916447560615,
+      "grad_norm": 0.00246521458029747,
+      "learning_rate": 1.8872334209757523e-06,
+      "loss": 0.0058,
+      "step": 15500
+    },
+    {
+      "epoch": 4.674262342973999,
+      "grad_norm": 0.0018645540112629533,
+      "learning_rate": 1.3029506281040024e-06,
+      "loss": 0.0066,
+      "step": 16000
+    },
+    {
+      "epoch": 4.820333041191937,
+      "grad_norm": 0.0014726301888003945,
+      "learning_rate": 7.186678352322525e-07,
+      "loss": 0.0047,
+      "step": 16500
+    },
+    {
+      "epoch": 4.966403739409874,
+      "grad_norm": 0.0010302780428901315,
+      "learning_rate": 1.3438504236050248e-07,
+      "loss": 0.007,
+      "step": 17000
+    },
+    {
+      "epoch": 5.0,
+      "eval_accuracy": 0.9641970198675497,
+      "eval_loss": 0.22900521755218506,
+      "eval_runtime": 454.8427,
+      "eval_samples_per_second": 10.623,
+      "eval_steps_per_second": 1.328,
+      "step": 17115
+    },
+    {
+      "epoch": 5.0,
+      "step": 17115,
+      "total_flos": 1.1034252095877513e+20,
+      "train_loss": 0.08469340640865429,
+      "train_runtime": 22735.4231,
+      "train_samples_per_second": 6.021,
+      "train_steps_per_second": 0.753
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 17115,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 5,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 1.1034252095877513e+20,
+  "train_batch_size": 8,
+  "trial_name": null,
+  "trial_params": null
+}
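To recover the per-epoch evaluation curve from the trainer_state.json added above (for example, to confirm that the best checkpoint recorded in best_model_checkpoint is the epoch-1 one, since eval_loss rises after epoch 1 even as accuracy keeps improving), a small parsing sketch like the following works; the local file path is assumed.

```python
# Sketch: list per-epoch eval metrics from the trainer_state.json shown above.
import json

with open("trainer_state.json") as f:   # path assumed; adjust to the downloaded file
    state = json.load(f)

# Eval entries in log_history carry eval_* keys; per-step training entries do not.
for entry in state["log_history"]:
    if "eval_accuracy" in entry:
        print(f'epoch {entry["epoch"]:.0f}: '
              f'accuracy={entry["eval_accuracy"]:.4f}, eval_loss={entry["eval_loss"]:.4f}')

print("best eval_loss:", state["best_metric"], "->", state["best_model_checkpoint"])
```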