ssilvera10 committed
ViT Model Version 1.0.0

Files changed:
- README.md +2 -1
- all_results.json +9 -9
- eval_results.json +5 -5
- train_results.json +4 -4
README.md
CHANGED
@@ -2,6 +2,7 @@
 license: apache-2.0
 base_model: google/vit-base-patch16-224-in21k
 tags:
+- image-classification
 - generated_from_trainer
 metrics:
 - accuracy
@@ -15,7 +16,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # vit_models
 
-This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on
+This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset.
 It achieves the following results on the evaluation set:
 - Loss: 0.0299
 - Accuracy: 0.9774
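For readers landing on this commit, a minimal inference sketch using the Transformers `pipeline` API. The Hub repo id `ssilvera10/vit_models` is an assumption pieced together from the committer name and the README title, not something stated in the commit; substitute the actual id.

```python
# Minimal inference sketch for the fine-tuned model described in the README.
# NOTE: the repo id below is an assumption (committer name + README title).
from transformers import pipeline

classifier = pipeline("image-classification", model="ssilvera10/vit_models")

# The beans dataset contains bean-leaf photos; any local path or URL works.
predictions = classifier("path/to/leaf.jpg")
print(predictions)  # e.g. [{"label": "healthy", "score": 0.98}, ...]
```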
all_results.json
CHANGED
@@ -1,13 +1,13 @@
 {
     "epoch": 4.0,
-    "eval_accuracy": 0.
-    "eval_loss": 0.
-    "eval_runtime": 3.
-    "eval_samples_per_second":
-    "eval_steps_per_second": 5.
+    "eval_accuracy": 0.9774436090225563,
+    "eval_loss": 0.029863391071558,
+    "eval_runtime": 3.137,
+    "eval_samples_per_second": 42.398,
+    "eval_steps_per_second": 5.419,
     "total_flos": 3.205097416476426e+17,
-    "train_loss": 0.
-    "train_runtime":
-    "train_samples_per_second":
-    "train_steps_per_second": 2.
+    "train_loss": 0.1251147744174187,
+    "train_runtime": 245.7854,
+    "train_samples_per_second": 16.828,
+    "train_steps_per_second": 2.116
 }
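The three JSON files in this commit follow the layout produced by the Trainer's `save_metrics` helper: each split is written to `{split}_results.json`, and with the default `combined=True` the metrics are also merged into `all_results.json`, which is why that file is the union of the other two. A sketch of a typical fine-tune that produces them, assuming the standard beans dataset; hyperparameters are placeholders, since only `epoch: 4.0` is visible in the diff.

```python
# Sketch of a typical ViT fine-tune that emits these JSON files.
# Hyperparameters are placeholders; only epoch=4 is visible in this commit,
# and a compute_metrics for accuracy is omitted for brevity.
import torch
from datasets import load_dataset
from transformers import (
    AutoImageProcessor,
    AutoModelForImageClassification,
    Trainer,
    TrainingArguments,
)

dataset = load_dataset("beans")
processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")

def transform(batch):
    # Turn PIL images into the pixel_values tensors ViT expects.
    inputs = processor([img for img in batch["image"]], return_tensors="pt")
    inputs["labels"] = batch["labels"]
    return inputs

encoded = dataset.with_transform(transform)

def collate_fn(examples):
    return {
        "pixel_values": torch.stack([e["pixel_values"] for e in examples]),
        "labels": torch.tensor([e["labels"] for e in examples]),
    }

labels = dataset["train"].features["labels"].names
model = AutoModelForImageClassification.from_pretrained(
    "google/vit-base-patch16-224-in21k", num_labels=len(labels)
)

args = TrainingArguments(
    output_dir="vit_models",
    num_train_epochs=4,
    remove_unused_columns=False,  # keep the "image" column for the transform
)
trainer = Trainer(
    model=model,
    args=args,
    data_collator=collate_fn,
    train_dataset=encoded["train"],
    eval_dataset=encoded["validation"],
)

train_result = trainer.train()
trainer.save_metrics("train", train_result.metrics)  # -> train_results.json

metrics = trainer.evaluate()
trainer.save_metrics("eval", metrics)                # -> eval_results.json
# Both calls default to combined=True, so they also merge into all_results.json.
```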
eval_results.json
CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 4.0,
-    "eval_accuracy": 0.
-    "eval_loss": 0.
-    "eval_runtime": 3.
-    "eval_samples_per_second":
-    "eval_steps_per_second": 5.
+    "eval_accuracy": 0.9774436090225563,
+    "eval_loss": 0.029863391071558,
+    "eval_runtime": 3.137,
+    "eval_samples_per_second": 42.398,
+    "eval_steps_per_second": 5.419
 }
train_results.json
CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 4.0,
     "total_flos": 3.205097416476426e+17,
-    "train_loss": 0.
-    "train_runtime":
-    "train_samples_per_second":
-    "train_steps_per_second": 2.
+    "train_loss": 0.1251147744174187,
+    "train_runtime": 245.7854,
+    "train_samples_per_second": 16.828,
+    "train_steps_per_second": 2.116
 }
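As a quick sanity check, the reported throughput figures are mutually consistent with four epochs over the standard beans splits (1034 training / 133 validation images; the split sizes are an assumption about the dataset, not stated in this commit):

```python
# Back-of-the-envelope check that the reported metrics hang together.
eval_samples = 3.137 * 42.398        # eval_runtime * eval_samples_per_second
print(round(eval_samples))           # 133 -> the beans validation split size

train_samples = 245.7854 * 16.828    # train_runtime * train_samples_per_second
print(round(train_samples))          # 4136 -> 4 epochs * 1034 training images
```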