Shresthadev403 committed on
Commit 4261c1a
1 Parent(s): 4897d02

End of training

README.md CHANGED
@@ -17,13 +17,13 @@ should probably proofread and complete it, then remove this comment. -->
 
  This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the food101 dataset.
  It achieves the following results on the evaluation set:
- - eval_loss: 0.7243
- - eval_accuracy: 0.8202
- - eval_runtime: 153.8982
- - eval_samples_per_second: 98.442
- - eval_steps_per_second: 6.153
- - epoch: 23.23
- - step: 22000
+ - eval_loss: 0.7594
+ - eval_accuracy: 0.8104
+ - eval_runtime: 155.7313
+ - eval_samples_per_second: 97.283
+ - eval_steps_per_second: 6.081
+ - epoch: 24.29
+ - step: 23000
 
  ## Model description
 
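The updated model card lists evaluation metrics but no usage example. Below is a minimal inference sketch, assuming the fine-tuned checkpoint is published under a repo id like `Shresthadev403/food-image-classification` (inferred from the checkpoint path in `trainer_state.json`, not stated in this diff) and that it reuses the preprocessing of the `google/vit-base-patch16-224-in21k` base model.

```python
# Minimal inference sketch. The repo id below is an assumption inferred from
# the checkpoint path ("food-image-classification") and the committer name;
# substitute the actual model id before running.
from PIL import Image
from transformers import pipeline

classifier = pipeline(
    "image-classification",
    model="Shresthadev403/food-image-classification",  # hypothetical repo id
)

image = Image.open("example_dish.jpg")  # any RGB food photo
for prediction in classifier(image, top_k=5):
    print(f"{prediction['label']}: {prediction['score']:.3f}")
```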
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a1dcec261e11534ea2e4e2de550fb5c3ba9a68d71402198f972c441f52648a96
+ oid sha256:7dc6f5a578f532ad87106d4cec6c0cdf748aa5b9c3cd35e88f7f8ad52cbb735f
  size 343528508
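The `model.safetensors` change only rewrites its Git LFS pointer: the weights themselves are addressed by the `oid sha256:` digest and `size` shown above. A downloaded copy can be checked against the new pointer roughly as follows (the local file path is an assumption):

```python
# Verify a downloaded model.safetensors against the LFS pointer above.
# The local path is an assumption; the expected digest and size come from
# the "+" lines of the diff.
import hashlib
from pathlib import Path

EXPECTED_OID = "7dc6f5a578f532ad87106d4cec6c0cdf748aa5b9c3cd35e88f7f8ad52cbb735f"
EXPECTED_SIZE = 343528508

path = Path("model.safetensors")
assert path.stat().st_size == EXPECTED_SIZE, "size mismatch"

sha256 = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)

assert sha256.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("model.safetensors matches the LFS pointer")
```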
runs/Feb05_03-08-58_983d148b451b/events.out.tfevents.1707102539.983d148b451b.26.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:57a78def9c1f785eb4d7f6bbaa73ab47ee215105a841c610607e72b4365bb855
- size 19915
+ oid sha256:0ce6e73591fdb51be189a8d88f82f51132563b067d67c396fb99cde827572455
+ size 20404
trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.8158415841584158,
-  "best_model_checkpoint": "food-image-classification/checkpoint-16000",
-  "epoch": 23.231256599788807,
+  "best_metric": 0.8201980198019801,
+  "best_model_checkpoint": "food-image-classification/checkpoint-22000",
+  "epoch": 24.287222808870116,
   "eval_steps": 1000,
-  "global_step": 22000,
+  "global_step": 23000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -337,6 +337,21 @@
       "eval_samples_per_second": 98.442,
       "eval_steps_per_second": 6.153,
       "step": 22000
+    },
+    {
+      "epoch": 24.29,
+      "learning_rate": 2.4287222808870115e-05,
+      "loss": 0.3785,
+      "step": 23000
+    },
+    {
+      "epoch": 24.29,
+      "eval_accuracy": 0.8103630363036304,
+      "eval_loss": 0.7593609690666199,
+      "eval_runtime": 155.7313,
+      "eval_samples_per_second": 97.283,
+      "eval_steps_per_second": 6.081,
+      "step": 23000
     }
   ],
   "logging_steps": 1000,
@@ -344,7 +359,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 500,
   "save_steps": 1000,
-  "total_flos": 1.0919126298846044e+20,
+  "total_flos": 1.1415453041911726e+20,
   "train_batch_size": 16,
   "trial_name": null,
   "trial_params": null