fjfur10 committed (verified)
Commit b36b4fc · 1 Parent(s): 4d2af60

fjfur10/trocr-cs682-2

Files changed (5):
  1. README.md +9 -9
  2. config.json +3 -1
  3. generation_config.json +1 -1
  4. model.safetensors +1 -1
  5. training_args.bin +2 -2
README.md CHANGED
@@ -15,8 +15,8 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [microsoft/trocr-small-handwritten](https://huggingface.co/microsoft/trocr-small-handwritten) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Loss: 18.4189
-- Cer: 2.3451
+- Loss: 9.6329
+- Cer: 0.9928
 
 ## Model description
 
@@ -39,7 +39,7 @@ The following hyperparameters were used during training:
 - train_batch_size: 8
 - eval_batch_size: 8
 - seed: 42
-- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+- optimizer: Use adamw_torch with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
 - lr_scheduler_type: linear
 - num_epochs: 3
 - mixed_precision_training: Native AMP
@@ -48,13 +48,13 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss | Cer    |
 |:-------------:|:-----:|:----:|:---------------:|:------:|
-| 18.267        | 1.0   | 9    | 18.4702         | 2.3908 |
-| 18.5157       | 2.0   | 18   | 18.4523         | 2.3099 |
-| 17.8628       | 3.0   | 27   | 18.4189         | 2.3451 |
+| 14.31         | 1.0   | 184  | 10.6901         | 1.1449 |
+| 12.83         | 2.0   | 368  | 9.7730          | 0.9783 |
+| 12.4054       | 3.0   | 552  | 9.6329          | 0.9928 |
 
 
 ### Framework versions
 
-- Transformers 4.44.2
-- Pytorch 2.5.0+cu121
-- Tokenizers 0.19.1
+- Transformers 4.46.2
+- Pytorch 2.5.1+cu121
+- Tokenizers 0.20.3
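The updated model card describes a TrOCR-style vision-encoder-decoder checkpoint. Below is a minimal inference sketch, not part of this commit: it assumes the Hub repo fjfur10/trocr-cs682-2 serves the weights shown here, that the processor from the base microsoft/trocr-small-handwritten checkpoint can be reused (this diff does not touch any processor files), and that `line.png` is a hypothetical cropped image of one handwritten text line.

```python
# Minimal inference sketch (assumptions noted in the text above).
from PIL import Image
from transformers import TrOCRProcessor, VisionEncoderDecoderModel

# Processor reuse from the base checkpoint is an assumption, not shown in this commit.
processor = TrOCRProcessor.from_pretrained("microsoft/trocr-small-handwritten")
model = VisionEncoderDecoderModel.from_pretrained("fjfur10/trocr-cs682-2")
model.eval()

# "line.png" is a hypothetical input file.
image = Image.open("line.png").convert("RGB")
pixel_values = processor(images=image, return_tensors="pt").pixel_values

generated_ids = model.generate(pixel_values)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])
```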
config.json CHANGED
@@ -4,6 +4,7 @@
     "VisionEncoderDecoderModel"
   ],
   "decoder": {
+    "_attn_implementation_autoset": false,
     "_name_or_path": "",
     "activation_dropout": 0.0,
     "activation_function": "relu",
@@ -85,6 +86,7 @@
   },
   "decoder_start_token_id": 0,
   "encoder": {
+    "_attn_implementation_autoset": false,
     "_name_or_path": "",
     "add_cross_attention": false,
     "architectures": null,
@@ -167,6 +169,6 @@
   "pad_token_id": 1,
   "tie_word_embeddings": false,
   "torch_dtype": "float32",
-  "transformers_version": "4.44.2",
+  "transformers_version": "4.46.2",
   "vocab_size": 64044
 }
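The `_attn_implementation_autoset` flags and the bumped `transformers_version` appear to be bookkeeping fields written when the config was re-saved with the newer Transformers release. A quick way to read back the nested encoder/decoder config, assuming the Hub repo resolves to the config.json shown above:

```python
# Sketch only: inspecting the nested config updated in this commit.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("fjfur10/trocr-cs682-2")
print(type(config).__name__)               # expected: VisionEncoderDecoderConfig
print(config.decoder_start_token_id)       # 0, per the diff above
print(config.decoder.activation_function)  # "relu", per the diff above
```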
generation_config.json CHANGED
@@ -3,6 +3,6 @@
   "decoder_start_token_id": 2,
   "eos_token_id": 2,
   "pad_token_id": 1,
-  "transformers_version": "4.44.2",
+  "transformers_version": "4.46.2",
   "use_cache": false
 }
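The generation defaults touched here can be read back directly with `GenerationConfig`; a small sketch, assuming the Hub repo serves this updated generation_config.json:

```python
# Sketch only: reading the generation defaults shown in the diff above.
from transformers import GenerationConfig

gen = GenerationConfig.from_pretrained("fjfur10/trocr-cs682-2")
print(gen.decoder_start_token_id)  # 2
print(gen.eos_token_id)            # 2
print(gen.pad_token_id)            # 1
print(gen.use_cache)               # False
```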
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9c59932f3453bac762dff97f7ec9f34bdeb880b319ee0a9be2b2381d3d36dcd4
+oid sha256:3d085ed608848698e18f355b78b570bc70a43d0da95624c611303293c03e1666
 size 246430696
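Only the git-lfs pointer changes here: the weights file keeps the same size (246430696 bytes) but gets a new SHA-256 object id. A sketch for verifying a locally downloaded model.safetensors against the new pointer; the local filename/path is an assumption:

```python
# Sketch only: checking a local download against the git-lfs pointer above.
import hashlib
import os

EXPECTED_OID = "3d085ed608848698e18f355b78b570bc70a43d0da95624c611303293c03e1666"
EXPECTED_SIZE = 246430696

assert os.path.getsize("model.safetensors") == EXPECTED_SIZE, "size mismatch"

sha = hashlib.sha256()
with open("model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

assert sha.hexdigest() == EXPECTED_OID, "oid mismatch"
print("model.safetensors matches the new pointer")
```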
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c6a2afe01e6cfa975e63451dfc3d5bf3d2cb9baf50a3922c168064c39a8e63ca
-size 5240
+oid sha256:bf23883cb68cb6374fa76b2a5e3189f97e929a60f91285dce7e479e5589c2e97
+size 5368
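training_args.bin is a pickled TrainingArguments object, so its size change here likely just reflects the Transformers version bump. A sketch for inspecting it after downloading; loading requires full unpickling, so only do this for files you trust, and the commented values are expectations taken from the README hyperparameters rather than verified output:

```python
# Sketch only: unpickling the serialized TrainingArguments replaced in this commit.
import torch

args = torch.load("training_args.bin", weights_only=False)  # trusted file only
print(args.per_device_train_batch_size)  # 8, per the README
print(args.num_train_epochs)             # 3, per the README
print(args.seed)                         # 42, per the README
```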