rushabhGod committed on
Commit
6b2b386
1 Parent(s): b248799

End of training

Browse files
README.md CHANGED
@@ -15,19 +15,19 @@ should probably proofread and complete it, then remove this comment. -->
15
 
16
  This model is a fine-tuned version of [SCUT-DLVCLab/lilt-roberta-en-base](https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base) on an unknown dataset.
17
  It achieves the following results on the evaluation set:
18
- - eval_loss: 1.7170
19
- - eval_ANSWER: {'precision': 0.8775510204081632, 'recall': 0.8947368421052632, 'f1': 0.886060606060606, 'number': 817}
20
- - eval_HEADER: {'precision': 0.4644808743169399, 'recall': 0.7142857142857143, 'f1': 0.5629139072847683, 'number': 119}
21
- - eval_QUESTION: {'precision': 0.8934348239771646, 'recall': 0.871866295264624, 'f1': 0.8825187969924814, 'number': 1077}
22
- - eval_overall_precision: 0.8491
23
- - eval_overall_recall: 0.8718
24
- - eval_overall_f1: 0.8603
25
- - eval_overall_accuracy: 0.7983
26
- - eval_runtime: 59.4689
27
- - eval_samples_per_second: 0.841
28
- - eval_steps_per_second: 0.118
29
- - epoch: 84.2105
30
- - step: 1600
31
 
32
  ## Model description
33
 
 
15
 
16
  This model is a fine-tuned version of [SCUT-DLVCLab/lilt-roberta-en-base](https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base) on an unknown dataset.
17
  It achieves the following results on the evaluation set:
18
+ - eval_loss: 1.6706
19
+ - eval_ANSWER: {'precision': 0.875, 'recall': 0.9082007343941249, 'f1': 0.8912912912912913, 'number': 817}
20
+ - eval_HEADER: {'precision': 0.6666666666666666, 'recall': 0.5714285714285714, 'f1': 0.6153846153846153, 'number': 119}
21
+ - eval_QUESTION: {'precision': 0.8880931065353626, 'recall': 0.9210770659238626, 'f1': 0.9042844120328168, 'number': 1077}
22
+ - eval_overall_precision: 0.8718
23
+ - eval_overall_recall: 0.8952
24
+ - eval_overall_f1: 0.8833
25
+ - eval_overall_accuracy: 0.8026
26
+ - eval_runtime: 50.8863
27
+ - eval_samples_per_second: 0.983
28
+ - eval_steps_per_second: 0.138
29
+ - epoch: 90.8421
30
+ - step: 1726
31
 
32
  ## Model description
33
 
logs/events.out.tfevents.1721227049.ACER1278.6244.0 CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:fd81fc53f616f44a086375a215389a50d85231ea0da672c82d27b9d5adc0db41
3
- size 10950
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:efb660c49a89ba57532a47eabd69e23f7c6c0299f3c591277200ca94735e4742
3
+ size 11454
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
preprocessor_config.json ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "apply_ocr": true,
3
+ "do_normalize": true,
4
+ "do_rescale": true,
5
+ "do_resize": true,
6
+ "image_mean": [
7
+ 0.5,
8
+ 0.5,
9
+ 0.5
10
+ ],
11
+ "image_processor_type": "LayoutLMv3FeatureExtractor",
12
+ "image_std": [
13
+ 0.5,
14
+ 0.5,
15
+ 0.5
16
+ ],
17
+ "ocr_lang": null,
18
+ "processor_class": "LayoutLMv3Processor",
19
+ "resample": 2,
20
+ "rescale_factor": 0.00392156862745098,
21
+ "size": {
22
+ "height": 224,
23
+ "width": 224
24
+ },
25
+ "tesseract_config": ""
26
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": true,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "cls_token": {
10
+ "content": "<s>",
11
+ "lstrip": false,
12
+ "normalized": true,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "eos_token": {
17
+ "content": "</s>",
18
+ "lstrip": false,
19
+ "normalized": true,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "mask_token": {
24
+ "content": "<mask>",
25
+ "lstrip": true,
26
+ "normalized": true,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ },
30
+ "pad_token": {
31
+ "content": "<pad>",
32
+ "lstrip": false,
33
+ "normalized": true,
34
+ "rstrip": false,
35
+ "single_word": false
36
+ },
37
+ "sep_token": {
38
+ "content": "</s>",
39
+ "lstrip": false,
40
+ "normalized": true,
41
+ "rstrip": false,
42
+ "single_word": false
43
+ },
44
+ "unk_token": {
45
+ "content": "<unk>",
46
+ "lstrip": false,
47
+ "normalized": true,
48
+ "rstrip": false,
49
+ "single_word": false
50
+ }
51
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": true,
3
+ "added_tokens_decoder": {
4
+ "0": {
5
+ "content": "<s>",
6
+ "lstrip": false,
7
+ "normalized": true,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ },
12
+ "1": {
13
+ "content": "<pad>",
14
+ "lstrip": false,
15
+ "normalized": true,
16
+ "rstrip": false,
17
+ "single_word": false,
18
+ "special": true
19
+ },
20
+ "2": {
21
+ "content": "</s>",
22
+ "lstrip": false,
23
+ "normalized": true,
24
+ "rstrip": false,
25
+ "single_word": false,
26
+ "special": true
27
+ },
28
+ "3": {
29
+ "content": "<unk>",
30
+ "lstrip": false,
31
+ "normalized": true,
32
+ "rstrip": false,
33
+ "single_word": false,
34
+ "special": true
35
+ },
36
+ "50264": {
37
+ "content": "<mask>",
38
+ "lstrip": true,
39
+ "normalized": true,
40
+ "rstrip": false,
41
+ "single_word": false,
42
+ "special": true
43
+ }
44
+ },
45
+ "bos_token": "<s>",
46
+ "clean_up_tokenization_spaces": true,
47
+ "cls_token": "<s>",
48
+ "cls_token_box": [
49
+ 0,
50
+ 0,
51
+ 0,
52
+ 0
53
+ ],
54
+ "eos_token": "</s>",
55
+ "errors": "replace",
56
+ "mask_token": "<mask>",
57
+ "model_max_length": 512,
58
+ "only_label_first_subword": true,
59
+ "pad_token": "<pad>",
60
+ "pad_token_box": [
61
+ 0,
62
+ 0,
63
+ 0,
64
+ 0
65
+ ],
66
+ "pad_token_label": -100,
67
+ "processor_class": "LayoutLMv3Processor",
68
+ "sep_token": "</s>",
69
+ "sep_token_box": [
70
+ 0,
71
+ 0,
72
+ 0,
73
+ 0
74
+ ],
75
+ "tokenizer_class": "LayoutLMv3Tokenizer",
76
+ "trim_offsets": true,
77
+ "unk_token": "<unk>"
78
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff