eclec committed on
Commit
b76c01b
·
1 Parent(s): 774cdc3

Training in progress, epoch 1

Browse files
config.json CHANGED
@@ -1,67 +1,34 @@
1
  {
2
- "_name_or_path": "C:\\Research\\haas-mfe-g28-cg\\model\\patentClassfication2\\",
3
  "architectures": [
4
  "BertForSequenceClassification"
5
  ],
6
- "attention_mode": "longformer",
7
  "attention_probs_dropout_prob": 0.1,
8
- "attention_window": [
9
- 512,
10
- 512,
11
- 512,
12
- 512,
13
- 512,
14
- 512,
15
- 512,
16
- 512,
17
- 512,
18
- 512,
19
- 512,
20
- 512,
21
- 512,
22
- 512,
23
- 512,
24
- 512,
25
- 512,
26
- 512,
27
- 512,
28
- 512,
29
- 512,
30
- 512,
31
- 512,
32
- 512
33
- ],
34
- "bos_token_id": 0,
35
  "classifier_dropout": null,
36
- "eos_token_id": 2,
37
- "gradient_checkpointing": false,
38
  "hidden_act": "gelu",
39
  "hidden_dropout_prob": 0.1,
40
- "hidden_size": 1024,
41
  "id2label": {
42
  "0": "NOT_REJECTED",
43
  "1": "REJECTED"
44
  },
45
- "ignore_attention_mask": false,
46
  "initializer_range": 0.02,
47
- "intermediate_size": 4096,
48
  "label2id": {
49
  "NOT_REJECTED": 0,
50
  "REJECTED": 1
51
  },
52
- "layer_norm_eps": 1e-05,
53
- "max_position_embeddings": 4098,
54
  "model_type": "bert",
55
- "num_attention_heads": 16,
56
- "num_hidden_layers": 24,
57
- "onnx_export": false,
58
- "pad_token_id": 1,
59
  "position_embedding_type": "absolute",
60
  "problem_type": "single_label_classification",
61
- "sep_token_id": 2,
62
  "torch_dtype": "float32",
63
  "transformers_version": "4.31.0",
64
- "type_vocab_size": 1,
65
  "use_cache": true,
66
- "vocab_size": 50265
67
  }
 
1
  {
2
+ "_name_or_path": "allenai/scibert_scivocab_uncased",
3
  "architectures": [
4
  "BertForSequenceClassification"
5
  ],
 
6
  "attention_probs_dropout_prob": 0.1,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  "classifier_dropout": null,
 
 
8
  "hidden_act": "gelu",
9
  "hidden_dropout_prob": 0.1,
10
+ "hidden_size": 768,
11
  "id2label": {
12
  "0": "NOT_REJECTED",
13
  "1": "REJECTED"
14
  },
 
15
  "initializer_range": 0.02,
16
+ "intermediate_size": 3072,
17
  "label2id": {
18
  "NOT_REJECTED": 0,
19
  "REJECTED": 1
20
  },
21
+ "layer_norm_eps": 1e-12,
22
+ "max_position_embeddings": 512,
23
  "model_type": "bert",
24
+ "num_attention_heads": 12,
25
+ "num_hidden_layers": 12,
26
+ "pad_token_id": 0,
 
27
  "position_embedding_type": "absolute",
28
  "problem_type": "single_label_classification",
 
29
  "torch_dtype": "float32",
30
  "transformers_version": "4.31.0",
31
+ "type_vocab_size": 2,
32
  "use_cache": true,
33
+ "vocab_size": 31090
34
  }
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:d4e5619f912c51d6e78b8546db09f5d91c81068978d9b751f18a0cc354d6fb2b
3
- size 1436261681
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6f2fee94789e720d48dfe18ca40284a4264780e2f0c70de853edcd1940a13bea
3
+ size 439748401
special_tokens_map.json CHANGED
@@ -1,15 +1,7 @@
1
  {
2
- "bos_token": "<s>",
3
- "cls_token": "<s>",
4
- "eos_token": "</s>",
5
- "mask_token": {
6
- "content": "<mask>",
7
- "lstrip": true,
8
- "normalized": false,
9
- "rstrip": false,
10
- "single_word": false
11
- },
12
- "pad_token": "<pad>",
13
- "sep_token": "</s>",
14
- "unk_token": "<unk>"
15
  }
 
1
  {
2
+ "cls_token": "[CLS]",
3
+ "mask_token": "[MASK]",
4
+ "pad_token": "[PAD]",
5
+ "sep_token": "[SEP]",
6
+ "unk_token": "[UNK]"
 
 
 
 
 
 
 
 
7
  }
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,15 +1,15 @@
1
  {
2
- "add_prefix_space": false,
3
- "bos_token": "<s>",
4
  "clean_up_tokenization_spaces": true,
5
- "cls_token": "<s>",
6
- "eos_token": "</s>",
7
- "errors": "replace",
8
- "mask_token": "<mask>",
9
- "model_max_length": 4096,
10
- "pad_token": "<pad>",
11
- "sep_token": "</s>",
12
- "tokenizer_class": "LongformerTokenizer",
13
- "trim_offsets": true,
14
- "unk_token": "<unk>"
 
 
15
  }
 
1
  {
 
 
2
  "clean_up_tokenization_spaces": true,
3
+ "cls_token": "[CLS]",
4
+ "do_basic_tokenize": true,
5
+ "do_lower_case": true,
6
+ "mask_token": "[MASK]",
7
+ "model_max_length": 512,
8
+ "never_split": null,
9
+ "pad_token": "[PAD]",
10
+ "sep_token": "[SEP]",
11
+ "strip_accents": null,
12
+ "tokenize_chinese_chars": true,
13
+ "tokenizer_class": "BertTokenizer",
14
+ "unk_token": "[UNK]"
15
  }
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:cdab9ac8f8024ad006c6764549fd220ce2f6fdb654551c8bf0004e269af0088a
3
  size 3963
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a80012863f2942aca51e0049c0dd5c36bfc5d5591a468a3fef6847758b3fbece
3
  size 3963