Harini2506 committed
Commit 3c5bd16 · 1 Parent(s): dcf81fc

Training in progress, epoch 1

config.json CHANGED
@@ -1,10 +1,9 @@
 {
-  "_name_or_path": "allenai/scibert_scivocab_uncased",
+  "_name_or_path": "microsoft/deberta-base",
   "architectures": [
-    "BertForTokenClassification"
+    "DebertaForTokenClassification"
   ],
   "attention_probs_dropout_prob": 0.1,
-  "classifier_dropout": null,
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.1,
   "hidden_size": 768,
@@ -44,16 +43,24 @@
     "I-TaskName": 14,
     "O": 0
   },
-  "layer_norm_eps": 1e-12,
+  "layer_norm_eps": 1e-07,
   "max_position_embeddings": 512,
-  "model_type": "bert",
+  "max_relative_positions": -1,
+  "model_type": "deberta",
   "num_attention_heads": 12,
   "num_hidden_layers": 12,
   "pad_token_id": 0,
-  "position_embedding_type": "absolute",
+  "pooler_dropout": 0,
+  "pooler_hidden_act": "gelu",
+  "pooler_hidden_size": 768,
+  "pos_att_type": [
+    "c2p",
+    "p2c"
+  ],
+  "position_biased_input": false,
+  "relative_attention": true,
   "torch_dtype": "float32",
   "transformers_version": "4.34.1",
-  "type_vocab_size": 2,
-  "use_cache": true,
-  "vocab_size": 31090
+  "type_vocab_size": 0,
+  "vocab_size": 50265
 }
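
In short, this commit re-bases the token-classification model from allenai/scibert_scivocab_uncased (BERT: 31,090-entry WordPiece vocab, absolute position embeddings) onto microsoft/deberta-base (50,265-entry BPE vocab, disentangled relative attention via the c2p/p2c pos_att_type). A minimal sketch of how a config.json like the new one gets written follows; the label map is only partially visible above ("O": 0, "I-TaskName": 14, suggesting at least 15 labels), so the placeholder label list and the output directory name are assumptions:

from transformers import AutoConfig, AutoModelForTokenClassification

# Assumed label set: the diff only shows "O" -> 0 and "I-TaskName" -> 14,
# so the intermediate names here are placeholders, not the real tag set.
labels = ["O"] + [f"LABEL_{i}" for i in range(1, 15)]

config = AutoConfig.from_pretrained(
    "microsoft/deberta-base",
    num_labels=len(labels),
    id2label=dict(enumerate(labels)),
    label2id={label: i for i, label in enumerate(labels)},
)
model = AutoModelForTokenClassification.from_pretrained(
    "microsoft/deberta-base", config=config
)

# save_pretrained writes a config.json with the fields shown in the diff
# ("model_type": "deberta", "vocab_size": 50265, the label maps, etc.).
model.save_pretrained("deberta-ner-checkpoint")  # directory name is illustrative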
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ad55fd07b8a5b59ba0195e7e68561848a440ce2f9f40f77dece63da645a4d146
-size 437425830
+oid sha256:ec0561a321fd1592d0606df589bd8fd4a2b5b2ab660b584f8db0afc057aced8c
+size 554521634
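
The LFS pointer swap shows the checkpoint growing from ~437 MB to ~555 MB; at float32 that is roughly 109M vs. 139M parameters, consistent with DeBERTa-base's larger embedding matrix (50,265 vs. 31,090 vocabulary rows) plus its extra relative-position attention weights.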
special_tokens_map.json CHANGED
@@ -1,7 +1,51 @@
 {
-  "cls_token": "[CLS]",
-  "mask_token": "[MASK]",
-  "pad_token": "[PAD]",
-  "sep_token": "[SEP]",
-  "unk_token": "[UNK]"
+  "bos_token": {
+    "content": "[CLS]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "cls_token": {
+    "content": "[CLS]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "[SEP]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "mask_token": {
+    "content": "[MASK]",
+    "lstrip": true,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "[PAD]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "sep_token": {
+    "content": "[SEP]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "[UNK]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
 }
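
The new file stores each special token as a full AddedToken record rather than a bare string. Note that [MASK] alone sets lstrip and normalized to true, the usual convention for BPE tokenizers (the mask must absorb the space token that precedes it), and that the token strings stay BERT-style ([CLS]/[SEP]/[MASK]) even though the underlying vocabulary is now GPT-2-style BPE.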
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,4 +1,6 @@
 {
+  "add_bos_token": false,
+  "add_prefix_space": true,
   "added_tokens_decoder": {
     "0": {
       "content": "[PAD]",
@@ -8,50 +10,50 @@
       "single_word": false,
       "special": true
     },
-    "101": {
-      "content": "[UNK]",
+    "1": {
+      "content": "[CLS]",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "102": {
-      "content": "[CLS]",
+    "2": {
+      "content": "[SEP]",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "103": {
-      "content": "[SEP]",
+    "3": {
+      "content": "[UNK]",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "104": {
+    "50264": {
       "content": "[MASK]",
-      "lstrip": false,
-      "normalized": false,
+      "lstrip": true,
+      "normalized": true,
       "rstrip": false,
       "single_word": false,
       "special": true
     }
   },
+  "bos_token": "[CLS]",
   "clean_up_tokenization_spaces": true,
   "cls_token": "[CLS]",
-  "do_basic_tokenize": true,
-  "do_lower_case": true,
+  "do_lower_case": false,
+  "eos_token": "[SEP]",
+  "errors": "replace",
   "mask_token": "[MASK]",
-  "model_max_length": 1000000000000000019884624838656,
-  "never_split": null,
+  "model_max_length": 512,
   "pad_token": "[PAD]",
   "sep_token": "[SEP]",
-  "strip_accents": null,
-  "tokenize_chinese_chars": true,
-  "tokenizer_class": "BertTokenizer",
-  "unk_token": "[UNK]"
+  "tokenizer_class": "DebertaTokenizer",
+  "unk_token": "[UNK]",
+  "vocab_type": "gpt2"
 }
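
A quick sanity check of the new tokenizer settings (a sketch; the trained checkpoint's repo id is not shown on this page, so upstream microsoft/deberta-base, which ships the same settings, stands in):

from transformers import AutoTokenizer

# Stand-in checkpoint; the actual repo id for this commit is not visible here.
tok = AutoTokenizer.from_pretrained("microsoft/deberta-base")

print(tok.model_max_length)  # expected: 512, matching the new tokenizer_config.json
print(tok.convert_tokens_to_ids(["[PAD]", "[CLS]", "[SEP]", "[UNK]", "[MASK]"]))
# expected: [0, 1, 2, 3, 50264], the ids listed in added_tokens_decoder above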
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a65c0149f7b70d180dda004a54af8972847a206043e4caa7c0a6d6c611a2f52c
+oid sha256:bf901de1a38305f3848943331c905ae4f6f2a1a4915e92cf15f364a91042a83b
 size 4472
vocab.json CHANGED
The diff for this file is too large to render. See raw diff
 
vocab.txt CHANGED
The diff for this file is too large to render. See raw diff