Carlos Rosas committed on
Commit
9d65dd7
1 Parent(s): 15ca0cc

Upload 8 files

config(3).json ADDED
@@ -0,0 +1,63 @@
+ {
+   "_name_or_path": "/content/drive/MyDrive/deberta_large",
+   "architectures": [
+     "DebertaForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "id2label": {
+     "0": "Crime et justice",
+     "1": "Incidents et accidents",
+     "2": "Faits divers locaux",
+     "3": "\u00c9conomie",
+     "4": "Climat",
+     "5": "Immigration et asile",
+     "6": "Politique nationale",
+     "7": "Politique locale",
+     "8": "Soci\u00e9t\u00e9",
+     "9": "\u00c9ducation",
+     "10": "Sport",
+     "11": "Culture",
+     "12": "Sant\u00e9"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "label2id": {
+     "Climat": 4,
+     "Crime et justice": 0,
+     "Culture": 11,
+     "Faits divers locaux": 2,
+     "Immigration et asile": 5,
+     "Incidents et accidents": 1,
+     "Politique locale": 7,
+     "Politique nationale": 6,
+     "Sant\u00e9": 12,
+     "Soci\u00e9t\u00e9": 8,
+     "Sport": 10,
+     "\u00c9conomie": 3,
+     "\u00c9ducation": 9
+   },
+   "layer_norm_eps": 1e-07,
+   "max_position_embeddings": 512,
+   "max_relative_positions": -1,
+   "model_type": "deberta",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "pad_token_id": 0,
+   "pooler_dropout": 0,
+   "pooler_hidden_act": "gelu",
+   "pooler_hidden_size": 1024,
+   "pos_att_type": [
+     "c2p",
+     "p2c"
+   ],
+   "position_biased_input": false,
+   "problem_type": "multi_label_classification",
+   "relative_attention": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.46.2",
+   "type_vocab_size": 0,
+   "vocab_size": 50265
+ }
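The config declares `"problem_type": "multi_label_classification"` over 13 French news-topic labels (Crime et justice, Économie, Climat, …), so at inference each logit should go through a sigmoid rather than a softmax over classes. A minimal loading sketch follows; it assumes the uploaded files have been renamed to their canonical names (config.json, merges.txt, tokenizer.json, vocab.json, …, without the "(3)"-style download suffixes) in a local directory, and the 0.5 decision threshold is an assumption, not something recorded in this commit:

```python
# Minimal multi-label inference sketch. The local path "./deberta_large"
# and the 0.5 threshold are assumptions; adjust to your setup.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

path = "./deberta_large"  # directory holding the files from this commit
tokenizer = AutoTokenizer.from_pretrained(path)
model = AutoModelForSequenceClassification.from_pretrained(path)
model.eval()

text = "Le conseil municipal a voté le budget 2024."
inputs = tokenizer(text, truncation=True, max_length=512, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits       # shape: (1, 13)
probs = torch.sigmoid(logits)[0]          # independent probability per label
for i, p in enumerate(probs):
    if p > 0.5:                           # assumed threshold
        print(model.config.id2label[i], float(p))
```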
merges(2).txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a0df9bb74bc3897e887543ec4529877319af5e85e49aacf1caa889ac912acb38
+ size 1624952148
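model.safetensors is stored as a Git LFS pointer; the ~1.6 GB blob (consistent with a 24-layer, 1024-hidden float32 checkpoint) is fetched with `git lfs pull`. A small sketch to check a downloaded copy against the pointer's oid; the local filename is an assumption:

```python
# Verify a downloaded model.safetensors against the LFS pointer above.
import hashlib

expected = "a0df9bb74bc3897e887543ec4529877319af5e85e49aacf1caa889ac912acb38"
h = hashlib.sha256()
with open("model.safetensors", "rb") as f:          # assumed local path
    for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
        h.update(chunk)
print("OK" if h.hexdigest() == expected else "checksum mismatch")
```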
special_tokens_map(1).json ADDED
@@ -0,0 +1,51 @@
+ {
+   "bos_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "cls_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": true,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "[UNK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
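Note that `[MASK]` is the only special token with `lstrip`/`normalized` set to true, the usual DeBERTa convention for its GPT-2-style BPE, where the mask must absorb a preceding space. A quick sanity check of how these tokens frame an encoded sequence (the local path is again an assumption):

```python
# Sketch: special tokens around an encoded sequence, using the files above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./deberta_large")  # assumed path
ids = tok("Un exemple.")["input_ids"]
print(tok.convert_ids_to_tokens(ids))   # ['[CLS]', ..., '[SEP]']
print(tok.cls_token_id, tok.sep_token_id, tok.pad_token_id)  # 1, 2, 0
```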
tokenizer(1).json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config(3).json ADDED
@@ -0,0 +1,59 @@
+ {
+   "add_bos_token": false,
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50264": {
+       "content": "[MASK]",
+       "lstrip": true,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "[CLS]",
+   "clean_up_tokenization_spaces": false,
+   "cls_token": "[CLS]",
+   "do_lower_case": false,
+   "eos_token": "[SEP]",
+   "errors": "replace",
+   "mask_token": "[MASK]",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "tokenizer_class": "DebertaTokenizer",
+   "unk_token": "[UNK]",
+   "vocab_type": "gpt2"
+ }
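`model_max_length` here is the "no limit recorded" sentinel value, while the model config above caps `max_position_embeddings` at 512, so input length has to be capped explicitly at load or encode time. A minimal sketch, assuming the same local directory as before:

```python
# The tokenizer files record no length limit (sentinel model_max_length),
# but the model only has 512 positions; pin the limit when loading.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./deberta_large", model_max_length=512)
enc = tok("Texte long " * 500, truncation=True)
assert len(enc["input_ids"]) <= 512
```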
training_args(1).bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:354e928153fd012d5919f3ea636d5ebf61b052893289ab750dfcefd85f13e78c
+ size 5304
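training_args.bin is the usual Trainer artifact: a pickled `TrainingArguments` object (here ~5 KB). It can be inspected with `torch.load`, but since it is a pickle it should only be opened from a trusted source. A hedged sketch; the exact hyperparameters it records are not shown in this commit:

```python
# Inspect the pickled TrainingArguments. Pickle executes code on load,
# so only do this with a file from a source you trust.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)                         # expected: TrainingArguments
print(args.learning_rate, args.num_train_epochs)   # recorded hyperparameters
```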
vocab(2).json ADDED
The diff for this file is too large to render. See raw diff