Narsil (HF staff) committed
Commit 309cd45
Parent(s): 21a9660

Initial commit

config.json ADDED
@@ -0,0 +1,23 @@
+ {
+ "_name_or_path": "/home/nicolas/data/weights/distilbert/distil_bert_for_sequence_classification",
+ "activation": "gelu",
+ "architectures": [
+ "DistilBertForSequenceClassification"
+ ],
+ "attention_dropout": 0.1,
+ "dim": 32,
+ "dropout": 0.1,
+ "hidden_act": "gelu",
+ "hidden_dim": 37,
+ "initializer_range": 0.02,
+ "max_position_embeddings": 512,
+ "model_type": "distilbert",
+ "n_heads": 4,
+ "n_layers": 5,
+ "pad_token_id": 0,
+ "qa_dropout": 0.1,
+ "seq_classif_dropout": 0.2,
+ "sinusoidal_pos_embds": false,
+ "transformers_version": "4.10.0.dev0",
+ "vocab_size": 99
+ }
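The config above describes a deliberately tiny DistilBERT classifier (5 layers, 4 heads, hidden size 32, vocabulary of 99). Below is a minimal sketch of materialising it with transformers; the local path "config.json" is an assumption, and the model built this way is randomly initialised rather than loaded from pytorch_model.bin.

```python
# Minimal sketch (assumption: config.json from this commit is saved locally).
# Builds the tiny DistilBERT sequence-classification model the config describes;
# weights are randomly initialised here, not the ones from pytorch_model.bin.
from transformers import DistilBertConfig, DistilBertForSequenceClassification

config = DistilBertConfig.from_json_file("config.json")  # n_layers=5, n_heads=4, dim=32
model = DistilBertForSequenceClassification(config)
print(model.config.vocab_size)  # 99
```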
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b9a77cb35d6dfbcb9342b918ae53021b9f36515875273b70683252809360a9f3
+ size 245213
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tf_model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5c98f13faf1076bba8af527e266da2bb43ffc862c84608de6c832933cd0a1830
+ size 331628
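Both weight files (pytorch_model.bin and tf_model.h5) are committed as Git LFS pointers: the repository stores only the spec version, a sha256 oid, and the byte size, while the binary payload is fetched separately. A small sketch for checking a downloaded file against the oid recorded above, assuming pytorch_model.bin has already been pulled into the working directory:

```python
# Minimal sketch (assumption: the real pytorch_model.bin has already been
# downloaded locally). Verifies the file against the sha256 oid recorded in
# the LFS pointer committed above.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "b9a77cb35d6dfbcb9342b918ae53021b9f36515875273b70683252809360a9f3"
print(sha256_of("pytorch_model.bin") == expected)  # True for the 245213-byte payload
```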
tokenizer.json ADDED
@@ -0,0 +1 @@
+ {"version":"1.0","truncation":null,"padding":null,"added_tokens":[{"id":0,"special":true,"content":"[PAD]","single_word":false,"lstrip":false,"rstrip":false,"normalized":false},{"id":1,"special":true,"content":"[UNK]","single_word":false,"lstrip":false,"rstrip":false,"normalized":false},{"id":2,"special":true,"content":"[CLS]","single_word":false,"lstrip":false,"rstrip":false,"normalized":false},{"id":3,"special":true,"content":"[SEP]","single_word":false,"lstrip":false,"rstrip":false,"normalized":false},{"id":4,"special":true,"content":"[MASK]","single_word":false,"lstrip":false,"rstrip":false,"normalized":false}],"normalizer":{"type":"BertNormalizer","clean_text":true,"handle_chinese_chars":true,"strip_accents":null,"lowercase":true},"pre_tokenizer":{"type":"BertPreTokenizer"},"post_processor":{"type":"TemplateProcessing","single":[{"SpecialToken":{"id":"[CLS]","type_id":0}},{"Sequence":{"id":"A","type_id":0}},{"SpecialToken":{"id":"[SEP]","type_id":0}}],"pair":[{"SpecialToken":{"id":"[CLS]","type_id":0}},{"Sequence":{"id":"A","type_id":0}},{"SpecialToken":{"id":"[SEP]","type_id":0}},{"Sequence":{"id":"B","type_id":1}},{"SpecialToken":{"id":"[SEP]","type_id":1}}],"special_tokens":{"[CLS]":{"id":"[CLS]","ids":[2],"tokens":["[CLS]"]},"[SEP]":{"id":"[SEP]","ids":[3],"tokens":["[SEP]"]}}},"decoder":{"type":"WordPiece","prefix":"##","cleanup":true},"model":{"type":"WordPiece","unk_token":"[UNK]","continuing_subword_prefix":"##","max_input_chars_per_word":100,"vocab":{"[PAD]":0,"[UNK]":1,"[CLS]":2,"[SEP]":3,"[MASK]":4,"0":5,"1":6,"2":7,"3":8,"4":9,"5":10,"6":11,"7":12,"8":13,"9":14,"a":15,"b":16,"c":17,"d":18,"e":19,"f":20,"g":21,"h":22,"i":23,"j":24,"k":25,"l":26,"m":27,"n":28,"o":29,"p":30,"q":31,"r":32,"s":33,"t":34,"u":35,"v":36,"w":37,"x":38,"y":39,"z":40}}}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "name_or_path": "distilbert-base-uncased", "tokenizer_class": "DistilBertTokenizer"}
vocab.txt ADDED
@@ -0,0 +1,41 @@
+ [PAD]
+ [UNK]
+ [CLS]
+ [SEP]
+ [MASK]
+ 0
+ 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+ a
+ b
+ c
+ d
+ e
+ f
+ g
+ h
+ i
+ j
+ k
+ l
+ m
+ n
+ o
+ p
+ q
+ r
+ s
+ t
+ u
+ v
+ w
+ x
+ y
+ z