nyust-eb210 committed
Commit: a07b945
1 Parent(s): 70840cc

Update file structure from vincentwu
- tf_model/config.json → config.json +0 -0
- pt_model/config.json +0 -30
- pt_model/tokenizer_config.json +0 -1
- pt_model/pytorch_model.bin → pytorch_model.bin +0 -0
- pt_model/special_tokens_map.json → special_tokens_map.json +0 -0
- tf_model/tf_model.h5 → tf_model.h5 +0 -0
- tf_model/special_tokens_map.json +0 -1
- tf_model/vocab.txt +0 -0
- tf_model/tokenizer_config.json → tokenizer_config.json +0 -0
- pt_model/vocab.txt → vocab.txt +0 -0
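
In effect, the commit flattens the repository: the PyTorch and TensorFlow files leave their pt_model/ and tf_model/ subdirectories for the repository root, the layout transformers expects when loading by repo id, and the duplicated config/tokenizer files are dropped. A minimal loading sketch, assuming the flattened layout (the repo id below is a placeholder, not taken from this page):

# Minimal sketch: with config.json, pytorch_model.bin, vocab.txt, etc. at the
# repo root, from_pretrained works without pointing at a subfolder.
from transformers import AutoTokenizer, AutoModelForQuestionAnswering

repo_id = "nyust-eb210/<repo-name>"  # hypothetical placeholder, not the verified repo id
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForQuestionAnswering.from_pretrained(repo_id)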
tf_model/config.json → config.json
RENAMED
File without changes
pt_model/config.json
DELETED
@@ -1,30 +0,0 @@
-{
-  "_name_or_path": "tf_model",
-  "architectures": [
-    "BertForQuestionAnswering"
-  ],
-  "attention_probs_dropout_prob": 0.1,
-  "directionality": "bidi",
-  "gradient_checkpointing": false,
-  "hidden_act": "gelu",
-  "hidden_dropout_prob": 0.1,
-  "hidden_size": 768,
-  "initializer_range": 0.02,
-  "intermediate_size": 3072,
-  "layer_norm_eps": 1e-12,
-  "max_position_embeddings": 512,
-  "model_type": "bert",
-  "num_attention_heads": 12,
-  "num_hidden_layers": 12,
-  "pad_token_id": 0,
-  "pooler_fc_size": 768,
-  "pooler_num_attention_heads": 12,
-  "pooler_num_fc_layers": 3,
-  "pooler_size_per_head": 128,
-  "pooler_type": "first_token_transform",
-  "position_embedding_type": "absolute",
-  "transformers_version": "4.5.0",
-  "type_vocab_size": 2,
-  "use_cache": true,
-  "vocab_size": 21128
-}
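
The deleted PyTorch config (note "_name_or_path": "tf_model", suggesting it was exported from the TensorFlow copy) describes a standard BERT-base encoder with a question-answering head and the 21,128-entry vocabulary used by bert-base-chinese. An illustrative reconstruction with transformers, not code from this repo:

# Illustrative only: rebuild the deleted config programmatically.
from transformers import BertConfig

config = BertConfig(
    vocab_size=21128,              # bert-base-chinese vocabulary size
    hidden_size=768,
    num_hidden_layers=12,
    num_attention_heads=12,
    intermediate_size=3072,
    max_position_embeddings=512,
    type_vocab_size=2,
    layer_norm_eps=1e-12,
)
print(config.model_type)  # "bert"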
pt_model/tokenizer_config.json
DELETED
@@ -1 +0,0 @@
-{"do_lower_case": false, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "bert-base-chinese"}
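
The deleted tokenizer config carries the usual Chinese BERT settings: no lowercasing, per-character CJK tokenization, and a 512-token limit, inherited from bert-base-chinese. A sketch of their effect, assuming the bert-base-chinese vocabulary named in the file (not this repo's code):

# Sketch: these settings tokenize Chinese text one character at a time.
from transformers import BertTokenizerFast

tok = BertTokenizerFast.from_pretrained(
    "bert-base-chinese",
    do_lower_case=False,
    tokenize_chinese_chars=True,   # each CJK character becomes its own token
    model_max_length=512,
)
print(tok.tokenize("你好世界"))  # ['你', '好', '世', '界']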
pt_model/pytorch_model.bin → pytorch_model.bin
RENAMED
File without changes
pt_model/special_tokens_map.json → special_tokens_map.json
RENAMED
File without changes
tf_model/tf_model.h5 → tf_model.h5
RENAMED
File without changes
tf_model/special_tokens_map.json
DELETED
@@ -1 +0,0 @@
-{"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
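
The five entries are exactly the default BERT special tokens, so the deleted copy carried no repo-specific information. An illustrative check against the upstream tokenizer:

# Illustrative: the deleted map matches bert-base-chinese's defaults.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("bert-base-chinese")
print(tok.special_tokens_map)
# {'unk_token': '[UNK]', 'sep_token': '[SEP]', 'pad_token': '[PAD]',
#  'cls_token': '[CLS]', 'mask_token': '[MASK]'}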
tf_model/vocab.txt
DELETED
The diff for this file is too large to render.
tf_model/tokenizer_config.json → tokenizer_config.json
RENAMED
File without changes
pt_model/vocab.txt → vocab.txt
RENAMED
File without changes