Adding ONNX files for this model
Beep boop I am the [ONNX export bot 🤖🏎️](https://huggingface.co/spaces/onnx/export). On behalf of [moki09](https://huggingface.co/moki09), I would like to add to this repository the model converted to ONNX.
What is ONNX? It stands for "Open Neural Network Exchange", and is the most commonly used open standard for machine learning interoperability. You can find out more at [onnx.ai](https://onnx.ai/)!
The exported ONNX model can then be consumed by various backends such as TensorRT or TVM, or simply be used in a few lines with 🤗 Optimum through ONNX Runtime. Check out how [here](https://huggingface.co/docs/optimum/main/en/onnxruntime/usage_guides/models)!
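For reference, a minimal sketch of loading the exported files with 🤗 Optimum and ONNX Runtime is shown below. It assumes the ONNX weights live under the `onnx/` subfolder added by this PR and uses a placeholder repository id (`<repo-id>`), which is not part of this PR; adjust both to match this repository.

```python
# Minimal sketch (not part of this PR): load the ONNX export with 🤗 Optimum.
# Assumes the files sit in the "onnx/" subfolder added here and that
# "<repo-id>" is replaced with this repository's id on the Hugging Face Hub.
from transformers import AutoTokenizer
from optimum.onnxruntime import ORTModelForSeq2SeqLM

repo_id = "<repo-id>"  # placeholder for this repository
tokenizer = AutoTokenizer.from_pretrained(repo_id, subfolder="onnx")
model = ORTModelForSeq2SeqLM.from_pretrained(repo_id, subfolder="onnx")

# Translate a short Thai sentence to English with the exported encoder/decoder.
inputs = tokenizer("สวัสดีครับ", return_tensors="pt")
generated = model.generate(**inputs)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
```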
- .gitattributes +1 -0
- onnx/added_tokens.json +5 -0
- onnx/config.json +60 -0
- onnx/decoder_model.onnx +3 -0
- onnx/decoder_model_merged.onnx +3 -0
- onnx/decoder_with_past_model.onnx +3 -0
- onnx/encoder_model.onnx +3 -0
- onnx/generation_config.json +16 -0
- onnx/source.spm +3 -0
- onnx/special_tokens_map.json +5 -0
- onnx/target.spm +0 -0
- onnx/tokenizer_config.json +40 -0
- onnx/vocab.json +0 -0
.gitattributes
CHANGED
@@ -6,3 +6,4 @@
 *.tar.gz filter=lfs diff=lfs merge=lfs -text
 *.ot filter=lfs diff=lfs merge=lfs -text
 *.onnx filter=lfs diff=lfs merge=lfs -text
+onnx/source.spm filter=lfs diff=lfs merge=lfs -text
onnx/added_tokens.json
ADDED
@@ -0,0 +1,5 @@
+{
+  "</s>": 0,
+  "<pad>": 62306,
+  "<unk>": 1
+}
onnx/config.json
ADDED
@@ -0,0 +1,60 @@
+{
+  "_name_or_path": "Helsinki-NLP/opus-mt-th-en",
+  "activation_dropout": 0.0,
+  "activation_function": "swish",
+  "add_bias_logits": false,
+  "add_final_layer_norm": false,
+  "architectures": [
+    "MarianMTModel"
+  ],
+  "attention_dropout": 0.0,
+  "bad_words_ids": [
+    [
+      62306
+    ]
+  ],
+  "bos_token_id": 0,
+  "classif_dropout": 0.0,
+  "classifier_dropout": 0.0,
+  "d_model": 512,
+  "decoder_attention_heads": 8,
+  "decoder_ffn_dim": 2048,
+  "decoder_layerdrop": 0.0,
+  "decoder_layers": 6,
+  "decoder_start_token_id": 62306,
+  "decoder_vocab_size": 62307,
+  "dropout": 0.1,
+  "encoder_attention_heads": 8,
+  "encoder_ffn_dim": 2048,
+  "encoder_layerdrop": 0.0,
+  "encoder_layers": 6,
+  "eos_token_id": 0,
+  "extra_pos_embeddings": 62307,
+  "forced_eos_token_id": 0,
+  "id2label": {
+    "0": "LABEL_0",
+    "1": "LABEL_1",
+    "2": "LABEL_2"
+  },
+  "init_std": 0.02,
+  "is_encoder_decoder": true,
+  "label2id": {
+    "LABEL_0": 0,
+    "LABEL_1": 1,
+    "LABEL_2": 2
+  },
+  "max_length": 512,
+  "max_position_embeddings": 512,
+  "model_type": "marian",
+  "normalize_before": false,
+  "normalize_embedding": false,
+  "num_beams": 6,
+  "num_hidden_layers": 6,
+  "pad_token_id": 62306,
+  "scale_embedding": true,
+  "share_encoder_decoder_embeddings": true,
+  "static_position_embeddings": true,
+  "transformers_version": "4.34.0",
+  "use_cache": true,
+  "vocab_size": 62307
+}
onnx/decoder_model.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fac3f8c4debec9bc0062a7675e340e08215270bd51f06e8105be48cbb3b97022
+size 357680260
onnx/decoder_model_merged.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:91345792847cd4398f8e78d3a6496fd41f55c9f9e4faa8a50798d8baef3166d3
+size 357915742
onnx/decoder_with_past_model.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f2694b51893ed2284e6c74e3f42d9a9b54da93fa377db27558d01ec8cc408c55
+size 345029751
onnx/encoder_model.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ddd655f4ed541dd44106ea29bec4b7c35101b22bebcbd3d13dfab01ec8bec058
+size 204420908
onnx/generation_config.json
ADDED
@@ -0,0 +1,16 @@
+{
+  "bad_words_ids": [
+    [
+      62306
+    ]
+  ],
+  "bos_token_id": 0,
+  "decoder_start_token_id": 62306,
+  "eos_token_id": 0,
+  "forced_eos_token_id": 0,
+  "max_length": 512,
+  "num_beams": 6,
+  "pad_token_id": 62306,
+  "renormalize_logits": true,
+  "transformers_version": "4.34.0"
+}
onnx/source.spm
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:01ecb88d0f92bbb20c21257b92e93e301a10971ecd60971e8e637bc3eacd9124
+size 1213743
onnx/special_tokens_map.json
ADDED
@@ -0,0 +1,5 @@
+{
+  "eos_token": "</s>",
+  "pad_token": "<pad>",
+  "unk_token": "<unk>"
+}
onnx/target.spm
ADDED
Binary file (810 kB)
onnx/tokenizer_config.json
ADDED
@@ -0,0 +1,40 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "62306": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "additional_special_tokens": [],
+  "clean_up_tokenization_spaces": true,
+  "eos_token": "</s>",
+  "model_max_length": 512,
+  "pad_token": "<pad>",
+  "separate_vocabs": false,
+  "source_lang": "tha",
+  "sp_model_kwargs": {},
+  "target_lang": "eng",
+  "tokenizer_class": "MarianTokenizer",
+  "tokenizer_file": null,
+  "unk_token": "<unk>"
+}
onnx/vocab.json
ADDED
The diff for this file is too large to render.