Commit 1ea8798 by jonatasgrosman
Parent(s): c01a478

update model

Files changed:
- README.md +16 -9
- config.json +9 -1
- preprocessor_config.json +1 -0
- pytorch_model.bin +1 -1
- vocab.json +1 -1

README.md CHANGED

@@ -24,10 +24,10 @@ model-index:
     metrics:
     - name: Test WER
       type: wer
-      value: 11.
+      value: 11.81
     - name: Test CER
       type: cer
-      value: 4.
+      value: 4.06
 ---
 
 # Wav2Vec2-Large-XLSR-53-Portuguese

@@ -49,7 +49,7 @@ from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
 
 LANG_ID = "pt"
 MODEL_ID = "jonatasgrosman/wav2vec2-large-xlsr-53-portuguese"
-SAMPLES =
+SAMPLES = 10
 
 test_dataset = load_dataset("common_voice", LANG_ID, split=f"test[:{SAMPLES}]")
 

@@ -81,11 +81,16 @@ for i, predicted_sentence in enumerate(predicted_sentences):
 
 | Reference | Prediction |
 | ------------- | ------------- |
-| NEM O RADAR NEM OS OUTROS INSTRUMENTOS DETECTARAM O BOMBARDEIRO STEALTH. |
-| PEDIR DINHEIRO EMPRESTADO ÀS PESSOAS DA ALDEIA |
+| NEM O RADAR NEM OS OUTROS INSTRUMENTOS DETECTARAM O BOMBARDEIRO STEALTH. | NEM UM VADAMEIM OS SOFTWART'S INSTRUMENTOS DE TETEN UM BAMBEDEIRO STER |
+| PEDIR DINHEIRO EMPRESTADO ÀS PESSOAS DA ALDEIA | PEDIR GINHEIRO E EMPRESTAR DAS PESSOAS DO ALDEIA |
 | OITO | OITO |
-| TRANCÁ-LOS |
+| TRANCÁ-LOS | TRÃM CALMOS |
 | REALIZAR UMA INVESTIGAÇÃO PARA RESOLVER O PROBLEMA | REALIZAR UMA INVESTIGAÇÃO PARA RESOLVER O PROBLEMA |
+| O YOUTUBE AINDA É A MELHOR PLATAFORMA DE VÍDEOS. | YOUTUBE AINDA É A MELHOR PLATAFORMA DE VÍDEOS |
+| MENINA E MENINO BEIJANDO NAS SOMBRAS | MENINA E MENINO BEIJANDO NAS SOMBRAS |
+| EU SOU O SENHOR | EU SOU O SENHOR |
+| DUAS MULHERES QUE SENTAM-SE PARA BAIXO LENDO JORNAIS. | DUAS MERES QUES SENTAM-SE PARA BAIXO GRANDES DE UM NAIS |
+| EU ORIGINALMENTE ESPERAVA | EU ORIGINALMENTE ESPERAVA |
 
 ## Evaluation
 

@@ -102,9 +107,11 @@ LANG_ID = "pt"
 MODEL_ID = "jonatasgrosman/wav2vec2-large-xlsr-53-portuguese"
 DEVICE = "cuda"
 
-CHARS_TO_IGNORE = [",", "?", "¿", ".", "!", "¡", ";", ":", '""', "%", '"', "�", "ʿ", "·", "჻", "~", "՞",
+CHARS_TO_IGNORE = [",", "?", "¿", ".", "!", "¡", ";", ";", ":", '""', "%", '"', "�", "ʿ", "·", "჻", "~", "՞",
                    "؟", "،", "।", "॥", "«", "»", "„", "“", "”", "「", "」", "‘", "’", "《", "》", "(", ")", "[", "]",
-                   "=", "`", "_", "+", "<", ">", "…", "–", "°", "´", "ʾ", "‹", "›", "©", "®", "—", "→", "。"
+                   "{", "}", "=", "`", "_", "+", "<", ">", "…", "–", "°", "´", "ʾ", "‹", "›", "©", "®", "—", "→", "。",
+                   "、", "﹂", "﹁", "‧", "~", "﹏", ",", "{", "}", "(", ")", "[", "]", "【", "】", "‥", "〽",
+                   "『", "』", "〝", "〟", "⟨", "⟩", "〜", ":", "!", "?", "♪", "؛", "/", "\\", "º", "−", "^", "ʻ", "ˆ"]
 
 test_dataset = load_dataset("common_voice", LANG_ID, split="test")
 

@@ -156,7 +163,7 @@ In the table below I report the Word Error Rate (WER) and the Character Error Rate (CER)
 
 | Model | WER | CER |
 | ------------- | ------------- | ------------- |
-| jonatasgrosman/wav2vec2-large-xlsr-53-portuguese | **11.
+| jonatasgrosman/wav2vec2-large-xlsr-53-portuguese | **11.81%** | **4.06%** |
 | joorock12/wav2vec2-large-xlsr-portuguese-a | 15.52% | 5.12% |
 | joorock12/wav2vec2-large-xlsr-portuguese | 15.95% | 5.31% |
 | gchhablani/wav2vec2-large-xlsr-pt | 17.64% | 6.04% |

config.json CHANGED

@@ -7,6 +7,8 @@
   ],
   "attention_dropout": 0.1,
   "bos_token_id": 1,
+  "codevector_dim": 768,
+  "contrastive_logits_temperature": 0.1,
   "conv_bias": true,
   "conv_dim": [
     512,

@@ -37,12 +39,14 @@
   ],
   "ctc_loss_reduction": "mean",
   "ctc_zero_infinity": true,
+  "diversity_loss_weight": 0.1,
   "do_stable_layer_norm": true,
   "eos_token_id": 2,
   "feat_extract_activation": "gelu",
   "feat_extract_dropout": 0.0,
   "feat_extract_norm": "layer",
   "feat_proj_dropout": 0.05,
+  "feat_quantizer_dropout": 0.0,
   "final_dropout": 0.0,
   "gradient_checkpointing": true,
   "hidden_act": "gelu",

@@ -66,11 +70,15 @@
   "mask_time_selection": "static",
   "model_type": "wav2vec2",
   "num_attention_heads": 16,
+  "num_codevector_groups": 2,
+  "num_codevectors_per_group": 320,
   "num_conv_pos_embedding_groups": 16,
   "num_conv_pos_embeddings": 128,
   "num_feat_extract_layers": 7,
   "num_hidden_layers": 24,
+  "num_negatives": 100,
   "pad_token_id": 0,
-  "
+  "proj_codevector_dim": 768,
+  "transformers_version": "4.7.0.dev0",
   "vocab_size": 46
 }
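The keys added to config.json (codevector_dim, contrastive_logits_temperature, diversity_loss_weight, feat_quantizer_dropout, num_codevector_groups, num_codevectors_per_group, num_negatives, proj_codevector_dim, transformers_version) are the wav2vec 2.0 quantizer and contrastive-pretraining settings that newer transformers releases include in Wav2Vec2Config; they are not used during CTC inference. A minimal sketch for inspecting them after the update, assuming network access to the Hub:

from transformers import Wav2Vec2Config

# Fetch the updated config.json and read the newly added quantizer fields.
config = Wav2Vec2Config.from_pretrained("jonatasgrosman/wav2vec2-large-xlsr-53-portuguese")

print(config.codevector_dim)             # 768
print(config.num_codevector_groups)      # 2
print(config.num_codevectors_per_group)  # 320
print(config.vocab_size)                 # 46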

preprocessor_config.json CHANGED

@@ -1,5 +1,6 @@
 {
   "do_normalize": true,
+  "feature_extractor_type": "Wav2Vec2FeatureExtractor",
   "feature_size": 1,
   "padding_side": "right",
   "padding_value": 0.0,
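The new "feature_extractor_type" entry records which extractor class the file belongs to, so the Auto classes can resolve it without extra hints. A short sketch of that lookup, assuming a recent transformers release:

from transformers import AutoFeatureExtractor

# feature_extractor_type in preprocessor_config.json tells AutoFeatureExtractor
# to instantiate Wav2Vec2FeatureExtractor for this repository.
extractor = AutoFeatureExtractor.from_pretrained("jonatasgrosman/wav2vec2-large-xlsr-53-portuguese")
print(type(extractor).__name__)  # Wav2Vec2FeatureExtractor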

pytorch_model.bin CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:2a34faf4721b64347b454d6b83095b764f25d6fc32df31908929e85a0080b476
 size 1262122455
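What the repository stores here is a Git LFS pointer, so this change only swaps the SHA-256 oid of the new checkpoint; the byte size stays at 1262122455. To check a locally downloaded pytorch_model.bin against the pointer, a small sketch (the local file path is an assumption):

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Hash the file in 1 MiB chunks to avoid loading ~1.2 GB into memory at once.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "2a34faf4721b64347b454d6b83095b764f25d6fc32df31908929e85a0080b476"
print(sha256_of("pytorch_model.bin") == expected)  # True if the download matches the pointer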

vocab.json CHANGED

@@ -1 +1 @@
-{"<pad>": 0, "<s>": 1, "</s>": 2, "<unk>": 3, "|": 4, "
+{"<pad>": 0, "<s>": 1, "</s>": 2, "<unk>": 3, "|": 4, "'": 5, "-": 6, "A": 7, "B": 8, "C": 9, "D": 10, "E": 11, "F": 12, "G": 13, "H": 14, "I": 15, "J": 16, "K": 17, "L": 18, "M": 19, "N": 20, "O": 21, "P": 22, "Q": 23, "R": 24, "S": 25, "T": 26, "U": 27, "V": 28, "W": 29, "X": 30, "Y": 31, "Z": 32, "À": 33, "Á": 34, "Â": 35, "Ã": 36, "Ç": 37, "É": 38, "Ê": 39, "Í": 40, "Ó": 41, "Ô": 42, "Õ": 43, "Ú": 44, "Ü": 45}
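The rebuilt vocabulary has 46 entries: the CTC special tokens, the word delimiter "|", apostrophe, hyphen, A-Z, and the accented letters used in Portuguese, which matches "vocab_size": 46 in config.json. A quick sanity check through the tokenizer, as a sketch:

from transformers import Wav2Vec2CTCTokenizer

tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("jonatasgrosman/wav2vec2-large-xlsr-53-portuguese")

vocab = tokenizer.get_vocab()
print(len(vocab))      # 46, matching config.json
print(vocab["<pad>"])  # 0
print(vocab["Ü"])      # 45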