Commit e66a3e4
Parent(s): 35fdbb6

update model

Files changed:
- README.md (+13 -6)
- config.json (+11 -4)
- preprocessor_config.json (+1 -0)
- pytorch_model.bin (+2 -2)
- vocab.json (+1 -1)
README.md CHANGED

```diff
@@ -24,10 +24,10 @@ model-index:
     metrics:
     - name: Test WER
       type: wer
-      value:
+      value: 8.81
     - name: Test CER
       type: cer
-      value:
+      value: 2.70
 ---
 
 # Wav2Vec2-Large-XLSR-53-Spanish
@@ -49,7 +49,7 @@ from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
 
 LANG_ID = "es"
 MODEL_ID = "jonatasgrosman/wav2vec2-large-xlsr-53-spanish"
-SAMPLES =
+SAMPLES = 10
 
 test_dataset = load_dataset("common_voice", LANG_ID, split=f"test[:{SAMPLES}]")
 
@@ -86,6 +86,11 @@ for i, predicted_sentence in enumerate(predicted_sentences):
 | PARA VISITAR CONTACTAR PRIMERO CON LA DIRECCIÓN. | PARA VISITAR CONTACTAR PRIMERO CON LA DIRECCIÓN |
 | TRES | TRES |
 | REALIZÓ LOS ESTUDIOS PRIMARIOS EN FRANCIA, PARA CONTINUAR LUEGO EN ESPAÑA. | REALIZÓ LOS ESTUDIOS PRIMARIOS EN FRANCIA PARA CONTINUAR LUEGO EN ESPAÑA |
+| EN LOS AÑOS QUE SIGUIERON, ESTE TRABAJO ESPARTA PRODUJO DOCENAS DE BUENOS JUGADORES. | EN LOS AÑOS QUE SIGUIERON ESTE TRABAJO ESPARTA PRODUJO DOCENA DE BUENOS JUGADORES |
+| SE ESTÁ TRATANDO DE RECUPERAR SU CULTIVO EN LAS ISLAS CANARIAS. | SE ESTÓ TRATANDO DE RECUPERAR SU CULTIVO EN LAS ISLAS CANARIAS |
+| SÍ | SÍ |
+| "FUE ""SACADA"" DE LA SERIE EN EL EPISODIO ""LEAD"", EN QUE ALEXANDRA CABOT REGRESÓ." | FUE SACADA DE LA SERIE EN EL EPISODIO LEED EN QUE ALEXANDRA KAOT REGRESÓ |
+| SE UBICAN ESPECÍFICAMENTE EN EL VALLE DE MOKA, EN LA PROVINCIA DE BIOKO SUR. | SE UBICAN ESPECÍFICAMENTE EN EL VALLE DE MOCA EN LA PROVINCIA DE PÍOCOSUR |
 
 ## Evaluation
 
@@ -102,9 +107,11 @@ LANG_ID = "es"
 MODEL_ID = "jonatasgrosman/wav2vec2-large-xlsr-53-spanish"
 DEVICE = "cuda"
 
-CHARS_TO_IGNORE = [",", "?", "¿", ".", "!", "¡", ";", ":", '""', "%", '"', "�", "ʿ", "·", "჻", "~", "՞",
+CHARS_TO_IGNORE = [",", "?", "¿", ".", "!", "¡", ";", ";", ":", '""', "%", '"', "�", "ʿ", "·", "჻", "~", "՞",
                    "؟", "،", "।", "॥", "«", "»", "„", "“", "”", "「", "」", "‘", "’", "《", "》", "(", ")", "[", "]",
-                   "=", "`", "_", "+", "<", ">", "…", "–", "°", "´", "ʾ", "‹", "›", "©", "®", "—", "→", "。"
+                   "{", "}", "=", "`", "_", "+", "<", ">", "…", "–", "°", "´", "ʾ", "‹", "›", "©", "®", "—", "→", "。",
+                   "、", "﹂", "﹁", "‧", "~", "﹏", ",", "{", "}", "(", ")", "[", "]", "【", "】", "‥", "〽",
+                   "『", "』", "〝", "〟", "⟨", "⟩", "〜", ":", "!", "?", "♪", "؛", "/", "\\", "º", "−", "^", "ʻ", "ˆ"]
 
 test_dataset = load_dataset("common_voice", LANG_ID, split="test")
 
@@ -156,7 +163,7 @@ In the table below I report the Word Error Rate (WER) and the Character Error Rate
 
 | Model | WER | CER |
 | ------------- | ------------- | ------------- |
-| jonatasgrosman/wav2vec2-large-xlsr-53-spanish | **
+| jonatasgrosman/wav2vec2-large-xlsr-53-spanish | **8.81%** | **2.70%** |
 | pcuenq/wav2vec2-large-xlsr-53-es | 10.55% | 3.20% |
 | facebook/wav2vec2-large-xlsr-53-spanish | 16.99% | 5.40% |
 | mrm8488/wav2vec2-large-xlsr-53-spanish | 19.20% | 5.96% |
```
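The evaluation hunk above only shows the CHARS_TO_IGNORE list being extended, not how it is consumed, since the README's full evaluation script is not part of this diff. As a hedged sketch of the usual pattern (the helper name, the abbreviated list, and the uppercasing step are illustrative assumptions), the list is typically collapsed into a single regex and used to normalize transcripts before WER/CER are computed:

```python
import re

# Abbreviated stand-in for the full CHARS_TO_IGNORE list shown in the diff above.
CHARS_TO_IGNORE = [",", "?", "¿", ".", "!", "¡", ";", ":", "«", "»", "…", "—"]

# Collapse the list into a single character class, escaping regex metacharacters.
chars_to_ignore_regex = f"[{re.escape(''.join(CHARS_TO_IGNORE))}]"

def normalize(text: str) -> str:
    # Strip the ignored characters and uppercase, matching the style of the
    # reference/prediction table above (the real script may differ in detail).
    return re.sub(chars_to_ignore_regex, "", text).upper().strip()

print(normalize("Realizó los estudios primarios en Francia, para continuar luego en España."))
# -> REALIZÓ LOS ESTUDIOS PRIMARIOS EN FRANCIA PARA CONTINUAR LUEGO EN ESPAÑA
```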
config.json CHANGED

```diff
@@ -43,20 +43,27 @@
   "feat_extract_dropout": 0.0,
   "feat_extract_norm": "layer",
   "feat_proj_dropout": 0.05,
-  "final_dropout": 0.
+  "final_dropout": 0.0,
   "gradient_checkpointing": true,
   "hidden_act": "gelu",
   "hidden_dropout": 0.05,
-  "hidden_dropout_prob": 0.1,
   "hidden_size": 1024,
   "initializer_range": 0.02,
   "intermediate_size": 4096,
   "layer_norm_eps": 1e-05,
   "layerdrop": 0.05,
+  "mask_channel_length": 10,
+  "mask_channel_min_space": 1,
+  "mask_channel_other": 0.0,
+  "mask_channel_prob": 0.0,
+  "mask_channel_selection": "static",
   "mask_feature_length": 10,
   "mask_feature_prob": 0.0,
   "mask_time_length": 10,
+  "mask_time_min_space": 1,
+  "mask_time_other": 0.0,
   "mask_time_prob": 0.05,
+  "mask_time_selection": "static",
   "model_type": "wav2vec2",
   "num_attention_heads": 16,
   "num_conv_pos_embedding_groups": 16,
@@ -64,6 +71,6 @@
   "num_feat_extract_layers": 7,
   "num_hidden_layers": 24,
   "pad_token_id": 0,
-  "transformers_version": "4.
-  "vocab_size":
+  "transformers_version": "4.7.0.dev0",
+  "vocab_size": 41
 }
```
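For reference, the added and changed fields above can be read back after loading the published config. A minimal sketch, assuming a transformers release in which Wav2Vec2Config exposes these keys as attributes (the standard dropout, masking, and vocab fields do map this way):

```python
from transformers import Wav2Vec2Config

# Load the config published by this commit and read back the updated values.
config = Wav2Vec2Config.from_pretrained("jonatasgrosman/wav2vec2-large-xlsr-53-spanish")

print(config.vocab_size)        # 41, matching the rewritten vocab.json below
print(config.final_dropout)     # 0.0
print(config.mask_time_prob)    # 0.05
print(config.mask_time_length)  # 10
```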
preprocessor_config.json CHANGED

```diff
@@ -1,5 +1,6 @@
 {
   "do_normalize": true,
+  "feature_extractor_type": "Wav2Vec2FeatureExtractor",
   "feature_size": 1,
   "padding_side": "right",
   "padding_value": 0.0,
```
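The only change here is the new "feature_extractor_type" key, which is what lets the generic Auto class resolve the concrete feature-extractor implementation for this repo. A hedged sketch, assuming a transformers release that ships AutoFeatureExtractor:

```python
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor

# With "feature_extractor_type" present, the Auto class can pick the right
# implementation without the caller naming it explicitly.
feature_extractor = AutoFeatureExtractor.from_pretrained("jonatasgrosman/wav2vec2-large-xlsr-53-spanish")
assert isinstance(feature_extractor, Wav2Vec2FeatureExtractor)

# The remaining preprocessor_config.json values are exposed as attributes.
print(feature_extractor.do_normalize, feature_extractor.feature_size, feature_extractor.padding_value)
```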
pytorch_model.bin CHANGED

```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:41c110e55d2eac8c79486ad87dbe8f9527ed034fe087a6adf03c891eeba914c1
+size 1262101911
```
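The pytorch_model.bin entry is a Git LFS pointer, so the diff only swaps the object ID and size, not the weights themselves. A small sketch of checking a downloaded copy against the new pointer (the local file path is an assumed example):

```python
import hashlib
import os

# Values copied from the new LFS pointer above.
EXPECTED_OID = "41c110e55d2eac8c79486ad87dbe8f9527ed034fe087a6adf03c891eeba914c1"
EXPECTED_SIZE = 1262101911

path = "pytorch_model.bin"  # assumed local download location

sha256 = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)

print(os.path.getsize(path) == EXPECTED_SIZE and sha256.hexdigest() == EXPECTED_OID)
```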
vocab.json CHANGED

```diff
@@ -1 +1 @@
-{"<pad>": 0, "<s>": 1, "</s>": 2, "<unk>": 3, "|": 4, "
+{"<pad>": 0, "<s>": 1, "</s>": 2, "<unk>": 3, "|": 4, "'": 5, "-": 6, "A": 7, "B": 8, "C": 9, "D": 10, "E": 11, "F": 12, "G": 13, "H": 14, "I": 15, "J": 16, "K": 17, "L": 18, "M": 19, "N": 20, "O": 21, "P": 22, "Q": 23, "R": 24, "S": 25, "T": 26, "U": 27, "V": 28, "W": 29, "X": 30, "Y": 31, "Z": 32, "Á": 33, "É": 34, "Í": 35, "Ñ": 36, "Ó": 37, "Ö": 38, "Ú": 39, "Ü": 40}
```
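The rewritten vocab.json defines 41 CTC tokens (the special tokens, "|" as the word delimiter, apostrophe, hyphen, A-Z, and the accented Spanish letters), which lines up with "vocab_size": 41 in config.json above. A quick sketch for checking a local copy (the file path is an assumed example):

```python
import json

with open("vocab.json", encoding="utf-8") as f:  # assumed local copy of the file above
    vocab = json.load(f)

print(len(vocab))  # 41, matching "vocab_size" in config.json
print(sorted(vocab, key=vocab.get)[:7])  # ['<pad>', '<s>', '</s>', '<unk>', '|', "'", '-']
```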