cati committed on
Commit
d1c998d
1 Parent(s): 90f256f
Files changed (3)
  1. .#ctcalign.py +0 -1
  2. app.py +9 -5
  3. ctcalign.py +29 -9
.#ctcalign.py DELETED
@@ -1 +0,0 @@
 
 
app.py CHANGED
@@ -65,13 +65,17 @@ All phoneme durations are measured automatically with no human correction. The p
     """
     )
 
-    audio_file = gr.Audio(type="filepath")
-    transcript_boxx = gr.Textbox(label="Transcript",placeholder="Type or paste the transcript here. Capitalisation and punctuation, if any, will be ignored.")
-    alangmenu = gr.Radio(["Icelandic", "Faroese", "Norwegian"],value="Icelandic")
+    with gr.Row():
+        with gr.Column():
+            transcript_boxx = gr.Textbox(label="Transcript",placeholder="Type or paste the transcript here. Capitalisation and punctuation, if any, will be ignored.")
+            alangmenu = gr.Radio(["Icelandic", "Faroese", "Norwegian"],value="Icelandic")
 
-    al_btn = gr.Button(value="Run forced alignment")
+            audio_file = gr.Audio(type="filepath")
 
-    output_box = gr.Textbox(label="Forced alignment output")
+            al_btn = gr.Button(value="Run forced alignment")
+
+        with gr.Column():
+            output_box = gr.Textbox(label="Forced alignment output")
 
 
 
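For context, a minimal sketch of how the rearranged components might be wired to the aligner. The surrounding gr.Blocks() context, the align_and_display callback, and the al_btn.click(...) hookup are assumptions for illustration only; the hunk above contributes just the Row/Column layout and the component definitions.

import gradio as gr

# Hypothetical callback for illustration; the real handler lives elsewhere in app.py.
def align_and_display(audio_path, transcript, language):
    # Would run CTC forced alignment on audio_path with the given transcript and language.
    return f"(alignment of {audio_path!r} as {language} goes here)"

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            transcript_boxx = gr.Textbox(
                label="Transcript",
                placeholder="Type or paste the transcript here. "
                            "Capitalisation and punctuation, if any, will be ignored.",
            )
            alangmenu = gr.Radio(["Icelandic", "Faroese", "Norwegian"], value="Icelandic")
            audio_file = gr.Audio(type="filepath")
            al_btn = gr.Button(value="Run forced alignment")
        with gr.Column():
            output_box = gr.Textbox(label="Forced alignment output")

    # Assumed wiring: run the aligner on click and show the result in the right-hand column.
    al_btn.click(align_and_display,
                 inputs=[audio_file, transcript_boxx, alangmenu],
                 outputs=output_box)

if __name__ == "__main__":
    demo.launch()

The split keeps all inputs (transcript, language, audio, button) in the left column and the alignment output in the right column, matching the Row/Column structure added in the hunk.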
 
ctcalign.py CHANGED
@@ -11,23 +11,43 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 torch.random.manual_seed(0)
 
 # info: https://huggingface.co/carlosdanielhernandezmena/wav2vec2-large-xlsr-53-icelandic-ep10-1000h/blob/main/vocab.json
-MODEL_PATH="carlosdanielhernandezmena/wav2vec2-large-xlsr-53-icelandic-ep10-1000h"
-model_blank_token = '[PAD]' # important to know for CTC decoding
-model_word_separator = '|'
-labels_dict = {"f": 0, "a": 1, "é": 2, "t": 3, "o": 4, "n": 5, "e": 6, "y": 8, "k": 9, "j": 10, "u": 11, "d": 12, "w": 13, "l": 14, "ú": 15, "q": 16, "g": 17, "í": 18, "s": 19, "r": 20, "ý": 21, "i": 22, "z": 23, "m": 24, "h": 25, "ó": 26, "þ": 27, "æ": 28, "c": 29, "á": 30, "v": 31, "b": 32, "ð": 33, "x": 34, "ö": 35, "p": 36, "|": 7, "[UNK]": 37, "[PAD]": 38}
+is_MODEL_PATH="carlosdanielhernandezmena/wav2vec2-large-xlsr-53-icelandic-ep10-1000h"
+is_model_blank_token = '[PAD]' # important to know for CTC decoding
+is_model_word_separator = '|'
+is_labels_dict = {"f": 0, "a": 1, "é": 2, "t": 3, "o": 4, "n": 5, "e": 6, "y": 8, "k": 9, "j": 10, "u": 11, "d": 12, "w": 13, "l": 14, "ú": 15, "q": 16, "g": 17, "í": 18, "s": 19, "r": 20, "ý": 21, "i": 22, "z": 23, "m": 24, "h": 25, "ó": 26, "þ": 27, "æ": 28, "c": 29, "á": 30, "v": 31, "b": 32, "ð": 33, "x": 34, "ö": 35, "p": 36, "|": 7, "[UNK]": 37, "[PAD]": 38}
 
-model = Wav2Vec2ForCTC.from_pretrained(MODEL_PATH).to(device)
-processor = Wav2Vec2Processor.from_pretrained(MODEL_PATH)
-inverse_dict = {v:k for k,v in labels_dict.items()}
-all_labels = tuple(labels_dict.keys())
-blank_id = labels_dict[model_blank_token]
+is_model = Wav2Vec2ForCTC.from_pretrained(is_MODEL_PATH).to(device)
+is_processor = Wav2Vec2Processor.from_pretrained(is_MODEL_PATH)
+is_inverse_dict = {v:k for k,v in is_labels_dict.items()}
+is_all_labels = tuple(is_labels_dict.keys())
+is_blank_id = is_labels_dict[is_model_blank_token]
 
 
 
+fo_MODEL_PATH="carlosdanielhernandezmena/wav2vec2-large-xlsr-53-faroese-100h"
+fo_model_blank_token = '[PAD]' # important to know for CTC decoding
+fo_model_word_separator = '|'
+fo_labels_dict = {"w": 0, "i": 1, "6": 2, "s": 3, "_": 4, "k": 5, "l": 6, "ú": 7, "2": 8, "4": 9, "d": 10, "z": 11, "3": 12, "ð": 13, "t": 15, "ø": 16, "x": 17, "p": 18, "o": 19, "æ": 20, "n": 21, "f": 22, "á": 23, "5": 24, "g": 25, "ý": 26, "r": 27, "é": 28, "u": 29, "ü": 30, "y": 31, "í": 32, "h": 33, "q": 34, "b": 35, "e": 36, "v": 37, "-": 38, "c": 39, "j": 40, ".": 41, "ó": 42, "'": 43, "m": 44, "a": 45, "|": 14, "[UNK]": 46, "[PAD]": 47}
 
+fo_model = Wav2Vec2ForCTC.from_pretrained(fo_MODEL_PATH).to(device)
+fo_processor = Wav2Vec2Processor.from_pretrained(fo_MODEL_PATH)
+fo_inverse_dict = {v:k for k,v in fo_labels_dict.items()}
+fo_all_labels = tuple(fo_labels_dict.keys())
+fo_blank_id = fo_labels_dict[fo_model_blank_token]
 
 
 
+no_MODEL_PATH="NbAiLab/nb-wav2vec2-1b-bokmaal"
+no_model_blank_token = '[PAD]' # important to know for CTC decoding
+no_model_word_separator = '|'
+no_labels_dict = {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5, "f": 6, "g": 7, "h": 8, "i": 9, "j": 10, "k": 11, "l": 12, "m": 13, "n": 14, "o": 15, "p": 16, "q": 17, "r": 18, "s": 19, "t": 20, "u": 21, "v": 22, "w": 23, "x": 24, "y": 25, "z": 26, "å": 27, "æ": 28, "ø": 29, "|": 0, "[UNK]": 30, "[PAD]": 31}
+
+no_model = Wav2Vec2ForCTC.from_pretrained(no_MODEL_PATH).to(device)
+no_processor = Wav2Vec2Processor.from_pretrained(no_MODEL_PATH)
+no_inverse_dict = {v:k for k,v in no_labels_dict.items()}
+no_all_labels = tuple(no_labels_dict.keys())
+no_blank_id = no_labels_dict[no_model_blank_token]
+
 
 
 
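With all three per-language bundles now defined at module level, the rest of ctcalign.py presumably needs a way to select one by the language chosen in app.py. A minimal sketch, meant to sit below the assignments above and assuming a LANG2MODEL table keyed by the radio labels; neither the table nor get_aligner_assets() is part of this commit.

# Hypothetical lookup (not in this commit): bundles each language's preloaded
# model, processor and vocabulary so the aligner can be parameterised by the
# radio-button value passed in from app.py.
LANG2MODEL = {
    "Icelandic": dict(model=is_model, processor=is_processor,
                      labels=is_labels_dict, inverse=is_inverse_dict,
                      blank_id=is_blank_id, word_sep=is_model_word_separator),
    "Faroese":   dict(model=fo_model, processor=fo_processor,
                      labels=fo_labels_dict, inverse=fo_inverse_dict,
                      blank_id=fo_blank_id, word_sep=fo_model_word_separator),
    "Norwegian": dict(model=no_model, processor=no_processor,
                      labels=no_labels_dict, inverse=no_inverse_dict,
                      blank_id=no_blank_id, word_sep=no_model_word_separator),
}

def get_aligner_assets(language):
    # Return the preloaded objects for one language; raises KeyError for
    # anything outside the three options offered in the app.
    return LANG2MODEL[language]

The click handler in app.py could then call get_aligner_assets() with the selected radio value before running the alignment, rather than touching the language-prefixed globals directly.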