minjibi committed on
Commit
dbfe252
·
1 Parent(s): 8121657

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +58 -0
app.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Importing all the necessary packages
import nltk
import librosa
import torch
import gradio as gr
from transformers import Wav2Vec2Tokenizer, Wav2Vec2ForCTC

# Fetch the sentence-tokenizer data used by nltk.sent_tokenize at startup
# so the app does not fail on first transcription.
nltk.download("punkt")

# Loading the pre-trained model and the tokenizer.
# NOTE(review): Wav2Vec2Tokenizer is deprecated upstream in favour of
# Wav2Vec2Processor — confirm against the pinned transformers version.
model_name = "shizukanabasho/north2"
tokenizer = Wav2Vec2Tokenizer.from_pretrained(model_name)
model = Wav2Vec2ForCTC.from_pretrained(model_name)
14
def load_data(input_file):
    """Load an audio file and return a mono waveform at 16 kHz.

    Parameters
    ----------
    input_file : str
        Path to the audio file to read.

    Returns
    -------
    numpy.ndarray
        1-D float waveform resampled to 16 kHz, the rate Wav2Vec2 expects.
    """
    # Reading the file; librosa.load defaults to mono output, but guard
    # against a 2-D (channels) array anyway.
    speech, sample_rate = librosa.load(input_file)
    # Collapse stereo to one channel by AVERAGING the channels — summing
    # (the previous behavior) doubles the amplitude and can clip.
    if len(speech.shape) > 1:
        speech = (speech[:, 0] + speech[:, 1]) / 2
    # Resampling the audio at 16 kHz. librosa >= 0.10 makes the sample
    # rates keyword-only, so the old positional call raises TypeError.
    if sample_rate != 16000:
        speech = librosa.resample(speech, orig_sr=sample_rate, target_sr=16000)
    return speech
24
+
25
def correct_casing(input_sentence):
    """Capitalize the first letter of every sentence in *input_sentence*.

    Parameters
    ----------
    input_sentence : str
        Text to re-case (typically an all-lowercase ASR transcript).

    Returns
    -------
    str
        The text with each sentence's first character upper-cased,
        sentences joined by a single space.
    """
    sentences = nltk.sent_tokenize(input_sentence)
    # Upper-case by position rather than str.replace, and join with a
    # space — the previous ''.join ran the sentences together with no
    # separator between them.
    return ' '.join(s[0].upper() + s[1:] if s else s for s in sentences)
29
+
30
def asr_transcript(input_file):
    """Transcribe an uploaded audio file to text with Wav2Vec2.

    Parameters
    ----------
    input_file : str
        Path to the uploaded audio file (Gradio passes a filepath).

    Returns
    -------
    str
        The decoded transcription, in the tokenizer's native casing.
    """
    speech = load_data(input_file)
    # Tokenize the waveform into model input tensors.
    input_values = tokenizer(speech, return_tensors="pt").input_values
    # Inference only: disable autograd so no gradient graph is built,
    # saving memory and time on every request.
    with torch.no_grad():
        logits = model(input_values).logits
    # Greedy CTC decoding: take the most likely token id per timestep.
    predicted_ids = torch.argmax(logits, dim=-1)
    # Map the predicted ids back to words.
    transcription = tokenizer.decode(predicted_ids[0])
    # Letter-casing correction is intentionally left off:
    # transcription = correct_casing(transcription.lower())
    return transcription
44
+
45
# Build and launch the Gradio UI: one uploaded audio file in, one textbox out.
# NOTE(review): gr.inputs/gr.outputs and the source=/optional=/theme="grass"
# arguments belong to the legacy (pre-3.x) Gradio API and were removed in
# later releases — confirm the pinned gradio version before upgrading.
gr.Interface(asr_transcript,
             inputs = gr.inputs.Audio(source="upload", type="filepath", optional=True, label="Upload"),
             outputs = gr.outputs.Textbox(label="Output Text"),
             title="ASR using Wav2Vec2.0",
             description = "This application displays transcribed text for given audio input",
             theme="grass").launch()

# Alternative interface that also accepts microphone input (kept for reference):
# gr.Interface(asr_transcript,
#              inputs = [gr.inputs.Audio(source="microphone", type="filepath", optional=True, label="Speaker"),
#              gr.inputs.Audio(source="upload", type="filepath", optional=True, label="Speaker")],
#              outputs = gr.outputs.Textbox(label="Output Text"),
#              title="ASR using Wav2Vec2.0",
#              description = "This application displays transcribed text for given audio input",
#              theme="grass").launch()