Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -19,10 +19,23 @@ def get_completion(prompt, model='gpt-3.5-turbo'):
|
|
19 |
return response.choices[0].message['content']
|
20 |
|
21 |
def transcribe(audio):
|
22 |
-
|
23 |
-
|
24 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
25 |
|
|
|
|
|
|
|
|
|
26 |
|
27 |
|
28 |
|
@@ -38,14 +51,6 @@ def transcribe(audio):
|
|
38 |
# live=True).launch()
|
39 |
|
40 |
|
41 |
-
def speech_to_text(speech):
    """Transcribe *speech* with the module-level `asr` pipeline and return the text."""
    # The pipeline returns a dict; "text" holds the transcript string.
    return asr(speech)["text"]
|
44 |
-
|
45 |
-
|
46 |
-
def text_to_sentiment(text):
    """Run *text* through the module-level `classifier` and return the top label."""
    top_prediction = classifier(text)[0]
    return top_prediction["label"]
|
48 |
-
|
49 |
|
50 |
demo = gr.Blocks()
|
51 |
|
|
|
19 |
return response.choices[0].message['content']
|
20 |
|
21 |
def transcribe(audio):
    """Transcribe an audio file with the module-level Whisper `model`.

    `audio` is a path accepted by `whisper.load_audio`. Returns the decoded
    transcript text; prints the detected language as a side effect.
    """
    # Load the waveform, then pad or trim it to Whisper's fixed 30-second window.
    waveform = whisper.load_audio(audio)
    waveform = whisper.pad_or_trim(waveform)

    # Compute the log-Mel spectrogram and place it on the model's device.
    mel = whisper.log_mel_spectrogram(waveform).to(model.device)

    # Ask the model which language is being spoken; probs maps language -> score.
    _, probs = model.detect_language(mel)
    print(f"Detected language: {max(probs, key=probs.get)}")

    # Decode with fp16 disabled (safe on CPU-only hosts) and return the text.
    options = whisper.DecodingOptions(fp16=False)
    result = whisper.decode(model, mel, options)
    return result.text
|
39 |
|
40 |
|
41 |
|
|
|
51 |
# live=True).launch()
|
52 |
|
53 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
54 |
|
55 |
demo = gr.Blocks()
|
56 |
|