Rachid Ammari committed on
Commit dc1b498 · 1 Parent(s): d414f98

fixed examples

Files changed (1)
  1. app.py +1 -2
app.py CHANGED
@@ -7,7 +7,6 @@ wav2vec_fr_model = pipeline("automatic-speech-recognition", model="facebook/wav2
 whisper_model = whisper.load_model("base")
 
 def transcribe_audio(language=None, mic=None, file=None):
-    print(language)
     if mic is not None:
         audio = mic
     elif file is not None:
@@ -34,7 +33,7 @@ This Space allows easy comparisons for transcribed texts between Facebook's Wav2
 (Even if Whisper includes a language detection, here we have decided to select the language to speed up the computation and to focus only on the quality of the transcriptions. The default language is english)
 """
 article = "Check out [the OpenAI Whisper model](https://github.com/openai/whisper) and [the Facebook Wav2vec model](https://ai.facebook.com/blog/wav2vec-20-learning-the-structure-of-speech-from-raw-audio/) that this demo is based off of."
-examples = [["english_sentence.flac"], ["2022-a-Droite-un-fauteuil-pour-trois-3034044.mp3000.mp3"]]
+examples = [["en", None, "english_sentence.flac"], ["fr", None, "2022-a-Droite-un-fauteuil-pour-trois-3034044.mp3000.mp3"]]
 
 gr.Interface(
     fn=transcribe_audio,
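
Why each example row now has three values: in Gradio, `examples` must supply one value per input component, in the same order as the wrapped function's parameters, so a row needs a language code, a placeholder for the microphone input, and a file path to match `transcribe_audio(language, mic, file)`. The sketch below shows that mapping; it is illustrative only, and the component choices and labels are assumptions, not the Space's actual code.

# Minimal sketch (assumed layout; the real app.py may declare its inputs differently).
import gradio as gr

def transcribe_audio(language=None, mic=None, file=None):
    # Stub: the actual Space runs Whisper and Wav2Vec2 on the chosen audio.
    audio = mic if mic is not None else file
    return f"language={language}, audio={audio}"

demo = gr.Interface(
    fn=transcribe_audio,
    inputs=[
        gr.Dropdown(choices=["en", "fr"], value="en", label="Language"),  # -> language
        gr.Audio(type="filepath", label="Microphone"),                    # -> mic
        gr.Audio(type="filepath", label="Audio file"),                    # -> file
    ],
    outputs="text",
    # Each row is [language, mic, file]; None fills the unused microphone slot.
    examples=[
        ["en", None, "english_sentence.flac"],
        ["fr", None, "2022-a-Droite-un-fauteuil-pour-trois-3034044.mp3000.mp3"],
    ],
)

if __name__ == "__main__":
    demo.launch()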