barbaroo committed
Commit f9935f2
Parent(s): 502159a

Update app.py

Files changed (1)
  1. app.py +10 -5
app.py CHANGED
@@ -15,7 +15,7 @@ else:
     p = pipeline("automatic-speech-recognition",
                  model="carlosdanielhernandezmena/wav2vec2-large-xlsr-53-faroese-100h")
 
-    chunk_size = 30 # Adjust the chunk size as needed
+    chunk_size = 10 # Adjust the chunk size as needed
 
 def transcribe(audio, state="", uploaded_audio=None):
     if uploaded_audio is not None:
@@ -25,11 +25,15 @@ def transcribe(audio, state="", uploaded_audio=None):
 
     try:
         state += "Transcribing...\n"
-        chunks = [audio[i:i + chunk_size] for i in range(0, len(audio), chunk_size)]
-        for chunk in chunks:
-            text = p(chunk)["text"]
+        if len(audio) <= chunk_size:
+            text = p(audio)["text"]
             state += text + "\n"
-            time.sleep(1) # Simulate processing time for each chunk
+        else:
+            chunks = [audio[i:i + chunk_size] for i in range(0, len(audio), chunk_size)]
+            for chunk in chunks:
+                text = p(chunk)["text"]
+                state += text + "\n"
+                time.sleep(1) # Simulate processing time for each chunk
         return state, state
     except Exception as e:
         return "An error occurred during transcription.", state # Handle other exceptions
@@ -47,3 +51,4 @@ gr.Interface(
     ],
     live=True
 ).launch()
+
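For reference, below is a minimal, self-contained sketch of the pattern this commit settles on: transcribe a short input in a single pass, and split a longer one into fixed-size chunks that are transcribed one after another. The function name transcribe_chunked, the assumption that the audio arrives as a 1-D float32 NumPy array at 16 kHz, and the interpretation of the chunk size as seconds of audio are choices made for this sketch only; the committed app.py slices the raw audio object directly by element count and keeps a running Gradio state string.

# Sketch of the "short input: one pass / long input: chunked" flow added in this commit.
# Assumptions (not part of the diff): audio is a 1-D float32 NumPy array sampled at
# 16 kHz, and the chunk size is taken as seconds of audio rather than array elements.
import numpy as np
from transformers import pipeline

SAMPLING_RATE = 16_000                         # wav2vec2 XLSR models expect 16 kHz input
CHUNK_SECONDS = 10                             # mirrors chunk_size = 10 from the commit
CHUNK_SAMPLES = CHUNK_SECONDS * SAMPLING_RATE  # chunk length in samples

p = pipeline(
    "automatic-speech-recognition",
    model="carlosdanielhernandezmena/wav2vec2-large-xlsr-53-faroese-100h",
)

def transcribe_chunked(audio: np.ndarray) -> str:
    """Return a running transcript, one line per processed piece of audio."""
    state = "Transcribing...\n"
    if len(audio) <= CHUNK_SAMPLES:
        # Short recording: a single pass through the pipeline is enough.
        state += p({"raw": audio, "sampling_rate": SAMPLING_RATE})["text"] + "\n"
    else:
        # Long recording: fixed-size slices, transcribed one after another.
        for start in range(0, len(audio), CHUNK_SAMPLES):
            chunk = audio[start:start + CHUNK_SAMPLES]
            state += p({"raw": chunk, "sampling_rate": SAMPLING_RATE})["text"] + "\n"
    return state

The dict form {"raw": ..., "sampling_rate": ...} is one of the in-memory input types the transformers automatic-speech-recognition pipeline accepts; app.py instead passes the audio slices straight to p(), which works as long as they are already at the sampling rate the model expects.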