JohnsonManuel committed on
Commit
28be9e8
1 Parent(s): d3c3317

Added Timestamps option

Browse files
Files changed (1) hide show
  1. app.py +19 -6
app.py CHANGED
@@ -5,21 +5,34 @@ import gradio as gr
5
  model = whisper.load_model('base')
6
 
7
 
8
- def transcribe(inputs, task):
9
- if inputs is None:
10
- raise gr.Error("No audio file submitted! Please upload or record an audio file before submitting your request.")
11
 
 
 
 
 
12
  result = model.transcribe(inputs)
13
- return result["text"]
 
 
 
 
 
 
 
 
 
 
 
14
 
15
 
16
  interface = gr.Interface(
17
  fn=transcribe,
18
- inputs=gr.Audio(sources=["upload"],type="filepath"),
 
19
  outputs="text",
20
  title="Whisper Large V3: Transcribe Audio",
21
  description=(
22
- "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the OpenAI Whisper"
23
  )
24
  )
25
 
 
5
  model = whisper.load_model('base')
6
 
7
 
 
 
 
8
 
9
+ def transcribe(inputs , timestamp):
10
+ if inputs is None:
11
+ raise gr.Error("No audio file submitted! Please upload or record an audio file before submitting your request.")
12
+ output = ""
13
  result = model.transcribe(inputs)
14
+ if timestamp == "Yes":
15
+ for indx, segment in enumerate(result['segments']):
16
+ output += str(datetime.timedelta (seconds=segment['start'])) +" "+ str(datetime.timedelta (seconds=segment['end'])) + "\n"
17
+ output += segment['text'].strip() + '\n'
18
+ else:
19
+ output = result["text"]
20
+
21
+
22
+ print(result)
23
+ return output
24
+
25
+
26
 
27
 
28
  interface = gr.Interface(
29
  fn=transcribe,
30
+ inputs=[gr.Audio(sources=["upload"],type="filepath"),
31
+ gr.Radio(["Yes", "No"], label="Timestamp", info="Displays with timestamp if needed."),],
32
  outputs="text",
33
  title="Whisper Large V3: Transcribe Audio",
34
  description=(
35
+ "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the OpenAI Whisper API"
36
  )
37
  )
38