import gradio as gr


def to_audioClassification():
    """Show the audio classification view and hide the realtime view."""
    return {
        audio_classification: gr.Row(visible=True),
        realtime_classification: gr.Row(visible=False),
    }


def to_realtimeAudioClassification():
    """Show the realtime view and hide the audio classification view."""
    return {
        audio_classification: gr.Row(visible=False),
        realtime_classification: gr.Row(visible=True),
    }
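

# --- Hedged sketch (not in the original snippet): a stand-in classifier. ---
# The model code that fills the `gr.Label` outputs is not part of this file, so
# `classify_audio` is a hypothetical helper built on the Hugging Face
# `transformers` audio-classification pipeline; swap in the real model as needed.
_classifier = None  # loaded lazily so the UI can start without the model


def classify_audio(filepath):
    """Return a {label: score} dict for one audio file (assumed helper)."""
    global _classifier
    if filepath is None:  # nothing recorded or uploaded yet
        return {}
    if _classifier is None:
        from transformers import pipeline  # assumed dependency
        _classifier = pipeline("audio-classification")
    predictions = _classifier(filepath)
    # gr.Label renders a dict of label -> confidence scores.
    return {p["label"]: p["score"] for p in predictions}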


# Note: this snippet uses the Gradio 3.x Audio API (`source=`; Gradio 4.x renamed it `sources`).
with gr.Blocks() as demo:

    with gr.Row():
        btn0 = gr.Button("Audio Classification", scale=1, size="lg")
        btn1 = gr.Button("Realtime Audio Classification", scale=1, size="lg")

    # Shown by default; the two buttons above toggle between the views.
    with gr.Row(visible=True) as audio_classification:
        with gr.Column(min_width=700):
            with gr.Accordion("Record Audio", open=True):
                inputRecord = gr.Audio(label="Audio Input", source="microphone", type="filepath")
            with gr.Accordion("Upload a file", open=False):
                inputUpload = gr.Audio(label="Audio Input", source="upload", type="filepath")
            clearBtn = gr.ClearButton([inputRecord, inputUpload])
        with gr.Column(min_width=700):
            output = gr.Label(label="Audio Classification")
            btn = gr.Button(value="Generate Audio")
            audioOutput = gr.Audio(label="Audio Output", interactive=False)

    with gr.Row(visible=False) as realtime_classification:
        with gr.Column(min_width=700):
            # `streaming=True` sends microphone chunks to the server as they are recorded.
            inputStream = gr.Audio(label="Audio Input", source="microphone", type="filepath", streaming=True)
            historyOutput = gr.Textbox(label="History", interactive=False)
            # historyOutput = gr.Label(label="History")
        with gr.Column(min_width=700):
            realtimeOutput = gr.Label(label="Audio Classification")


    btn0.click(fn=to_audioClassification, outputs=[audio_classification, realtime_classification])
    btn1.click(fn=to_realtimeAudioClassification, outputs=[audio_classification, realtime_classification])
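
    # --- Hedged wiring (assumption; these event hooks are not in the original snippet). ---
    # Classify when a recording stops or a file is uploaded, and on each streamed chunk.
    inputRecord.stop_recording(fn=classify_audio, inputs=inputRecord, outputs=output)
    inputUpload.upload(fn=classify_audio, inputs=inputUpload, outputs=output)
    inputStream.stream(fn=classify_audio, inputs=inputStream, outputs=realtimeOutput)
    # `btn` ("Generate Audio") and `audioOutput` are left unwired; their handler
    # is not part of this snippet.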
    

if __name__ == "__main__":
    demo.queue()
    demo.launch()
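
# A typical launch: `python app.py` (filename assumed); Gradio serves on
# http://127.0.0.1:7860 by default. `demo.queue()` is required for streaming.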