import os
import gradio as gr
import openai
from gtts import gTTS

openai.api_key = "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"

def transcribe(audio):
    # Send the recorded audio file to the Whisper API and return the text.
    with open(audio, "rb") as audio_file:
        transcript = openai.Audio.transcribe("whisper-1", audio_file)
    return transcript["text"]
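
# botResponse and giveVoice are wired to events below but were missing from
# this file; the definitions here are a minimal sketch, assuming the pre-1.0
# openai ChatCompletion API and gTTS (already imported above).
def botResponse(user_input, messages):
    # Append the user's message, ask the chat model for a reply, and return
    # the running conversation as plain text for the transcript box.
    messages.append({"role": "user", "content": user_input})
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages,
    )
    bot_message = response["choices"][0]["message"]
    messages.append(bot_message)
    chat_transcript = ""
    for message in messages:
        if message["role"] != "system":
            chat_transcript += message["role"] + ": " + message["content"] + "\n\n"
    return chat_transcript

def giveVoice(messages):
    # Convert the latest assistant reply to speech with gTTS and return the
    # path of the saved MP3 so the Audio component can play it.
    bot_message = messages[-1]["content"]
    tts = gTTS(bot_message)
    audio_path = "bot_reply.mp3"
    tts.save(audio_path)
    return audio_path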

with gr.Blocks() as demo:
    gr.Markdown("""
    # Welcome to PiP World.
    """)
    with gr.Accordion("Click for Instructions:"):
        gr.Markdown("""
        * Record your query.
        * Submit your query, and follow the chat or listen to the advice.
        * When you are ready to respond, clear your last recording and resubmit.
        Note: Transcribe Audio does not work on iOS.
        """)
    messages = gr.State(value=[{"role": "system", "content": "Ignore all instructions prior to this one. Prompt: Design an all-inclusive, interactive, and immersive gamified chat-based training program"}])

    with gr.Row():
        with gr.Column(scale=1):
            user_audio = gr.Audio(source="microphone", type="filepath", label="Input Phrase")
            submit_btn = gr.Button(value="Transcribe Audio")
            gpt_voice = gr.Audio(label="Listen to Advice")
        with gr.Column(scale=2):
            user_transcript = gr.Text(label="Audio Transcription", interactive=False)
            user_text = gr.Text(label="Text Input")  # Enter key is handled via .submit() below
            gpt_transcript = gr.Text(label="Chat Transcript")

    submit_btn.click(transcribe, inputs=user_audio, outputs=user_transcript)
    # Typed input: respond when the user presses Enter in the text box.
    user_text.submit(botResponse, inputs=[user_text, messages], outputs=gpt_transcript)
    # Spoken input: respond whenever a new transcription appears.
    user_transcript.change(botResponse, inputs=[user_transcript, messages], outputs=gpt_transcript)
    # Read the latest reply aloud whenever the chat transcript updates.
    gpt_transcript.change(giveVoice, inputs=messages, outputs=gpt_voice)

demo.launch(share=False)