import os
import gradio as gr
import openai
from gtts import gTTS # Google Text To Speech

# Load the OpenAI API key from the environment
openai.api_key = os.environ["OPEN_AI_KEY"]
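# Note: OPEN_AI_KEY is the environment variable name this script reads;
# one way to provide it (assuming a Unix-like shell) is to export it
# before launching, e.g. `export OPEN_AI_KEY="sk-..."`.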

# Takes an audio file recorded by the Microphone component and
# submits the raw audio to OpenAI's Whisper API for
# speech-to-text transcription.
# Input: Input Phrase - Microphone component (filepath)
# Output: User Transcript - Textbox component
def transcribe(audio):
    # Open the recording and pass the file object to the API,
    # closing the file once the request has been made
    with open(audio, "rb") as audio_file:
        transcript = openai.Audio.transcribe("whisper-1", audio_file)

    return transcript["text"]



# Create a Gradio App using Blocks    
with gr.Blocks() as demo:
    with gr.Accordion("Open for Instructions"):
        gr.Markdown(
            """
            # Welcome to the Virtual Therapist Chat Bot!
            * Tell the therapist your problems by recording your query.
            * Submit your query, then follow the chat or listen to the Therapist's advice.
            * When you are ready to respond, clear your last recording and resubmit.
            """)
        
    
    # The first message is a "system" instruction to OpenAI.
    # gr.State keeps a separate copy of the message list for
    # each user session and for every page reload.
    messages = gr.State(value=[{"role": "system", "content": "You are a therapist. Respond in less than 5 sentences."}])

    # Takes the user's transcribed audio as a string and the
    # messages list by reference, then sends the ongoing
    # chat log to OpenAI.
    # Input: User Transcript - Textbox component
    # Output: Chat Transcript - Textbox component
    def botResponse(user_input, messages):
        # adds the user input to the ongoing chat log
        # and submits the log to OpenAI
        messages.append({"role": "user", "content": user_input})
        response = openai.ChatCompletion.create(
          model="gpt-3.5-turbo-0301",
          messages=messages
        )

        # Parse the response from OpenAI and store
        # it in the chat log
        system_message = response["choices"][0]["message"]["content"]
        messages.append({"role": "assistant", "content": system_message})

        # Flatten the messages list into a single string for
        # the chat log, excluding the system prompt
        chat_transcript = ""
        for message in messages:
            if message["role"] != "system":
                chat_transcript += message["role"] + ": " + message["content"] + "\n\n"
    
        return chat_transcript

    # Takes the last message in the chat log (the latest
    # assistant reply) and uses gTTS to convert it into an
    # mp3 file. Returns the absolute path to that file.
    # Input: messages State (by reference)
    # Output: Voice Response - Audio component
    def giveVoice(messages):
        bot_message = messages[-1]

        tts = gTTS(text=bot_message["content"])
        tts.save("temp.mp3")

        # Build an absolute path so the Audio component can locate the file
        new_path = os.path.join(os.getcwd(), "temp.mp3")

        return new_path

    # Creates the Gradio interface objects
    # The submit button triggers a cascade of
    # events that each engage a different 
    # component as input/output
    with gr.Row():
        with gr.Column(scale=1):
            user_audio = gr.Audio(source="microphone", type="filepath", label="Input Phrase")
            submit_btn = gr.Button(value="Transcribe")
            gpt_voice = gr.Audio(label="Voice Response")
        with gr.Column(scale=2):
            user_transcript = gr.Text(label="User Transcript")
            gpt_transcript = gr.Text(label="Chat Transcript")
    submit_btn.click(transcribe, user_audio, user_transcript)
    user_transcript.change(botResponse, [user_transcript, messages], gpt_transcript)
    gpt_transcript.change(giveVoice, messages, gpt_voice)
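    # Event chain: clicking Transcribe runs transcribe() and fills User Transcript;
    # the change to User Transcript runs botResponse() and fills Chat Transcript;
    # the change to Chat Transcript runs giveVoice() and fills Voice Response.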
    
    
# Starts a local web server for the app.
# With share=True, Gradio also creates a temporary
# public link (*.gradio.live) to the demo.
demo.launch(share=False)
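
# Setup sketch (assuming this file is saved as app.py): the calls above
# (openai.Audio.transcribe, openai.ChatCompletion.create, gr.Audio(source=...))
# use the pre-1.0 openai SDK and the Gradio 3.x Audio signature, so pinning
# those versions is the safest bet:
#   pip install "openai<1.0" "gradio<4" gTTS
#   python app.py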