import os

import gradio as gr
import openai
from gtts import gTTS

# API key for the (legacy, pre-1.0) OpenAI SDK and the Gradio login password,
# both read from environment variables.
openai.api_key = os.environ["OPEN_AI_KEY"]
password_key = os.environ["PASS_KEY"]


def transcribe(audio):
    """Transcribe the recorded audio file with Whisper and return the text."""
    with open(audio, "rb") as audio_file:
        transcript = openai.Audio.transcribe("whisper-1", audio_file)
    return transcript["text"]


with gr.Blocks() as demo:
    gr.Markdown(
        """
        # Welcome to the Blockchain bot.
        """
    )
    with gr.Accordion("Click for Instructions:"):
        gr.Markdown(
            """
            * Record your query.
            * Submit your query, and follow the chat or listen to the advice.
            * When you are ready to respond, clear your last recording and resubmit.

            Note: Transcribe Audio does not work on iOS.
            """
        )

    # Conversation history, seeded with the system prompt that defines the assistant's role.
    messages = gr.State(
        value=[
            {
                "role": "system",
                "content": (
                    "As a blockchain assistant, I specialise in teaching about staking and "
                    "supporting users in blockchain technologies. My key skills include a "
                    "comprehensive understanding of blockchain and staking, expertise in "
                    "various staking protocols and platforms, guidance on setting up and "
                    "troubleshooting staking processes, strong communication skills to convey "
                    "complex concepts effectively, and attention to users' specific needs and "
                    "providing step-by-step staking instructions. When assisting with staking "
                    "queries, I follow a process that involves gathering information, "
                    "analysing the problem, developing and communicating a plan, executing "
                    "the plan, confirming resolution, and following up. My expertise is "
                    "limited to blockchain technologies and staking-related topics, and I "
                    "cannot provide answers or engage on unrelated subjects or technologies. "
                    "Type only 3 dots ….."
                ),
            }
        ]
    )

    def botResponse(user_input, messages):
        """Append the user's message, query the chat model, and return the visible transcript."""
        messages.append({"role": "user", "content": user_input})
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo-0301",
            messages=messages,
        )
        system_message = response["choices"][0]["message"]["content"]
        messages.append({"role": "assistant", "content": system_message})

        # Build a readable transcript, hiding the system prompt.
        chat_transcript = ""
        for message in messages:
            if message["role"] != "system":
                chat_transcript += message["role"] + ": " + message["content"] + "\n\n"
        return chat_transcript

    def giveVoice(messages):
        """Convert the latest assistant reply to speech and return the path to the MP3 file."""
        bot_message = messages[-1]
        tts = gTTS(text=bot_message["content"])
        tts.save("temp.mp3")
        cwd = os.getcwd()
        new_path = os.path.join(cwd, "temp.mp3")
        return new_path

    with gr.Row():
        with gr.Column(scale=1):
            user_audio = gr.Audio(source="microphone", type="filepath", label="Input Phrase")
            submit_btn = gr.Button(value="Transcribe Audio")
            submit_btn2 = gr.Button(value="Submit Text")
            gpt_voice = gr.Audio(label="Listen to Advice")
        with gr.Column(scale=2):
            user_transcript = gr.Text(label="Audio Transcription", interactive=False)
            user_text = gr.Text(label="Text Input")
            gpt_transcript = gr.Text(label="Chat Transcript")

    # Wire up the UI: transcribe the recording, send typed or transcribed text to the bot,
    # and voice the bot's reply whenever the chat transcript updates.
    submit_btn.click(transcribe, user_audio, user_transcript)
    submit_btn2.click(botResponse, [user_text, messages], gpt_transcript)
    user_transcript.change(botResponse, [user_transcript, messages], gpt_transcript)
    gpt_transcript.change(giveVoice, messages, gpt_voice)

demo.launch(share=False, auth=("admin", password_key))