# anupam_speech / app.py
import os

import gradio as gr
import openai
from transformers import pipeline

# Read the OpenAI API key from the environment (e.g. a Space secret).
openai.api_key = os.getenv("OPENAI_API_KEY")

# Whisper-tiny pipeline for transcribing the recorded audio.
p = pipeline("automatic-speech-recognition", model="openai/whisper-tiny")

def transcribe(audio):
    """Transcribe an audio file (given as a filepath) to text with Whisper."""
    text = p(audio)["text"]
    return text
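
# Standalone transcription demo, kept for reference but disabled: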
# gr.Interface(
# fn=transcribe,
# inputs=gr.Audio(source="microphone", type="filepath"),
# outputs="text").launch()

# Conversation history, seeded with a system prompt that defines the
# assistant's persona as a PepsiCo call-center order taker.
messages = [
    {
        "role": "system",
        "content": "Your name is Rebecca. You are a PepsiCo call center assistant and your job is to take orders from customers.",
    }
]

def chatbot(input):
    """Transcribe the user's audio, query the chat model, and return its reply."""
    if input:
        input = transcribe(input)
        messages.append({"role": "user", "content": input})
        chat = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=messages,
            temperature=0.2,
            max_tokens=320,
            top_p=1,
            frequency_penalty=0,
            presence_penalty=0,
        )
        reply = chat.choices[0].message.content
        messages.append({"role": "assistant", "content": reply})
        return reply

# inputs = gr.inputs.Textbox(lines=7, label="Chat with PepsiCo AI assistant")

# Build the Gradio UI: microphone recording in, assistant reply text out.
inputs = gr.Audio(source="microphone", type="filepath")
outputs = gr.Textbox(label="Reply")

gr.Interface(
    fn=chatbot,
    inputs=inputs,
    outputs=outputs,
    title="chatbot",
    description="Ask anything you want",
    theme="compact",
).launch()
# gr.Interface(
# fn=transcribe,
# inputs= inputs,
# outputs="text"
# ).launch()