fffiloni's picture
Update app.py
f92ecbc
raw
history blame
5.09 kB
# Gradio app: record a spoken question, transcribe/translate it with Whisper,
# answer it with an OpenAI completion, synthesize speech, and render the
# answer as a talking-portrait video via remote HuggingFace Spaces.
import gradio as gr
from PIL import Image
import os
os.system("pip install openai")  # HACK: runtime pip install; should live in requirements.txt
import openai
#api_key = os.environ.get('api_key')
# Remote Spaces loaded as callable building blocks of the pipeline.
whisper = gr.Interface.load(name="spaces/sanchit-gandhi/whisper-large-v2")
from share_btn import community_icon_html, loading_icon_html, share_js
token = os.environ.get('HF_TOKEN')  # HF token for the gated Space below
tts = gr.Interface.load(name="spaces/Flux9665/IMS-Toucan")
talking_face = gr.Blocks.load(name="spaces/fffiloni/one-shot-talking-face", api_key=token)
def infer(audio, openai_api_key):
    """Run the full pipeline: audio -> English text -> GPT answer -> TTS -> portrait video.

    Returns updates for (whisper transcript box, output video, error/status box,
    share-button group visibility), matching the click handler's outputs list.
    """
    # 1) Transcribe + translate the recording to English.
    transcript = whisper(audio, None, "translate", fn_index=0)
    # 2) Ask OpenAI; try_api returns (answer_text, status_message).
    answer_text, status = try_api(transcript, openai_api_key)
    # 3) Speak the answer, then 4) animate the portrait with that audio.
    spoken_answer = tts(answer_text, "English Text", "English Accent", "English Speaker's Voice", fn_index=0)
    portrait_video = talking_face("wise_woman_portrait.png", spoken_answer, fn_index=0)
    return (
        gr.Textbox.update(value=transcript, visible=True),
        portrait_video,
        gr.Textbox.update(value=status, visible=True),
        gr.update(visible=True),
    )
def _report_error(message):
    """Log an OpenAI failure and package it as the ("oups", status) fallback pair."""
    print(message)
    return "oups", message

def try_api(message, openai_api_key):
    """Call the OpenAI completion API, mapping known failures to a status pair.

    Args:
        message: prompt text forwarded to call_api.
        openai_api_key: user-supplied API key forwarded to call_api.

    Returns:
        tuple[str, str]: ``(completion_text, "no error")`` on success, or
        ``("oups", <error description>)`` when a known openai.error is raised.
    """
    try:
        response = call_api(message, openai_api_key)
        # BUGFIX: the original returned ``Fore.GREEN + "no error"`` but colorama
        # was never imported, so every *successful* call raised NameError here.
        return response, "no error"
    except openai.error.Timeout as e:
        # Handle timeout error, e.g. retry or log
        return _report_error(f"OpenAI API request timed out: {e}")
    except openai.error.APIError as e:
        # Handle API error, e.g. retry or log
        return _report_error(f"OpenAI API returned an API Error: {e}")
    except openai.error.APIConnectionError as e:
        # Handle connection error, e.g. check network or log
        return _report_error(f"OpenAI API request failed to connect: {e}")
    except openai.error.InvalidRequestError as e:
        # Handle invalid request error, e.g. validate parameters or log
        return _report_error(f"OpenAI API request was invalid: {e}")
    except openai.error.AuthenticationError as e:
        # Handle authentication error, e.g. check credentials or log
        return _report_error(f"OpenAI API request was not authorized: {e}")
    except openai.error.PermissionError as e:
        # Handle permission error, e.g. check scope or log
        return _report_error(f"OpenAI API request was not permitted: {e}")
    except openai.error.RateLimitError as e:
        # Handle rate limit error, e.g. wait or log
        return _report_error(f"OpenAI API request exceeded rate limit: {e}")
def _clean_completion(text):
    """Strip the blank preamble a davinci completion typically starts with.

    The original code did ``text.split("\n", 2)[2]``, which raises IndexError
    whenever the completion contains fewer than two newlines. Preserve the
    original result when it applies; otherwise fall back to stripping leading
    newlines.
    """
    parts = text.split("\n", 2)
    return parts[2] if len(parts) == 3 else text.lstrip("\n")

def call_api(message, openai_api_key):
    """Request a text-davinci-003 completion for *message* and return its text.

    Args:
        message: prompt text sent to the completion endpoint.
        openai_api_key: user-supplied key, set globally on the openai module.

    Returns:
        str: the completion text with its leading blank lines removed.
    """
    print("starting open ai")
    openai.api_key = openai_api_key
    response = openai.Completion.create(
        model="text-davinci-003",
        prompt=message,
        temperature=0.5,
        max_tokens=2048,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0.6,
    )
    # BUGFIX: guard against completions with fewer than two newlines, which
    # previously crashed with IndexError.
    return _clean_completion(str(response.choices[0].text))
# Static HTML banner rendered at the top of the Gradio page via gr.HTML(title).
title = """
<div style="text-align: center; max-width: 500px; margin: 0 auto;">
<div
style="
display: inline-flex;
align-items: center;
gap: 0.8rem;
font-size: 1.75rem;
margin-bottom: 10px;
"
>
<h1 style="font-weight: 600; margin-bottom: 7px;">
GPT Talking Portrait
</h1>
</div>
<p style="margin-bottom: 10px;font-size: 94%;font-weight: 100;line-height: 1.5em;">
Use Whisper to ask, alive portrait responds !
</p>
</div>
"""
# Assemble the Gradio UI and wire the "Send" button to the inference pipeline.
# NOTE(review): indentation was lost in this paste; the nesting below is
# reconstructed from the component ids/usage — confirm against the deployed app.
with gr.Blocks(css="style.css") as demo:
    with gr.Column(elem_id="col-container"):
        gr.HTML(title)
        # Output video; the share group and error box start hidden and are
        # revealed by infer()'s returned updates after a run.
        gpt_response = gr.Video(label="Talking Portrait response", elem_id="video_out")
        with gr.Group(elem_id="share-btn-container", visible=False) as share_group:
            community_icon = gr.HTML(community_icon_html)
            loading_icon = gr.HTML(loading_icon_html)
            share_button = gr.Button("Share to community", elem_id="share-btn")
        error_handler = gr.Textbox(visible=False, show_label=False)
        with gr.Column(elem_id="col-container-2"):
            with gr.Row():
                # Microphone input (saved as a file path) plus the user's API key.
                record_input = gr.Audio(source="microphone",type="filepath", label="Audio input", show_label=True, elem_id="record_btn")
                openai_api_key = gr.Textbox(max_lines=1, type="password", label="Your OpenAI API Key", placeholder="sk-123abc...")
            whisper_tr = gr.Textbox(label="whisper english translation", elem_id="text_inp", visible=False)
            send_btn = gr.Button("Send my request !")

    # Outputs order must match infer()'s return tuple.
    send_btn.click(infer, inputs=[record_input, openai_api_key], outputs=[whisper_tr, gpt_response, error_handler, share_group])
    share_button.click(None, [], [], _js=share_js)

demo.queue(max_size=32, concurrency_count=20).launch(debug=True)