"""GPT Talking Portrait: a Gradio app that chains three Hugging Face Spaces.
Whisper transcribes a spoken question, ChatGPT answers it, IMS-Toucan reads
the answer aloud, and one-shot-talking-face animates a portrait with it."""

import os

import gradio as gr

# import requests  # only needed for the commented-out download check in infer()
# from PIL import Image  # only needed for the commented-out Image.open() below

token = os.environ.get('HF_TOKEN')

# Load the three remote Spaces as callable pipelines.
whisper_to_gpt = gr.Blocks.load(name="spaces/fffiloni/whisper-to-chatGPT")
tts = gr.Interface.load(name="spaces/Flux9665/IMS-Toucan")
talking_face = gr.Blocks.load(
    name="spaces/fffiloni/one-shot-talking-face", api_key=token
)


def infer(audio):
    # 1. Transcribe the recorded question with Whisper and get ChatGPT's answer;
    #    gpt_response[1] holds the answer text.
    gpt_response = whisper_to_gpt(audio, "translate", fn_index=0)
    # print(gpt_response)

    # 2. Synthesize the answer as English speech.
    audio_response = tts(
        gpt_response[1],
        "English Text",
        "English Accent",
        "English Speaker's Voice",
        fn_index=0,
    )

    # 3. Lip-sync the portrait image to the synthesized speech.
    # image = Image.open(r"wise_woman_portrait.png")
    portrait_link = talking_face("wise_woman_portrait.png", audio_response, fn_index=0)
    # portrait_response = requests.get(portrait_link, headers={'Authorization': 'Bearer ' + token})
    # print(portrait_response.text)
    return portrait_link


title = """

<div style="text-align: center; max-width: 500px; margin: 0 auto;">
    <h1>GPT Talking Portrait</h1>
    <p>Use Whisper to ask; the live portrait responds!</p>
</div>

""" css = ''' #col-container, #col-container-2 {max-width: 510px; margin-left: auto; margin-right: auto;} a {text-decoration-line: underline; font-weight: 600;} div#record_btn > .mt-6 { margin-top: 0!important; } div#record_btn > .mt-6 button { width: 100%; height: 40px; } .footer { margin-bottom: 45px; margin-top: 10px; text-align: center; border-bottom: 1px solid #e5e5e5; } .footer>p { font-size: .8rem; display: inline-block; padding: 0 10px; transform: translateY(10px); background: white; } .dark .footer { border-color: #303030; } .dark .footer>p { background: #0b0f19; } ''' with gr.Blocks(css=css) as demo: with gr.Column(elem_id="col-container"): gr.HTML(title) with gr.Row(): record_input = gr.Audio(source="microphone",type="filepath", show_label=True,elem_id="record_btn") with gr.Row(): send_btn = gr.Button("Send my request !") with gr.Column(elem_id="col-container-2"): gpt_response = gr.Video() send_btn.click(infer, inputs=[record_input], outputs=[gpt_response]) demo.queue(max_size=32, concurrency_count=20).launch(debug=True)