import gradio as gr
import os
import subprocess
from gtts import gTTS
from pydub import AudioSegment
from transformers import GPT2LMHeadModel, GPT2Tokenizer, pipeline
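
# Overall flow: the user's text is completed by the GPT-2 model "salomonsky/deepSP",
# the reply is converted to Spanish speech with gTTS, and an external Wav2Lip script
# lip-syncs that audio onto a face image. The Space is assumed to ship face.jpg,
# inference.py and checkpoints/wav2lip_gan.pth next to this file, and pydub needs
# ffmpeg installed to decode the intermediate MP3.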

# The GPT2LMHeadModel loaded here is not used directly below; the text-generation
# pipeline loads its own copy of the same model.
model = GPT2LMHeadModel.from_pretrained("salomonsky/deepSP")
os.environ["TOKENIZERS_PARALLELISM"] = "true"
generator = pipeline('text-generation', model="salomonsky/deepSP")
tokenizer = GPT2Tokenizer.from_pretrained('salomonsky/deepSP')

def generate_output(text):
    """Generate a GPT-2 reply, speak it with gTTS and lip-sync it with Wav2Lip.

    Returns a (video_path, error_message) pair; exactly one of them is None.
    """
    # Round-trip through the tokenizer to normalize the input text.
    input_tokens = tokenizer.encode(text, add_special_tokens=False)
    input_text = tokenizer.decode(input_tokens)
    gpt2_output = generator(input_text, max_length=20, do_sample=True, temperature=0.9)
    if len(gpt2_output) == 0 or 'generated_text' not in gpt2_output[0]:
        return None, "No se pudo generar el texto."
    # Keep only the newly generated continuation, not the echoed prompt.
    generated_text = gpt2_output[0]['generated_text']
    generated_text = generated_text.replace(input_text, "").strip()
    # Convert the reply to speech: gTTS writes an MP3 and pydub re-encodes it to
    # the WAV file that is passed to Wav2Lip.
    try:
        tts = gTTS(generated_text, lang='es')
        temp_audio_path = "temp_audio.mp3"
        tts.save(temp_audio_path)
        audio_path = "audio.wav"
        audio = AudioSegment.from_mp3(temp_audio_path)
        audio.export(audio_path, format="wav")
        print("Archivo de audio generado:", audio_path)
    except Exception as e:
        return None, f"No se pudo generar el audio: {str(e)}"
    # The lip-sync step needs a face image bundled with the app.
    face_image_path = "face.jpg"
    if not os.path.isfile(face_image_path):
        return None, "No se encontró el archivo de imagen de cara."
    print("Archivo de imagen de cara:", face_image_path)
    # Call the Wav2Lip inference script to lip-sync the face image to the audio.
    command = f"python3 inference.py --checkpoint_path checkpoints/wav2lip_gan.pth --face {face_image_path} --audio {audio_path} --outfile video.mp4 --nosmooth"
    process = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if process.returncode != 0:
        error_message = process.stderr.decode()
        return None, f"No se pudo generar el video: {error_message}"
    output_video_path = "video.mp4"
    print("Archivo de video generado:", output_video_path)
    # Clean up the intermediate MP3 and hand the finished video back to Gradio.
    os.remove(temp_audio_path)
    if os.path.isfile(output_video_path):
        return output_video_path, None
    return None, "No se pudo generar el video."

def error_message_fn(error_message):
    # Returns the error text unchanged (or None). gr.Interface has no error-callback
    # hook, so errors are surfaced through the second output component instead.
    if error_message is not None:
        return error_message
    return None
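
# The Interface pairs the two return values of generate_output with two output
# components: the generated video and a textbox for any error message.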
iface = gr.Interface(
    fn=generate_output,
    inputs=gr.Textbox(lines=1, placeholder='Escribe tu nombre para presentarte con Andrea...'),
    outputs=[
        gr.Video(label="Respuesta de Andrea (un minuto aproximadamente)"),
        gr.Textbox(label="Mensaje de error", type="text")
    ],
    title="Andrea - Humanoid Chatbot IA 2023(c)",
)
iface.launch()