import os
import subprocess

import streamlit as st
import torch
from gtts import gTTS
from huggingface_hub import InferenceClient

# Report the locally available device (the Wav2Lip step below is forced
# onto the CPU regardless, via CUDA_VISIBLE_DEVICES='').
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(f'Using {device} for inference.')
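
# Text generation runs remotely on the Hugging Face Inference API;
# no Mixtral weights are downloaded to this machine.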
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")


def generate_output(prompt):
    """Return (video_path, None) on success or (None, error_message) on failure."""
    if not prompt:
        return None, "El campo de la pregunta es obligatorio."

    # Step 1: generate a short text answer with the language model.
    response = client.text_generation(prompt, max_new_tokens=50, temperature=0.6)
    personalized_response = response.strip()

    # Step 2: synthesize Spanish speech for the answer with gTTS.
    try:
        tts = gTTS(personalized_response, lang='es')
        audio_path = "audio.mp3"
        tts.save(audio_path)
    except Exception as e:
        return None, f"No se pudo generar el audio: {str(e)}"

    # Step 3: lip-sync face.jpg to the audio with Wav2Lip (forced onto the CPU
    # via CUDA_VISIBLE_DEVICES=''; --resize_factor 4 trades resolution for speed).
    video_path = "video.mp4"
    command = (f"CUDA_VISIBLE_DEVICES='' python3 inference.py "
               f"--checkpoint_path checkpoints/wav2lip_gan.pth --face face.jpg "
               f"--audio {audio_path} --outfile {video_path} --nosmooth --resize_factor 4")
    process = subprocess.run(command, shell=True, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE, text=True)
    if process.returncode != 0:
        return None, f"No se pudo generar el video: {process.stderr}"
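
    # Hedged alternative (a sketch, not part of the original app): passing an
    # argv list avoids shell-quoting pitfalls for the interpolated paths, with
    # the CPU override moved into `env`; flag behavior is assumed unchanged.
    # process = subprocess.run(
    #     ["python3", "inference.py",
    #      "--checkpoint_path", "checkpoints/wav2lip_gan.pth",
    #      "--face", "face.jpg", "--audio", audio_path,
    #      "--outfile", video_path, "--nosmooth", "--resize_factor", "4"],
    #     env={**os.environ, "CUDA_VISIBLE_DEVICES": ""},
    #     capture_output=True, text=True)
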
    if os.path.isfile(video_path):
        return video_path, None
    return None, "No se pudo generar el video"


# Streamlit UI: a question box and a button that runs the full pipeline.
st.title("Lipsync + Inteligencia Artificial")
prompt = st.text_input("Pregunta")
if st.button("Generar Video"):
    video_path, error_message = generate_output(prompt)
    if error_message:
        st.error(f"Error: {error_message}")
    else:
        with open(video_path, "rb") as video_file:
            st.video(video_file.read())
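
# To run locally (assumed layout: the Wav2Lip repo's inference.py,
# checkpoints/wav2lip_gan.pth, and a face.jpg portrait beside this file,
# which is assumed to be named app.py):
#   streamlit run app.py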