Spaces:
Sleeping
Sleeping
salomonsky
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -6,7 +6,9 @@ import cv2
|
|
6 |
from huggingface_hub import InferenceClient
|
7 |
import torch
|
8 |
|
9 |
-
device = torch.
|
|
|
|
|
10 |
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
|
11 |
|
12 |
def generate_output(prompt):
|
@@ -27,6 +29,7 @@ def generate_output(prompt):
|
|
27 |
video_path = "video.mp4"
|
28 |
command = f"CUDA_VISIBLE_DEVICES='' python3 inference.py --checkpoint_path checkpoints/wav2lip_gan.pth --face face.jpg --audio {audio_path} --outfile {video_path} --nosmooth --resize_factor 2"
|
29 |
process = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
|
|
|
30 |
if process.returncode != 0:
|
31 |
error_message = process.stderr
|
32 |
return None, f"No se pudo generar el video: {error_message}"
|
@@ -43,5 +46,5 @@ if st.button("Generar Video"):
|
|
43 |
if error_message:
|
44 |
st.error(f"Error: {error_message}")
|
45 |
else:
|
46 |
-
|
47 |
-
|
|
|
6 |
from huggingface_hub import InferenceClient
|
7 |
import torch
|
8 |
|
9 |
+
device = 'cuda' if torch.cuda.is_available() else 'cpu'
|
10 |
+
print('Using {} for inference.'.format(device))
|
11 |
+
|
12 |
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
|
13 |
|
14 |
def generate_output(prompt):
|
|
|
29 |
video_path = "video.mp4"
|
30 |
command = f"CUDA_VISIBLE_DEVICES='' python3 inference.py --checkpoint_path checkpoints/wav2lip_gan.pth --face face.jpg --audio {audio_path} --outfile {video_path} --nosmooth --resize_factor 2"
|
31 |
process = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
|
32 |
+
|
33 |
if process.returncode != 0:
|
34 |
error_message = process.stderr
|
35 |
return None, f"No se pudo generar el video: {error_message}"
|
|
|
46 |
if error_message:
|
47 |
st.error(f"Error: {error_message}")
|
48 |
else:
|
49 |
+
with open(video_path, "rb") as video_file:
|
50 |
+
st.video(video_file.read())
|