Commit 213ee04
1 Parent(s): 7787d6b
Update app.py

app.py CHANGED
@@ -15,6 +15,9 @@ import os
 import ffmpeg
 from io import BytesIO
 import requests
+import sys
+
+python_path = sys.executable
 
 from fairseq.checkpoint_utils import load_model_ensemble_and_task_from_hf_hub
 from fairseq.models.text_to_speech.hub_interface import TTSHubInterface
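The new python_path = sys.executable line pins the shelled-out commands below to the interpreter that is already running app.py, rather than whatever "python" resolves to on PATH. A minimal sketch of the same idea using subprocess instead of os.system; the script name here is a placeholder, not a file from this repo:

# Sketch only: run a helper script with the same interpreter as this app,
# via subprocess instead of os.system. "helper_script.py" is a placeholder.
import subprocess
import sys

python_path = sys.executable  # absolute path of the current interpreter

result = subprocess.run(
    [python_path, "-B", "helper_script.py"],
    capture_output=True,
    text=True,
    check=False,  # inspect result.returncode instead of raising
)
print(result.returncode, result.stdout)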
@@ -108,7 +111,7 @@ def calculate(image_in, audio_in):
         f.write(jq_run.stdout.decode('utf-8').strip())
     # device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
     # os.system(f"rm -rf /content/image_audio.mp4")
-    os.system(f"cd /content/one-shot-talking-face &&
+    os.system(f"cd /content/one-shot-talking-face && {python_path} -B test_script.py --img_path /content/image.png --audio_path /content/audio.wav --phoneme_path /content/test.json --save_dir /content/train")
     return "/content/train/image_audio.mp4"
 
 def merge_frames():
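For reference, the same invocation can be written without the "cd ... &&" shell chaining by passing cwd to subprocess.run. This is only a sketch of an alternative; the commit itself keeps os.system. Paths and flags are copied from the line added above:

# Sketch of an equivalent call with subprocess.run; cwd replaces "cd ... &&".
import subprocess
import sys

subprocess.run(
    [
        sys.executable, "-B", "test_script.py",
        "--img_path", "/content/image.png",
        "--audio_path", "/content/audio.wav",
        "--phoneme_path", "/content/test.json",
        "--save_dir", "/content/train",
    ],
    cwd="/content/one-shot-talking-face",
    check=True,  # fail loudly if the talking-face script errors out
)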
@@ -157,7 +160,7 @@ def one_shot_talking(image_in,audio_in):
     os.makedirs("/content/results")
 
     #Improve quality of input image
-    os.system(f"
+    os.system(f"{python_path} /content/GFPGAN/inference_gfpgan.py --upscale 2 -i /content/image_pre.png -o /content/results --bg_upsampler realesrgan")
     # time.sleep(60)
 
     image_in_one_shot='/content/results/image_pre.png'
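os.system does not surface failures from the GFPGAN step, so a quick existence check on the file the following lines read (image_in_one_shot) can catch a silent failure. A small sketch, assuming the enhanced image really lands at /content/results/image_pre.png as the surrounding code expects:

# Sketch only: verify the output the pipeline reads next actually exists.
import os

expected = "/content/results/image_pre.png"  # same path as image_in_one_shot
if not os.path.exists(expected):
    raise FileNotFoundError(f"GFPGAN did not produce {expected}")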
@@ -168,12 +171,12 @@ def one_shot_talking(image_in,audio_in):
     #Video Quality Improvement
     os.system(f"rm -rf /content/extracted_frames/image_audio_frames")
     #1. Extract the frames from the video file using PyVideoFramesExtractor
-    os.system(f"
+    os.system(f"{python_path} /content/PyVideoFramesExtractor/extract.py --video=/content/train/image_audio.mp4")
 
     #2. Improve image quality using GFPGAN on each frames
     # os.system(f"rm -rf /content/extracted_frames/image_audio_frames")
     os.system(f"rm -rf /content/video_results/")
-    os.system(f"
+    os.system(f"{python_path} /content/GFPGAN/inference_gfpgan.py --upscale 2 -i /content/extracted_frames/image_audio_frames -o /content/video_results --bg_upsampler realesrgan")
 
     #3. Merge all the frames to a one video using imageio
     merge_frames()
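The merge_frames() body is not part of this diff; per the comment it stitches the enhanced frames back into one video with imageio. A hypothetical sketch of such a merge, where the frame directory, file pattern, output path, and fps are assumptions rather than values from this repo:

# Hypothetical imageio-based merge; directory, pattern and fps are assumptions.
import glob
import imageio

def merge_frames_sketch(frames_dir="/content/video_results",
                        out_path="/content/image_audio_restored.mp4",
                        fps=25):
    # Collect the enhanced frames in order and append them to an
    # ffmpeg-backed video writer.
    frames = sorted(glob.glob(f"{frames_dir}/**/*.png", recursive=True))
    writer = imageio.get_writer(out_path, fps=fps)
    for frame_path in frames:
        writer.append_data(imageio.imread(frame_path))
    writer.close()
    return out_path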