Ii committed on
Update refacer.py

refacer.py  +23 -53

refacer.py CHANGED
@@ -1,3 +1,5 @@
+import io
+import gradio as gr
 import cv2
 import onnxruntime as rt
 import sys
@@ -103,21 +105,6 @@ class Refacer:
                 raise Exception('No face detected on "Destination face" image')
             self.replacement_faces.append((feat_original,_faces[0],face_threshold))

-    def __convert_video(self,video_path,output_video_path):
-        if self.video_has_audio:
-            print("Merging audio with the refaced video...")
-            new_path = output_video_path + str(random.randint(0,999)) + "_c.mp4"
-            in1 = ffmpeg.input(output_video_path)
-            in2 = ffmpeg.input(video_path)
-            out = ffmpeg.output(in1.video, in2.audio, new_path,video_bitrate=self.ffmpeg_video_bitrate,vcodec=self.ffmpeg_video_encoder)
-            out.run(overwrite_output=True,quiet=True)
-        else:
-            new_path = output_video_path
-            print("The video doesn't have audio, so post-processing is not necessary")
-
-        print(f"The process has finished.\nThe refaced video can be found at {os.path.abspath(new_path)}")
-        return new_path
-
     def __get_faces(self,frame,max_num=0):
         bboxes, kpss = self.face_detector.detect(frame,max_num=max_num,metric='default')

@@ -158,19 +145,18 @@ class Refacer:
         audio_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'audio'), None)
         if audio_stream is not None:
             self.video_has_audio = True
-
-    def reface_group(self, faces, frames, output):
+
+    def reface_group(self, faces, frames):
+        results = []
         with ThreadPoolExecutor(max_workers = self.use_num_cpus) as executor:
             if self.first_face:
                 results = list(tqdm(executor.map(self.process_first_face, frames), total=len(frames),desc="Processing frames"))
             else:
                 results = list(tqdm(executor.map(self.process_faces, frames), total=len(frames),desc="Processing frames"))
-        for result in results:
-            output.write(result)
+        return results

     def reface(self, video_path, faces):
         self.__check_video_has_audio(video_path)
-        output_video_path = os.path.join('out',Path(video_path).name)
         self.prepare_faces(faces)

         cap = cv2.VideoCapture(video_path)
@@ -181,48 +167,32 @@ class Refacer:
         frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
         frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

-
-        output = cv2.VideoWriter(output_video_path, fourcc, fps, (frame_width, frame_height))
-
-        frames=[]
-        self.k = 1
+        frames = []
         with tqdm(total=total_frames,desc="Extracting frames") as pbar:
             while cap.isOpened():
                 flag, frame = cap.read()
-                if flag and len(frame)>0:
+                if flag and len(frame) > 0:
                     frames.append(frame.copy())
                     pbar.update()
                 else:
                     break
-                if (len(frames) > 1000):
-                    self.reface_group(faces,frames,output)
-                    frames=[]

         cap.release()
         pbar.close()

-        self.reface_group(faces,frames,output)
-        frames=[]
-        output.release()
+        refaced_frames = self.reface_group(faces, frames)

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        self.ffmpeg_video_encoder='libx264'
-        self.ffmpeg_video_bitrate='0'
-
-        if self.__try_ffmpeg_encoder('libx265'):
-            self.ffmpeg_video_encoder = 'libx265'
-        elif self.__try_ffmpeg_encoder('libvpx-vp9'):
-            self.ffmpeg_video_encoder = 'libvpx-vp9'
+        video_buffer = io.BytesIO()
+        out = cv2.VideoWriter('temp.mp4', cv2.VideoWriter_fourcc(*'mp4v'), fps, (frame_width, frame_height))
+
+        for frame in refaced_frames:
+            out.write(frame)
+        out.release()
+
+        with open('temp.mp4', 'rb') as f:
+            video_buffer.write(f.read())
+        video_buffer.seek(0)
+
+        os.remove('temp.mp4')
+
+        return video_buffer
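
The new reface() no longer writes the result under out/ and no longer merges the source audio back in with ffmpeg; it encodes the refaced frames to a temporary MP4 and returns them as an io.BytesIO buffer (audio handling is dropped in this version). A minimal consumption sketch, not part of this commit: the Refacer() construction, the faces dict keys, and the Gradio wiring (suggested only by the new "import gradio as gr") are all assumptions.

import cv2
import gradio as gr
from refacer import Refacer

refacer = Refacer()  # assumed: default construction works

def run(video_path, destination_face_path):
    # assumed faces layout; see prepare_faces() in refacer.py for the exact schema
    faces = [{'destination': cv2.imread(destination_face_path), 'threshold': 0.2}]
    buffer = refacer.reface(video_path, faces)
    with open('refaced.mp4', 'wb') as f:
        f.write(buffer.getvalue())  # dump the in-memory MP4 back to disk
    return 'refaced.mp4'

demo = gr.Interface(fn=run,
                    inputs=[gr.Video(), gr.Image(type='filepath')],
                    outputs=gr.Video())
demo.launch()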
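One consequence of this approach: cv2.VideoWriter can only encode to a real path on disk, which is presumably why the commit round-trips through a hard-coded 'temp.mp4' before filling the buffer, so two concurrent reface() calls would overwrite each other's temporary file. Below is a sketch of a per-call temporary file as an alternative (not what this commit does), assuming the same mp4v codec, fps and frame size; the helper name frames_to_buffer is illustrative.

import io
import os
import tempfile
import cv2

def frames_to_buffer(frames, fps, frame_width, frame_height):
    # cv2.VideoWriter needs a file path, so encode into a unique temp file,
    # then read the bytes back into an in-memory buffer and clean up.
    fd, tmp_path = tempfile.mkstemp(suffix='.mp4')
    os.close(fd)
    try:
        out = cv2.VideoWriter(tmp_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (frame_width, frame_height))
        for frame in frames:
            out.write(frame)
        out.release()
        with open(tmp_path, 'rb') as f:
            return io.BytesIO(f.read())
    finally:
        os.remove(tmp_path)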