Ii committed on
Commit ca8242f · verified · 1 Parent(s): dd21d33

Update refacer.py

Files changed (1)
  1. refacer.py +28 -246
refacer.py CHANGED
@@ -1,262 +1,44 @@
 import cv2
-import onnxruntime as rt
-import sys
 from insightface.app import FaceAnalysis
-sys.path.insert(1, './recognition')
-from scrfd import SCRFD
-from arcface_onnx import ArcFaceONNX
-import os.path as osp
-import os
-from pathlib import Path
-from tqdm import tqdm
-import ffmpeg
-import random
-import multiprocessing as mp
-from concurrent.futures import ThreadPoolExecutor
-from insightface.model_zoo.inswapper import INSwapper
-import psutil
-from enum import Enum
-from insightface.app.common import Face
-from insightface.utils.storage import ensure_available
-import re
-import subprocess

-class RefacerMode(Enum):
-    CPU, CUDA, COREML, TENSORRT = range(1, 5)

 class Refacer:
-    def __init__(self,force_cpu=False,colab_performance=False):
-        self.first_face = False
         self.force_cpu = force_cpu
         self.colab_performance = colab_performance
-        self.__check_encoders()
-        self.__check_providers()
-        self.total_mem = psutil.virtual_memory().total
-        self.__init_apps()

-    def __check_providers(self):
-        if self.force_cpu :
-            self.providers = ['CPUExecutionProvider']
-        else:
-            self.providers = rt.get_available_providers()
-        rt.set_default_logger_severity(4)
-        self.sess_options = rt.SessionOptions()
-        self.sess_options.execution_mode = rt.ExecutionMode.ORT_SEQUENTIAL
-        self.sess_options.graph_optimization_level = rt.GraphOptimizationLevel.ORT_ENABLE_ALL

-        if len(self.providers) == 1 and 'CPUExecutionProvider' in self.providers:
-            self.mode = RefacerMode.CPU
-            self.use_num_cpus = mp.cpu_count()-1
-            self.sess_options.intra_op_num_threads = int(self.use_num_cpus/3)
-            print(f"CPU mode with providers {self.providers}")
-        elif self.colab_performance:
-            self.mode = RefacerMode.TENSORRT
-            self.use_num_cpus = mp.cpu_count()-1
-            self.sess_options.intra_op_num_threads = int(self.use_num_cpus/3)
-            print(f"TENSORRT mode with providers {self.providers}")
-        elif 'CoreMLExecutionProvider' in self.providers:
-            self.mode = RefacerMode.COREML
-            self.use_num_cpus = mp.cpu_count()-1
-            self.sess_options.intra_op_num_threads = int(self.use_num_cpus/3)
-            print(f"CoreML mode with providers {self.providers}")
-        elif 'CUDAExecutionProvider' in self.providers:
-            self.mode = RefacerMode.CUDA
-            self.use_num_cpus = 2
-            self.sess_options.intra_op_num_threads = 1
-            if 'TensorrtExecutionProvider' in self.providers:
-                self.providers.remove('TensorrtExecutionProvider')
-            print(f"CUDA mode with providers {self.providers}")
-        """
-        elif 'TensorrtExecutionProvider' in self.providers:
-            self.mode = RefacerMode.TENSORRT
-            #self.use_num_cpus = 1
-            #self.sess_options.intra_op_num_threads = 1
-            self.use_num_cpus = mp.cpu_count()-1
-            self.sess_options.intra_op_num_threads = int(self.use_num_cpus/3)
-            print(f"TENSORRT mode with providers {self.providers}")
-        """

-    def __init_apps(self):
-        assets_dir = ensure_available('models', 'buffalo_l', root='~/.insightface')

-        model_path = os.path.join(assets_dir, 'det_10g.onnx')
-        sess_face = rt.InferenceSession(model_path, self.sess_options, providers=self.providers)
-        self.face_detector = SCRFD(model_path,sess_face)
-        self.face_detector.prepare(0,input_size=(640, 640))

-        model_path = os.path.join(assets_dir , 'w600k_r50.onnx')
-        sess_rec = rt.InferenceSession(model_path, self.sess_options, providers=self.providers)
-        self.rec_app = ArcFaceONNX(model_path,sess_rec)
-        self.rec_app.prepare(0)

-        model_path = 'inswapper_128.onnx'
-        sess_swap = rt.InferenceSession(model_path, self.sess_options, providers=self.providers)
-        self.face_swapper = INSwapper(model_path,sess_swap)

-    def prepare_faces(self, faces):
-        self.replacement_faces=[]
-        for face in faces:
-            #image1 = cv2.imread(face.origin)
-            if "origin" in face:
-                face_threshold = face['threshold']
-                bboxes1, kpss1 = self.face_detector.autodetect(face['origin'], max_num=1)
-                if len(kpss1)<1:
-                    raise Exception('No face detected on "Face to replace" image')
-                feat_original = self.rec_app.get(face['origin'], kpss1[0])
-            else:
-                face_threshold = 0
-                self.first_face = True
-                feat_original = None
-                print('No origin image: First face change')
-            #image2 = cv2.imread(face.destination)
-            _faces = self.__get_faces(face['destination'],max_num=1)
-            if len(_faces)<1:
-                raise Exception('No face detected on "Destination face" image')
-            self.replacement_faces.append((feat_original,_faces[0],face_threshold))

-    def __convert_video(self,video_path,output_video_path):
-        if self.video_has_audio:
-            print("Merging audio with the refaced video...")
-            new_path = output_video_path + str(random.randint(0,999)) + "_c.mp4"
-            #stream = ffmpeg.input(output_video_path)
-            in1 = ffmpeg.input(output_video_path)
-            in2 = ffmpeg.input(video_path)
-            out = ffmpeg.output(in1.video, in2.audio, new_path,video_bitrate=self.ffmpeg_video_bitrate,vcodec=self.ffmpeg_video_encoder)
-            out.run(overwrite_output=True,quiet=True)
-        else:
-            new_path = output_video_path
-            print("The video doesn't have audio, so post-processing is not necessary")

-        print(f"The process has finished.\nThe refaced video can be found at {os.path.abspath(new_path)}")
-        return new_path

-    def __get_faces(self,frame,max_num=0):

-        bboxes, kpss = self.face_detector.detect(frame,max_num=max_num,metric='default')

-        if bboxes.shape[0] == 0:
-            return []
-        ret = []
-        for i in range(bboxes.shape[0]):
-            bbox = bboxes[i, 0:4]
-            det_score = bboxes[i, 4]
-            kps = None
-            if kpss is not None:
-                kps = kpss[i]
-            face = Face(bbox=bbox, kps=kps, det_score=det_score)
-            face.embedding = self.rec_app.get(frame, kps)
-            ret.append(face)
-        return ret

-    def process_first_face(self,frame):
-        faces = self.__get_faces(frame,max_num=1)
-        if len(faces) != 0:
-            frame = self.face_swapper.get(frame, faces[0], self.replacement_faces[0][1], paste_back=True)
-        return frame

-    def process_faces(self,frame):
-        faces = self.__get_faces(frame,max_num=0)
-        for rep_face in self.replacement_faces:
-            for i in range(len(faces) - 1, -1, -1):
-                sim = self.rec_app.compute_sim(rep_face[0], faces[i].embedding)
-                if sim>=rep_face[2]:
-                    frame = self.face_swapper.get(frame, faces[i], rep_face[1], paste_back=True)
-                    del faces[i]
-                    break
-        return frame

-    def __check_video_has_audio(self,video_path):
-        self.video_has_audio = False
-        probe = ffmpeg.probe(video_path)
-        audio_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'audio'), None)
-        if audio_stream is not None:
-            self.video_has_audio = True

-    def reface_group(self, faces, frames, output):
-        with ThreadPoolExecutor(max_workers = self.use_num_cpus) as executor:
-            if self.first_face:
-                results = list(tqdm(executor.map(self.process_first_face, frames), total=len(frames),desc="Processing frames"))
-            else:
-                results = list(tqdm(executor.map(self.process_faces, frames), total=len(frames),desc="Processing frames"))
-            for result in results:
-                output.write(result)

     def reface(self, video_path, faces):
-        self.__check_video_has_audio(video_path)
-        output_video_path = os.path.join('out',Path(video_path).name)
-        self.prepare_faces(faces)

         cap = cv2.VideoCapture(video_path)
-        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
-        print(f"Total frames: {total_frames}")

-        fps = cap.get(cv2.CAP_PROP_FPS)
-        frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-        frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

         fourcc = cv2.VideoWriter_fourcc(*'mp4v')
-        output = cv2.VideoWriter(output_video_path, fourcc, fps, (frame_width, frame_height))

-        frames=[]
-        self.k = 1
-        with tqdm(total=total_frames,desc="Extracting frames") as pbar:
-            while cap.isOpened():
-                flag, frame = cap.read()
-                if flag and len(frame)>0:
-                    frames.append(frame.copy())
-                    pbar.update()
-                else:
-                    break
-                if (len(frames) > 1000):
-                    self.reface_group(faces,frames,output)
-                    frames=[]

-        cap.release()
-        pbar.close()

-        self.reface_group(faces,frames,output)
-        frames=[]
-        output.release()

-        return self.__convert_video(video_path,output_video_path)

-    def __try_ffmpeg_encoder(self, vcodec):
-        print(f"Trying FFMPEG {vcodec} encoder")
-        command = ['ffmpeg', '-y', '-f','lavfi','-i','testsrc=duration=1:size=1280x720:rate=30','-vcodec',vcodec,'testsrc.mp4']
-        try:
-            subprocess.run(command, check=True, capture_output=True).stderr
-        except subprocess.CalledProcessError as e:
-            print(f"FFMPEG {vcodec} encoder doesn't work -> Disabled.")
-            return False
-        print(f"FFMPEG {vcodec} encoder works")
-        return True

-    def __check_encoders(self):
-        self.ffmpeg_video_encoder='libx264'
-        self.ffmpeg_video_bitrate='0'

-        pattern = r"encoders: ([a-zA-Z0-9_]+(?: [a-zA-Z0-9_]+)*)"
-        command = ['ffmpeg', '-codecs', '--list-encoders']
-        commandout = subprocess.run(command, check=True, capture_output=True).stdout
-        result = commandout.decode('utf-8').split('\n')
-        for r in result:
-            if "264" in r:
-                encoders = re.search(pattern, r).group(1).split(' ')
-                for v_c in Refacer.VIDEO_CODECS:
-                    for v_k in encoders:
-                        if v_c == v_k:
-                            if self.__try_ffmpeg_encoder(v_k):
-                                self.ffmpeg_video_encoder=v_k
-                                self.ffmpeg_video_bitrate=Refacer.VIDEO_CODECS[v_k]
-                                print(f"Video codec for FFMPEG: {self.ffmpeg_video_encoder}")
-                                return

-    VIDEO_CODECS = {
-        'h264_videotoolbox':'0', #osx HW acceleration
-        'h264_nvenc':'0', #NVIDIA HW acceleration
-        #'h264_qsv', #Intel HW acceleration
-        #'h264_vaapi', #Intel HW acceleration
-        #'h264_omx', #HW acceleration
-        'libx264':'0' #No HW acceleration
-    }
+import os
 import cv2
+import numpy as np
 from insightface.app import FaceAnalysis
+from insightface.model_zoo import model_zoo
+from onnxruntime import InferenceSession

 class Refacer:
+    def __init__(self, force_cpu=False, colab_performance=False):
         self.force_cpu = force_cpu
         self.colab_performance = colab_performance
+        self.model = self.load_model()

+    def load_model(self):
+        model_path = "/home/user/app/inswapper_128.onnx"  # Replace with your actual model path
+        if not os.path.exists(model_path):
+            raise FileNotFoundError(f"Model not found at {model_path}")
+        return InferenceSession(model_path, providers=["CPUExecutionProvider" if self.force_cpu else "CUDAExecutionProvider"])

     def reface(self, video_path, faces):
+        if not os.path.exists(video_path):
+            raise FileNotFoundError(f"Video file not found at {video_path}")

         cap = cv2.VideoCapture(video_path)
         fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+        output_path = "output_video.mp4"
+        out = cv2.VideoWriter(output_path, fourcc, cap.get(cv2.CAP_PROP_FPS),
+                              (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))))
+
+        while cap.isOpened():
+            ret, frame = cap.read()
+            if not ret:
+                break

+            for face in faces:
+                # Here, you should apply face-swapping logic using ONNX and the destination face
+                pass  # Replace this with face-swapping code

+            out.write(frame)

+        cap.release()
+        out.release()

+        return output_path
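
Note on provider selection: the new load_model() requests CUDAExecutionProvider whenever force_cpu is false, which can fail or fall back depending on the onnxruntime build. A minimal sketch, not part of this commit, of picking providers based on what is actually installed, in the spirit of the removed __check_providers(); the helper name pick_providers() is hypothetical.

import onnxruntime as rt

def pick_providers(force_cpu=False):
    # Only request CUDA when the CUDA execution provider is actually available.
    available = rt.get_available_providers()
    if force_cpu or "CUDAExecutionProvider" not in available:
        return ["CPUExecutionProvider"]
    # Keep the CPU provider as a fallback after CUDA.
    return ["CUDAExecutionProvider", "CPUExecutionProvider"]

# session = rt.InferenceSession("inswapper_128.onnx", providers=pick_providers())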
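Note on the placeholder loop: the body of the new reface() still contains `pass  # Replace this with face-swapping code`. A hedged sketch of one way to fill it in, reusing the insightface pieces the removed version relied on (FaceAnalysis for detection/embeddings and the inswapper_128.onnx model loaded through model_zoo.get_model). The model path and image file names below are assumptions for illustration, not part of this commit.

import cv2
from insightface.app import FaceAnalysis
from insightface.model_zoo import model_zoo

MODEL_PATH = "inswapper_128.onnx"  # assumed local path to the swapper model

analyser = FaceAnalysis(name="buffalo_l")        # SCRFD detector + ArcFace recognizer
analyser.prepare(ctx_id=0, det_size=(640, 640))
swapper = model_zoo.get_model(MODEL_PATH)        # returns an INSwapper instance

def swap_frame(frame, source_face):
    # Replace every face detected in `frame` with the identity of `source_face`.
    for target_face in analyser.get(frame):
        frame = swapper.get(frame, target_face, source_face, paste_back=True)
    return frame

# Usage sketch: take the identity from a reference image, then reface one frame.
source_face = analyser.get(cv2.imread("destination_face.jpg"))[0]
refaced = swap_frame(cv2.imread("frame.jpg"), source_face)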