Ii committed on
Commit 51b2611 · verified · 1 parent: df7dae3

Upload 8 files

Files changed (8)
  1. .gitignore +173 -0
  2. LICENSE +21 -0
  3. app.py +91 -0
  4. refacer.py +262 -0
  5. requirements-COREML.txt +12 -0
  6. requirements-GPU.txt +12 -0
  7. requirements.txt +12 -0
  8. script.py +41 -0
.gitignore ADDED
@@ -0,0 +1,173 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/#use-with-ide
+ .pdm.toml
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
+
+ out/*
+ !out/.gitkeep
+ media
+ tests
+ *.onnx
+
+ aaa.md
+
+ *_test.py
+ img.jpg
+ test_data
+ testsrc.mp4
LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2023 xaviviro
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
app.py ADDED
@@ -0,0 +1,91 @@
+ import gradio as gr
+ from refacer import Refacer
+ import argparse
+ import os
+ import requests
+
+ # Hugging Face URL to download the model
+ model_url = "https://huggingface.co/ofter/4x-UltraSharp/resolve/main/inswapper_128.onnx"
+ model_path = "./inswapper_128.onnx"
+
+ # Function to download the model
+ def download_model():
+     if not os.path.exists(model_path):
+         print("Downloading inswapper_128.onnx...")
+         response = requests.get(model_url)
+         if response.status_code == 200:
+             with open(model_path, 'wb') as f:
+                 f.write(response.content)
+             print("Model downloaded successfully!")
+         else:
+             raise Exception(f"Failed to download the model. Status code: {response.status_code}")
+     else:
+         print("Model already exists.")
+
+ # Download the model when the script runs
+ download_model()
+
+ # Argument parser
+ parser = argparse.ArgumentParser(description='Refacer')
+ parser.add_argument("--max_num_faces", type=int, help="Max number of faces on UI", default=5)
+ parser.add_argument("--force_cpu", help="Force CPU mode", default=False, action="store_true")
+ parser.add_argument("--share_gradio", help="Share Gradio", default=False, action="store_true")
+ parser.add_argument("--server_name", type=str, help="Server IP address", default="127.0.0.1")
+ parser.add_argument("--server_port", type=int, help="Server port", default=7860)
+ parser.add_argument("--colab_performance", help="Use in colab for better performance", default=False, action="store_true")
+ args = parser.parse_args()
+
+ # Initialize the Refacer class
+ refacer = Refacer(force_cpu=args.force_cpu, colab_performance=args.colab_performance)
+
+ num_faces = args.max_num_faces
+
+ # Run function for refacing video
+ def run(*vars):
+     video_path = vars[0]
+     origins = vars[1:(num_faces+1)]
+     destinations = vars[(num_faces+1):(num_faces*2)+1]
+     thresholds = vars[(num_faces*2)+1:]
+
+     faces = []
+     for k in range(0, num_faces):
+         if origins[k] is not None and destinations[k] is not None:
+             faces.append({
+                 'origin': origins[k],
+                 'destination': destinations[k],
+                 'threshold': thresholds[k]
+             })
+
+     # Call refacer to process video and get file path
+     refaced_video_path = refacer.reface(video_path, faces)  # refaced video path
+     print(f"Refaced video can be found at {refaced_video_path}")
+
+     return refaced_video_path  # Return the file path to show in Gradio output
+
+ # Prepare Gradio components
+ origin = []
+ destination = []
+ thresholds = []
+
+ with gr.Blocks() as demo:
+     with gr.Row():
+         gr.Markdown("# Refacer")
+     with gr.Row():
+         video = gr.Video(label="Original video", format="mp4")
+         video2 = gr.Video(label="Refaced video", interactive=False, format="mp4")
+
+     for i in range(0, num_faces):
+         with gr.Tab(f"Face #{i+1}"):
+             with gr.Row():
+                 origin.append(gr.Image(label="Face to replace"))
+                 destination.append(gr.Image(label="Destination face"))
+             with gr.Row():
+                 thresholds.append(gr.Slider(label="Threshold", minimum=0.0, maximum=1.0, value=0.2))
+
+     with gr.Row():
+         button = gr.Button("Reface", variant="primary")
+
+     button.click(fn=run, inputs=[video] + origin + destination + thresholds, outputs=[video2])
+
+ # Launch the Gradio app
+ demo.queue().launch(show_error=True, share=args.share_gradio, server_name="0.0.0.0", server_port=args.server_port)
refacer.py ADDED
@@ -0,0 +1,262 @@
+ import cv2
+ import onnxruntime as rt
+ import sys
+ from insightface.app import FaceAnalysis
+ sys.path.insert(1, './recognition')
+ from scrfd import SCRFD
+ from arcface_onnx import ArcFaceONNX
+ import os.path as osp
+ import os
+ from pathlib import Path
+ from tqdm import tqdm
+ import ffmpeg
+ import random
+ import multiprocessing as mp
+ from concurrent.futures import ThreadPoolExecutor
+ from insightface.model_zoo.inswapper import INSwapper
+ import psutil
+ from enum import Enum
+ from insightface.app.common import Face
+ from insightface.utils.storage import ensure_available
+ import re
+ import subprocess
+
+ class RefacerMode(Enum):
+     CPU, CUDA, COREML, TENSORRT = range(1, 5)
+
+ class Refacer:
+     def __init__(self,force_cpu=False,colab_performance=False):
+         self.first_face = False
+         self.force_cpu = force_cpu
+         self.colab_performance = colab_performance
+         self.__check_encoders()
+         self.__check_providers()
+         self.total_mem = psutil.virtual_memory().total
+         self.__init_apps()
+
+     def __check_providers(self):
+         if self.force_cpu :
+             self.providers = ['CPUExecutionProvider']
+         else:
+             self.providers = rt.get_available_providers()
+         rt.set_default_logger_severity(4)
+         self.sess_options = rt.SessionOptions()
+         self.sess_options.execution_mode = rt.ExecutionMode.ORT_SEQUENTIAL
+         self.sess_options.graph_optimization_level = rt.GraphOptimizationLevel.ORT_ENABLE_ALL
+
+         if len(self.providers) == 1 and 'CPUExecutionProvider' in self.providers:
+             self.mode = RefacerMode.CPU
+             self.use_num_cpus = mp.cpu_count()-1
+             self.sess_options.intra_op_num_threads = int(self.use_num_cpus/3)
+             print(f"CPU mode with providers {self.providers}")
+         elif self.colab_performance:
+             self.mode = RefacerMode.TENSORRT
+             self.use_num_cpus = mp.cpu_count()-1
+             self.sess_options.intra_op_num_threads = int(self.use_num_cpus/3)
+             print(f"TENSORRT mode with providers {self.providers}")
+         elif 'CoreMLExecutionProvider' in self.providers:
+             self.mode = RefacerMode.COREML
+             self.use_num_cpus = mp.cpu_count()-1
+             self.sess_options.intra_op_num_threads = int(self.use_num_cpus/3)
+             print(f"CoreML mode with providers {self.providers}")
+         elif 'CUDAExecutionProvider' in self.providers:
+             self.mode = RefacerMode.CUDA
+             self.use_num_cpus = 2
+             self.sess_options.intra_op_num_threads = 1
+             if 'TensorrtExecutionProvider' in self.providers:
+                 self.providers.remove('TensorrtExecutionProvider')
+             print(f"CUDA mode with providers {self.providers}")
+         """
+         elif 'TensorrtExecutionProvider' in self.providers:
+             self.mode = RefacerMode.TENSORRT
+             #self.use_num_cpus = 1
+             #self.sess_options.intra_op_num_threads = 1
+             self.use_num_cpus = mp.cpu_count()-1
+             self.sess_options.intra_op_num_threads = int(self.use_num_cpus/3)
+             print(f"TENSORRT mode with providers {self.providers}")
+         """
+
+
+     def __init_apps(self):
+         assets_dir = ensure_available('models', 'buffalo_l', root='~/.insightface')
+
+         model_path = os.path.join(assets_dir, 'det_10g.onnx')
+         sess_face = rt.InferenceSession(model_path, self.sess_options, providers=self.providers)
+         self.face_detector = SCRFD(model_path,sess_face)
+         self.face_detector.prepare(0,input_size=(640, 640))
+
+         model_path = os.path.join(assets_dir , 'w600k_r50.onnx')
+         sess_rec = rt.InferenceSession(model_path, self.sess_options, providers=self.providers)
+         self.rec_app = ArcFaceONNX(model_path,sess_rec)
+         self.rec_app.prepare(0)
+
+         model_path = 'inswapper_128.onnx'
+         sess_swap = rt.InferenceSession(model_path, self.sess_options, providers=self.providers)
+         self.face_swapper = INSwapper(model_path,sess_swap)
+
+     def prepare_faces(self, faces):
+         self.replacement_faces=[]
+         for face in faces:
+             #image1 = cv2.imread(face.origin)
+             if "origin" in face:
+                 face_threshold = face['threshold']
+                 bboxes1, kpss1 = self.face_detector.autodetect(face['origin'], max_num=1)
+                 if len(kpss1)<1:
+                     raise Exception('No face detected on "Face to replace" image')
+                 feat_original = self.rec_app.get(face['origin'], kpss1[0])
+             else:
+                 face_threshold = 0
+                 self.first_face = True
+                 feat_original = None
+                 print('No origin image: First face change')
+             #image2 = cv2.imread(face.destination)
+             _faces = self.__get_faces(face['destination'],max_num=1)
+             if len(_faces)<1:
+                 raise Exception('No face detected on "Destination face" image')
+             self.replacement_faces.append((feat_original,_faces[0],face_threshold))
+
+     def __convert_video(self,video_path,output_video_path):
+         if self.video_has_audio:
+             print("Merging audio with the refaced video...")
+             new_path = output_video_path + str(random.randint(0,999)) + "_c.mp4"
+             #stream = ffmpeg.input(output_video_path)
+             in1 = ffmpeg.input(output_video_path)
+             in2 = ffmpeg.input(video_path)
+             out = ffmpeg.output(in1.video, in2.audio, new_path,video_bitrate=self.ffmpeg_video_bitrate,vcodec=self.ffmpeg_video_encoder)
+             out.run(overwrite_output=True,quiet=True)
+         else:
+             new_path = output_video_path
+             print("The video doesn't have audio, so post-processing is not necessary")
+
+         print(f"The process has finished.\nThe refaced video can be found at {os.path.abspath(new_path)}")
+         return new_path
+
+     def __get_faces(self,frame,max_num=0):
+
+         bboxes, kpss = self.face_detector.detect(frame,max_num=max_num,metric='default')
+
+         if bboxes.shape[0] == 0:
+             return []
+         ret = []
+         for i in range(bboxes.shape[0]):
+             bbox = bboxes[i, 0:4]
+             det_score = bboxes[i, 4]
+             kps = None
+             if kpss is not None:
+                 kps = kpss[i]
+             face = Face(bbox=bbox, kps=kps, det_score=det_score)
+             face.embedding = self.rec_app.get(frame, kps)
+             ret.append(face)
+         return ret
+
+     def process_first_face(self,frame):
+         faces = self.__get_faces(frame,max_num=1)
+         if len(faces) != 0:
+             frame = self.face_swapper.get(frame, faces[0], self.replacement_faces[0][1], paste_back=True)
+         return frame
+
+     def process_faces(self,frame):
+         faces = self.__get_faces(frame,max_num=0)
+         for rep_face in self.replacement_faces:
+             for i in range(len(faces) - 1, -1, -1):
+                 sim = self.rec_app.compute_sim(rep_face[0], faces[i].embedding)
+                 if sim>=rep_face[2]:
+                     frame = self.face_swapper.get(frame, faces[i], rep_face[1], paste_back=True)
+                     del faces[i]
+                     break
+         return frame
+
+     def __check_video_has_audio(self,video_path):
+         self.video_has_audio = False
+         probe = ffmpeg.probe(video_path)
+         audio_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'audio'), None)
+         if audio_stream is not None:
+             self.video_has_audio = True
+
+     def reface_group(self, faces, frames, output):
+         with ThreadPoolExecutor(max_workers = self.use_num_cpus) as executor:
+             if self.first_face:
+                 results = list(tqdm(executor.map(self.process_first_face, frames), total=len(frames),desc="Processing frames"))
+             else:
+                 results = list(tqdm(executor.map(self.process_faces, frames), total=len(frames),desc="Processing frames"))
+             for result in results:
+                 output.write(result)
+
+     def reface(self, video_path, faces):
+         self.__check_video_has_audio(video_path)
+         output_video_path = os.path.join('out',Path(video_path).name)
+         self.prepare_faces(faces)
+
+         cap = cv2.VideoCapture(video_path)
+         total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+         print(f"Total frames: {total_frames}")
+
+         fps = cap.get(cv2.CAP_PROP_FPS)
+         frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+         frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+         fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+         output = cv2.VideoWriter(output_video_path, fourcc, fps, (frame_width, frame_height))
+
+         frames=[]
+         self.k = 1
+         with tqdm(total=total_frames,desc="Extracting frames") as pbar:
+             while cap.isOpened():
+                 flag, frame = cap.read()
+                 if flag and len(frame)>0:
+                     frames.append(frame.copy())
+                     pbar.update()
+                 else:
+                     break
+                 if (len(frames) > 1000):
+                     self.reface_group(faces,frames,output)
+                     frames=[]
+
+         cap.release()
+         pbar.close()
+
+         self.reface_group(faces,frames,output)
+         frames=[]
+         output.release()
+
+         return self.__convert_video(video_path,output_video_path)
+
+     def __try_ffmpeg_encoder(self, vcodec):
+         print(f"Trying FFMPEG {vcodec} encoder")
+         command = ['ffmpeg', '-y', '-f','lavfi','-i','testsrc=duration=1:size=1280x720:rate=30','-vcodec',vcodec,'testsrc.mp4']
+         try:
+             subprocess.run(command, check=True, capture_output=True).stderr
+         except subprocess.CalledProcessError as e:
+             print(f"FFMPEG {vcodec} encoder doesn't work -> Disabled.")
+             return False
+         print(f"FFMPEG {vcodec} encoder works")
+         return True
+
+     def __check_encoders(self):
+         self.ffmpeg_video_encoder='libx264'
+         self.ffmpeg_video_bitrate='0'
+
+         pattern = r"encoders: ([a-zA-Z0-9_]+(?: [a-zA-Z0-9_]+)*)"
+         command = ['ffmpeg', '-codecs', '--list-encoders']
+         commandout = subprocess.run(command, check=True, capture_output=True).stdout
+         result = commandout.decode('utf-8').split('\n')
+         for r in result:
+             if "264" in r:
+                 encoders = re.search(pattern, r).group(1).split(' ')
+                 for v_c in Refacer.VIDEO_CODECS:
+                     for v_k in encoders:
+                         if v_c == v_k:
+                             if self.__try_ffmpeg_encoder(v_k):
+                                 self.ffmpeg_video_encoder=v_k
+                                 self.ffmpeg_video_bitrate=Refacer.VIDEO_CODECS[v_k]
+                                 print(f"Video codec for FFMPEG: {self.ffmpeg_video_encoder}")
+                                 return
+
+     VIDEO_CODECS = {
+         'h264_videotoolbox':'0', #osx HW acceleration
+         'h264_nvenc':'0', #NVIDIA HW acceleration
+         #'h264_qsv', #Intel HW acceleration
+         #'h264_vaapi', #Intel HW acceleration
+         #'h264_omx', #HW acceleration
+         'libx264':'0' #No HW acceleration
+     }
requirements-COREML.txt ADDED
@@ -0,0 +1,12 @@
+ ffmpeg_python==0.2.0
+ gradio==3.33.1
+ insightface==0.7.3
+ numpy==1.24.3
+ onnx==1.14.0
+ onnxruntime-silicon
+ opencv_python==4.7.0.72
+ opencv_python_headless==4.7.0.72
+ scikit-image==0.20.0
+ tqdm
+ psutil
+ ngrok
requirements-GPU.txt ADDED
@@ -0,0 +1,12 @@
+ ffmpeg_python==0.2.0
+ gradio==3.33.1
+ insightface==0.7.3
+ numpy==1.24.3
+ onnx==1.14.0
+ onnxruntime_gpu==1.15.0
+ opencv_python==4.7.0.72
+ opencv_python_headless==4.7.0.72
+ scikit-image==0.20.0
+ tqdm
+ psutil
+ ngrok
requirements.txt ADDED
@@ -0,0 +1,12 @@
+ ffmpeg_python==0.2.0
+ gradio==3.33.1
+ insightface==0.7.3
+ numpy==1.24.3
+ onnx==1.14.0
+ onnxruntime==1.15.0
+ opencv_python==4.7.0.72
+ opencv_python_headless==4.7.0.72
+ scikit-image==0.20.0
+ tqdm
+ psutil
+ ngrok
script.py ADDED
@@ -0,0 +1,41 @@
+ from refacer import Refacer
+ from os.path import exists
+ import argparse
+ import cv2
+
+ parser = argparse.ArgumentParser(description='Refacer')
+ parser.add_argument("--force_cpu", help="Force CPU mode", default=False, action="store_true")
+ parser.add_argument("--colab_performance", help="Use in colab for better performance", default=False,action="store_true")
+ parser.add_argument("--face", help="Face to replace (ex: <src>,<dst>,<thresh=0.2>)", nargs='+', action="append", required=True)
+ parser.add_argument("--video", help="Video to parse", required=True)
+ args = parser.parse_args()
+
+ refacer = Refacer(force_cpu=args.force_cpu,colab_performance=args.colab_performance)
+
+ def run(video_path,faces):
+     video_path_exists = exists(video_path)
+     if video_path_exists == False:
+         print ("Can't find " + video_path)
+         return
+
+     faces_out = []
+     for face in faces:
+         face_str = face[0].split(",")
+         origin = exists(face_str[0])
+         if origin == False:
+             print ("Can't find " + face_str[0])
+             return
+         destination = exists(face_str[1])
+         if destination == False:
+             print ("Can't find " + face_str[1])
+             return
+
+         faces_out.append({
+             'origin':cv2.imread(face_str[0]),
+             'destination':cv2.imread(face_str[1]),
+             'threshold':float(face_str[2])
+         })
+
+     return refacer.reface(video_path,faces_out)
+
+ run(args.video, args.face)