import argparse
import shutil
import av
import os
import cv2
import sys
import time
import multiprocessing
import tkinter as tk
from tkinter import filedialog
from concurrent.futures import ThreadPoolExecutor
from PIL import Image
import numpy as np
from collections import defaultdict
from waifuc.action import MinSizeFilterAction, PersonSplitAction
from waifuc.export import SaveExporter, TextualInversionExporter
from waifuc.source import LocalSource
from tqdm import tqdm
import logging

# Logging configuration
logging.basicConfig(filename='video_image_processing.log', level=logging.INFO,
                    format='%(asctime)s - %(levelname)s - %(message)s')


def select_folder():
    """Open a folder-selection dialog and return the chosen folder path."""
    root = tk.Tk()
    root.withdraw()  # hide the root window
    folder_path = filedialog.askdirectory(title="Select video folder")
    return folder_path


def create_output_folder(folder_path, extra_name):
    """
    Create an output folder named after the original folder plus a suffix.

    Args:
        folder_path (str): Original folder path.
        extra_name (str): Suffix appended to the folder name.

    Returns:
        str: Path of the newly created folder.
    """
    folder_name = os.path.basename(folder_path)
    new_folder_name = f"{folder_name}{extra_name}"
    new_folder_path = os.path.join(folder_path, new_folder_name)
    os.makedirs(new_folder_path, exist_ok=True)
    return new_folder_path


def find_video_files(folder_path):
    """
    Find all video files in the given folder and its subfolders.

    Args:
        folder_path (str): Folder path.

    Returns:
        list: Full paths of all video files found.
    """
    video_extensions = ('.mp4', '.avi', '.mov', '.mkv', '.flv', '.wmv')
    video_files = []
    for root, dirs, files in os.walk(folder_path):
        for file in files:
            if file.lower().endswith(video_extensions):
                video_files.append(os.path.join(root, file))
    return video_files


def process_video(video_file, new_folder_path, frame_step=5):
    """
    Process a video file: extract frames, compute perceptual hashes and
    sharpness, and save the frames that pass the filters.

    Args:
        video_file (str): Video file path.
        new_folder_path (str): Folder where the extracted frames are saved.
        frame_step (int): Process every frame_step-th frame.
    """
    def compute_phash(image):
        # 64-bit perceptual hash: DCT of a 32x32 grayscale thumbnail,
        # thresholded against the median of its top-left 8x8 block.
        resized = cv2.resize(image, (32, 32), interpolation=cv2.INTER_AREA)
        gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
        dct = cv2.dct(np.float32(gray))
        dct_low = dct[:8, :8]
        med = np.median(dct_low)
        return (dct_low > med).flatten()

    def compute_sharpness(image):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # Gradients via the Sobel operator
        grad_x = cv2.Sobel(gray, cv2.CV_16S, 1, 0)
        grad_y = cv2.Sobel(gray, cv2.CV_16S, 0, 1)
        # Mean of the summed absolute gradients
        sharpness = cv2.mean(np.abs(grad_x) + np.abs(grad_y))[0]
        return sharpness

    def save_frame(image, frame_count):
        image_name = f'{os.path.splitext(os.path.basename(video_file))[0]}-{frame_count:08d}.jpg'
        image_path = os.path.join(new_folder_path, image_name)
        cv2.imwrite(image_path, image, [cv2.IMWRITE_JPEG_QUALITY, 90])

    # Open the video file
    container = av.open(video_file)
    video = container.streams.video[0]

    # Try to enable hardware acceleration
    try:
        video.codec_context.options = {'hwaccel': 'auto'}
    except Exception as e:
        print(f"Failed to enable hardware acceleration: {e}")
        logging.warning(f"Failed to enable hardware acceleration: {e}")

    start_time = time.time()
    frame_count = 0
    saved_count = 0
    sharpness_threshold = 15  # sharpness threshold

    reference_image = None
    reference_phash = None
    reference_sharpness = None
    reference_count = 0

    for frame in tqdm(container.decode(video=0), desc=f"Processing video {os.path.basename(video_file)}"):
        if frame_count % frame_step != 0:
            frame_count += 1
            continue  # skip frames outside the sampling step

        image = frame.to_ndarray(format='bgr24')
        phash = compute_phash(image)
        sharpness = compute_sharpness(image)

        if sharpness < sharpness_threshold:
            frame_count += 1
            continue  # skip blurry frames

        if reference_image is None:
            # Initialise the reference frame
            reference_image = image
            reference_phash = phash
            reference_sharpness = sharpness
            reference_count = frame_count
        else:
            hamming_dist = np.sum(phash != reference_phash)
            if hamming_dist > 10:
                # Sufficiently different from the reference frame: save the reference
                save_frame(reference_image, reference_count)
                saved_count += 1
                # Update the reference frame
                reference_image = image
                reference_phash = phash
                reference_sharpness = sharpness
                reference_count = frame_count
            else:
                # Similar to the reference frame: compare sharpness
                if sharpness > reference_sharpness:
                    # Current frame is sharper, make it the new reference
                    reference_image = image
                    reference_phash = phash
                    reference_sharpness = sharpness
                    reference_count = frame_count
                # Otherwise keep the existing reference frame

        frame_count += 1

    # Save the final reference frame
    if reference_image is not None:
        save_frame(reference_image, reference_count)
        saved_count += 1

    total_time = time.time() - start_time
    average_fps = frame_count / total_time if total_time > 0 else 0
    print(f'\n{os.path.basename(video_file)} done: {frame_count} frames total, {saved_count} saved, {average_fps:.2f} fps on average')
    logging.info(f'{os.path.basename(video_file)} done: {frame_count} frames total, {saved_count} saved, {average_fps:.2f} fps on average')


def process_images_folder(new_folder_path):
    """
    Post-process the saved images: remove near-duplicates, keeping only the
    sharpest image of each similarity group.

    Args:
        new_folder_path (str): Image folder path.

    Returns:
        set: Paths of the images that were kept.
    """
    def get_image_files(folder_path):
        image_files = [os.path.join(folder_path, f) for f in os.listdir(folder_path)
                       if f.lower().endswith(('.jpg', '.jpeg', '.png'))]
        print(f'Found {len(image_files)} images in total')
        logging.info(f'Found {len(image_files)} images in total')
        return image_files

    def process_images(image_files):
        def compute_phash(image):
            resized = cv2.resize(image, (32, 32), interpolation=cv2.INTER_AREA)
            gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
            dct = cv2.dct(np.float32(gray))
            dct_low = dct[:8, :8]
            med = np.median(dct_low)
            return (dct_low > med).flatten()

        def compute_sharpness(image):
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            return cv2.Laplacian(gray, cv2.CV_64F).var()

        def process_single_image(image_path):
            image = cv2.imread(image_path)
            if image is None:
                error_message = f"Cannot read image file {image_path}"
                print(f"Warning: {error_message}")
                logging.warning(error_message)
                return None
            try:
                phash = compute_phash(image)
                sharpness = compute_sharpness(image)
                return image_path, phash, sharpness
            except Exception as e:
                error_message = f"Error while processing image {image_path}: {e}"
                print(f"Warning: {error_message}")
                logging.warning(error_message)
                return None

        image_data = {}
        start_time = time.time()
        with ThreadPoolExecutor() as executor:
            futures = [executor.submit(process_single_image, img) for img in image_files]
            for future in tqdm(futures, desc="Computing hashes and sharpness", unit="img"):
                result = future.result()
                if result is not None:
                    image_path, phash, sharpness = result
                    image_data[image_path] = {'phash': phash, 'sharpness': sharpness}
        elapsed_time = time.time() - start_time
        print(f'\nImage analysis finished in {elapsed_time:.2f} s')
        logging.info(f'Image analysis finished in {elapsed_time:.2f} s')
        return image_data

    def compare_images(image_data):
        similar_groups = {}
        hash_buckets = defaultdict(list)
        # Convert each hash to a bit string and use its leading bits as the bucket key
        for image_path, data in image_data.items():
            hash_str = ''.join(data['phash'].astype(int).astype(str))
            bucket_key = hash_str[:16]  # first 16 bits as the bucket key; adjust as needed
            hash_buckets[bucket_key].append((image_path, data))
        total_buckets = len(hash_buckets)
        print(f"Split into {total_buckets} hash buckets")
        logging.info(f"Split into {total_buckets} hash buckets")
        # Compare the images within each bucket
        for bucket_key, bucket in tqdm(hash_buckets.items(), desc="Comparing hash buckets", unit="bucket"):
            paths = [item[0] for item in bucket]
            hashes = np.array([item[1]['phash'] for item in bucket])
            for i in range(len(paths)):
                for j in range(i + 1, len(paths)):
                    dist = np.sum(hashes[i] != hashes[j])
                    if dist <= 10:  # Hamming-distance threshold; adjust as needed
                        similar_groups.setdefault(paths[i], []).append(paths[j])
        return similar_groups

    def select_images_to_keep(similar_groups, image_data):
        to_keep = set()
        processed_groups = set()
        for group_key, group in similar_groups.items():
            if group_key in processed_groups:
                continue
            group_with_key = [group_key] + group
            sharpest = max(group_with_key, key=lambda x: image_data[x]['sharpness'])
            to_keep.add(sharpest)
            processed_groups.update(group_with_key)
        # Also keep images that are not part of any similarity group
        all_images = set(image_data.keys())
        images_in_groups = set().union(*[set([k] + v) for k, v in similar_groups.items()])
        images_not_in_groups = all_images - images_in_groups
        to_keep.update(images_not_in_groups)
        return to_keep

    def delete_duplicate_images(similar_groups, to_keep):
        deleted_count = 0
        to_delete = set()
        # Collect every image that has to be deleted
        for group_key, similar_images in similar_groups.items():
            group_with_key = [group_key] + similar_images
            for image_path in group_with_key:
                if image_path not in to_keep:
                    to_delete.add(image_path)
        total_to_delete = len(to_delete)
        # Delete them
        for image_path in tqdm(to_delete, desc="Deleting duplicate images", unit="img"):
            try:
                os.remove(image_path)
                deleted_count += 1
            except Exception as e:
                print(f"\nFailed to delete {image_path}: {e}")
                logging.error(f"Failed to delete {image_path}: {e}")
        print(f'\nDeduplication finished: kept {len(to_keep)} images, deleted {deleted_count} duplicates')
        logging.info(f'Deduplication finished: kept {len(to_keep)} images, deleted {deleted_count} duplicates')
        return deleted_count

    # Run the deduplication pipeline
    image_files = get_image_files(new_folder_path)
    image_data = process_images(image_files)
    similar_groups = compare_images(image_data)
    to_keep = select_images_to_keep(similar_groups, image_data)
    deleted_count = delete_duplicate_images(similar_groups, to_keep)
    return to_keep


def waifuc_split(new_folder_path, split_path):
    """
    Use the waifuc library to split images and extract the person crops.

    Args:
        new_folder_path (str): Source image folder.
        split_path (str): Folder where the split images are saved.
    """
    # Initialise LocalSource directly from the directory path
    s = LocalSource(new_folder_path)
    s = s.attach(
        PersonSplitAction(),
        MinSizeFilterAction(300),
    )
    s.export(SaveExporter(split_path, no_meta=True))


def process_split_images(new_folder_path, split_path):
    """
    Move original images in which no person was detected into a separate
    no-person folder.

    Args:
        new_folder_path (str): Original image folder.
        split_path (str): Folder containing the split images.
    """
    nohuman_path = create_output_folder(new_folder_path, "-nohuman")

    # List the deduplicated original images
    original_images = [f for f in os.listdir(new_folder_path)
                       if os.path.isfile(os.path.join(new_folder_path, f))
                       and f.lower().endswith(('.jpg', '.jpeg', '.png', '.webp'))]
    split_images = [f for f in os.listdir(split_path)
                    if f.lower().endswith(('.jpg', '.jpeg', '.png', '.webp'))]
    total_images = len(original_images)
    moved_count = 0
    for original_image in tqdm(original_images, desc="Moving person-free images", unit="img"):
        base_name = os.path.splitext(original_image)[0]
        has_person = any(split_image.startswith(base_name + '_person') for split_image in split_images)
        if not has_person:
            source_path = os.path.join(new_folder_path, original_image)
            dest_path = os.path.join(nohuman_path, original_image)
            try:
                shutil.move(source_path, dest_path)
                moved_count += 1
            except Exception as e:
                print(f"\nFailed to move {source_path}: {e}")
                logging.error(f"Failed to move {source_path}: {e}")
    print(f'\nDone. Processed {total_images} images, moved {moved_count} without a person to {nohuman_path}')
    logging.info(f'Done. Processed {total_images} images, moved {moved_count} without a person to {nohuman_path}')


def main():
    """Main entry point: run the whole pipeline."""
    folder_path = select_folder()
    if not folder_path:
        print("No folder selected, exiting.")
        logging.error("No folder selected, exiting.")
        return

    video_files = find_video_files(folder_path)
    if not video_files:
        print("No video files found in the selected folder, exiting.")
        logging.error("No video files found in the selected folder, exiting.")
        return

    # Create the folder that will hold the extracted frames
    new_folder_path = create_output_folder(folder_path, "-Eng_SS")

    # Process each video file
    for video_file in video_files:
        print(f"Processing video file: {video_file}")
        logging.info(f"Processing video file: {video_file}")
        process_video(video_file, new_folder_path, frame_step=5)  # frame sampling step

    # Remove near-duplicate images (first pass)
    process_images_folder(new_folder_path)

    # Remove near-duplicate images (second pass)
    process_images_folder(new_folder_path)

    # Create the folder that will hold the split images
    split_path = create_output_folder(new_folder_path, "-split")

    # Split out persons with the waifuc library
    waifuc_split(new_folder_path, split_path)

    # Move images without a detected person into the no-person folder
    process_split_images(new_folder_path, split_path)


if __name__ == "__main__":
    main()