"""Gradio app: frame-by-frame deepfake detection on an uploaded animated image.

NOTE(review): Pillow can only decode image formats (GIF, APNG, WebP, ...).
Real video containers (MP4/AVI/MKV) are NOT supported by this pipeline;
those inputs are rejected with a user-facing message rather than a crash.
"""

import io
import pickle
from pathlib import Path

import numpy as np
from PIL import Image, UnidentifiedImageError
import gradio as gr
from huggingface_hub import hf_hub_download

# Download the model artifact from the Hugging Face Hub (cached locally).
model_path = hf_hub_download("dima806/deepfake_vs_real_image_detection", "model.pkl")

# SECURITY: pickle.load executes arbitrary code embedded in the file.
# Only acceptable here because the artifact comes from a pinned, trusted
# repo — do NOT reuse this pattern for untrusted uploads.
with open(model_path, "rb") as f:
    model = pickle.load(f)


def process_frame(frame):
    """Run the detector on a single RGB frame.

    Args:
        frame: a PIL image (already converted to RGB by the caller).

    Returns:
        Whatever ``model.predict`` yields for this frame.
        NOTE(review): assumes the model accepts a raw HxWx3 uint8 array —
        confirm whether resizing/normalization is required by the checkpoint.
    """
    return model.predict(np.array(frame))


def process_video(video_file):
    """Classify an uploaded file frame by frame and aggregate the scores.

    Args:
        video_file: the value delivered by ``gr.File`` — either a file-like
            object exposing ``.read()`` or a filesystem path, depending on
            the Gradio version/configuration. Both are handled.

    Returns:
        A human-readable result string for the ``gr.Text`` output.
    """
    # Accept both file-like objects and plain paths from gr.File.
    if hasattr(video_file, "read"):
        data = video_file.read()
    else:
        data = Path(video_file).read_bytes()

    try:
        with Image.open(io.BytesIO(data)) as img:
            if getattr(img, "n_frames", 1) > 1:
                # Animated image (e.g. GIF): score every frame.
                results = []
                for frame_index in range(img.n_frames):
                    img.seek(frame_index)
                    results.append(process_frame(img.convert("RGB")))
            else:
                # Single still image: score it once.
                results = [process_frame(img.convert("RGB"))]
    except UnidentifiedImageError:
        # Pillow cannot decode true video containers (MP4 etc.) — report
        # cleanly instead of surfacing a traceback in the UI.
        return "Unsupported file format: please upload a GIF or image file."

    return analyze_results(results)


def analyze_results(results):
    """Aggregate per-frame predictions into one user-facing string.

    Placeholder aggregation: the mean of all frame scores, formatted for
    the gr.Text output component.
    """
    return f"Mean deepfake score across {len(results)} frame(s): {np.mean(results):.4f}"


iface = gr.Interface(
    fn=process_video,
    inputs=gr.File(label="Upload Video"),
    outputs=gr.Text(label="Deepfake Detection Result"),
)

# Guard the server start so importing this module has no side effect
# beyond model loading.
if __name__ == "__main__":
    iface.launch()