import os
import io
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from PIL import Image
from math import tau
from concurrent.futures import ThreadPoolExecutor
import gradio as gr


def fourier_transform_drawing(input_image, frames, coefficients, img_size, blur_kernel_size, desired_range, num_points, theta_points):
    """Trace the contours of input_image with rotating epicycles and return the path to the rendered MP4."""
    # Convert the PIL image to an OpenCV (BGR) array
    img = cv2.cvtColor(np.array(input_image), cv2.COLOR_RGB2BGR)

    # Resize the image for faster processing
    img = cv2.resize(img, (img_size, img_size), interpolation=cv2.INTER_AREA)

    # Image processing: grayscale, blur, Otsu threshold, then contour extraction
    imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(imgray, (blur_kernel_size, blur_kernel_size), 0)
    _, thresh = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
    contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    # Alternative: keep only the contour with the largest area
    # largest_contour_idx = np.argmax([cv2.contourArea(c) for c in contours])
    # largest_contour = contours[largest_contour_idx]

    # Combine all contours into a single point sequence
    def combine_all_contours(contours):
        combined_contour = np.array([], dtype=np.int32).reshape(0, 1, 2)
        for contour in contours:
            combined_contour = np.vstack((combined_contour, contour))
        return combined_contour

    combined_contour = combine_all_contours(contours)
    verts = [tuple(coord) for coord in combined_contour.squeeze()]
    xs, ys = np.asarray(list(zip(*verts)))

    # Centre the coordinates and scale them to the desired range
    # (y is negated because image row indices increase downwards)
    x_range, y_range = np.max(xs) - np.min(xs), np.max(ys) - np.min(ys)
    scale_x, scale_y = desired_range / x_range, desired_range / y_range
    xs = (xs - np.mean(xs)) * scale_x
    ys = (-ys + np.mean(ys)) * scale_y

    # Resample the contour as a complex signal f(t) = x(t) + i*y(t) over [0, tau]
    t_list = np.linspace(0, tau, len(xs))
    t_values = np.linspace(0, tau, num_points)
    f_precomputed = np.interp(t_values, t_list, xs + 1j * ys)

    def compute_cn(f_exp, n, t_values):
        coef = np.trapz(f_exp * np.exp(-n * t_values * 1j), t_values) / tau
        return coef
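
    # compute_cn above approximates the complex Fourier series coefficient
    #     c_n = (1 / tau) * integral over [0, tau] of f(t) * exp(-i * n * t) dt
    # by trapezoidal integration of the resampled contour f(t) = x(t) + i*y(t);
    # each c_n sets the radius and starting phase of one rotating circle below.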

    N = coefficients
    indices = [0] + [j for i in range(1, N + 1) for j in (i, -i)]

    # Parallel computation of coefficients
    with ThreadPoolExecutor(max_workers=8) as executor:
        coefs = list(executor.map(lambda n: (compute_cn(f_precomputed, n, t_values), n), indices))

    # Animation setup
    fig, ax = plt.subplots()
    circles = [ax.plot([], [], 'b-')[0] for _ in range(-N, N + 1)]
    circle_lines = [ax.plot([], [], 'g-')[0] for _ in range(-N, N + 1)]
    drawing, = ax.plot([], [], 'r-', linewidth=2)
    ax.set_xlim(-desired_range, desired_range)
    ax.set_ylim(-desired_range, desired_range)
    ax.set_axis_off()
    ax.set_aspect('equal')
    fig.set_size_inches(15, 15)

    draw_x, draw_y = [], []
    theta = np.linspace(0, tau, theta_points)
    coefs_static = [(np.linalg.norm(c), fr) for c, fr in coefs]

    # Animation function: chain one rotating vector (epicycle) per coefficient
    # and trace the path swept out by the tip of the final one
    def animate(i, coefs, time):
        center = (0, 0)
        for idx, (r, fr) in enumerate(coefs_static):
            c_dynamic = coefs[idx][0] * np.exp(1j * (fr * tau * time[i]))
            x, y = center[0] + r * np.cos(theta), center[1] + r * np.sin(theta)
            circle_lines[idx].set_data([center[0], center[0] + np.real(c_dynamic)], [center[1], center[1] + np.imag(c_dynamic)])
            circles[idx].set_data(x, y)
            center = (center[0] + np.real(c_dynamic), center[1] + np.imag(c_dynamic))
        draw_x.append(center[0])
        draw_y.append(center[1])
        drawing.set_data(draw_x[:i + 1], draw_y[:i + 1])

    # Create and save the animation (writing .mp4 requires ffmpeg to be available)
    anim = animation.FuncAnimation(fig, animate, frames=frames, interval=5, fargs=(coefs, np.linspace(0, 1, num=frames)))
    output_animation = "output.mp4"
    anim.save(output_animation, fps=15)
    plt.close(fig)
    return output_animation
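
# Optional quick check (a sketch): call the function directly with a local image;
# "example.jpg" is a placeholder path, not a file that ships with this app.
# video_path = fourier_transform_drawing(
#     Image.open("example.jpg"),
#     frames=100, coefficients=50, img_size=224, blur_kernel_size=5,
#     desired_range=400, num_points=1000, theta_points=80,
# )
# print(video_path)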

# Gradio interface setup
interface = gr.Interface(
    fn=fourier_transform_drawing,
    inputs=[
        gr.Image(label="Input Image", sources=['upload'], type="pil"),
        gr.Slider(minimum=5, maximum=500, value=100, label="Number of Frames"),
        gr.Slider(minimum=1, maximum=500, value=50, label="Number of Coefficients"),
        gr.Number(value=224, label="Image Size (px)", precision=0),
        gr.Slider(minimum=3, maximum=11, step=2, value=5, label="Blur Kernel Size (odd number)"),
        gr.Number(value=400, label="Desired Range for Scaling", precision=0),
        gr.Number(value=1000, label="Number of Points for Integration", precision=0),
        gr.Slider(minimum=50, maximum=500, value=80, label="Theta Points for Animation")
    ],
    outputs=gr.Video(),
    title="Fourier Transform Drawing",
    description="Upload an image and generate a Fourier Transform drawing animation.",
    examples=[["Fourier2.jpg", 100, 200, 224, 5, 400, 1000, 80], ["Luffy.png", 100, 100, 224, 5, 400, 1000, 80]]
)

if __name__ == "__main__":
    interface.launch()