import itertools
import os
from pathlib import Path
import colorsys

import numpy as np
import torch
import torchvision
from einops import rearrange
from torch.utils.data import Dataset
from PIL import Image as PIL_Image
from PIL import ImageDraw
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation

torchvision.disable_beta_transforms_warning()
import torchvision.transforms.v2 as transforms
from torchvision.tv_tensors import (BoundingBoxes, BoundingBoxFormat, Image, Mask)

from flow_viz import flow_to_image

categories = ["Action Figures", "Bag", "Board Games", "Bottles and Cans and Cups", "Camera",
              "Car Seat", "Consumer Goods", "Hat", "Headphones", "Keyboard", "Legos",
              "Media Cases", "Mouse", "None", "Shoe", "Stuffed Toys", "Toys"]


def save_tensor_dict(tensor_dict: dict, path: Path):
    """Save a dict of arrays to a compressed .npz archive."""
    output_dict = {}
    for k, v in tensor_dict.items():
        output_dict[k] = v
    np.savez_compressed(path, **output_dict)


def get_n_distinct_colors(n):
    """Yield n visually distinct RGB colors by sampling evenly spaced hues."""
    def HSVToRGB(h, s, v):
        (r, g, b) = colorsys.hsv_to_rgb(h, s, v)
        return (int(255 * r), int(255 * g), int(255 * b))

    huePartition = 1.0 / (n + 1)
    return (HSVToRGB(huePartition * value, 1.0, 1.0) for value in range(0, n))


def get_layered_image_from_binary_mask(masks, flip=False):
    """Render a stack of binary masks (H, W, N) as a single color-coded PIL image."""
    if torch.is_tensor(masks):
        masks = masks.float().cpu().detach().numpy()
    if flip:
        masks = np.flipud(masks)

    masks = masks.astype(np.bool_)
    colors = np.asarray(list(get_n_distinct_colors(masks.shape[2])))
    img = np.zeros((*masks.shape[:2], 3))
    for i in range(masks.shape[2]):
        img[masks[..., i]] = colors[i]
    return PIL_Image.fromarray(img.astype(np.uint8))


def get_max_neighboring_depth(depth, coords):
    """Return, per point, the maximum rendered depth among its four integer pixel neighbors.

    depth: (S, H, W, 1) rendered depth maps; coords: (S, num_pts, 2) sub-pixel (x, y) coordinates.
    """
    S, H, W, _ = depth.shape
    num_pts = coords.shape[1]
    f_idx = np.tile(np.arange(S)[:, np.newaxis], (1, num_pts))
    x = coords[:, :, 0]
    y = coords[:, :, 1]
    x0 = x.astype(int)
    x1 = x0 + 1
    y0 = y.astype(int)
    y1 = y0 + 1
    x0 = np.clip(x0, 0, W - 1)
    x1 = np.clip(x1, 0, W - 1)
    y0 = np.clip(y0, 0, H - 1)
    y1 = np.clip(y1, 0, H - 1)
    sampled_depth = np.max(np.concatenate([depth[f_idx, y0, x0], depth[f_idx, y0, x1],
                                           depth[f_idx, y1, x0], depth[f_idx, y1, x1]], axis=-1),
                           axis=-1)
    return sampled_depth


class StandaloneMoviDataset(Dataset):
    """Loads pre-rendered Kubric/MOVi videos stored as one data.npz file per video."""

    def __init__(self, root: Path, dataset: str, split='train', num_frames=24, augment=False,
                 num_dataset_frames=24, resolution=(512, 512), normalize_img: bool = True, **kwargs):
        super(StandaloneMoviDataset, self).__init__()
        self.root = Path(root)  # Path to the dataset containing folders of "movi_a", "movi_e", etc.
        self.dataset = dataset  # str of dataset name (e.g. "movi_a")
        self.split = split      # str of split name (e.g. "train", "validation")
        self.resolution = tuple(resolution)
        self.root_dir = self.root / self.dataset / split
        self.files = os.listdir(self.root_dir)
        self.files.sort()
        print(self.files)
        self.num_dataset_frames = num_dataset_frames
        self.num_frames = num_frames
        self.augment = augment
        self.normalize_img = normalize_img
        if self.augment:
            self.transform = transforms.Compose(
                [transforms.RandomResizedCrop(self.resolution, scale=(0.5, 1.0), antialias=True)])
        else:
            self.transform = transforms.Compose(
                [transforms.Resize(self.resolution, antialias=True)])

    def __getitem__(self, index):
        video_idx = index
        camera_idx = 0
        clip_len = 10
        start = 6
        stride = 1
        path = self.files[video_idx]
        data = np.load(self.root_dir / path / "data.npz")

        # Per-frame quantities: keep clip_len frames starting at `start`.
        rgb = data["rgb"][camera_idx][start::stride][:clip_len]
        instance = data["segment"][camera_idx][start::stride][:clip_len]
        depth = data["depth"][camera_idx][start::stride][:clip_len]
        f_flow = data["forward_flow"][camera_idx][start::stride][:clip_len]
        object_coordinates = data["object_coordinates"][camera_idx][start::stride][:clip_len]
        quaternions = data["quaternions"][camera_idx][start::stride][:clip_len]  # (23, 4)
        positions = data["positions"][camera_idx][start::stride][:clip_len]  # (23, 3)
        valid = data["valid"][camera_idx, :].squeeze(0)  # (23,)
        categories = data["categories"][camera_idx, :].squeeze(0)  # (23,)
        bboxes_3d = data['bboxes_3d'][camera_idx][valid][:, start::stride][:, :clip_len]
        intrinsics = data['intrinsics'][camera_idx][start::stride][:clip_len]
        matrix_world = data['matrix_world'][camera_idx][start::stride][:clip_len]
        cam_pos = data['cam_positions'][camera_idx][start::stride][:clip_len]

        # Resize RGB and instance masks together with torchvision v2 transforms.
        rgb = rearrange(rgb, '... h w c -> ... c h w')
        instance = Mask(instance.squeeze(-1))
        rgb, instance = self.transform(Image(rgb), instance)
        rgb = rearrange(rgb, '... c h w -> ... h w c')
        rgb = rgb.numpy()

        # One-hot instance ids; channel 0 is the background.
        instance = torch.nn.functional.one_hot(instance.long(), num_classes=21).numpy()
        mask_valid = instance.sum((1, 2)) > 0
        num_objects = (mask_valid.sum(0) > 0).sum()

        ret = {
            "vid_name": path,
            "image": rgb,
            "depth": depth,
            "flow": f_flow,
            'segmentation': instance,
            'object_coordinates': object_coordinates,
            "mask_valid": mask_valid,
            "num_objs": num_objects,
            'categories': categories,
            'valid': valid,
            'bboxes_3d': bboxes_3d,
            'intrinsics': intrinsics,
            'matrix_world': matrix_world,
            'cam_positions': cam_pos,
        }
        return ret

    def __len__(self):
        return len(self.files)


def project_point(cam_matrix_world, cam_intrinsics, point3d, image_size):
    """Project 3D world points into pixel coordinates.

    Args:
      cam_matrix_world: Camera-to-world matrices from kubric, shape [num_frames, 4, 4].
      cam_intrinsics: Normalized camera intrinsics from kubric, shape [num_frames, 3, 3],
        which yield image coordinates in [0, 1] before scaling.
      point3d: Points in 3D world coordinates, shape [num_frames, num_points, 3].
      image_size: (height, width) used to scale the normalized coordinates to pixels.

    Returns:
      Pixel coordinates of shape [num_frames, num_points, 2], ordered (x, y).
    """
    homo_transform = np.linalg.inv(cam_matrix_world)
    homo_intrinsics = np.zeros((cam_intrinsics.shape[0], 3, 1), dtype=np.float32)
    homo_intrinsics = np.concatenate([cam_intrinsics, homo_intrinsics], axis=2)

    point4d = np.concatenate([point3d, np.ones_like(point3d[:, :, 0:1])], axis=2)
    projected = point4d @ np.transpose(homo_transform, (0, 2, 1))
    projected = projected @ np.transpose(homo_intrinsics, (0, 2, 1))
    image_coords = projected / projected[:, :, 2:3]
    # image_coords = np.concatenate(
    #     [image_coords[:, :, :2],
    #      np.sign(projected[:, :, 2:])], axis=2)
    image_coords = image_coords[:, :, 0:2] * np.array(image_size[::-1])[np.newaxis, np.newaxis, :]
    return image_coords


def get_trajs(data):
    """Lift first-frame object pixels to 3D world trajectories and 2D pixel tracks.

    For every foreground object, the first-frame object-coordinate image is mapped into the
    object's local box frame, transformed to world coordinates with the per-frame 3D bounding
    boxes, projected into the image, and flagged as occluded wherever the rendered depth is
    noticeably smaller than the point's distance to the camera.
    """
    shp = data['image'].shape
    num_frames = shp[0]
    vid_pix_pts = []
    vid_3d_pts = []
    vid_rgb_pts = []
    vid_2d_pts = []
    occ_pts = []
    # x inward, y left, z up (kubric)
    # x right, y up, z out (gaussian)
    frame_idx = 0
    bboxes_3d = data['bboxes_3d']
    obj_coords = data['object_coordinates'][frame_idx]
    # Pixel coordinate grid for looking up the first-frame pixel of each tracked point.
    grid_x, grid_y = np.meshgrid(np.arange(0, shp[2]), np.arange(0, shp[1]), indexing='ij')
    pix_coords = np.stack([grid_x, grid_y], axis=-1)

    for obj_idx in range(bboxes_3d.shape[0]):
        obj_mask = (data['segmentation'][frame_idx][:, :, obj_idx + 1])
        bbox_3d = bboxes_3d[obj_idx]

        # Canonical box corners in the object's local frame, in homogeneous coordinates.
        coord_box = list(itertools.product([-.5, .5], [-.5, .5], [-.5, .5]))
        coord_box = np.array([np.array(x) for x in coord_box])
        coord_box = np.concatenate([coord_box, np.ones_like(coord_box[:, 0:1])], axis=1)
        coord_box = np.tile(coord_box[np.newaxis, ...], [num_frames, 1, 1])
        bbox_homo = np.concatenate([bbox_3d, np.ones_like(bbox_3d[:, :, 0:1])], axis=2)

        # Per-frame local-to-world transform, fitted by least squares on the 8 box corners.
        local_to_world = np.stack([
            np.linalg.lstsq(coord_box.astype(np.float32)[i], bbox_homo[i], rcond=None)[0]
            for i in range(num_frames)])

        # Object-coordinate image is uint16-encoded in [0, 1]; recenter to [-0.5, 0.5].
        obj_3d_coords = obj_coords[obj_mask.astype(bool)]
        obj_3d_coords = obj_3d_coords / np.iinfo(np.uint16).max - .5
        obj_homo_coords = np.concatenate(
            [obj_3d_coords, np.ones_like(obj_3d_coords[:, 0:1])], axis=1).astype(np.float32)
        obj_3d_world_coords = np.stack([obj_homo_coords @ local_to_world[i] for i in range(num_frames)])
        obj_3d_world_coords = obj_3d_world_coords[:, :, 0:3] / obj_3d_world_coords[:, :, 3:]

        # Euclidean distance from the camera, used as the point's own depth.
        proj_depth = np.sqrt(np.sum(
            np.square(obj_3d_world_coords - data['cam_positions'][:, np.newaxis, :]), axis=2))

        pt_rgb = np.tile(data['image'][0][obj_mask.astype(bool)], [num_frames, 1, 1])
        pix_pts = pix_coords[obj_mask.astype(bool)]
        obj_2d_pix_coords = project_point(data['matrix_world'], data['intrinsics'],
                                          obj_3d_world_coords, (shp[1], shp[2]))

        # A point is occluded when the rendered depth at its pixel is clearly in front of it.
        pix_depth = get_max_neighboring_depth(data['depth'], obj_2d_pix_coords)
        occluded = pix_depth < 0.99 * proj_depth

        vid_3d_pts.append(obj_3d_world_coords)
        vid_rgb_pts.append(pt_rgb)
        vid_2d_pts.append(obj_2d_pix_coords)
        occ_pts.append(occluded)
        vid_pix_pts.append(pix_pts)

    vid_3d_pts = np.concatenate(vid_3d_pts, axis=1)
    vid_rgb_pts = np.concatenate(vid_rgb_pts, axis=1)
    vid_2d_pts = np.concatenate(vid_2d_pts, axis=1)
    occ_pts = np.concatenate(occ_pts, axis=1)
    vid_pix_pts = np.concatenate(vid_pix_pts, axis=0)
    return vid_3d_pts, vid_rgb_pts, vid_2d_pts, occ_pts, vid_pix_pts
def vis_data(images, vid_3d_pts, vid_rgb_pts, vid_2d_pts, occ_pts, shp, vis_out_path, v_idx=0):
    """Save GIF visualizations: an animated 3D point cloud and 2D trajectories overlaid on RGB."""
    num_frames = shp[0]

    # Animated 3D scatter of the tracked points, colored by their first-frame RGB.
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.set_xlim(-7, 7)
    ax.set_ylim(-7, 7)
    ax.set_zlim(-1, 3)

    def update(t):
        ax.cla()
        ax.scatter(vid_3d_pts[t][:, 0], vid_3d_pts[t][:, 1], vid_3d_pts[t][:, 2],
                   c=vid_rgb_pts[t] / 255.0, s=0.25)
        ax.set_xlim(-7, 7)
        ax.set_ylim(-7, 7)
        ax.set_zlim(-1, 3)

    ani = FuncAnimation(fig=fig, func=update, frames=num_frames, interval=1000)
    writervideo = matplotlib.animation.PillowWriter(fps=5)
    ani.save(os.path.join(vis_out_path, f'orig_{v_idx:03d}.gif'), writer=writervideo)

    # Overlay a random subset of 2D trajectories; occluded segments are drawn dashed.
    num_pts = vid_2d_pts.shape[1]
    vis_pt_count = 75
    rand_pt_idxes = np.random.randint(0, num_pts, vis_pt_count)
    rand_colors = np.random.randint(0, 256, (vis_pt_count, 3))
    trajs = []
    for t in range(num_frames):
        # PIL expects (width, height); frames here are square, so the (H, W) ordering is safe.
        traj_vis = PIL_Image.new('RGBA', (shp[1], shp[2]))
        draw = ImageDraw.Draw(traj_vis)
        t_idx = t
        prev_t_idx = t - 1 if t > 0 else 0
        for pt, prev_pt, occ, color in zip(vid_2d_pts[t_idx][rand_pt_idxes],
                                           vid_2d_pts[prev_t_idx][rand_pt_idxes],
                                           occ_pts[t_idx][rand_pt_idxes], rand_colors):
            if occ:
                # Dashed stroke: split the segment into short dashes with gaps in between.
                segments = 5
                delta_x = (pt[0] - prev_pt[0]) / (2 * segments - 1)
                delta_y = (pt[1] - prev_pt[1]) / (2 * segments - 1)
                for i in range(segments):
                    draw.line((prev_pt[0] + delta_x * 2 * i, prev_pt[1] + delta_y * 2 * i,
                               prev_pt[0] + delta_x * (2 * i + 1), prev_pt[1] + delta_y * (2 * i + 1)),
                              tuple(color), width=3)
            else:
                draw.line((pt[0], pt[1], prev_pt[0], prev_pt[1]), tuple(color), width=3)
        if len(trajs) > 0:
            # Accumulate strokes so each frame shows the full trajectory drawn so far.
            traj_vis.paste(trajs[-1], mask=trajs[-1])
        trajs.append(traj_vis)

    traj_image_list = []
    for t in range(num_frames):
        traj_out = PIL_Image.fromarray(np.uint8(images[t])).convert("RGBA")
        traj_out.paste(trajs[t], mask=trajs[t])
        traj_image_list.append(traj_out)
    traj_image_list[0].save(os.path.join(vis_out_path, f"2d_{v_idx:03d}.gif"), save_all=True,
                            append_images=[traj_image_list[i] for i in range(1, num_frames)],
                            duration=200, loop=0)
    PIL_Image.fromarray(images[0]).save(os.path.join(vis_out_path, f"kubric_{v_idx:03d}.gif"),
                                        save_all=True,
                                        append_images=[PIL_Image.fromarray(images[i])
                                                       for i in range(1, num_frames)],
                                        duration=200, loop=0)
    plt.close('all')
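
# Minimal usage sketch for StandaloneMoviDataset (illustrative; the root path below is
# hypothetical, and the real configuration lives in the __main__ block that follows):
#
#   ds = StandaloneMoviDataset(root='/path/to/gs_kubric', dataset='for_splatting2',
#                              split='subset_50', augment=False)
#   entry = ds[0]
#   # entry is a dict with keys 'vid_name', 'image', 'depth', 'flow', 'segmentation',
#   # 'object_coordinates', 'mask_valid', 'num_objs', 'categories', 'valid',
#   # 'bboxes_3d', 'intrinsics', 'matrix_world', 'cam_positions'.
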
if __name__ == "__main__":
    ROOT_PATH = '/projects/katefgroup/datasets/gs_kubric'
    dataset = StandaloneMoviDataset(root=ROOT_PATH, dataset='for_splatting2',
                                    split='subset_50', augment=False, num_frames=24)
    OUT_PATH = os.path.join(ROOT_PATH, 'InpaintingFormat_Set50')
    VIS_OUT_PATH = os.path.join(ROOT_PATH, 'vis')
    os.makedirs(VIS_OUT_PATH, exist_ok=True)

    # Monocular depth model used only to order instances by depth for occluder reasoning.
    midas = torch.hub.load("intel-isl/MiDaS", "DPT_Hybrid")
    midas_transforms = torch.hub.load("intel-isl/MiDaS", "transforms")
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    midas.to(device)
    midas.eval()
    midas_transform = midas_transforms.dpt_transform

    for vid_idx in range(len(dataset)):
        entry = dataset[vid_idx]
        num_frames = entry['image'].shape[0]

        # Point trajectories (3D, 2D, occlusion flags) for this video.
        vid_3d_pts, vid_rgb_pts, vid_2d_pts, occ_pts, vid_pix_pts = get_trajs(entry)
        trajs_dict = {
            '3d_traj': vid_3d_pts,
            '3d_rgb': vid_rgb_pts,
            '2d_traj': vid_2d_pts,
            'occluded': occ_pts,
            'pix_pts': vid_pix_pts,
        }
        save_tensor_dict(trajs_dict,
                         os.path.join(dataset.root_dir, dataset.files[vid_idx], 'traj_data.npz'))
        vis_data(entry['image'], vid_3d_pts, vid_rgb_pts, vid_2d_pts, occ_pts,
                 entry['image'].shape, VIS_OUT_PATH, vid_idx)

        os.makedirs(os.path.join(OUT_PATH, "JPEGImages", entry['vid_name']), exist_ok=True)
        os.makedirs(os.path.join(OUT_PATH, "DepthImages", entry['vid_name']), exist_ok=True)
        os.makedirs(os.path.join(OUT_PATH, "OpticalFlow", entry['vid_name']), exist_ok=True)
        os.makedirs(os.path.join(OUT_PATH, "OpticalFlowVis", entry['vid_name']), exist_ok=True)

        vid_occluder_masks = []
        for t, frame in enumerate(entry['image']):
            # Export RGB, ground-truth depth, and forward flow for this frame.
            img_save_path = os.path.join(OUT_PATH, "JPEGImages", entry['vid_name'], f'{t:05d}.png')
            PIL_Image.fromarray(frame).save(img_save_path)

            depth = entry['depth'][t]
            depth_save_path = os.path.join(OUT_PATH, "DepthImages", entry['vid_name'], f'{t:05d}.npy')
            np.save(depth_save_path, depth)

            flow = entry['flow'][t]
            flow_save_path = os.path.join(OUT_PATH, "OpticalFlow", entry['vid_name'], f'{t:05d}.npy')
            np.save(flow_save_path, flow)
            flow_vis = flow_to_image(flow)
            flow_vis_save_path = os.path.join(OUT_PATH, "OpticalFlowVis", entry['vid_name'], f'{t:05d}.png')
            PIL_Image.fromarray(flow_vis).save(flow_vis_save_path)

            # MiDaS relative (inverse) depth at frame resolution; larger values mean closer.
            with torch.no_grad():
                RESOLUTION = frame.shape[1]
                prediction = midas(midas_transform(np.array(frame).astype(np.uint8))[0].to(device).unsqueeze(0))
                prediction = torch.nn.functional.interpolate(
                    prediction.unsqueeze(1),
                    size=(RESOLUTION, RESOLUTION),
                    mode="bicubic",
                    align_corners=False,
                ).squeeze()
            prediction = prediction.reshape(RESOLUTION, RESOLUTION).cpu().numpy()

            # Per-instance mean MiDaS depth, used to decide which instance is in front of which.
            binary_masks = entry['segmentation'][t].transpose(2, 0, 1)[:entry['num_objs']]
            inst_depth = np.sum(prediction[np.newaxis] * binary_masks, (-2, -1)) / \
                (np.sum(binary_masks, (-2, -1)) + 1e-6)

            # For each instance, collect masks of instances that may occlude it: nearer instances
            # whose padded 2D bounding boxes overlap its own. Everything can occlude the background.
            occluder_masks = np.zeros_like(binary_masks)
            for oi in range(entry['num_objs']):
                if oi == 0:
                    occluder_masks[oi] = np.sum(binary_masks[1:], axis=0)
                    continue
                for oj in range(entry['num_objs']):
                    if oj == 0 or oj == oi or inst_depth[oi] > inst_depth[oj]:
                        continue
                    if binary_masks[oi].sum() == 0 or binary_masks[oj].sum() == 0:
                        continue
                    rows = np.any(binary_masks[oi], axis=1)
                    cols = np.any(binary_masks[oi], axis=0)
                    rmin, rmax = np.where(rows)[0][[0, -1]]
                    cmin, cmax = np.where(cols)[0][[0, -1]]
                    bbox_i = np.clip(np.array([rmin - 10, rmax + 10, cmin - 10, cmax + 10]), 0, RESOLUTION)
                    rows = np.any(binary_masks[oj], axis=1)
                    cols = np.any(binary_masks[oj], axis=0)
                    rmin, rmax = np.where(rows)[0][[0, -1]]
                    cmin, cmax = np.where(cols)[0][[0, -1]]
                    bbox_j = np.clip(np.array([rmin - 10, rmax + 10, cmin - 10, cmax + 10]), 0, RESOLUTION)
                    x_left = max(bbox_i[0], bbox_j[0])
                    y_top = max(bbox_i[2], bbox_j[2])
                    x_right = min(bbox_i[1], bbox_j[1])
                    y_bottom = min(bbox_i[3], bbox_j[3])
                    if x_right < x_left or y_bottom < y_top:
                        continue
                    occluder_masks[oi] += binary_masks[oj]
            occluder_masks = np.clip(occluder_masks, 0, 1)
            vid_occluder_masks.append(occluder_masks)

        # Write per-object masks, occluder masks, and class labels.
        for j in range(entry['num_objs']):
            os.makedirs(os.path.join(OUT_PATH, "Annotations", entry['vid_name'], f'{j:03d}'), exist_ok=True)
            os.makedirs(os.path.join(OUT_PATH, "OccluderAnnotations", entry['vid_name'], f'{j:03d}'), exist_ok=True)
            os.makedirs(os.path.join(OUT_PATH, "ClassLabels", entry['vid_name'], f'{j:03d}'), exist_ok=True)
            for t in range(num_frames):
                frame = entry['segmentation'][t, :, :, j]
                mask_save_path = os.path.join(OUT_PATH, "Annotations", entry['vid_name'], f'{j:03d}', f'{t:05d}.png')
                mask = np.repeat(frame[:, :, np.newaxis], 3, axis=2) * 255
                PIL_Image.fromarray(mask.astype(np.uint8)).save(mask_save_path)

                occ_mask_save_path = os.path.join(OUT_PATH, "OccluderAnnotations", entry['vid_name'], f'{j:03d}', f'{t:05d}.png')
                mask = vid_occluder_masks[t][j]
                PIL_Image.fromarray(mask.astype(np.uint8) * 255).save(occ_mask_save_path)

            # Object 0 is the background; otherwise write the category name, falling back to "object".
            class_label_path = os.path.join(OUT_PATH, 'ClassLabels', entry['vid_name'], f'{j:03d}', 'class_label.txt')
            with open(class_label_path, 'w') as class_file:
                if j == 0:
                    class_file.write("background")
                elif categories[entry['categories'][j]] != 'None':
                    class_file.write(categories[entry['categories'][j]])
                else:
                    class_file.write("object")

    print("Done")
    # Drop into a debugger once the export finishes.
    from ipdb import set_trace; set_trace()
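
# Sketch of the on-disk layout produced by the export above (per-video folders come from
# entry['vid_name']; frame and object ids are zero-padded):
#
#   OUT_PATH/
#     JPEGImages/<vid_name>/00000.png ...              RGB frames
#     DepthImages/<vid_name>/00000.npy ...             ground-truth depth
#     OpticalFlow/<vid_name>/00000.npy ...             forward flow
#     OpticalFlowVis/<vid_name>/00000.png ...          flow visualizations
#     Annotations/<vid_name>/<obj_id>/00000.png ...    per-object binary masks
#     OccluderAnnotations/<vid_name>/<obj_id>/...      masks of likely occluders
#     ClassLabels/<vid_name>/<obj_id>/class_label.txt  object category name
#
# In addition, traj_data.npz is written next to each video's data.npz inside dataset.root_dir.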