""" GraspNet dataset processing.
"""
import os
import sys
import numpy as np
import numpy.ma as ma
import scipy.io as scio
from scipy.optimize import linear_sum_assignment
from PIL import Image
from skimage.measure import label, regionprops
import cv2
import torch
from collections import abc as container_abcs
from torch.utils.data import Dataset
from tqdm import tqdm
from torch.utils.data import DataLoader
from time import time
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
from .data_utils import CameraInfo, transform_point_cloud, create_point_cloud_from_depth_image, \
    get_workspace_mask, remove_invisible_grasp_points
import h5py


class GraspNetDataset(Dataset):
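    """GraspNet per-frame dataset.

    File paths for every frame are read from a pre-generated HDF5 index
    (dataset/<split>_labels*.hdf5). Each item returns the RGB image, the clipped
    depth map, the object segmentation, normalized per-object bounding boxes and
    the camera intrinsics/extrinsics for that view.
    """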
    def __init__(self, root, valid_obj_idxs, camera='kinect', split='train', remove_invisible=True,
                 augment=False, limited_data=False, overfitting=False, k_grasps=1, ground_truth_type="topk",
                 caching=True):
        self.root = root
        self.split = split
        self.remove_invisible = remove_invisible
        self.valid_obj_idxs = valid_obj_idxs
        self.camera = camera
        self.augment = augment
        self.k_grasps = k_grasps
        self.ground_truth_type = ground_truth_type
        self.overfitting = overfitting
        self.caching = caching
        if overfitting:
            limited_data = True
        self.limited_data = limited_data
        if split == 'train':
            self.sceneIds = list(range(100))
        elif split == 'test':
            self.sceneIds = list(range(100, 190))
        elif split == 'test_seen':
            self.sceneIds = list(range(100, 130))
        elif split == 'test_similar':
            self.sceneIds = list(range(130, 160))
        elif split == 'test_novel':
            self.sceneIds = list(range(160, 190))
        if limited_data:
            self.sceneIds = self.sceneIds[:10]
        self.sceneIds = ['scene_{}'.format(str(x).zfill(4)) for x in self.sceneIds]
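        # pick the pre-generated HDF5 index of per-frame file paths for this split
        # (or its smaller "_limited" / "_overfitting" variant)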
        filename = f"dataset/{split}_labels"
        if limited_data and not overfitting:
            filename += "_limited"
        if overfitting:
            filename += "_overfitting"
        filename += ".hdf5"
        self.h5_filename = filename
        self.h5_file = None
        self.grasp_labels_filename = "dataset/grasp_labels.hdf5"
        self.grasp_labels_file = None
        with h5py.File(self.h5_filename, 'r') as f:
            self.len = f['depthpath'].shape[0]

    def __len__(self):
        return self.len

    def __getitem__(self, index):
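        # the HDF5 index is opened lazily here (not in __init__) so that every
        # DataLoader worker process ends up with its own file handle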
        if self.h5_file is None:
            self.h5_file = h5py.File(self.h5_filename, 'r')
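        # annotation (view) id parsed from the meta path, e.g. ".../meta/0123.mat" -> 123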
        ann_id = int(str(self.h5_file['metapath'][index], 'utf-8').split("meta")[1][1:-4])
        color = np.array(Image.open(self.h5_file['colorpath'][index]), dtype=np.float32) / 255.0
        depth = np.array(Image.open(self.h5_file['depthpath'][index]))
        # clamp depth outliers above the 99th percentile and fill missing (zero) readings with that value
        p99 = np.percentile(depth[depth != 0], 99)
        # p1 = abs(np.percentile(depth[depth != 0], 1))
        depth[depth > p99] = p99
        depth[depth == 0] = p99
        seg = np.array(Image.open(self.h5_file['labelpath'][index]))
        meta = scio.loadmat(self.h5_file['metapath'][index])
        scene = self.h5_file['scenename'][index]
        main_path = str(self.h5_file['metapath'][index], 'utf-8').split("meta")[0]
        cam_extrinsics = np.load(os.path.join(main_path, 'camera_poses.npy'))[ann_id]
        cam_wrt_table = np.load(os.path.join(main_path, 'cam0_wrt_table.npy'))
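        # camera_poses.npy holds each view's pose w.r.t. view 0; chaining it with
        # cam0_wrt_table.npy gives the camera pose w.r.t. the table frame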
        cam_extrinsics = cam_wrt_table.dot(cam_extrinsics).astype(np.float32)
        try:
            obj_idxs = meta['cls_indexes'].flatten().astype(np.int32)
            poses = meta['poses']
            intrinsic = meta['intrinsic_matrix']
            factor_depth = meta['factor_depth']
        except Exception as e:
            print(repr(e))
            print(scene)
            raise
        # h_ratio = 800 / 720
        # w_ratio = 1333 / 1280
        camera = CameraInfo(1280.0, 720.0, intrinsic[0][0], intrinsic[1][1], intrinsic[0][2], intrinsic[1][2],
                            factor_depth)
        # generate cloud required to remove invisible grasp points
        # cloud = create_point_cloud_from_depth_image(depth, camera, organized=True)
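        # build a normalized (x_min, y_min, x_max, y_max) box from the segmentation mask of every
        # valid object covering at least 50 pixels; each box is repeated k_grasps times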
        obj_bounding_boxes = []
        for i, obj_idx in enumerate(obj_idxs):
            if obj_idx not in self.valid_obj_idxs:
                continue
            if (seg == obj_idx).sum() < 50:
                continue
            seg_cpy = seg.copy()
            seg_cpy[seg != obj_idx] = 0
            seg_cpy[seg == obj_idx] = 1
            seg_labels = label(seg_cpy)
            regions = regionprops(seg_labels)
            # regionprops bbox is (min_row, min_col, max_row, max_col), i.e. (y_min, x_min, y_max, x_max)
            b = regions[0].bbox
            # saved bbox is normalized (x_min, y_min, x_max, y_max)
            H, W = seg.shape[0], seg.shape[1]
            obj_bounding_boxes.append(np.array([b[1] / W, b[0] / H, b[3] / W, b[2] / H])[None].repeat(self.k_grasps, 0))
        obj_bounding_boxes = np.concatenate(obj_bounding_boxes, axis=0).astype(np.float32)
        ret_dict = {}
        # ret_dict['point_cloud'] = cloud.transpose((2, 0, 1)).astype(np.float32)
        ret_dict['color'] = color.transpose((2, 0, 1)).astype(np.float32)
        ret_dict['depth'] = (depth / camera.scale).astype(np.float32)
        ret_dict['objectness_label'] = seg.astype(np.int32)
        ret_dict['obj_bounding_boxes'] = obj_bounding_boxes
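        # flattened 3x3 intrinsic matrix followed by the depth scale factor, shaped (10, 1)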
        ret_dict['camera_intrinsics'] = np.expand_dims(np.concatenate([intrinsic.reshape(-1), factor_depth[0]]), -1).astype(np.float32)
        ret_dict['camera_extrinsics'] = cam_extrinsics.astype(np.float32)
        # ret_dict['transformed_points'] = transformed_points.astype(np.float32)
        ret_dict['obj_idxs'] = obj_idxs
        return ret_dict


def load_valid_obj_idxs():
    obj_names = list(range(88))
    valid_obj_idxs = []
    for i, obj_name in enumerate(obj_names):
        if i == 18:
            continue
        valid_obj_idxs.append(i + 1)  # object ids in the label PNGs are 1-indexed (0 is background)
    return valid_obj_idxs


def my_worker_init_fn(worker_id):
    # give each DataLoader worker a distinct numpy random seed
    np.random.seed(np.random.get_state()[1][0] + worker_id)
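

if __name__ == '__main__':
    # Minimal usage sketch (illustrative only, not part of the training pipeline).
    # It assumes the cached HDF5 index (e.g. "dataset/train_labels_limited.hdf5") has already been
    # generated, that GraspNet is extracted under the hypothetical path below, and that the module
    # is run with "python -m <package>.<module>" since it uses a relative import of data_utils.
    graspnet_root = "/path/to/graspnet"  # hypothetical dataset location
    valid_obj_idxs = load_valid_obj_idxs()
    dataset = GraspNetDataset(graspnet_root, valid_obj_idxs, camera='kinect', split='train',
                              limited_data=True)
    print("frames:", len(dataset))
    # batch_size=1 because scenes contain different numbers of objects, so the variable-length
    # 'obj_bounding_boxes' and 'obj_idxs' entries would need a custom collate_fn for larger batches
    loader = DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2,
                        worker_init_fn=my_worker_init_fn)
    for batch in loader:
        print(batch['color'].shape, batch['depth'].shape, batch['obj_bounding_boxes'].shape)
        break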