|
""" GraspNet dataset processing. |
|
""" |
|
|
|
import os |
|
import sys |
|
import numpy as np |
|
import numpy.ma as ma |
|
import scipy.io as scio |
|
from scipy.optimize import linear_sum_assignment |
|
from PIL import Image |
|
from skimage.measure import label, regionprops |
|
import cv2 |
|
|
|
import torch |
|
from collections import abc as container_abcs |
|
from torch.utils.data import Dataset |
|
from tqdm import tqdm |
|
from torch.utils.data import DataLoader |
|
from time import time |
|
|
|
BASE_DIR = os.path.dirname(os.path.abspath(__file__)) |
|
from .data_utils import CameraInfo, transform_point_cloud, create_point_cloud_from_depth_image, \ |
|
get_workspace_mask, remove_invisible_grasp_points |
|
import h5py |
|
|
|
|
|
class GraspNetDataset(Dataset): |
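    """GraspNet scenes served from pre-generated HDF5 index files.

    Each item is a dict holding the RGB image, metrically scaled depth,
    segmentation labels, per-object normalized 2D bounding boxes, and the
    camera intrinsics/extrinsics of one annotated frame.
    """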
|
def __init__(self, root, valid_obj_idxs, camera='kinect', split='train', remove_invisible=True, |
|
augment=False, limited_data=False, overfitting=False, k_grasps=1, ground_truth_type="topk", caching=True): |
|
self.root = root |
|
self.split = split |
|
self.remove_invisible = remove_invisible |
|
self.valid_obj_idxs = valid_obj_idxs |
|
self.camera = camera |
|
self.augment = augment |
|
self.k_grasps = k_grasps |
|
self.ground_truth_type = ground_truth_type |
|
self.overfitting = overfitting |
|
self.caching = caching |
|
|
|
        if overfitting:  # overfitting mode implies the limited-data subset
            limited_data = True
        self.limited_data = limited_data
|
|
|
if split == 'train': |
|
self.sceneIds = list(range(100)) |
|
elif split == 'test': |
|
self.sceneIds = list(range(100, 190)) |
|
elif split == 'test_seen': |
|
self.sceneIds = list(range(100, 130)) |
|
elif split == 'test_similar': |
|
self.sceneIds = list(range(130, 160)) |
|
        elif split == 'test_novel':
            self.sceneIds = list(range(160, 190))
        else:
            raise ValueError('unknown split: {}'.format(split))
|
if limited_data: |
|
self.sceneIds = self.sceneIds[:10] |
|
self.sceneIds = ['scene_{}'.format(str(x).zfill(4)) for x in self.sceneIds] |
|
|
|
filename = f"dataset/{split}_labels" |
|
if limited_data and not overfitting: |
|
filename += "_limited" |
|
if overfitting: |
|
filename += "_overfitting" |
|
filename += ".hdf5" |
|
self.h5_filename = filename |
|
self.h5_file = None |
|
self.grasp_labels_filename = "dataset/grasp_labels.hdf5" |
|
self.grasp_labels_file = None |
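        # Only the dataset length is read eagerly; per-worker file handles are
        # opened lazily in __getitem__ so the dataset works with num_workers > 0.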
|
|
|
with h5py.File(self.h5_filename, 'r') as f: |
|
self.len = f['depthpath'].shape[0] |
|
|
|
def __len__(self): |
|
return self.len |
|
|
|
def __getitem__(self, index): |
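        """Load one annotated frame and its labels from the HDF5 index."""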
|
if self.h5_file is None: |
|
self.h5_file = h5py.File(self.h5_filename, 'r') |
|
|
|
        # metapath ends in .../meta/<ann_id>.mat; recover the annotation id
        ann_id = int(str(self.h5_file['metapath'][index], 'utf-8').split("meta")[1][1:-4])
|
|
|
color = np.array(Image.open(self.h5_file['colorpath'][index]), dtype=np.float32) / 255.0 |
|
depth = np.array(Image.open(self.h5_file['depthpath'][index])) |
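        # Clip depth outliers: readings above the 99th percentile, as well as
        # missing (zero) readings, are replaced by the 99th-percentile depth.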
|
|
|
|
|
p99 = np.percentile(depth[depth != 0], 99) |
|
|
|
depth[depth > p99] = p99 |
|
depth[depth == 0] = p99 |
|
|
|
seg = np.array(Image.open(self.h5_file['labelpath'][index])) |
|
meta = scio.loadmat(self.h5_file['metapath'][index]) |
|
scene = self.h5_file['scenename'][index] |
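        # Camera pose in the table frame: camera_poses.npy holds each
        # annotation's pose relative to camera 0, and cam0_wrt_table.npy maps
        # camera 0 into the table frame.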
|
|
|
        main_path = str(self.h5_file['metapath'][index], 'utf-8').split("meta")[0]
        cam_extrinsics = np.load(os.path.join(main_path, 'camera_poses.npy'))[ann_id]
        cam_wrt_table = np.load(os.path.join(main_path, 'cam0_wrt_table.npy'))
        cam_extrinsics = cam_wrt_table.dot(cam_extrinsics).astype(np.float32)
|
|
|
        try:
            obj_idxs = meta['cls_indexes'].flatten().astype(np.int32)
            poses = meta['poses']
            intrinsic = meta['intrinsic_matrix']
            factor_depth = meta['factor_depth']
        except Exception as e:
            print(repr(e))
            print(scene)
            raise  # these fields are required below; do not continue with them undefined
|
|
|
|
|
|
|
|
|
        # GraspNet frames are 1280x720; build the pinhole camera model from the
        # stored intrinsics and depth scale.
        camera = CameraInfo(1280.0, 720.0, intrinsic[0][0], intrinsic[1][1], intrinsic[0][2], intrinsic[1][2], factor_depth)
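        # Collect one normalized 2D bounding box per valid object; objects with
        # fewer than 50 visible pixels in the segmentation are skipped.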
|
|
|
|
|
|
|
|
|
obj_bounding_boxes = [] |
|
for i, obj_idx in enumerate(obj_idxs): |
|
if obj_idx not in self.valid_obj_idxs: |
|
continue |
|
if (seg == obj_idx).sum() < 50: |
|
continue |
|
|
|
seg_cpy = seg.copy() |
|
seg_cpy[seg != obj_idx] = 0 |
|
seg_cpy[seg == obj_idx] = 1 |
|
seg_labels = label(seg_cpy) |
|
regions = regionprops(seg_labels) |
|
|
|
|
|
            # use the largest connected component in case occlusion splits the mask
            b = max(regions, key=lambda r: r.area).bbox
|
|
|
H, W = seg.shape[0], seg.shape[1] |
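            # bbox is (min_row, min_col, max_row, max_col); convert to normalized
            # [x1, y1, x2, y2] and repeat once per grasp.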
|
|
|
obj_bounding_boxes.append(np.array([b[1] / W, b[0] / H, b[3] / W, b[2] / H])[None].repeat(self.k_grasps, 0)) |
|
obj_bounding_boxes = np.concatenate(obj_bounding_boxes, axis=0).astype(np.float32) |
|
|
|
ret_dict = {} |
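        # Assemble the network inputs: CHW color, metric depth, labels, boxes,
        # and camera parameters.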
|
|
|
ret_dict['color'] = color.transpose((2, 0, 1)).astype(np.float32) |
|
ret_dict['depth'] = (depth / camera.scale).astype(np.float32) |
|
ret_dict['objectness_label'] = seg.astype(np.int32) |
|
ret_dict['obj_bounding_boxes'] = obj_bounding_boxes |
|
        # 3x3 intrinsics flattened, plus the depth scale; shape (10, 1)
        ret_dict['camera_intrinsics'] = np.expand_dims(np.concatenate([intrinsic.reshape(-1), factor_depth[0]]), -1).astype(np.float32)
|
ret_dict['camera_extrinsics'] = cam_extrinsics.astype(np.float32) |
|
|
|
ret_dict['obj_idxs'] = obj_idxs |
|
|
|
return ret_dict |
|
|
|
|
|
def load_valid_obj_idxs():
    """Return the 1-based ids of the 88 GraspNet objects, excluding index 18 (id 19)."""
    valid_obj_idxs = [i + 1 for i in range(88) if i != 18]
    return valid_obj_idxs
|
|
|
|
|
def my_worker_init_fn(worker_id):
    """Seed numpy differently in each DataLoader worker so random draws diverge."""
    np.random.seed(np.random.get_state()[1][0] + worker_id)
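

# Minimal usage sketch. Assumptions: the HDF5 index files referenced above
# (e.g. dataset/train_labels_limited.hdf5) have been generated, and the root
# path below is a placeholder for an actual GraspNet checkout.
if __name__ == '__main__':
    valid_obj_idxs = load_valid_obj_idxs()
    dataset = GraspNetDataset(root='/path/to/graspnet', valid_obj_idxs=valid_obj_idxs,
                              camera='kinect', split='train', limited_data=True)
    print('samples:', len(dataset))
    # batch_size=1 because scenes contain different numbers of objects, which
    # the default collate function cannot stack into one tensor.
    loader = DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2,
                        worker_init_fn=my_worker_init_fn)
    batch = next(iter(loader))
    print(batch['color'].shape, batch['depth'].shape)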
|
|