# VfiTest/datasets/x4k1000fps.py
import numpy as np
import cv2
from glob import glob
import os
import torch
from torch.utils.data import Dataset
from .datasets import register


@register('x4k1000fps')
class X_Test(Dataset):
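    """X4K1000FPS (X-TEST) evaluation dataset.

    Expects test_data_path/<type>/<scene>/*.png sequences; every pair of frames
    32 steps apart (I0, I1) is paired with (multiple - 1) intermediate target
    frames It at times t in (0, 1).
    """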
def __init__(self, test_data_path, multiple):
self.test_data_path = test_data_path
self.multiple = multiple
self.testPath = self.make_2_d_dataset_x_test(
self.test_data_path, multiple, t_step_size=32)
self.nIterations = len(self.testPath)
# Raise error if no images found in test_data_path.
if len(self.testPath) == 0:
            raise RuntimeError(
                "Found 0 files in subfolders of: " + self.test_data_path + "\n")
def make_2_d_dataset_x_test(self, test_data_path, multiple, t_step_size):
""" make [I0,I1,It,t,scene_folder] """
""" 1D (accumulated) """
testPath = []
t = np.linspace(
(1 / multiple), (1 - (1 / multiple)), (multiple - 1)
)
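        # e.g. multiple = 8 -> t = [1/8, 2/8, ..., 7/8]: the 7 intermediate target times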
for type_folder in sorted(glob(os.path.join(test_data_path, '*', ''))): # [type1,type2,type3,...]
for scene_folder in sorted(glob(os.path.join(type_folder, '*', ''))): # [scene1,scene2,..]
frame_folder = sorted(glob(scene_folder + '*.png')) # 32 multiple, ['00000.png',...,'00032.png']
for idx in range(0, len(frame_folder), t_step_size): # 0,32,64,...
if idx == len(frame_folder) - 1:
break
for mul in range(multiple - 1):
I0I1It_paths = []
I0I1It_paths.append(frame_folder[idx]) # I0 (fix)
I0I1It_paths.append(frame_folder[idx + t_step_size]) # I1 (fix)
I0I1It_paths.append(frame_folder[idx + int((t_step_size // multiple) * (mul + 1))]) # It
I0I1It_paths.append(t[mul])
I0I1It_paths.append(scene_folder.split(os.path.join(test_data_path, ''))[-1]) # type1/scene1
testPath.append(I0I1It_paths)
return testPath
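    # Illustrative testPath entry for multiple=8 (paths shortened with '...'):
    #   ['.../type1/scene1/00000.png',  # I0
    #    '.../type1/scene1/00032.png',  # I1
    #    '.../type1/scene1/00004.png',  # It at t = 1/8
    #    0.125,                         # t
    #    'type1/scene1/']               # scene folder relative to test_data_path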
    def frames_loader_test(self, I0I1It_Path):
        frames = []
        for path in I0I1It_Path:
            frame = cv2.imread(path)  # note: cv2.imread returns an H x W x 3 array in BGR order
            frames.append(frame)
        frames = np.stack(frames, axis=0)  # (T, H, W, 3)
        # np2Tensor [-1,1] normalized
        frames = X_Test.RGBframes_np2Tensor(frames)
        return frames
    @staticmethod
    def RGBframes_np2Tensor(imgIn, channel=3):
        ## input : T, H, W, C
        if channel == 1:
            # rgb --> Y (gray), ITU-R BT.601 luma coefficients
            imgIn = np.sum(
                imgIn * np.reshape(
                    [65.481, 128.553, 24.966], [1, 1, 1, 3]
                ) / 255.0,
                axis=3,
                keepdims=True) + 16.0
        # to Tensor; dimension order should be [C, T, H, W]
        ts = (3, 0, 1, 2)
        imgIn = torch.Tensor(imgIn.transpose(ts).astype(float)).mul_(1.0)
        return imgIn
def __getitem__(self, idx):
I0, I1, It, t_value, scene_name = self.testPath[idx]
I0I1It_Path = [I0, I1, It]
frames = self.frames_loader_test(I0I1It_Path)
# including "np2Tensor [-1,1] normalized"
I0_path = I0.split(os.sep)[-1]
I1_path = I1.split(os.sep)[-1]
It_path = It.split(os.sep)[-1]
return frames, np.expand_dims(np.array(t_value, dtype=np.float32), 0), \
scene_name, [It_path, I0_path, I1_path]
def __len__(self):
return self.nIterations
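

# Minimal usage sketch (assumes this module is imported through its package so the
# relative import of `register` resolves, and that the X4K1000FPS test split lives
# under a hypothetical path such as '/data/X4K1000FPS/test'):
#
#     dataset = X_Test(test_data_path='/data/X4K1000FPS/test', multiple=8)
#     frames, t_value, scene_name, names = dataset[0]
#     # frames:  float tensor of shape [3, 3, H, W] ([C, T, H, W] over I0, I1, It)
#     # t_value: np.float32 array of shape (1,), e.g. array([0.125])
#     # names:   [It_filename, I0_filename, I1_filename]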