|
|
|
|
|
import torch |
|
import numpy as np |
|
import torch.nn as nn |
|
import torch.nn.functional as F |
|
import torchvision.models as models |
|
import cv2 |
|
|
|
|
from modules.components.m2m_unimatch.unimatch.unimatch import UniMatch |
|
from modules.components.m2m_flow_former.LatentCostFormer.transformer import * |
|
from modules.components.m2m_flow_former.cfg import get_cfg |
|
|
|
|
|
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") |
|
losses = {} |
|
|
|
|
|
def register(name): |
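    """Class decorator: registers a loss class in the module-level `losses` dict under `name`."""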
|
def decorator(cls): |
|
losses[name] = cls |
|
return cls |
|
return decorator |
|
|
|
|
|
def make_loss_dict(loss_cfgs): |
|
loss_dict = dict() |
|
|
|
def make_loss(loss_spec): |
|
loss = losses[loss_spec['name']](**loss_spec['args']) |
|
return loss |
|
|
|
for loss_cfg in loss_cfgs: |
|
loss_dict[loss_cfg['name']] = make_loss(loss_cfg) |
|
|
|
return loss_dict |
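
# Illustrative configuration for make_loss_dict: each entry's 'name' must be a key
# registered below and 'args' the matching constructor kwargs, e.g.:
#   loss_dict = make_loss_dict([
#       {'name': 'charbonnier', 'args': {'weight': 1.0}},
#       {'name': 'ternary', 'args': {'weight': 1.0}},
#   ])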
|
|
|
@register('frequency') |
|
class Frequency(nn.Module): |
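    """L1 loss on the FFT amplitude and phase spectra of the prediction versus the ground truth."""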
|
def __init__(self, weight): |
|
super(Frequency, self).__init__() |
|
self.weight = weight |
|
|
|
def forward(self, imgt, imgt_pred, **kwargs): |
|
fft_pred = torch.fft.fft2(imgt_pred) |
|
amp_pred = torch.abs(fft_pred) |
|
pha_pred = torch.angle(fft_pred) |
|
|
|
fft_gt = torch.fft.fft2(imgt) |
|
amp_gt = torch.abs(fft_gt) |
|
pha_gt = torch.angle(fft_gt) |
|
|
|
amp_loss = F.l1_loss(input=amp_pred, target=amp_gt, reduction='mean') |
|
pha_loss = F.l1_loss(input=pha_pred, target=pha_gt, reduction='mean') |
|
|
|
return (amp_loss + pha_loss) * self.weight |
|
|
|
@register('bi_frequency') |
|
class BidirectionalFrequency(nn.Module): |
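    """Compares the spectral distance (amplitude and phase) of imgt to each input frame against
    the corresponding distance of imgt_pred, so the prediction keeps the same spectral relation
    to img0 and img1 as the ground truth."""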
|
def __init__(self, weight): |
|
super(BidirectionalFrequency, self).__init__() |
|
self.weight = weight |
|
|
|
def get_amp_pha(self, img): |
|
fft = torch.fft.fft2(img) |
|
amplitude = torch.abs(fft) |
|
phase = torch.angle(fft) |
|
return amplitude, phase |
|
|
|
def forward(self, img0, img1, imgt, imgt_pred, **kwargs): |
|
amp0, pha0 = self.get_amp_pha(img0) |
|
amp1, pha1 = self.get_amp_pha(img1) |
|
ampt, phat = self.get_amp_pha(imgt) |
|
ampt_pred, phat_pred = self.get_amp_pha(imgt_pred) |
|
|
|
amp_loss0 = F.l1_loss(torch.abs(amp0-ampt), torch.abs(amp0-ampt_pred)) |
|
amp_loss1 = F.l1_loss(torch.abs(amp1-ampt), torch.abs(amp1-ampt_pred)) |
|
pha_loss0 = F.l1_loss(torch.abs(pha0-phat), torch.abs(pha0-phat_pred)) |
|
pha_loss1 = F.l1_loss(torch.abs(pha1-phat), torch.abs(pha1-phat_pred)) |
|
|
|
return (amp_loss0 + amp_loss1 + pha_loss0 + pha_loss1) * self.weight |
|
|
|
@register('l1') |
|
class L1(nn.Module): |
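    """Plain mean L1 loss between two images."""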
|
def __init__(self): |
|
super(L1, self).__init__() |
|
|
|
|
|
|
|
def forward(self, img0, img1): |
|
return F.l1_loss(input=img0, target=img1, reduction='mean') |
|
|
|
|
|
|
|
|
|
|
|
@register('charbonnier') |
|
class Charbonnier(nn.Module): |
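    """Charbonnier penalty: mean of ((imgt - imgt_pred)^2 + 1e-6) ** 0.5, scaled by `weight`."""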
|
def __init__(self, weight): |
|
super(Charbonnier, self).__init__() |
|
self.weight = weight |
|
|
|
|
|
|
|
def forward(self, imgt, imgt_pred, **kwargs): |
|
return (((imgt - imgt_pred) ** 2 + 1e-6) ** 0.5).mean() * self.weight |
|
|
|
|
|
|
|
|
|
|
|
@register('multiple_charbonnier') |
|
class MultipleCharbonnier(nn.Module): |
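    """Charbonnier loss summed over a list of predictions, each weighted by gamma ** (N - 1 - i)."""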
|
def __init__(self, weight, gamma, **kwargs): |
|
super().__init__() |
|
self.weight = weight |
|
self.gamma = gamma |
|
self.charbonnier = Charbonnier(1) |
|
|
|
def forward(self, imgt_preds, imgt, **kwargs): |
|
        loss_charbonnier = torch.zeros(1, device=imgt.device)
|
for i in range(len(imgt_preds)): |
|
i_weight = self.gamma ** (len(imgt_preds) - i - 1) |
|
loss_charbonnier += self.charbonnier(imgt_preds[i], imgt) * i_weight |
|
return loss_charbonnier * self.weight |
|
|
|
|
|
@register('ternary') |
|
class Ternary(nn.Module): |
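    """Census (ternary) transform loss: 7x7 census descriptors of the grayscale images are compared
    with a soft Hamming distance, restricted to non-border pixels via valid_mask."""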
|
def __init__(self, weight): |
|
super(Ternary, self).__init__() |
|
patch_size = 7 |
|
out_channels = patch_size * patch_size |
|
self.w = np.eye(out_channels).reshape( |
|
(patch_size, patch_size, 1, out_channels)) |
|
self.w = np.transpose(self.w, (3, 2, 0, 1)) |
|
self.w = torch.tensor(self.w).float().to(device) |
|
self.weight = weight |
|
|
|
|
|
|
|
def transform(self, img): |
|
patches = F.conv2d(img, self.w, padding=3, bias=None) |
|
transf = patches - img |
|
transf_norm = transf / torch.sqrt(0.81 + transf ** 2) |
|
return transf_norm |
|
|
|
|
|
|
|
def rgb2gray(self, rgb): |
|
r, g, b = rgb[:, 0:1, :, :], rgb[:, 1:2, :, :], rgb[:, 2:3, :, :] |
|
gray = 0.2989 * r + 0.5870 * g + 0.1140 * b |
|
return gray |
|
|
|
|
|
|
|
def hamming(self, t1, t2): |
|
dist = (t1 - t2) ** 2 |
|
dist_norm = torch.mean(dist / (0.1 + dist), 1, True) |
|
return dist_norm |
|
|
|
|
|
|
|
def valid_mask(self, t, padding): |
|
n, _, h, w = t.size() |
|
inner = torch.ones(n, 1, h - 2 * padding, w - 2 * padding).type_as(t) |
|
mask = F.pad(inner, [padding] * 4) |
|
return mask |
|
|
|
|
|
|
|
def forward(self, imgt, imgt_pred, **kwargs): |
|
imgt = self.transform(self.rgb2gray(imgt)) |
|
imgt_pred = self.transform(self.rgb2gray(imgt_pred)) |
|
return (self.hamming(imgt, imgt_pred) * self.valid_mask(imgt, 1)).mean() * self.weight |
|
|
|
|
|
|
|
|
|
|
|
@register('multiple_ternary') |
|
class MultipleTernary(nn.Module): |
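    """Ternary loss summed over a list of predictions, each weighted by gamma ** (N - 1 - i)."""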
|
def __init__(self, weight, gamma, **kwargs): |
|
super().__init__() |
|
self.weight = weight |
|
self.gamma = gamma |
|
self.ternary = Ternary(1) |
|
|
|
def forward(self, imgt_preds, imgt, **kwargs): |
|
        loss_ter = torch.zeros(1, device=imgt.device)
|
for i in range(len(imgt_preds)): |
|
i_weight = self.gamma ** (len(imgt_preds) - i - 1) |
|
loss_ter += self.ternary(imgt_preds[i], imgt) * i_weight |
|
return loss_ter * self.weight |
|
|
|
|
|
@register('sobel') |
|
class SOBEL(nn.Module): |
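    """L1 difference between the Sobel gradient maps (x and y) of pred and gt; returns the per-pixel loss map."""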
|
def __init__(self): |
|
super(SOBEL, self).__init__() |
|
self.kernelX = torch.tensor([ |
|
[1, 0, -1], |
|
[2, 0, -2], |
|
[1, 0, -1], |
|
]).float() |
|
self.kernelY = self.kernelX.clone().T |
|
self.kernelX = self.kernelX.unsqueeze(0).unsqueeze(0).to(device) |
|
self.kernelY = self.kernelY.unsqueeze(0).unsqueeze(0).to(device) |
|
|
|
|
|
|
|
def forward(self, pred, gt): |
|
N, C, H, W = pred.shape[0], pred.shape[1], pred.shape[2], pred.shape[3] |
|
img_stack = torch.cat( |
|
[pred.reshape(N * C, 1, H, W), gt.reshape(N * C, 1, H, W)], 0) |
|
sobel_stack_x = F.conv2d(img_stack, self.kernelX, padding=1) |
|
sobel_stack_y = F.conv2d(img_stack, self.kernelY, padding=1) |
|
pred_X, gt_X = sobel_stack_x[:N * C], sobel_stack_x[N * C:] |
|
pred_Y, gt_Y = sobel_stack_y[:N * C], sobel_stack_y[N * C:] |
|
|
|
L1X, L1Y = torch.abs(pred_X - gt_X), torch.abs(pred_Y - gt_Y) |
|
loss = (L1X + L1Y) |
|
return loss |
|
|
|
|
|
|
|
|
|
|
|
|
|
class MeanShift(nn.Conv2d): |
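    """Fixed 1x1 convolution that normalizes (norm=True) or de-normalizes images with the given per-channel mean/std."""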
|
def __init__(self, data_mean, data_std, data_range=1, norm=True): |
|
c = len(data_mean) |
|
super(MeanShift, self).__init__(c, c, kernel_size=1) |
|
std = torch.Tensor(data_std) |
|
self.weight.data = torch.eye(c).view(c, c, 1, 1) |
|
if norm: |
|
self.weight.data.div_(std.view(c, 1, 1, 1)) |
|
self.bias.data = -1 * data_range * torch.Tensor(data_mean) |
|
self.bias.data.div_(std) |
|
else: |
|
self.weight.data.mul_(std.view(c, 1, 1, 1)) |
|
self.bias.data = data_range * torch.Tensor(data_mean) |
|
|
|
        # freeze the fixed normalization parameters (setting requires_grad on the Module itself is a no-op)
        for p in self.parameters():
            p.requires_grad = False
|
|
|
|
|
|
|
|
|
|
|
@register('vgg') |
|
class VGGPerceptualLoss(nn.Module): |
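    """Perceptual loss: weighted L1 distance between selected VGG-19 feature maps of the normalized
    prediction and ground truth."""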
|
def __init__(self, weight=1): |
|
super(VGGPerceptualLoss, self).__init__() |
|
blocks = [] |
|
pretrained = True |
|
self.weight = weight |
|
self.vgg_pretrained_features = models.vgg19(pretrained=pretrained).features |
|
        self.normalize = MeanShift([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], norm=True).to(device)
|
for param in self.parameters(): |
|
param.requires_grad = False |
|
|
|
|
|
|
|
|
|
def forward(self, imgt, imgt_pred, **kwargs): |
|
imgt = self.normalize(imgt) |
|
imgt_pred = self.normalize(imgt_pred) |
|
indices = [2, 7, 12, 21, 30] |
|
weights = [1.0 / 2.6, 1.0 / 4.8, 1.0 / 3.7, 1.0 / 5.6, 10 / 1.5] |
|
k = 0 |
|
loss = 0 |
|
for i in range(indices[-1]): |
|
imgt = self.vgg_pretrained_features[i](imgt) |
|
imgt_pred = self.vgg_pretrained_features[i](imgt_pred) |
|
if (i + 1) in indices: |
|
                # detach the ground-truth branch so gradients flow only through the prediction features
                loss += weights[k] * (imgt_pred - imgt.detach()).abs().mean() * 0.1
|
k += 1 |
|
|
|
|
|
return loss * self.weight |
|
|
|
|
|
|
|
|
|
@register('ada_charbonnier') |
|
class AdaCharbonnierLoss(nn.Module): |
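    """Charbonnier-style loss whose exponent (alpha = weight / 2) and epsilon are derived per-element from `weight`."""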
|
def __init__(self, weight) -> None: |
|
super().__init__() |
|
self.weight = weight |
|
|
|
def forward(self, imgt_pred, imgt, weight, **kwargs): |
|
alpha = weight / 2 |
|
epsilon = 10 ** (-(10 * weight - 1) / 3) |
|
|
|
diff = imgt_pred - imgt |
|
loss = ((diff ** 2 + epsilon ** 2) ** alpha).mean() |
|
        return loss * self.weight
|
|
|
|
|
@register('multiple_flow') |
|
class MultipleFlowLoss(nn.Module): |
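    """Adaptive Charbonnier loss summed over all predicted flow levels, each bilinearly resized
    (and rescaled) to the resolution of the ground-truth flows flowt0/flowt1."""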
|
def __init__(self, weight, beta=0.3) -> None: |
|
super().__init__() |
|
self.weight = weight |
|
self.beta = beta |
|
self.ada_cb_loss = AdaCharbonnierLoss(1.0) |
|
|
|
def forward(self, flow0_pred, flow1_pred, flowt0, flowt1, **kwargs): |
|
        robust_weight0 = self.get_multi_flow_robust_weight(flow0_pred[0], flowt0)
|
        robust_weight1 = self.get_multi_flow_robust_weight(flow1_pred[0], flowt1)
|
loss = 0 |
|
h, w = flowt0.shape[-2:] |
|
for lvl in range(0, len(flow0_pred)): |
|
h_lvl, w_lvl = flow0_pred[lvl].shape[-2:] |
|
scale_factor = h / h_lvl |
|
loss = loss + self.ada_cb_loss(**{ |
|
'imgt_pred': self.resize(flow0_pred[lvl], scale_factor), |
|
'imgt': flowt0, |
|
'weight': robust_weight0 |
|
}) |
|
loss = loss + self.ada_cb_loss(**{ |
|
'imgt_pred': self.resize(flow1_pred[lvl], scale_factor), |
|
'imgt': flowt1, |
|
'weight': robust_weight1 |
|
}) |
|
return loss * self.weight |
|
|
|
def resize(self, x, scale_factor): |
|
return scale_factor * F.interpolate(x, scale_factor=scale_factor, mode="bilinear", align_corners=False) |
|
|
|
    def get_multi_flow_robust_weight(self, flow_pred, flow_gt):
|
dims = flow_pred.shape |
|
if len(dims) == 5: |
|
b, num_flows, c, h, w = dims |
|
else: |
|
b, c, h, w = dims |
|
num_flows = 1 |
|
flow_pred = flow_pred.view(b, num_flows, c, h, w) |
|
flow_gt = flow_gt.repeat(1, num_flows, 1, 1).view(b, num_flows, c, h, w) |
|
epe = ((flow_pred.detach() - flow_gt) ** 2).sum(dim=2, keepdim=True).max(1)[0] ** 0.5 |
|
|
|
robust_weight = torch.ones_like(epe) |
|
return robust_weight |
|
|
|
|
|
@register('lap') |
|
class LapLoss(torch.nn.Module): |
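    """Sum of L1 losses between corresponding levels of the Laplacian pyramids of prediction and target."""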
|
@staticmethod |
|
def gauss_kernel(size=5, channels=3): |
|
kernel = torch.tensor([[1., 4., 6., 4., 1], |
|
[4., 16., 24., 16., 4.], |
|
[6., 24., 36., 24., 6.], |
|
[4., 16., 24., 16., 4.], |
|
[1., 4., 6., 4., 1.]]) |
|
kernel /= 256. |
|
kernel = kernel.repeat(channels, 1, 1, 1) |
|
kernel = kernel.to(device) |
|
return kernel |
|
|
|
|
|
@staticmethod |
|
def laplacian_pyramid(img, kernel, max_levels=3): |
|
def downsample(x): |
|
return x[:, :, ::2, ::2] |
|
|
|
def upsample(x): |
|
cc = torch.cat([x, torch.zeros(x.shape[0], x.shape[1], x.shape[2], x.shape[3]).to(device)], dim=3) |
|
cc = cc.view(x.shape[0], x.shape[1], x.shape[2]*2, x.shape[3]) |
|
cc = cc.permute(0,1,3,2) |
|
cc = torch.cat([cc, torch.zeros(x.shape[0], x.shape[1], x.shape[3], x.shape[2]*2).to(device)], dim=3) |
|
cc = cc.view(x.shape[0], x.shape[1], x.shape[3]*2, x.shape[2]*2) |
|
x_up = cc.permute(0,1,3,2) |
|
return conv_gauss(x_up, 4*LapLoss.gauss_kernel(channels=x.shape[1])) |
|
|
|
def conv_gauss(img, kernel): |
|
img = torch.nn.functional.pad(img, (2, 2, 2, 2), mode='reflect') |
|
out = torch.nn.functional.conv2d(img, kernel, groups=img.shape[1]) |
|
return out |
|
|
|
current = img |
|
pyr = [] |
|
for level in range(max_levels): |
|
filtered = conv_gauss(current, kernel) |
|
down = downsample(filtered) |
|
up = upsample(down) |
|
diff = current-up |
|
pyr.append(diff) |
|
current = down |
|
return pyr |
|
|
|
def __init__(self, max_levels=5, channels=3): |
|
super(LapLoss, self).__init__() |
|
self.max_levels = max_levels |
|
self.gauss_kernel = LapLoss.gauss_kernel(channels=channels) |
|
|
|
def forward(self, imgt_pred, imgt): |
|
pyr_pred = LapLoss.laplacian_pyramid( |
|
img=imgt_pred, kernel=self.gauss_kernel, max_levels=self.max_levels) |
|
pyr_target = LapLoss.laplacian_pyramid( |
|
img=imgt, kernel=self.gauss_kernel, max_levels=self.max_levels) |
|
return sum(torch.nn.functional.l1_loss(a, b) for a, b in zip(pyr_pred, pyr_target)) |
|
|
|
|
|
@register('vos') |
|
class VOSLoss(nn.Module): |
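    """Binary cross-entropy of the forward and backward binary segmentations against segt,
    plus a consistency term between the two."""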
|
def __init__(self, weight): |
|
super(VOSLoss, self).__init__() |
|
self.weight = weight |
|
|
|
def forward(self, segt, segt_f_binary, segt_b_binary, **kwargs): |
|
|
|
loss = F.binary_cross_entropy(segt_f_binary, segt) + F.binary_cross_entropy(segt_b_binary, segt) + F.binary_cross_entropy(segt_b_binary, segt_f_binary) |
|
return loss * self.weight |
|
|
|
|
|
@register('texture_consistency') |
|
class TCLoss(nn.Module): |
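    """Census-based texture consistency: for each pixel, the ground-truth 3x3 patch that best matches
    the predicted patch (by census distance, searched in a 5x5 window) is compared to the predicted
    patch with an L1 loss."""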
|
def __init__(self, weight): |
|
super(TCLoss, self).__init__() |
|
self.weight = weight |
|
|
|
def rgb2gray(self, rgb): |
|
r, g, b = rgb[:, 0:1, :, :], rgb[:, 1:2, :, :], rgb[:, 2:3, :, :] |
|
gray = 0.2989 * r + 0.5870 * g + 0.1140 * b |
|
return gray |
|
|
|
def forward(self, imgt_pred, imgt, **kwargs): |
|
b, c, h, w = imgt_pred.shape |
|
imgt_g = self.rgb2gray(imgt) |
|
imgt_pred_g = self.rgb2gray(imgt_pred) |
|
imgt_patched = F.unfold(imgt_g, [3, 3], padding=1).view(b, 9, h, w) |
|
census_imgt = ((imgt_patched - imgt_g) < 0).to(torch.float32) |
|
imgt_pred_patched = F.unfold(imgt_pred_g, [3, 3], padding=1).view(b, 9, h, w) |
|
census_imgt_pred = ((imgt_pred_patched - imgt_pred_g) < 0).to(torch.float32).view(b, 9, 1, h, w) |
|
census_imgt_unfold = F.unfold(census_imgt, [5, 5], padding=2).view(b, 9, 25, h, w) |
|
diff = (census_imgt_unfold - census_imgt_pred).abs().sum(dim=1) |
|
        # pick the census-wise best-matching location in the 5x5 neighborhood (minimum distance)
        valid_mask = torch.argmin(diff, dim=1, keepdim=True).view(b, 1, 1, h, w)
|
imgt_patched = F.unfold(imgt, [3, 3], padding=1).view(b, c * 9, h, w) |
|
imgt_masked = torch.take_along_dim( |
|
F.unfold(imgt_patched, kernel_size=[5, 5], padding=2).view(b, c * 9, 25, h, w), valid_mask, 2) |
|
        # unfold the prediction (not imgt) so the loss has a gradient path to the prediction
        imgt_pred_patched = F.unfold(imgt_pred, [3, 3], padding=1).view(b, c * 9, 1, h, w)
|
|
|
loss = F.l1_loss(imgt_masked, imgt_pred_patched) |
|
return loss * self.weight |
|
|
|
|
|
@register('flow_consistency') |
|
class FCLoss(nn.Module): |
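    """Flow consistency loss: a frozen FlowFormer estimates flows from imgt_pred to img0/img1, and the
    loss is the mean absolute difference to the supervision flows flowt0/flowt1."""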
|
def __init__(self, weight): |
|
super(FCLoss, self).__init__() |
|
self.weight = weight |
|
|
|
|
|
cfg = get_cfg().latentcostformer |
|
|
|
self.of_model = FlowFormer(cfg) |
|
checkpoint = torch.load('./modules/components/m2m_flow_former/flowformer++.pth') |
|
checkpoint_mod = {k.replace('module.', ''): checkpoint[k] for k in checkpoint.keys()} |
|
self.of_model.load_state_dict(checkpoint_mod, strict=False) |
|
self.of_model.to(device) |
|
self.of_model.eval() |
|
for p in self.of_model.parameters(True): |
|
p.requires_grad = False |
|
|
|
def forward(self, imgt_pred, img0, img1, flowt0, flowt1, **kwargs): |
|
self.of_model.eval() |
|
|
|
|
|
flowt0_pred = self.of_model(imgt_pred, img0)[-1] |
|
flowt1_pred = self.of_model(imgt_pred, img1)[-1] |
|
return ((flowt0_pred - flowt0).abs().mean() + (flowt1_pred - flowt1).abs().mean()) * self.weight, flowt0_pred |
|
|
|
|
|
def census_transform(img, kernel_size=3): |
|
""" |
|
Calculates the census transform of an image of shape [N x C x H x W] with batch size N, number of channels C, |
|
height H and width W. If C > 1, the census transform is applied independently on each channel. |
|
    :param img: input image as torch.Tensor of shape [N x C x H x W]
|
:return: census transform of img |
|
""" |
|
assert len(img.size()) == 4 |
|
if kernel_size != 3: |
|
raise NotImplementedError |
|
|
|
n, c, h, w = img.size() |
|
|
|
census = torch.zeros((n, c, h - 2, w - 2), dtype=torch.uint8, device=img.device) |
|
|
|
cp = img[:, :, 1:h - 1, 1:w - 1] |
|
offsets = [(u, v) for v in range(3) for u in range(3) if not u == 1 == v] |
|
|
|
|
|
for u, v in offsets: |
|
census = (census << 1) | (img[:, :, v:v + h - 2, u:u + w - 2] >= cp).byte() |
|
|
|
return torch.nn.functional.pad(census.float() / 255, (1, 1, 1, 1), mode='reflect') |
|
|
|
|
|
class CensusTransform(torch.nn.Module): |
|
""" |
|
Calculates the census transform of an image of shape [N x C x H x W] with batch size N, number of channels C, |
|
height H and width W. If C > 1, the census transform is applied independently on each channel. |
|
    :param img: input image as torch.Tensor of shape [N x C x H x W]
|
:return: census transform of img |
|
""" |
|
def __init__(self, kernel_size=3): |
|
super().__init__() |
|
self._kernel_size = kernel_size |
|
|
|
def forward(self, x): |
|
x = census_transform(x, self._kernel_size) |
|
return x |
|
|
|
|
|
@register('texture_consistency_original') |
|
class PatchMatching(nn.Module): |
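    """Census-guided patch matching loss (original formulation), computed at 1/scale resolution: for each
    predicted patch, the closest ground-truth patch within an nsize x nsize neighborhood (by census
    distance) is selected and penalized with a squared error."""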
|
def __init__(self, weight, kSize=3, nsize=7, scale=4, alpha=1): |
|
super(PatchMatching, self).__init__() |
|
self.scale = scale |
|
self.kSize = kSize |
|
self.nsize = nsize |
|
self.alpha = alpha |
|
self.weight = weight |
|
|
|
self.ct = CensusTransform() |
|
|
|
def _unfold(self, data, with_unfold=False): |
|
|
|
if self.scale != 1: |
|
data = torch.nn.functional.interpolate(data, scale_factor=1.0 / self.scale, mode='bicubic', |
|
align_corners=False) |
|
pad = self.kSize // 2 |
|
|
|
data_pad = torch.nn.functional.pad(data, (pad, pad, pad, pad), mode='reflect') |
|
d1 = torch.nn.functional.unfold(data_pad, kernel_size=self.kSize) |
|
if not with_unfold: |
|
return d1.permute(0, 2, 1).unsqueeze(-2) |
|
else: |
|
b, c, h, w = data.size() |
|
|
|
d1 = d1.view(b, -1, h, w) |
|
c1 = d1.size()[1] |
|
pad = self.nsize // 2 |
|
d1_pad = torch.nn.functional.pad(d1, (pad, pad, pad, pad), mode='reflect') |
|
            d1_pad_unfold = torch.nn.functional.unfold(d1_pad, kernel_size=self.nsize)
            d1_pad_unfold = d1_pad_unfold.view(b, c1, -1, h * w).permute(0, 3, 2, 1)

            return d1_pad_unfold
|
|
|
def _match(self, pred, ref_d0, ref_d1): |
|
|
|
b, n, c = pred.size() |
|
|
pred_2 = (pred ** 2).sum(-1).view(b, n, -1) |
|
ref_d0_2 = (ref_d0 ** 2).sum(-1).view(b, -1, n) |
|
ref_d1_2 = (ref_d1 ** 2).sum(-1).view(b, -1, n) |
|
|
|
|
|
error_d0 = pred_2 + ref_d0_2 - 2.0 * torch.matmul(pred, ref_d0.permute(0, 2, 1)) |
|
error_d1 = pred_2 + ref_d1_2 - 2.0 * torch.matmul(pred, ref_d1.permute(0, 2, 1)) |
|
|
|
score_d0 = torch.exp(self.alpha * error_d0) |
|
score_d1 = torch.exp(self.alpha * error_d1) |
|
|
|
|
|
weight, ind = torch.min(score_d0, dim=2) |
|
index_d0 = ind.unsqueeze(-1).expand([-1, -1, c]) |
|
|
matched_d0 = torch.gather(ref_d0, dim=1, index=index_d0) |
|
|
|
weight, ind = torch.min(score_d1, dim=2) |
|
index_d1 = ind.unsqueeze(-1).expand([-1, -1, c]) |
|
matched_d1 = torch.gather(ref_d1, dim=1, index=index_d1) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
loss = ((pred - matched_d0) ** 2).mean() + ((pred - matched_d1) ** 2).mean() |
|
return loss |
|
|
|
|
|
|
|
    def forward(self, imgt_pred, imgt, **kwargs):
|
|
|
pred_ct = self.ct(imgt_pred) |
|
gt_ct = self.ct(imgt) |
|
|
|
pred_ct = self._unfold(pred_ct) |
|
gt_ct = self._unfold(gt_ct, with_unfold=True) |
|
|
|
|
|
pred_ct = pred_ct.repeat(1, 1, self.nsize ** 2, 1) |
|
|
|
dis_I_ct = ((pred_ct - gt_ct) ** 2).sum(-1) |
|
weight, ind = torch.min(dis_I_ct, dim=2) |
|
index_d = ind.unsqueeze(-1).unsqueeze(-1).repeat(1, 1, self.nsize ** 2 * 2, 3 * self.kSize ** 2) |
|
|
|
imgt_pred = self._unfold(imgt_pred) |
|
imgt = self._unfold(imgt, with_unfold=True) |
|
|
|
imgt_pred = imgt_pred.repeat(1, 1, self.nsize ** 2, 1) |
|
|
|
matched_d = torch.gather(imgt, dim=2, index=index_d) |
|
|
|
|
|
|
|
loss = ((imgt_pred[:, :, 0] - matched_d[:, :, 0]) ** 2) * 0.5 |
|
|
|
return loss.mean() * self.weight |