import torch
import torch.nn as nn
import torch.nn.functional as F

from utils.seg_opr.lovasz_losses import lovasz_softmax

class JSD(nn.Module):
    """Jensen-Shannon divergence between two batches of logits."""

    def __init__(self):
        super(JSD, self).__init__()
        # log_target=True: both input and target are passed in log-space.
        self.kl = nn.KLDivLoss(reduction='batchmean', log_target=True)

    def forward(self, p: torch.Tensor, q: torch.Tensor):
        p = F.softmax(p, dim=1)
        q = F.softmax(q, dim=1)
        # Log of the mixture distribution m = (p + q) / 2.
        m = (0.5 * (p + q)).log()
        # JSD(p, q) = 0.5 * (KL(p || m) + KL(q || m)).
        return 0.5 * (self.kl(m, p.log()) + self.kl(m, q.log()))
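
# Usage sketch (illustrative, not from the original file): JSD is symmetric
# in its two arguments and takes raw logits, e.g. from two augmented views
# in a consistency-regularization setup:
#
#     jsd = JSD()
#     loss = jsd(logits_view_a, logits_view_b)  # both of shape (B, C, H, W)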

class MSE(nn.Module):
    """Mean squared error between the softmax outputs of two logit tensors."""

    def __init__(self):
        super(MSE, self).__init__()
        self.mse = nn.MSELoss(reduction='mean')

    def forward(self, p: torch.Tensor, q: torch.Tensor):
        p = F.softmax(p, dim=1)
        q = F.softmax(q, dim=1)
        return self.mse(p, q)
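
# Usage sketch (illustrative): same call pattern as JSD above, but comparing
# the two softmax outputs with a bounded squared error instead of a
# divergence:
#
#     mse = MSE()
#     loss = mse(logits_view_a, logits_view_b)  # both of shape (B, C, H, W)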

class ProbOhemCrossEntropy2d(nn.Module):
    """Cross-entropy with online hard example mining (OHEM): only pixels
    whose predicted ground-truth probability falls below a threshold (or
    the min_kept hardest pixels) contribute to the loss."""

    def __init__(self, ignore_label, reduction='mean', thresh=0.6,
                 min_kept=256, down_ratio=1, use_weight=False):
        super(ProbOhemCrossEntropy2d, self).__init__()
        self.ignore_label = ignore_label
        self.thresh = float(thresh)
        self.min_kept = int(min_kept)
        self.down_ratio = down_ratio  # retained for interface compatibility; unused below
        if use_weight:
            # Hard-coded weights for the 19 Cityscapes classes.
            weight = torch.FloatTensor(
                [0.8373, 0.918, 0.866, 1.0345, 1.0166, 0.9969, 0.9754,
                 1.0489, 0.8786, 1.0023, 0.9539, 0.9843, 1.1116, 0.9037,
                 1.0865, 1.0955, 1.0865, 1.1529, 1.0507])
            self.criterion = torch.nn.CrossEntropyLoss(reduction=reduction,
                                                       weight=weight,
                                                       ignore_index=ignore_label)
        else:
            self.criterion = torch.nn.CrossEntropyLoss(reduction=reduction,
                                                       ignore_index=ignore_label)

    def forward(self, pred, target):
        b, c, h, w = pred.size()
        target = target.view(-1)
        valid_mask = target.ne(self.ignore_label)
        # Zero out ignored labels so they can safely be used as indices below.
        target = target * valid_mask.long()
        num_valid = valid_mask.sum()

        prob = F.softmax(pred, dim=1)
        prob = (prob.transpose(0, 1)).reshape(c, -1)

        if self.min_kept > num_valid:
            print('Labels: {} < {}'.format(num_valid, self.min_kept))
        elif num_valid > 0:
            # Invalid pixels get probability 1 so they are never selected.
            prob = prob.masked_fill_(~valid_mask, 1)
            # Predicted probability of the ground-truth class for each pixel.
            mask_prob = prob[
                target, torch.arange(len(target), dtype=torch.long)]
            threshold = self.thresh
            if self.min_kept > 0:
                # Raise the threshold if needed so that at least min_kept
                # pixels are retained.
                index = mask_prob.argsort()
                threshold_index = index[min(len(index), self.min_kept) - 1]
                if mask_prob[threshold_index] > self.thresh:
                    threshold = mask_prob[threshold_index]
                kept_mask = mask_prob.le(threshold)
                target = target * kept_mask.long()
                valid_mask = valid_mask * kept_mask

        # Pixels that were not kept are mapped back to the ignore label.
        target = target.masked_fill_(~valid_mask, self.ignore_label)
        target = target.view(b, h, w)

        return self.criterion(pred, target)
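
# Usage sketch (hyper-parameters are illustrative): the loss keeps a pixel if
# its predicted ground-truth probability is at most `thresh`, raising the
# threshold when necessary so that at least `min_kept` pixels survive:
#
#     criterion = ProbOhemCrossEntropy2d(ignore_label=255, thresh=0.7,
#                                        min_kept=50000)
#     loss = criterion(pred, target)  # pred (B, C, H, W), target (B, H, W)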

class FocalLoss(nn.Module):
    """Focal loss: scales per-pixel cross-entropy by (1 - pt) ** gamma to
    focus training on hard examples."""

    def __init__(self, gamma=2, alpha=None, ignore_label=255, size_average=True):
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        self.size_average = size_average
        # reduction='none' keeps the per-pixel losses (the deprecated
        # reduce=False flag did the same).
        self.CE_loss = nn.CrossEntropyLoss(reduction='none',
                                           ignore_index=ignore_label,
                                           weight=alpha)

    def forward(self, output, target):
        logpt = self.CE_loss(output, target)
        pt = torch.exp(-logpt)
        # Down-weight easy pixels (pt close to 1).
        loss = ((1 - pt) ** self.gamma) * logpt
        if self.size_average:
            return loss.mean()
        return loss.sum()
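
# Usage sketch (illustrative): gamma=2 suppresses the contribution of
# well-classified pixels; alpha, if given, is a per-class weight tensor of
# length C:
#
#     focal = FocalLoss(gamma=2, ignore_label=255)
#     loss = focal(output, target)  # output (B, C, H, W), target (B, H, W)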

class LovaszSoftmax(nn.Module):
    def __init__(self, classes='present', per_image=False, ignore_index=255):
        super(LovaszSoftmax, self).__init__()
        self.classes = classes
        self.per_image = per_image
        self.ignore_index = ignore_index

    def forward(self, output, target):
        # lovasz_softmax expects class probabilities, not raw logits.
        probs = F.softmax(output, dim=1)
        loss = lovasz_softmax(probs, target, classes=self.classes,
                              per_image=self.per_image,
                              ignore=self.ignore_index)
        return loss
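
if __name__ == '__main__':
    # Minimal smoke test (not part of the original module): exercise each
    # loss on random 19-class data; shapes and hyper-parameters are
    # illustrative.
    torch.manual_seed(0)
    logits_a = torch.randn(2, 19, 32, 32)
    logits_b = torch.randn(2, 19, 32, 32)
    target = torch.randint(0, 19, (2, 32, 32))

    print('JSD     :', JSD()(logits_a, logits_b).item())
    print('MSE     :', MSE()(logits_a, logits_b).item())
    print('OHEM CE :', ProbOhemCrossEntropy2d(ignore_label=255)(logits_a,
                                                                target).item())
    print('Focal   :', FocalLoss(gamma=2)(logits_a, target).item())
    print('Lovasz  :', LovaszSoftmax()(logits_a, target).item())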