-
Notifications
You must be signed in to change notification settings - Fork 3
/
loss.py
64 lines (44 loc) · 1.72 KB
/
loss.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
import torch.nn.functional as F
from utils import one_hot_segmentation
import torch.nn as nn
import torch
class CrossEntropy(nn.Module):
    """Multi-class cross-entropy loss tolerant of channel-dim targets.

    A 4-D target (N, 1, H, W) — the usual segmentation-mask layout — is
    squeezed to (N, H, W) and cast to ``long`` before being handed to
    ``nn.CrossEntropyLoss``.
    """

    def __init__(self):
        super().__init__()
        self.Loss = nn.CrossEntropyLoss()

    def forward(self, pred, target):
        # Drop the singleton channel axis so the target matches the
        # (N, H, W) index format CrossEntropyLoss expects.
        if target.dim() == 4:
            target = target.squeeze(1).long()
        return self.Loss(pred, target)
class DiceLossSoftmax(nn.Module):
    """Soft Dice loss computed on softmax probabilities: ``1 - mean Dice``."""

    def __init__(self, smooth=1e-4):
        super().__init__()
        # Smoothing term keeps the ratio finite when a class is absent
        # from both prediction and target (0/0 case).
        self.smooth = smooth

    def flatten(self, tensor):
        # (N, C, ...) -> (C, N * ...): channel-first flattening helper.
        swapped = tensor.transpose(0, 1).contiguous()
        return swapped.view(swapped.size(0), -1)

    def forward(self, pred, target):
        return 1.0 - self.dice_coef(pred, target)

    def dice_coef(self, pred, target):
        batch, classes = pred.shape[0], pred.shape[1]
        probs = F.softmax(pred, dim=1)
        # NOTE(review): one_hot_segmentation (from utils) presumably turns an
        # index mask into a (N, C, ...) one-hot tensor — confirm in utils.
        onehot = one_hot_segmentation(target, classes).float()
        probs = probs.reshape(batch, classes, -1)
        onehot = onehot.reshape(batch, classes, -1)
        overlap = (onehot * probs).sum(dim=-1)
        denom = onehot.sum(dim=-1) + probs.sum(dim=-1)
        dice = (2 * overlap + self.smooth) / (denom + self.smooth)
        # Average per-class Dice, then average over the batch.
        return dice.mean(dim=-1).mean()
class DiceWithBceLoss(nn.Module):
    """Weighted sum of soft Dice loss and cross-entropy loss.

    Args:
        weights: pair of scalars ``(dice_weight, ce_weight)``; defaults to
            ``(1., 1.)``. Despite the ``bce`` name, the second term is
            multi-class cross-entropy (``CrossEntropy`` wraps
            ``nn.CrossEntropyLoss``).
    """

    # Fix: the original default was the mutable list [1., 1.], which is
    # evaluated once and shared by every instance constructed with the
    # default (classic mutable-default-argument pitfall). An immutable
    # tuple default is backward-compatible — indexing works the same.
    def __init__(self, weights=(1., 1.)):
        super().__init__()
        self.dice_loss = DiceLossSoftmax()
        self.bce_loss = CrossEntropy()
        self.weights = weights

    def forward(self, pred, target):
        dl = self.dice_loss(pred, target)
        bl = self.bce_loss(pred, target)
        # Combine the two criteria with their configured weights.
        return self.weights[0] * dl + self.weights[1] * bl