forked from Kerorohu/DS-PWC-pytorch
-
Notifications
You must be signed in to change notification settings - Fork 0
/
losses.py
112 lines (82 loc) · 3.57 KB
/
losses.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
import torch
import torch.nn as nn
import torch.nn.functional as F
def L1loss(x, y):
    """Mean absolute (L1) difference between tensors x and y."""
    diff = x - y
    return diff.abs().mean()
def L2loss(x, y):
    """Mean of the per-location L2 norm (along dim 1) of x - y."""
    return (x - y).norm(p=2, dim=1).mean()
def training_loss(args, flow_pyramid, flow_gt_pyramid):
    """Weighted sum of per-level L2 losses over the flow pyramid.

    Pairs each predicted level with its ground truth and args.weights entry.
    """
    total = 0
    for weight, pred, gt in zip(args.weights, flow_pyramid, flow_gt_pyramid):
        total += weight * L2loss(pred, gt)
    return total
def robust_training_loss(args, flow_pyramid, flow_gt_pyramid):
    """Robust multi-level loss: sum of (w * L1 + args.epsilon) ** args.q per level."""
    total = 0
    for weight, pred, gt in zip(args.weights, flow_pyramid, flow_gt_pyramid):
        total += (weight * L1loss(pred, gt) + args.epsilon) ** args.q
    return total
def EPE(input_flow, target_flow):
    """Average end-point error: mean per-location L2 distance along dim 1."""
    return (target_flow - input_flow).norm(p=2, dim=1).mean()
def EPEp(input_flow, target_flow, args):
    """End-point error for inputs that may not yet be tensors on args.device.

    Accepts array-likes or tensors, moves them to args.device as float32,
    then returns the mean per-location L2 distance along dim 1 (same
    metric as EPE).
    """
    # torch.as_tensor replaces the legacy torch.Tensor() constructor, which
    # always copies and silently assumes float32; as_tensor reuses the
    # storage when the input is already a float32 tensor on the device.
    input_flow = torch.as_tensor(input_flow, dtype=torch.float32, device=args.device)
    target_flow = torch.as_tensor(target_flow, dtype=torch.float32, device=args.device)
    return torch.norm(target_flow - input_flow, p=2, dim=1).mean()
class L1(nn.Module):
    """Mean absolute error criterion (averaged over every element)."""

    def __init__(self):
        super(L1, self).__init__()

    def forward(self, output, target):
        # Elementwise absolute difference, reduced to a single mean.
        return (output - target).abs().mean()
class L2(nn.Module):
    """Mean L2-norm criterion: per-location norm along dim 1, then averaged."""

    def __init__(self):
        super(L2, self).__init__()

    def forward(self, output, target):
        # Norm over the channel dimension (dim 1), mean over the rest.
        return (output - target).norm(p=2, dim=1).mean()
class L1Loss(nn.Module):
    """Reports [L1 loss, EPE], both computed on the final pyramid output."""

    def __init__(self, args):
        super(L1Loss, self).__init__()
        self.args = args
        self.loss = L1()
        self.loss_labels = ['L1', 'EPE']

    def forward(self, outputs, target):
        # Only the finest (last) prediction is scored against the target.
        final = outputs[-1]
        return [self.loss(final, target), EPE(final, target)]
class L2Loss(nn.Module):
    """Reports [L2 loss, EPE], both computed on the final pyramid output."""

    def __init__(self, args):
        super(L2Loss, self).__init__()
        self.args = args
        self.loss = L2()
        self.loss_labels = ['L2', 'EPE']

    def forward(self, outputs, target):
        final = outputs[-1]
        lossvalue = self.loss(final, target)
        # BUG FIX: EPE was previously computed on the whole `outputs` list
        # instead of the final prediction, unlike L1Loss.forward.
        epevalue = EPE(final, target)
        return [lossvalue, epevalue]
class MultiScale(nn.Module):
    """Multi-scale training loss for pyramid flow predictions.

    Applies the chosen per-level criterion (L1 or L2) at every pyramid
    level with geometrically decaying weights, and also accumulates the
    end-point error per level.
    """

    def __init__(self, args, startScale=5, numScales=6, l_weight=0.32, norm='L1'):
        super(MultiScale, self).__init__()
        self.startScale = startScale
        self.numScales = numScales
        # Per-scale weight halves at each step: l_weight / 2**scale.
        self.loss_weights = torch.FloatTensor(
            [(l_weight / 2 ** scale) for scale in range(self.numScales)]).to(args.device)
        self.args = args
        self.l_type = norm
        self.div_flow = 0.05
        assert (len(self.loss_weights) == self.numScales)
        if self.l_type == 'L1':
            self.loss = L1()
        else:
            self.loss = L2()
        # Average-pool operators (coarsest first) used to downsample the
        # ground truth to each intermediate output resolution.
        self.multiScales = [nn.AvgPool2d(2 ** l, 2 ** l) for l in range(args.num_levels)][::-1][:args.output_level]
        # BUG FIX: a trailing comma made this a 1-tuple wrapping the list;
        # L1Loss/L2Loss store a plain list, and callers index labels directly.
        self.loss_labels = ['MultiScale-' + self.l_type, 'EPE']

    def forward(self, outputs, target):
        """Return [total weighted loss, total EPE, per-level losses, per-level EPEs]."""
        args = self.args
        # If flow is normalized, every output is multiplied by its size;
        # correspondingly, the ground truth is rescaled at each level.
        targets = [avg_pool(target) / 2 ** (args.num_levels - l - 1)
                   for l, avg_pool in enumerate(self.multiScales)] + [target]
        loss, epe = 0, 0
        loss_levels, epe_levels = [], []
        for w, o, t in zip(args.weights, outputs, targets):
            # Compute each per-level metric once and reuse it for both the
            # running totals and the per-level lists.
            level_loss = self.loss(o, t)
            level_epe = EPE(o, t)
            loss += w * level_loss
            epe += level_epe
            loss_levels.append(level_loss)
            epe_levels.append(level_epe)
        return [loss, epe, loss_levels, epe_levels]