Isi99999 committed · Commit 70a3bc8 · verified · 1 Parent(s): 0d57ce4

Adding IFNet_HDv3.py to 4.22

4.22/RIFEv4.22/train_log/IFNet_HDv3.py ADDED
@@ -0,0 +1,166 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
from model.warplayer import warp
# from train_log.refine import *

# run on GPU when available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# 3x3 conv + LeakyReLU helper
def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
    return nn.Sequential(
        nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                  padding=padding, dilation=dilation, bias=True),
        nn.LeakyReLU(0.2, True)
    )

# conv + BatchNorm + LeakyReLU helper (defined but not used below)
def conv_bn(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
    return nn.Sequential(
        nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                  padding=padding, dilation=dilation, bias=False),
        nn.BatchNorm2d(out_planes),
        nn.LeakyReLU(0.2, True)
    )

# shallow feature encoder: one stride-2 conv, two stride-1 convs, then a
# transposed conv back up, yielding an 8-channel feature map at input resolution
class Head(nn.Module):
    def __init__(self):
        super(Head, self).__init__()
        self.cnn0 = nn.Conv2d(3, 32, 3, 2, 1)
        self.cnn1 = nn.Conv2d(32, 32, 3, 1, 1)
        self.cnn2 = nn.Conv2d(32, 32, 3, 1, 1)
        self.cnn3 = nn.ConvTranspose2d(32, 8, 4, 2, 1)
        self.relu = nn.LeakyReLU(0.2, True)

    def forward(self, x, feat=False):
        x0 = self.cnn0(x)
        x = self.relu(x0)
        x1 = self.cnn1(x)
        x = self.relu(x1)
        x2 = self.cnn2(x)
        x = self.relu(x2)
        x3 = self.cnn3(x)
        if feat:
            return [x0, x1, x2, x3]
        return x3

# residual block: 3x3 conv scaled by a learned per-channel gain (beta)
class ResConv(nn.Module):
    def __init__(self, c, dilation=1):
        super(ResConv, self).__init__()
        self.conv = nn.Conv2d(c, c, 3, 1, dilation, dilation=dilation, groups=1)
        self.beta = nn.Parameter(torch.ones((1, c, 1, 1)), requires_grad=True)
        self.relu = nn.LeakyReLU(0.2, True)

    def forward(self, x):
        return self.relu(self.conv(x) * self.beta + x)

class IFBlock(nn.Module):
    def __init__(self, in_planes, c=64):
        super(IFBlock, self).__init__()
        # two stride-2 convs: 4x downsampling
        self.conv0 = nn.Sequential(
            conv(in_planes, c//2, 3, 2, 1),
            conv(c//2, c, 3, 2, 1),
        )
        self.convblock = nn.Sequential(
            ResConv(c),
            ResConv(c),
            ResConv(c),
            ResConv(c),
            ResConv(c),
            ResConv(c),
            ResConv(c),
            ResConv(c),
        )
        # 4x upsampling: transposed conv (2x) + PixelShuffle (2x);
        # 4*13 channels shuffle down to 13 = 4 (flow) + 1 (mask) + 8 (feat)
        self.lastconv = nn.Sequential(
            nn.ConvTranspose2d(c, 4*13, 4, 2, 1),
            nn.PixelShuffle(2)
        )

    def forward(self, x, flow=None, scale=1):
        # estimate at 1/scale resolution, then rescale the result back up
        x = F.interpolate(x, scale_factor=1. / scale, mode="bilinear", align_corners=False)
        if flow is not None:
            flow = F.interpolate(flow, scale_factor=1. / scale, mode="bilinear", align_corners=False) * 1. / scale
            x = torch.cat((x, flow), 1)
        feat = self.conv0(x)
        feat = self.convblock(feat)
        tmp = self.lastconv(feat)
        tmp = F.interpolate(tmp, scale_factor=scale, mode="bilinear", align_corners=False)
        flow = tmp[:, :4] * scale  # flow vectors scale with resolution
        mask = tmp[:, 4:5]
        feat = tmp[:, 5:]
        return flow, mask, feat

class IFNet(nn.Module):
    def __init__(self):
        super(IFNet, self).__init__()
        self.block0 = IFBlock(7+16, c=256)
        self.block1 = IFBlock(8+4+16+8, c=192)
        self.block2 = IFBlock(8+4+16+8, c=96)
        self.block3 = IFBlock(8+4+16+8, c=48)
        self.encode = Head()

        # not used during inference
        self.teacher = IFBlock(8+4+16+3+8, c=96)
        self.caltime = nn.Sequential(
            nn.Conv2d(16+9, 32, 3, 2, 1),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(32, 64, 3, 2, 1),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(64, 64, 3, 1, 1),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(64, 64, 3, 1, 1),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(64, 1, 3, 1, 1),
            nn.Sigmoid()
        )

    def forward(self, x, timestep=0.5, scale_list=[8, 4, 2, 1], training=False, fastmode=True, ensemble=False):
        if training == False:
            channel = x.shape[1] // 2
            img0 = x[:, :channel]
            img1 = x[:, channel:]
        if not torch.is_tensor(timestep):
            # broadcast a scalar timestep to a (N, 1, H, W) map
            timestep = (x[:, :1].clone() * 0 + 1) * timestep
        else:
            timestep = timestep.repeat(1, 1, img0.shape[2], img0.shape[3])
        f0 = self.encode(img0[:, :3])
        f1 = self.encode(img1[:, :3])
        flow_list = []
        merged = []
        mask_list = []
        warped_img0 = img0
        warped_img1 = img1
        flow = None
        mask = None
        loss_cons = 0
        block = [self.block0, self.block1, self.block2, self.block3]
        # coarse-to-fine refinement: each block updates the flow at a finer scale
        for i in range(4):
            if flow is None:
                flow, mask, feat = block[i](torch.cat((img0[:, :3], img1[:, :3], f0, f1, timestep), 1), None, scale=scale_list[i])
                if ensemble:
                    print("warning: ensemble is not supported since RIFEv4.21")
            else:
                wf0 = warp(f0, flow[:, :2])
                wf1 = warp(f1, flow[:, 2:4])
                fd, m0, feat = block[i](torch.cat((warped_img0[:, :3], warped_img1[:, :3], wf0, wf1, timestep, mask, feat), 1), flow, scale=scale_list[i])
                if ensemble:
                    print("warning: ensemble is not supported since RIFEv4.21")
                else:
                    mask = m0
                flow = flow + fd
            mask_list.append(mask)
            flow_list.append(flow)
            warped_img0 = warp(img0, flow[:, :2])
            warped_img1 = warp(img1, flow[:, 2:4])
            merged.append((warped_img0, warped_img1))
        mask = torch.sigmoid(mask)
        # blend the two warped frames with the learned occlusion mask
        merged[3] = (warped_img0 * mask + warped_img1 * (1 - mask))
        if not fastmode:
            print('contextnet is removed')
            '''
            c0 = self.contextnet(img0, flow[:, :2])
            c1 = self.contextnet(img1, flow[:, 2:4])
            tmp = self.unet(img0, img1, warped_img0, warped_img1, mask, flow, c0, c1)
            res = tmp[:, :3] * 2 - 1
            merged[3] = torch.clamp(merged[3] + res, 0, 1)
            '''
        return flow_list, mask_list[3], merged
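
For context, a minimal inference sketch (not part of the commit): it assumes the file is importable as IFNet_HDv3 with the standard RIFE repo layout on sys.path, so that model.warplayer.warp resolves, and that the frame height and width are multiples of 32 so the [8, 4, 2, 1] scale pyramid divides evenly. Checkpoint loading is omitted; in the real pipeline the weights come from the train_log checkpoint via RIFE's model wrapper.

import torch
from IFNet_HDv3 import IFNet  # assumes train_log/ and the repo root are on sys.path

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

net = IFNet().to(device).eval()  # untrained here; real use loads the RIFE weights

# two RGB frames; H and W chosen as multiples of 32 for the scale pyramid
img0 = torch.rand(1, 3, 256, 448, device=device)  # frame at t=0
img1 = torch.rand(1, 3, 256, 448, device=device)  # frame at t=1
x = torch.cat((img0, img1), dim=1)                # (N, 6, H, W), as forward expects when training=False

with torch.no_grad():
    flow_list, mask, merged = net(x, timestep=0.5)

mid = merged[3]  # blended intermediate frame at the requested timestep, shape (N, 3, H, W)
print(mid.shape)

The same call works for any timestep in (0, 1); timestep may also be passed as a (N, 1, 1, 1) tensor, which forward repeats over the spatial dimensions.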