feng2022 committed on
Commit 70ebff6
1 Parent(s): c3389d7

Update Time_TravelRephotography/model.py

Files changed (1)
  1. Time_TravelRephotography/model.py +5 -4
Time_TravelRephotography/model.py CHANGED
@@ -9,7 +9,8 @@ from torch import nn
 from torch.nn import functional as F
 from torch.autograd import Function
 
-from op import FusedLeakyReLU, fused_leaky_relu, upfirdn2d
+#from op import FusedLeakyReLU, fused_leaky_relu, upfirdn2d
+from torch_utils.ops import bias_act, upfirdn2d
 
 
 class PixelNorm(nn.Module):
@@ -152,7 +153,7 @@ class EqualLinear(nn.Module):
     def forward(self, input):
         if self.activation:
             out = F.linear(input, self.weight * self.scale)
-            out = FusedLeakyReLU(out, self.bias * self.lr_mul)
+            out = bias_act(out)
 
         else:
             out = F.linear(
@@ -331,7 +332,7 @@ class StyledConv(nn.Module):
         self.noise = NoiseInjection()
         # self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
         # self.activate = ScaledLeakyReLU(0.2)
-        self.activate = FusedLeakyReLU(out_channel)
+        self.activate = bias_act(out_channel)
 
     def forward(self, input, style, noise=None):
         out = self.conv(input, style)
@@ -606,7 +607,7 @@ class ConvLayer(nn.Sequential):
 
         if activate:
             if bias:
-                layers.append(FusedLeakyReLU(out_channel))
+                layers.append(bias_act(out_channel))
 
             else:
                 layers.append(ScaledLeakyReLU(0.2))
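
Note: this commit replaces the custom op extension (FusedLeakyReLU / fused_leaky_relu / upfirdn2d) with the bias_act op from the bundled torch_utils.ops package (the StyleGAN2-ADA ops). In that package, bias_act is imported as a module whose entry point is the function bias_act.bias_act(x, b=None, dim=1, act=..., alpha=..., gain=...), so expressions like bias_act(out) or bias_act(out_channel) in the hunks above are likely to raise at runtime. A minimal sketch of a drop-in wrapper that keeps the original call sites is shown below; the wrapper name, its defaults, and the assumption that the StyleGAN2-ADA torch_utils package is importable are assumptions for illustration, not part of this commit.

    # Sketch of a FusedLeakyReLU-style wrapper over torch_utils.ops.bias_act.
    # Assumes the StyleGAN2-ADA torch_utils package is on the import path; this
    # wrapper is illustrative only and not part of the commit above.
    import torch
    from torch import nn

    from torch_utils.ops import bias_act


    class FusedLeakyReLU(nn.Module):
        """Fused bias + leaky ReLU (slope 0.2) + gain sqrt(2), mirroring the old op."""

        def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
            super().__init__()
            self.bias = nn.Parameter(torch.zeros(channel))
            self.negative_slope = negative_slope
            self.scale = scale

        def forward(self, input):
            # bias_act.bias_act broadcasts the 1-D bias over dim=1 and applies the
            # activation and gain in a single fused CUDA kernel when available.
            return bias_act.bias_act(
                input, self.bias, act='lrelu',
                alpha=self.negative_slope, gain=self.scale,
            )


    def fused_leaky_relu(input, bias=None, negative_slope=0.2, scale=2 ** 0.5):
        # Functional form used in EqualLinear.forward:
        #     out = fused_leaky_relu(out, self.bias * self.lr_mul)
        return bias_act.bias_act(input, bias, act='lrelu',
                                 alpha=negative_slope, gain=scale)

With a wrapper like this, the call sites in EqualLinear, StyledConv, and ConvLayer could keep their original form (out = fused_leaky_relu(out, self.bias * self.lr_mul), self.activate = FusedLeakyReLU(out_channel), layers.append(FusedLeakyReLU(out_channel))) while the fused bias/activation work moves to torch_utils.ops.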