.fuse() gradient introduction bug fix

pull/1007/head
Glenn Jocher 2020-09-20 11:57:19 -07:00
parent c4cb78570c
commit 89655a84f2
1 changed file with 19 additions and 19 deletions


@@ -104,8 +104,8 @@ def prune(model, amount=0.3):
 def fuse_conv_and_bn(conv, bn):
-    # https://tehnokv.com/posts/fusing-batchnorm-and-conv/
-    with torch.no_grad():
-        # init
-        fusedconv = nn.Conv2d(conv.in_channels,
-                              conv.out_channels,
+    # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/
+    # init
+    fusedconv = nn.Conv2d(conv.in_channels,
+                          conv.out_channels,
@@ -113,7 +113,7 @@ def fuse_conv_and_bn(conv, bn):
-                              stride=conv.stride,
-                              padding=conv.padding,
-                              groups=conv.groups,
-                              bias=True).to(conv.weight.device)
+                          stride=conv.stride,
+                          padding=conv.padding,
+                          groups=conv.groups,
+                          bias=True).requires_grad_(False).to(conv.weight.device)
 
-        # prepare filters
-        w_conv = conv.weight.clone().view(conv.out_channels, -1)
+    # prepare filters
+    w_conv = conv.weight.clone().view(conv.out_channels, -1)
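
For readers landing on this commit: the likely failure mode being fixed is that `torch.no_grad()` only suppresses autograd while the fused layer is being built. The new `nn.Conv2d` is still created with trainable parameters, so once the context manager exits, every forward pass through the fused model rebuilds an autograd graph. Freezing the module itself with `.requires_grad_(False)` removes that gradient introduction permanently, because frozen-at-construction is a property of the module while `no_grad()` is only a property of the call site. Below is a minimal runnable sketch of the full fusion routine in the spirit of this change; the `kernel_size` argument and the weight/bias folding math are not visible in the excerpt above and are reconstructed from the linked tehnokv post, and the `detach()` calls are my addition to keep the sketch autograd-free, so treat it as illustrative rather than the repository's exact code.

```python
import torch
import torch.nn as nn


def fuse_conv_and_bn(conv, bn):
    """Fuse a Conv2d followed by BatchNorm2d into a single Conv2d for inference.

    Sketch of the approach after this commit: the fused module is created with
    requires_grad_(False), so its parameters stay frozen after construction and
    no autograd graph is built when the fused model runs.
    """
    fusedconv = nn.Conv2d(conv.in_channels,
                          conv.out_channels,
                          kernel_size=conv.kernel_size,
                          stride=conv.stride,
                          padding=conv.padding,
                          groups=conv.groups,
                          bias=True).requires_grad_(False).to(conv.weight.device)

    # Fold the BN scale into the conv weights: W' = diag(gamma / sqrt(var + eps)) @ W.
    # detach() keeps this sketch autograd-free even if conv/bn are still trainable.
    w_conv = conv.weight.detach().clone().view(conv.out_channels, -1)
    w_bn = torch.diag(bn.weight.detach().div(torch.sqrt(bn.running_var + bn.eps)))
    fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape))

    # Fold the BN shift into the conv bias: b' = w_bn @ b + (beta - gamma * mean / sqrt(var + eps)).
    b_conv = torch.zeros(conv.out_channels, device=conv.weight.device) if conv.bias is None \
        else conv.bias.detach()
    b_bn = bn.bias.detach() - bn.weight.detach() * bn.running_mean / torch.sqrt(bn.running_var + bn.eps)
    fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)
    return fusedconv


# Quick sanity check: the fused layer matches conv -> bn in eval mode, and its
# forward pass introduces no gradient tracking (the bug this commit fixes).
conv, bn = nn.Conv2d(3, 8, 3, padding=1), nn.BatchNorm2d(8)
x = torch.randn(2, 3, 16, 16)
bn(conv(x))                      # one training-mode pass to populate running stats
conv.eval(), bn.eval()
fused = fuse_conv_and_bn(conv, bn)
print(torch.allclose(bn(conv(x)), fused(x), atol=1e-5))  # True
print(fused(x).requires_grad)                            # False: no graph is built
```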