reformat

parent b392ba44cc
commit fa49911440
@@ -22,13 +22,12 @@ def conv3x3(in_planes, out_planes, stride=1, dilation=1):
 def conv_1x1_bn(inp, oup, activation=nn.ReLU6):
     return nn.Sequential(
-        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
-        nn.BatchNorm2d(oup),
-        activation(inplace=True)
-    )
+        nn.Conv2d(inp, oup, 1, 1, 0, bias=False), nn.BatchNorm2d(oup),
+        activation(inplace=True))


 class ConvBNReLU(nn.Sequential):

     def __init__(self,
                  in_planes,
                  out_planes,
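This commit is formatting-only: a yapf-style pass that breaks after an opening parenthesis and attaches the closing parenthesis to the last argument instead of giving it its own line. As a sketch, a configuration along these lines would produce this layout (the exact options behind this commit are an assumption; they are not shown in the diff):

[yapf]
based_on_style = pep8
blank_line_before_nested_class_or_def = true
split_before_expression_after_opening_paren = true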
@@ -39,16 +38,15 @@ class ConvBNReLU(nn.Sequential):
         padding = (kernel_size - 1) // 2

         super(ConvBNReLU, self).__init__(
-            nn.Conv2d(in_planes,
-                      out_planes,
-                      kernel_size,
-                      stride,
-                      padding,
-                      groups=groups,
-                      bias=False),
-            nn.BatchNorm2d(out_planes),
-            activation(inplace=True)
-        )
+            nn.Conv2d(
+                in_planes,
+                out_planes,
+                kernel_size,
+                stride,
+                padding,
+                groups=groups,
+                bias=False), nn.BatchNorm2d(out_planes),
+            activation(inplace=True))


 def _make_divisible(v, divisor, min_value=None):
@@ -62,6 +60,7 @@ def _make_divisible(v, divisor, min_value=None):


 class InvertedResidual(nn.Module):
+
     def __init__(self,
                  inplanes,
                  outplanes,
@@ -79,17 +78,18 @@ class InvertedResidual(nn.Module):
         layers = []
         if expand_ratio != 1:
             # pw
-            layers.append(ConvBNReLU(inplanes,
-                                     hidden_dim,
-                                     kernel_size=1,
-                                     activation=activation))
+            layers.append(
+                ConvBNReLU(
+                    inplanes, hidden_dim, kernel_size=1,
+                    activation=activation))
         layers.extend([
             # dw
-            ConvBNReLU(hidden_dim,
-                       hidden_dim,
-                       stride=stride,
-                       groups=hidden_dim,
-                       activation=activation),
+            ConvBNReLU(
+                hidden_dim,
+                hidden_dim,
+                stride=stride,
+                groups=hidden_dim,
+                activation=activation),
             # pw-linear
             nn.Conv2d(hidden_dim, outplanes, 1, 1, 0, bias=False),
             nn.BatchNorm2d(outplanes),
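For context, hidden_dim above is the expanded width of the inverted residual: the pointwise (pw) 1x1 conv expands inplanes by expand_ratio, the depthwise (dw) conv filters each channel separately, and the final pw-linear conv projects back down to outplanes with no activation. A sketch of the derivation, following the standard MobileNetV2 recipe (the exact rounding used in this file falls outside the hunk, so treat it as an assumption):

# Expanded width consumed by the pw/dw layers above; the pw-linear
# conv then projects from hidden_dim back down to outplanes.
hidden_dim = int(round(inplanes * expand_ratio))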
@@ -97,6 +97,7 @@ class InvertedResidual(nn.Module):
         self.conv = nn.Sequential(*layers)

     def forward(self, x):
+
         def _inner_forward(x):
             if self.use_res_connect:
                 return x + self.conv(x)
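The _inner_forward closure above exists so the whole residual computation can be handed to gradient checkpointing when with_cp is set. A minimal sketch of how this pattern is usually completed (the tail of the method falls outside this hunk, so the checkpoint call below is an assumption based on the with_cp flag used elsewhere in the diff):

import torch.nn as nn
import torch.utils.checkpoint as cp

class InvertedResidual(nn.Module):
    ...

    def forward(self, x):

        def _inner_forward(x):
            if self.use_res_connect:
                return x + self.conv(x)
            return self.conv(x)

        # Recompute the block during backward instead of storing its
        # activations, trading compute for memory.
        if self.with_cp and x.requires_grad:
            return cp.checkpoint(_inner_forward, x)
        return _inner_forward(x)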
@@ -122,15 +123,23 @@ def make_inverted_res_layer(block,
     layers = []
     for i in range(num_blocks):
         if i == 0:
-            layers.append(block(inplanes, planes, stride,
-                                expand_ratio=expand_ratio,
-                                activation=activation,
-                                with_cp=with_cp))
+            layers.append(
+                block(
+                    inplanes,
+                    planes,
+                    stride,
+                    expand_ratio=expand_ratio,
+                    activation=activation,
+                    with_cp=with_cp))
         else:
-            layers.append(block(inplanes, planes, 1,
-                                expand_ratio=expand_ratio,
-                                activation=activation,
-                                with_cp=with_cp))
+            layers.append(
+                block(
+                    inplanes,
+                    planes,
+                    1,
+                    expand_ratio=expand_ratio,
+                    activation=activation,
+                    with_cp=with_cp))
         inplanes = planes
     return nn.Sequential(*layers)

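A usage sketch for the builder above (the keyword names come from the hunk, but the signature's full parameter order is not visible here, so this call is illustrative only):

# Stacks num_blocks InvertedResiduals: the first block gets `stride`,
# the rest get stride 1, with inplanes advanced to planes each time.
layer = make_inverted_res_layer(
    InvertedResidual,
    inplanes=16,
    planes=24,
    num_blocks=2,
    stride=2,
    expand_ratio=6)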
@@ -162,15 +171,10 @@ class MobileNetv2(BaseBackbone):
         super(MobileNetv2, self).__init__()
         block = InvertedResidual
         # expand_ratio, out_channel, n, stride
-        inverted_residual_setting = [
-            [1, 16, 1, 1],
-            [6, 24, 2, 2],
-            [6, 32, 3, 2],
-            [6, 64, 4, 2],
-            [6, 96, 3, 1],
-            [6, 160, 3, 2],
-            [6, 320, 1, 1]
-        ]
+        inverted_residual_setting = [[1, 16, 1, 1], [6, 24, 2,
+                                                     2], [6, 32, 3, 2],
+                                     [6, 64, 4, 2], [6, 96, 3, 1],
+                                     [6, 160, 3, 2], [6, 320, 1, 1]]
         self.widen_factor = widen_factor
         if isinstance(activation, str):
             activation = eval(activation)
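Per the inline comment, each row of inverted_residual_setting reads as [expand_ratio, out_channel, n, stride]. A sketch of how such a table typically drives stage construction inside __init__ (the _make_divisible rounding with widen_factor follows the standard MobileNetV2 recipe and is an assumption here, since that code falls outside the hunk):

for t, c, n, s in inverted_residual_setting:
    # Scale each stage width by widen_factor, keeping it divisible by 8.
    planes = _make_divisible(c * widen_factor, 8)
    layer = make_inverted_res_layer(
        block,
        self.inplanes,
        planes,
        num_blocks=n,
        stride=s,
        expand_ratio=t)
    self.inplanes = planes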
@@ -211,9 +215,8 @@ class MobileNetv2(BaseBackbone):
         self.out_channel = int(self.out_channel * widen_factor) \
             if widen_factor > 1.0 else self.out_channel

-        self.conv_last = nn.Conv2d(self.inplanes,
-                                   self.out_channel,
-                                   1, 1, 0, bias=False)
+        self.conv_last = nn.Conv2d(
+            self.inplanes, self.out_channel, 1, 1, 0, bias=False)
         self.bn_last = nn.BatchNorm2d(self.out_channel)

         self.feat_dim = self.out_channel
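The remaining hunks apply the same reformat to the backbone's unit tests. The test file's path is not shown in this rendering, but the next hunk header shows it importing InvertedResidual from mmcls.models.backbones.mobilenet_v2.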
@@ -10,7 +10,7 @@ from mmcls.models.backbones.mobilenet_v2 import InvertedResidual

 def is_block(modules):
     """Check if is ResNet building block."""
-    if isinstance(modules, (InvertedResidual,)):
+    if isinstance(modules, (InvertedResidual, )):
         return True
     return False
@@ -35,40 +35,30 @@ def test_mobilenetv2_invertedresidual():

     with pytest.raises(AssertionError):
         # stride must be in [1, 2]
-        InvertedResidual(64, 16,
-                         stride=3, expand_ratio=6)
+        InvertedResidual(64, 16, stride=3, expand_ratio=6)

     # Test InvertedResidual with checkpoint forward, stride=1
-    block = InvertedResidual(64, 16,
-                             stride=1,
-                             expand_ratio=6)
+    block = InvertedResidual(64, 16, stride=1, expand_ratio=6)
     x = torch.randn(1, 64, 56, 56)
     x_out = block(x)
     assert x_out.shape == torch.Size([1, 16, 56, 56])

     # Test InvertedResidual with checkpoint forward, stride=2
-    block = InvertedResidual(64, 16,
-                             stride=2,
-                             expand_ratio=6)
+    block = InvertedResidual(64, 16, stride=2, expand_ratio=6)
     x = torch.randn(1, 64, 56, 56)
     x_out = block(x)
     assert x_out.shape == torch.Size([1, 16, 28, 28])

     # Test InvertedResidual with checkpoint forward
-    block = InvertedResidual(64, 16,
-                             stride=1,
-                             expand_ratio=6,
-                             with_cp=True)
+    block = InvertedResidual(64, 16, stride=1, expand_ratio=6, with_cp=True)
     assert block.with_cp
     x = torch.randn(1, 64, 56, 56)
     x_out = block(x)
     assert x_out.shape == torch.Size([1, 16, 56, 56])

     # Test InvertedResidual with activation=nn.ReLU
-    block = InvertedResidual(64, 16,
-                             stride=1,
-                             expand_ratio=6,
-                             activation=nn.ReLU)
+    block = InvertedResidual(
+        64, 16, stride=1, expand_ratio=6, activation=nn.ReLU)
     x = torch.randn(1, 64, 56, 56)
     x_out = block(x)
     assert x_out.shape == torch.Size([1, 16, 56, 56])
@@ -193,8 +183,8 @@ def test_mobilenetv2_backbone():
     assert feat[6].shape == torch.Size([1, 320, 7, 7])

     # Test MobileNetv2 with layers 1, 3, 5 out forward
-    model = MobileNetv2(widen_factor=1.0, activation=nn.ReLU6,
-                        out_indices=(0, 2, 4))
+    model = MobileNetv2(
+        widen_factor=1.0, activation=nn.ReLU6, out_indices=(0, 2, 4))
     model.init_weights()
     model.train()

@@ -206,8 +196,7 @@ def test_mobilenetv2_backbone():
     assert feat[2].shape == torch.Size([1, 96, 14, 14])

     # Test MobileNetv2 with checkpoint forward
-    model = MobileNetv2(widen_factor=1.0, activation=nn.ReLU6,
-                        with_cp=True)
+    model = MobileNetv2(widen_factor=1.0, activation=nn.ReLU6, with_cp=True)
     for m in model.modules():
         if is_block(m):
             assert m.with_cp
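Since the commit only changes formatting, these tests should pass unchanged; they can be selected by name, e.g. pytest -k mobilenetv2 (the test file's path is not shown in this rendering).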