fix test

parent fa49911440
commit 0def3a56b6
@@ -97,7 +97,7 @@ def test_mobilenetv2_backbone():
         for param in layer.parameters():
             assert param.requires_grad is False

-    # Test MobileNetv2 with first stage frozen
+    # Test MobileNetv2 with bn frozen
     model = MobileNetv2(bn_frozen=True)
     model.init_weights()
     model.train()

@@ -163,25 +163,6 @@ def test_mobilenetv2_backbone():
     assert feat[5].shape == torch.Size([1, 160, 7, 7])
     assert feat[6].shape == torch.Size([1, 320, 7, 7])
-
-    # Test MobileNetv2 with BatchNorm forward
-    model = MobileNetv2(widen_factor=1.0, activation=nn.ReLU6)
-    for m in model.modules():
-        if is_norm(m):
-            assert isinstance(m, _BatchNorm)
-    model.init_weights()
-    model.train()
-
-    imgs = torch.randn(1, 3, 224, 224)
-    feat = model(imgs)
-    assert len(feat) == 8
-    assert feat[0].shape == torch.Size([1, 16, 112, 112])
-    assert feat[1].shape == torch.Size([1, 24, 56, 56])
-    assert feat[2].shape == torch.Size([1, 32, 28, 28])
-    assert feat[3].shape == torch.Size([1, 64, 14, 14])
-    assert feat[4].shape == torch.Size([1, 96, 14, 14])
-    assert feat[5].shape == torch.Size([1, 160, 7, 7])
-    assert feat[6].shape == torch.Size([1, 320, 7, 7])

     # Test MobileNetv2 with layers 1, 3, 5 out forward
     model = MobileNetv2(
         widen_factor=1.0, activation=nn.ReLU6, out_indices=(0, 2, 4))
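Not part of the commit, but for context: a minimal sketch of the kind of assertion the `bn_frozen=True` path touched by the first hunk typically makes. The `MobileNetv2` import path and the exact frozen-BN semantics asserted here are assumptions, not something this diff shows.

    # Minimal sketch (assumed behaviour): with bn_frozen=True, BatchNorm layers
    # are expected to stay in eval mode even after model.train() is called.
    from torch.nn.modules.batchnorm import _BatchNorm

    model = MobileNetv2(bn_frozen=True)  # import path depends on the repo layout
    model.init_weights()
    model.train()
    for m in model.modules():
        if isinstance(m, _BatchNorm):
            # Frozen BN: module kept in eval mode so running stats are not updated.
            assert m.training is False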