smaller input & channels of unittest (#1004)
parent 349fc2d812
commit ddce375977
@@ -25,20 +25,20 @@ def test_bisenetv1_backbone():
     model.init_weights()
     model.train()
     batch_size = 2
-    imgs = torch.randn(batch_size, 3, 256, 512)
+    imgs = torch.randn(batch_size, 3, 64, 128)
     feat = model(imgs)

     assert len(feat) == 3
     # output for segment Head
-    assert feat[0].shape == torch.Size([batch_size, 256, 32, 64])
+    assert feat[0].shape == torch.Size([batch_size, 256, 8, 16])
     # for auxiliary head 1
-    assert feat[1].shape == torch.Size([batch_size, 128, 32, 64])
+    assert feat[1].shape == torch.Size([batch_size, 128, 8, 16])
     # for auxiliary head 2
-    assert feat[2].shape == torch.Size([batch_size, 128, 16, 32])
+    assert feat[2].shape == torch.Size([batch_size, 128, 4, 8])

     # Test input with rare shape
     batch_size = 2
-    imgs = torch.randn(batch_size, 3, 527, 279)
+    imgs = torch.randn(batch_size, 3, 95, 27)
     feat = model(imgs)
     assert len(feat) == 3
@@ -47,20 +47,20 @@ def test_bisenetv1_backbone():
         BiSeNetV1(
             backbone_cfg=backbone_cfg,
             in_channels=3,
-            spatial_channels=(64, 64, 64))
+            spatial_channels=(16, 16, 16))

     with pytest.raises(AssertionError):
         # BiSeNetV1 context path constraints.
         BiSeNetV1(
             backbone_cfg=backbone_cfg,
             in_channels=3,
-            context_channels=(128, 256, 512, 1024))
+            context_channels=(16, 32, 64, 128))


 def test_bisenetv1_spatial_path():
     with pytest.raises(AssertionError):
         # BiSeNetV1 spatial path channel constraints.
-        SpatialPath(num_channels=(64, 64, 64), in_channels=3)
+        SpatialPath(num_channels=(16, 16, 16), in_channels=3)


 def test_bisenetv1_context_path():
@@ -79,31 +79,31 @@ def test_bisenetv1_context_path():
     with pytest.raises(AssertionError):
         # BiSeNetV1 context path constraints.
         ContextPath(
-            backbone_cfg=backbone_cfg, context_channels=(128, 256, 512, 1024))
+            backbone_cfg=backbone_cfg, context_channels=(16, 32, 64, 128))


 def test_bisenetv1_attention_refinement_module():
-    x_arm = AttentionRefinementModule(256, 64)
-    assert x_arm.conv_layer.in_channels == 256
-    assert x_arm.conv_layer.out_channels == 64
+    x_arm = AttentionRefinementModule(32, 8)
+    assert x_arm.conv_layer.in_channels == 32
+    assert x_arm.conv_layer.out_channels == 8
     assert x_arm.conv_layer.kernel_size == (3, 3)
-    x = torch.randn(2, 256, 32, 64)
+    x = torch.randn(2, 32, 8, 16)
     x_out = x_arm(x)
-    assert x_out.shape == torch.Size([2, 64, 32, 64])
+    assert x_out.shape == torch.Size([2, 8, 8, 16])


 def test_bisenetv1_feature_fusion_module():
-    ffm = FeatureFusionModule(128, 256)
-    assert ffm.conv1.in_channels == 128
-    assert ffm.conv1.out_channels == 256
+    ffm = FeatureFusionModule(16, 32)
+    assert ffm.conv1.in_channels == 16
+    assert ffm.conv1.out_channels == 32
     assert ffm.conv1.kernel_size == (1, 1)
     assert ffm.gap.output_size == (1, 1)
-    assert ffm.conv_atten[0].in_channels == 256
-    assert ffm.conv_atten[0].out_channels == 256
+    assert ffm.conv_atten[0].in_channels == 32
+    assert ffm.conv_atten[0].out_channels == 32
     assert ffm.conv_atten[0].kernel_size == (1, 1)

-    ffm = FeatureFusionModule(128, 128)
-    x1 = torch.randn(2, 64, 64, 128)
-    x2 = torch.randn(2, 64, 64, 128)
+    ffm = FeatureFusionModule(16, 16)
+    x1 = torch.randn(2, 8, 8, 16)
+    x2 = torch.randn(2, 8, 8, 16)
     x_out = ffm(x1, x2)
-    assert x_out.shape == torch.Size([2, 128, 64, 128])
+    assert x_out.shape == torch.Size([2, 16, 8, 16])
@@ -13,34 +13,34 @@ def test_bisenetv2_backbone():
     model.init_weights()
     model.train()
     batch_size = 2
-    imgs = torch.randn(batch_size, 3, 512, 1024)
+    imgs = torch.randn(batch_size, 3, 128, 256)
     feat = model(imgs)

     assert len(feat) == 5
     # output for segment Head
-    assert feat[0].shape == torch.Size([batch_size, 128, 64, 128])
+    assert feat[0].shape == torch.Size([batch_size, 128, 16, 32])
     # for auxiliary head 1
-    assert feat[1].shape == torch.Size([batch_size, 16, 128, 256])
+    assert feat[1].shape == torch.Size([batch_size, 16, 32, 64])
     # for auxiliary head 2
-    assert feat[2].shape == torch.Size([batch_size, 32, 64, 128])
+    assert feat[2].shape == torch.Size([batch_size, 32, 16, 32])
     # for auxiliary head 3
-    assert feat[3].shape == torch.Size([batch_size, 64, 32, 64])
+    assert feat[3].shape == torch.Size([batch_size, 64, 8, 16])
     # for auxiliary head 4
-    assert feat[4].shape == torch.Size([batch_size, 128, 16, 32])
+    assert feat[4].shape == torch.Size([batch_size, 128, 4, 8])

     # Test input with rare shape
     batch_size = 2
-    imgs = torch.randn(batch_size, 3, 527, 952)
+    imgs = torch.randn(batch_size, 3, 95, 27)
     feat = model(imgs)
     assert len(feat) == 5


 def test_bisenetv2_DetailBranch():
-    x = torch.randn(1, 3, 512, 1024)
-    detail_branch = DetailBranch(detail_channels=(64, 64, 128))
+    x = torch.randn(1, 3, 32, 64)
+    detail_branch = DetailBranch(detail_channels=(64, 16, 32))
     assert isinstance(detail_branch.detail_branch[0][0], ConvModule)
     x_out = detail_branch(x)
-    assert x_out.shape == torch.Size([1, 128, 64, 128])
+    assert x_out.shape == torch.Size([1, 32, 4, 8])


 def test_bisenetv2_SemanticBranch():
@@ -49,9 +49,9 @@ def test_bisenetv2_SemanticBranch():


 def test_bisenetv2_BGALayer():
-    x_a = torch.randn(1, 128, 64, 128)
-    x_b = torch.randn(1, 128, 16, 32)
-    bga = BGALayer()
+    x_a = torch.randn(1, 8, 8, 16)
+    x_b = torch.randn(1, 8, 2, 4)
+    bga = BGALayer(out_channels=8)
     assert isinstance(bga.conv, ConvModule)
     x_out = bga(x_a, x_b)
-    assert x_out.shape == torch.Size([1, 128, 64, 128])
+    assert x_out.shape == torch.Size([1, 8, 8, 16])
@@ -16,17 +16,27 @@ def test_fastscnn_backbone():
             lower_in_channels=128)

     # Test FastSCNN Standard Forward
-    model = FastSCNN()
+    model = FastSCNN(
+        in_channels=3,
+        downsample_dw_channels=(4, 6),
+        global_in_channels=8,
+        global_block_channels=(8, 12, 16),
+        global_block_strides=(2, 2, 1),
+        global_out_channels=16,
+        higher_in_channels=8,
+        lower_in_channels=16,
+        fusion_out_channels=16,
+    )
     model.init_weights()
     model.train()
     batch_size = 4
-    imgs = torch.randn(batch_size, 3, 512, 1024)
+    imgs = torch.randn(batch_size, 3, 64, 128)
     feat = model(imgs)

     assert len(feat) == 3
     # higher-res
-    assert feat[0].shape == torch.Size([batch_size, 64, 64, 128])
+    assert feat[0].shape == torch.Size([batch_size, 8, 8, 16])
     # lower-res
-    assert feat[1].shape == torch.Size([batch_size, 128, 16, 32])
+    assert feat[1].shape == torch.Size([batch_size, 16, 2, 4])
     # FFM output
-    assert feat[2].shape == torch.Size([batch_size, 128, 64, 128])
+    assert feat[2].shape == torch.Size([batch_size, 16, 8, 16])
@@ -95,21 +95,21 @@ def test_hrnet_backbone():
     model.init_weights()
     model.train()

-    imgs = torch.randn(1, 3, 256, 256)
+    imgs = torch.randn(1, 3, 64, 64)
     feats = model(imgs)
     assert len(feats) == 4
-    assert feats[0].shape == torch.Size([1, 32, 64, 64])
-    assert feats[3].shape == torch.Size([1, 256, 8, 8])
+    assert feats[0].shape == torch.Size([1, 32, 16, 16])
+    assert feats[3].shape == torch.Size([1, 256, 2, 2])

     # Test single scale output
     model = HRNet(extra=extra, multiscale_output=False)
     model.init_weights()
     model.train()

-    imgs = torch.randn(1, 3, 256, 256)
+    imgs = torch.randn(1, 3, 64, 64)
     feats = model(imgs)
     assert len(feats) == 1
-    assert feats[0].shape == torch.Size([1, 32, 64, 64])
+    assert feats[0].shape == torch.Size([1, 32, 16, 16])

     # Test HRNET with two stage frozen
     frozen_stages = 2
@@ -10,18 +10,19 @@ def test_icnet_backbone():
         # Must give backbone dict in config file.
         ICNet(
             in_channels=3,
-            layer_channels=(512, 2048),
-            light_branch_middle_channels=32,
-            psp_out_channels=512,
-            out_channels=(64, 256, 256),
+            layer_channels=(128, 512),
+            light_branch_middle_channels=8,
+            psp_out_channels=128,
+            out_channels=(16, 128, 128),
             backbone_cfg=None)

     # Test ICNet Standard Forward
     model = ICNet(
+        layer_channels=(128, 512),
         backbone_cfg=dict(
             type='ResNetV1c',
             in_channels=3,
-            depth=50,
+            depth=18,
             num_stages=4,
             out_indices=(0, 1, 2, 3),
             dilations=(1, 1, 2, 4),
@@ -29,13 +30,14 @@ def test_icnet_backbone():
             norm_cfg=dict(type='BN', requires_grad=True),
             norm_eval=False,
             style='pytorch',
-            contract_dilation=True), )
+            contract_dilation=True),
+    )
     assert hasattr(model.backbone,
                    'maxpool') and model.backbone.maxpool.ceil_mode is True
     model.init_weights()
     model.train()
     batch_size = 2
-    imgs = torch.randn(batch_size, 3, 512, 1024)
+    imgs = torch.randn(batch_size, 3, 32, 64)
     feat = model(imgs)

     assert model.psp_modules[0][0].output_size == 1
@@ -45,4 +47,4 @@ def test_icnet_backbone():
     assert model.conv_sub1[0].padding == 1

     assert len(feat) == 3
-    assert feat[0].shape == torch.Size([batch_size, 64, 64, 128])
+    assert feat[0].shape == torch.Size([batch_size, 64, 4, 8])
@@ -24,7 +24,7 @@ def test_mit():
     assert outs[3].shape == (1, 256, H // 32, W // 32)

     # Test non-squared input
-    H, W = (224, 320)
+    H, W = (224, 256)
     temp = torch.randn((1, 3, H, W))
     outs = model(temp)
     assert outs[0].shape == (1, 32, H // 4, W // 4)
@@ -33,25 +33,25 @@ def test_mit():
     assert outs[3].shape == (1, 256, H // 32, W // 32)

     # Test MixFFN
-    FFN = MixFFN(128, 512)
+    FFN = MixFFN(64, 128)
     hw_shape = (32, 32)
     token_len = 32 * 32
-    temp = torch.randn((1, token_len, 128))
+    temp = torch.randn((1, token_len, 64))
     # Self identity
     out = FFN(temp, hw_shape)
-    assert out.shape == (1, token_len, 128)
+    assert out.shape == (1, token_len, 64)
     # Out identity
     outs = FFN(temp, hw_shape, temp)
-    assert out.shape == (1, token_len, 128)
+    assert out.shape == (1, token_len, 64)

     # Test EfficientMHA
-    MHA = EfficientMultiheadAttention(128, 2)
+    MHA = EfficientMultiheadAttention(64, 2)
     hw_shape = (32, 32)
     token_len = 32 * 32
-    temp = torch.randn((1, token_len, 128))
+    temp = torch.randn((1, token_len, 64))
     # Self identity
     out = MHA(temp, hw_shape)
-    assert out.shape == (1, token_len, 128)
+    assert out.shape == (1, token_len, 64)
     # Out identity
     outs = MHA(temp, hw_shape, temp)
-    assert out.shape == (1, token_len, 128)
+    assert out.shape == (1, token_len, 64)
@@ -32,24 +32,24 @@ def test_mobilenet_v3():
     model.init_weights()
     model.train()

-    imgs = torch.randn(2, 3, 224, 224)
+    imgs = torch.randn(2, 3, 56, 56)
     feat = model(imgs)
     assert len(feat) == 3
-    assert feat[0].shape == (2, 16, 112, 112)
-    assert feat[1].shape == (2, 16, 56, 56)
-    assert feat[2].shape == (2, 576, 28, 28)
+    assert feat[0].shape == (2, 16, 28, 28)
+    assert feat[1].shape == (2, 16, 14, 14)
+    assert feat[2].shape == (2, 576, 7, 7)

     # Test MobileNetV3 with arch = 'large'
     model = MobileNetV3(arch='large', out_indices=(1, 3, 16))
     model.init_weights()
     model.train()

-    imgs = torch.randn(2, 3, 224, 224)
+    imgs = torch.randn(2, 3, 56, 56)
     feat = model(imgs)
     assert len(feat) == 3
-    assert feat[0].shape == (2, 16, 112, 112)
-    assert feat[1].shape == (2, 24, 56, 56)
-    assert feat[2].shape == (2, 960, 28, 28)
+    assert feat[0].shape == (2, 16, 28, 28)
+    assert feat[1].shape == (2, 24, 14, 14)
+    assert feat[2].shape == (2, 960, 7, 7)

     # Test MobileNetV3 with norm_eval True, with_cp True and frozen_stages=5
     model = MobileNetV3(norm_eval=True, with_cp=True, frozen_stages=5)
@@ -59,9 +59,9 @@ def test_mobilenet_v3():
     model.init_weights()
     model.train()

-    imgs = torch.randn(2, 3, 224, 224)
+    imgs = torch.randn(2, 3, 56, 56)
     feat = model(imgs)
     assert len(feat) == 3
-    assert feat[0].shape == (2, 16, 112, 112)
-    assert feat[1].shape == (2, 16, 56, 56)
-    assert feat[2].shape == (2, 576, 28, 28)
+    assert feat[0].shape == (2, 16, 28, 28)
+    assert feat[1].shape == (2, 16, 14, 14)
+    assert feat[2].shape == (2, 576, 7, 7)
@@ -43,21 +43,21 @@ def test_resnet_basic_block():
     # Test BasicBlock with checkpoint forward
     block = BasicBlock(16, 16, with_cp=True)
     assert block.with_cp
-    x = torch.randn(1, 16, 56, 56)
+    x = torch.randn(1, 16, 28, 28)
     x_out = block(x)
-    assert x_out.shape == torch.Size([1, 16, 56, 56])
+    assert x_out.shape == torch.Size([1, 16, 28, 28])

     # test BasicBlock structure and forward
-    block = BasicBlock(64, 64)
-    assert block.conv1.in_channels == 64
-    assert block.conv1.out_channels == 64
+    block = BasicBlock(32, 32)
+    assert block.conv1.in_channels == 32
+    assert block.conv1.out_channels == 32
     assert block.conv1.kernel_size == (3, 3)
-    assert block.conv2.in_channels == 64
-    assert block.conv2.out_channels == 64
+    assert block.conv2.in_channels == 32
+    assert block.conv2.out_channels == 32
     assert block.conv2.kernel_size == (3, 3)
-    x = torch.randn(1, 64, 56, 56)
+    x = torch.randn(1, 32, 28, 28)
     x_out = block(x)
-    assert x_out.shape == torch.Size([1, 64, 56, 56])
+    assert x_out.shape == torch.Size([1, 32, 28, 28])


 def test_resnet_bottleneck():
@@ -293,37 +293,37 @@ def test_resnet_backbone():

     with pytest.raises(AssertionError):
         # In ResNet: 1 <= num_stages <= 4
-        ResNet(50, num_stages=5)
+        ResNet(18, num_stages=5)

     with pytest.raises(AssertionError):
         # len(strides) == len(dilations) == num_stages
-        ResNet(50, strides=(1, ), dilations=(1, 1), num_stages=3)
+        ResNet(18, strides=(1, ), dilations=(1, 1), num_stages=3)

     with pytest.raises(TypeError):
         # pretrained must be a string path
-        model = ResNet(50, pretrained=0)
+        model = ResNet(18, pretrained=0)
         model.init_weights()

     with pytest.raises(AssertionError):
         # Style must be in ['pytorch', 'caffe']
         ResNet(50, style='tensorflow')

-    # Test ResNet50 norm_eval=True
-    model = ResNet(50, norm_eval=True)
+    # Test ResNet18 norm_eval=True
+    model = ResNet(18, norm_eval=True)
     model.init_weights()
     model.train()
     assert check_norm_state(model.modules(), False)

-    # Test ResNet50 with torchvision pretrained weight
+    # Test ResNet18 with torchvision pretrained weight
     model = ResNet(
-        depth=50, norm_eval=True, pretrained='torchvision://resnet50')
+        depth=18, norm_eval=True, pretrained='torchvision://resnet18')
     model.init_weights()
     model.train()
     assert check_norm_state(model.modules(), False)

-    # Test ResNet50 with first stage frozen
+    # Test ResNet18 with first stage frozen
     frozen_stages = 1
-    model = ResNet(50, frozen_stages=frozen_stages)
+    model = ResNet(18, frozen_stages=frozen_stages)
     model.init_weights()
     model.train()
     assert model.norm1.training is False
@@ -338,8 +338,8 @@ def test_resnet_backbone():
         for param in layer.parameters():
             assert param.requires_grad is False

-    # Test ResNet50V1d with first stage frozen
-    model = ResNetV1d(depth=50, frozen_stages=frozen_stages)
+    # Test ResNet18V1d with first stage frozen
+    model = ResNetV1d(depth=18, frozen_stages=frozen_stages)
     assert len(model.stem) == 9
     model.init_weights()
     model.train()
@@ -367,8 +367,8 @@ def test_resnet_backbone():
     assert feat[2].shape == torch.Size([1, 256, 14, 14])
     assert feat[3].shape == torch.Size([1, 512, 7, 7])

-    # Test ResNet50 with BatchNorm forward
-    model = ResNet(50)
+    # Test ResNet18 with BatchNorm forward
+    model = ResNet(18)
     for m in model.modules():
         if is_norm(m):
             assert isinstance(m, _BatchNorm)
@@ -378,22 +378,22 @@ def test_resnet_backbone():
     imgs = torch.randn(1, 3, 224, 224)
     feat = model(imgs)
     assert len(feat) == 4
-    assert feat[0].shape == torch.Size([1, 256, 56, 56])
-    assert feat[1].shape == torch.Size([1, 512, 28, 28])
-    assert feat[2].shape == torch.Size([1, 1024, 14, 14])
-    assert feat[3].shape == torch.Size([1, 2048, 7, 7])
+    assert feat[0].shape == torch.Size([1, 64, 56, 56])
+    assert feat[1].shape == torch.Size([1, 128, 28, 28])
+    assert feat[2].shape == torch.Size([1, 256, 14, 14])
+    assert feat[3].shape == torch.Size([1, 512, 7, 7])

-    # Test ResNet50 with layers 1, 2, 3 out forward
-    model = ResNet(50, out_indices=(0, 1, 2))
+    # Test ResNet18 with layers 1, 2, 3 out forward
+    model = ResNet(18, out_indices=(0, 1, 2))
     model.init_weights()
     model.train()

-    imgs = torch.randn(1, 3, 224, 224)
+    imgs = torch.randn(1, 3, 112, 112)
     feat = model(imgs)
     assert len(feat) == 3
-    assert feat[0].shape == torch.Size([1, 256, 56, 56])
-    assert feat[1].shape == torch.Size([1, 512, 28, 28])
-    assert feat[2].shape == torch.Size([1, 1024, 14, 14])
+    assert feat[0].shape == torch.Size([1, 64, 28, 28])
+    assert feat[1].shape == torch.Size([1, 128, 14, 14])
+    assert feat[2].shape == torch.Size([1, 256, 7, 7])

     # Test ResNet18 with checkpoint forward
     model = ResNet(18, with_cp=True)
@@ -411,8 +411,8 @@ def test_resnet_backbone():
     assert feat[2].shape == torch.Size([1, 256, 14, 14])
     assert feat[3].shape == torch.Size([1, 512, 7, 7])

-    # Test ResNet50 with checkpoint forward
-    model = ResNet(50, with_cp=True)
+    # Test ResNet18 with checkpoint forward
+    model = ResNet(18, with_cp=True)
     for m in model.modules():
         if is_block(m):
             assert m.with_cp
@@ -422,14 +422,14 @@ def test_resnet_backbone():
     imgs = torch.randn(1, 3, 224, 224)
     feat = model(imgs)
     assert len(feat) == 4
-    assert feat[0].shape == torch.Size([1, 256, 56, 56])
-    assert feat[1].shape == torch.Size([1, 512, 28, 28])
-    assert feat[2].shape == torch.Size([1, 1024, 14, 14])
-    assert feat[3].shape == torch.Size([1, 2048, 7, 7])
+    assert feat[0].shape == torch.Size([1, 64, 56, 56])
+    assert feat[1].shape == torch.Size([1, 128, 28, 28])
+    assert feat[2].shape == torch.Size([1, 256, 14, 14])
+    assert feat[3].shape == torch.Size([1, 512, 7, 7])

-    # Test ResNet50 with GroupNorm forward
+    # Test ResNet18 with GroupNorm forward
     model = ResNet(
-        50, norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))
+        18, norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))
     for m in model.modules():
         if is_norm(m):
             assert isinstance(m, GroupNorm)
@@ -439,10 +439,10 @@ def test_resnet_backbone():
     imgs = torch.randn(1, 3, 224, 224)
     feat = model(imgs)
     assert len(feat) == 4
-    assert feat[0].shape == torch.Size([1, 256, 56, 56])
-    assert feat[1].shape == torch.Size([1, 512, 28, 28])
-    assert feat[2].shape == torch.Size([1, 1024, 14, 14])
-    assert feat[3].shape == torch.Size([1, 2048, 7, 7])
+    assert feat[0].shape == torch.Size([1, 64, 56, 56])
+    assert feat[1].shape == torch.Size([1, 128, 28, 28])
+    assert feat[2].shape == torch.Size([1, 256, 14, 14])
+    assert feat[3].shape == torch.Size([1, 512, 7, 7])

     # Test ResNet50 with 1 GeneralizedAttention after conv2, 1 NonLocal2d
     # after conv2, 1 ContextBlock after conv3 in layers 2, 3, 4
@@ -543,8 +543,8 @@ def test_resnet_backbone():
     assert feat[2].shape == torch.Size([1, 1024, 14, 14])
     assert feat[3].shape == torch.Size([1, 2048, 7, 7])

-    # Test ResNet50 zero initialization of residual
-    model = ResNet(50, zero_init_residual=True)
+    # Test ResNet18 zero initialization of residual
+    model = ResNet(18, zero_init_residual=True)
     model.init_weights()
     for m in model.modules():
         if isinstance(m, Bottleneck):
@@ -556,20 +556,20 @@ def test_resnet_backbone():
     imgs = torch.randn(1, 3, 224, 224)
     feat = model(imgs)
     assert len(feat) == 4
-    assert feat[0].shape == torch.Size([1, 256, 56, 56])
-    assert feat[1].shape == torch.Size([1, 512, 28, 28])
-    assert feat[2].shape == torch.Size([1, 1024, 14, 14])
-    assert feat[3].shape == torch.Size([1, 2048, 7, 7])
+    assert feat[0].shape == torch.Size([1, 64, 56, 56])
+    assert feat[1].shape == torch.Size([1, 128, 28, 28])
+    assert feat[2].shape == torch.Size([1, 256, 14, 14])
+    assert feat[3].shape == torch.Size([1, 512, 7, 7])

     # Test ResNetV1d forward
-    model = ResNetV1d(depth=50)
+    model = ResNetV1d(depth=18)
     model.init_weights()
     model.train()

     imgs = torch.randn(1, 3, 224, 224)
     feat = model(imgs)
     assert len(feat) == 4
-    assert feat[0].shape == torch.Size([1, 256, 56, 56])
-    assert feat[1].shape == torch.Size([1, 512, 28, 28])
-    assert feat[2].shape == torch.Size([1, 1024, 14, 14])
-    assert feat[3].shape == torch.Size([1, 2048, 7, 7])
+    assert feat[0].shape == torch.Size([1, 64, 56, 56])
+    assert feat[1].shape == torch.Size([1, 128, 28, 28])
+    assert feat[2].shape == torch.Size([1, 256, 14, 14])
+    assert feat[3].shape == torch.Size([1, 512, 7, 7])
@@ -6,13 +6,13 @@ from mmseg.models.backbones.swin import SwinBlock, SwinTransformer

 def test_swin_block():
     # test SwinBlock structure and forward
-    block = SwinBlock(embed_dims=64, num_heads=4, feedforward_channels=256)
-    assert block.ffn.embed_dims == 64
+    block = SwinBlock(embed_dims=32, num_heads=4, feedforward_channels=128)
+    assert block.ffn.embed_dims == 32
     assert block.attn.w_msa.num_heads == 4
-    assert block.ffn.feedforward_channels == 256
-    x = torch.randn(1, 56 * 56, 64)
+    assert block.ffn.feedforward_channels == 128
+    x = torch.randn(1, 56 * 56, 32)
     x_out = block(x, (56, 56))
-    assert x_out.shape == torch.Size([1, 56 * 56, 64])
+    assert x_out.shape == torch.Size([1, 56 * 56, 32])

     # Test BasicBlock with checkpoint forward
     block = SwinBlock(
@@ -37,11 +37,11 @@ def test_swin_transformer():

     # test pretrained image size
     with pytest.raises(AssertionError):
-        SwinTransformer(pretrain_img_size=(224, 224, 224))
+        SwinTransformer(pretrain_img_size=(112, 112, 112))

     # Test absolute position embedding
-    temp = torch.randn((1, 3, 224, 224))
-    model = SwinTransformer(pretrain_img_size=224, use_abs_pos_embed=True)
+    temp = torch.randn((1, 3, 112, 112))
+    model = SwinTransformer(pretrain_img_size=112, use_abs_pos_embed=True)
     model.init_weights()
     model(temp)
@@ -89,7 +89,7 @@ def test_swin_transformer():
         assert not p.requires_grad

     # Test Swin with checkpoint forward
-    temp = torch.randn((1, 3, 112, 112))
+    temp = torch.randn((1, 3, 56, 56))
     model = SwinTransformer(with_cp=True)
     for m in model.modules():
         if isinstance(m, SwinBlock):
@@ -7,10 +7,10 @@ from .utils import to_cuda

 def test_ann_head():

-    inputs = [torch.randn(1, 16, 45, 45), torch.randn(1, 32, 21, 21)]
+    inputs = [torch.randn(1, 4, 45, 45), torch.randn(1, 8, 21, 21)]
     head = ANNHead(
-        in_channels=[16, 32],
-        channels=16,
+        in_channels=[4, 8],
+        channels=2,
         num_classes=19,
         in_index=[-2, -1],
         project_channels=8)
@@ -10,25 +10,25 @@ def test_apc_head():

     with pytest.raises(AssertionError):
         # pool_scales must be list|tuple
-        APCHead(in_channels=32, channels=16, num_classes=19, pool_scales=1)
+        APCHead(in_channels=8, channels=2, num_classes=19, pool_scales=1)

     # test no norm_cfg
-    head = APCHead(in_channels=32, channels=16, num_classes=19)
+    head = APCHead(in_channels=8, channels=2, num_classes=19)
     assert not _conv_has_norm(head, sync_bn=False)

     # test with norm_cfg
     head = APCHead(
-        in_channels=32,
-        channels=16,
+        in_channels=8,
+        channels=2,
         num_classes=19,
         norm_cfg=dict(type='SyncBN'))
     assert _conv_has_norm(head, sync_bn=True)

     # fusion=True
-    inputs = [torch.randn(1, 32, 45, 45)]
+    inputs = [torch.randn(1, 8, 45, 45)]
     head = APCHead(
-        in_channels=32,
-        channels=16,
+        in_channels=8,
+        channels=2,
         num_classes=19,
         pool_scales=(1, 2, 3),
         fusion=True)
@@ -42,10 +42,10 @@ def test_apc_head():
     assert outputs.shape == (1, head.num_classes, 45, 45)

     # fusion=False
-    inputs = [torch.randn(1, 32, 45, 45)]
+    inputs = [torch.randn(1, 8, 45, 45)]
     head = APCHead(
-        in_channels=32,
-        channels=16,
+        in_channels=8,
+        channels=2,
         num_classes=19,
         pool_scales=(1, 2, 3),
         fusion=False)
@@ -10,23 +10,23 @@ def test_aspp_head():

     with pytest.raises(AssertionError):
         # pool_scales must be list|tuple
-        ASPPHead(in_channels=32, channels=16, num_classes=19, dilations=1)
+        ASPPHead(in_channels=8, channels=4, num_classes=19, dilations=1)

     # test no norm_cfg
-    head = ASPPHead(in_channels=32, channels=16, num_classes=19)
+    head = ASPPHead(in_channels=8, channels=4, num_classes=19)
     assert not _conv_has_norm(head, sync_bn=False)

     # test with norm_cfg
     head = ASPPHead(
-        in_channels=32,
-        channels=16,
+        in_channels=8,
+        channels=4,
         num_classes=19,
         norm_cfg=dict(type='SyncBN'))
     assert _conv_has_norm(head, sync_bn=True)

-    inputs = [torch.randn(1, 32, 45, 45)]
+    inputs = [torch.randn(1, 8, 45, 45)]
     head = ASPPHead(
-        in_channels=32, channels=16, num_classes=19, dilations=(1, 12, 24))
+        in_channels=8, channels=4, num_classes=19, dilations=(1, 12, 24))
     if torch.cuda.is_available():
         head, inputs = to_cuda(head, inputs)
     assert head.aspp_modules[0].conv.dilation == (1, 1)
@@ -39,12 +39,12 @@ def test_aspp_head():
 def test_dw_aspp_head():

     # test w.o. c1
-    inputs = [torch.randn(1, 32, 45, 45)]
+    inputs = [torch.randn(1, 8, 45, 45)]
     head = DepthwiseSeparableASPPHead(
         c1_in_channels=0,
         c1_channels=0,
-        in_channels=32,
-        channels=16,
+        in_channels=8,
+        channels=4,
         num_classes=19,
         dilations=(1, 12, 24))
     if torch.cuda.is_available():
@@ -57,18 +57,18 @@ def test_dw_aspp_head():
     assert outputs.shape == (1, head.num_classes, 45, 45)

     # test with c1
-    inputs = [torch.randn(1, 8, 45, 45), torch.randn(1, 32, 21, 21)]
+    inputs = [torch.randn(1, 4, 45, 45), torch.randn(1, 16, 21, 21)]
     head = DepthwiseSeparableASPPHead(
-        c1_in_channels=8,
-        c1_channels=4,
-        in_channels=32,
-        channels=16,
+        c1_in_channels=4,
+        c1_channels=2,
+        in_channels=16,
+        channels=8,
         num_classes=19,
         dilations=(1, 12, 24))
     if torch.cuda.is_available():
         head, inputs = to_cuda(head, inputs)
-    assert head.c1_bottleneck.in_channels == 8
-    assert head.c1_bottleneck.out_channels == 4
+    assert head.c1_bottleneck.in_channels == 4
+    assert head.c1_bottleneck.out_channels == 2
     assert head.aspp_modules[0].conv.dilation == (1, 1)
     assert head.aspp_modules[1].depthwise_conv.dilation == (12, 12)
     assert head.aspp_modules[2].depthwise_conv.dilation == (24, 24)
@@ -7,12 +7,12 @@ from .utils import to_cuda


 def test_cc_head():
-    head = CCHead(in_channels=32, channels=16, num_classes=19)
+    head = CCHead(in_channels=16, channels=8, num_classes=19)
     assert len(head.convs) == 2
     assert hasattr(head, 'cca')
     if not torch.cuda.is_available():
         pytest.skip('CCHead requires CUDA')
-    inputs = [torch.randn(1, 32, 45, 45)]
+    inputs = [torch.randn(1, 16, 23, 23)]
     head, inputs = to_cuda(head, inputs)
     outputs = head(inputs)
-    assert outputs.shape == (1, head.num_classes, 45, 45)
+    assert outputs.shape == (1, head.num_classes, 23, 23)
@@ -7,13 +7,13 @@ from .utils import to_cuda

 def test_da_head():

-    inputs = [torch.randn(1, 32, 45, 45)]
-    head = DAHead(in_channels=32, channels=16, num_classes=19, pam_channels=8)
+    inputs = [torch.randn(1, 16, 23, 23)]
+    head = DAHead(in_channels=16, channels=8, num_classes=19, pam_channels=8)
     if torch.cuda.is_available():
         head, inputs = to_cuda(head, inputs)
     outputs = head(inputs)
     assert isinstance(outputs, tuple) and len(outputs) == 3
     for output in outputs:
-        assert output.shape == (1, head.num_classes, 45, 45)
+        assert output.shape == (1, head.num_classes, 23, 23)
     test_output = head.forward_test(inputs, None, None)
-    assert test_output.shape == (1, head.num_classes, 45, 45)
+    assert test_output.shape == (1, head.num_classes, 23, 23)
@@ -10,25 +10,25 @@ def test_dm_head():

     with pytest.raises(AssertionError):
         # filter_sizes must be list|tuple
-        DMHead(in_channels=32, channels=16, num_classes=19, filter_sizes=1)
+        DMHead(in_channels=8, channels=4, num_classes=19, filter_sizes=1)

     # test no norm_cfg
-    head = DMHead(in_channels=32, channels=16, num_classes=19)
+    head = DMHead(in_channels=8, channels=4, num_classes=19)
     assert not _conv_has_norm(head, sync_bn=False)

     # test with norm_cfg
     head = DMHead(
-        in_channels=32,
-        channels=16,
+        in_channels=8,
+        channels=4,
         num_classes=19,
         norm_cfg=dict(type='SyncBN'))
     assert _conv_has_norm(head, sync_bn=True)

     # fusion=True
-    inputs = [torch.randn(1, 32, 45, 45)]
+    inputs = [torch.randn(1, 8, 23, 23)]
     head = DMHead(
-        in_channels=32,
-        channels=16,
+        in_channels=8,
+        channels=4,
         num_classes=19,
         filter_sizes=(1, 3, 5),
         fusion=True)
@@ -39,13 +39,13 @@ def test_dm_head():
     assert head.dcm_modules[1].filter_size == 3
     assert head.dcm_modules[2].filter_size == 5
     outputs = head(inputs)
-    assert outputs.shape == (1, head.num_classes, 45, 45)
+    assert outputs.shape == (1, head.num_classes, 23, 23)

     # fusion=False
-    inputs = [torch.randn(1, 32, 45, 45)]
+    inputs = [torch.randn(1, 8, 23, 23)]
     head = DMHead(
-        in_channels=32,
-        channels=16,
+        in_channels=8,
+        channels=4,
         num_classes=19,
         filter_sizes=(1, 3, 5),
         fusion=False)
@@ -56,4 +56,4 @@ def test_dm_head():
     assert head.dcm_modules[1].filter_size == 3
     assert head.dcm_modules[2].filter_size == 5
     outputs = head(inputs)
-    assert outputs.shape == (1, head.num_classes, 45, 45)
+    assert outputs.shape == (1, head.num_classes, 23, 23)
@@ -7,39 +7,38 @@ from .utils import to_cuda

 def test_dnl_head():
     # DNL with 'embedded_gaussian' mode
-    head = DNLHead(in_channels=32, channels=16, num_classes=19)
+    head = DNLHead(in_channels=8, channels=4, num_classes=19)
     assert len(head.convs) == 2
     assert hasattr(head, 'dnl_block')
     assert head.dnl_block.temperature == 0.05
-    inputs = [torch.randn(1, 32, 45, 45)]
+    inputs = [torch.randn(1, 8, 23, 23)]
     if torch.cuda.is_available():
         head, inputs = to_cuda(head, inputs)
     outputs = head(inputs)
-    assert outputs.shape == (1, head.num_classes, 45, 45)
+    assert outputs.shape == (1, head.num_classes, 23, 23)

     # NonLocal2d with 'dot_product' mode
     head = DNLHead(
-        in_channels=32, channels=16, num_classes=19, mode='dot_product')
-    inputs = [torch.randn(1, 32, 45, 45)]
+        in_channels=8, channels=4, num_classes=19, mode='dot_product')
+    inputs = [torch.randn(1, 8, 23, 23)]
     if torch.cuda.is_available():
         head, inputs = to_cuda(head, inputs)
     outputs = head(inputs)
-    assert outputs.shape == (1, head.num_classes, 45, 45)
+    assert outputs.shape == (1, head.num_classes, 23, 23)

     # NonLocal2d with 'gaussian' mode
-    head = DNLHead(
-        in_channels=32, channels=16, num_classes=19, mode='gaussian')
-    inputs = [torch.randn(1, 32, 45, 45)]
+    head = DNLHead(in_channels=8, channels=4, num_classes=19, mode='gaussian')
+    inputs = [torch.randn(1, 8, 23, 23)]
     if torch.cuda.is_available():
         head, inputs = to_cuda(head, inputs)
     outputs = head(inputs)
-    assert outputs.shape == (1, head.num_classes, 45, 45)
+    assert outputs.shape == (1, head.num_classes, 23, 23)

     # NonLocal2d with 'concatenation' mode
     head = DNLHead(
-        in_channels=32, channels=16, num_classes=19, mode='concatenation')
-    inputs = [torch.randn(1, 32, 45, 45)]
+        in_channels=8, channels=4, num_classes=19, mode='concatenation')
+    inputs = [torch.randn(1, 8, 23, 23)]
     if torch.cuda.is_available():
         head, inputs = to_cuda(head, inputs)
     outputs = head(inputs)
-    assert outputs.shape == (1, head.num_classes, 45, 45)
+    assert outputs.shape == (1, head.num_classes, 23, 23)
@@ -10,13 +10,13 @@ def test_dpt_head():
         # input_transform must be 'multiple_select'
         head = DPTHead(
             in_channels=[768, 768, 768, 768],
-            channels=256,
+            channels=4,
             num_classes=19,
             in_index=[0, 1, 2, 3])

     head = DPTHead(
         in_channels=[768, 768, 768, 768],
-        channels=256,
+        channels=4,
         num_classes=19,
         in_index=[0, 1, 2, 3],
         input_transform='multiple_select')
@@ -29,7 +29,7 @@ def test_dpt_head():
     # test readout operation
     head = DPTHead(
         in_channels=[768, 768, 768, 768],
-        channels=256,
+        channels=4,
         num_classes=19,
         in_index=[0, 1, 2, 3],
         input_transform='multiple_select',
@@ -39,7 +39,7 @@ def test_dpt_head():

     head = DPTHead(
         in_channels=[768, 768, 768, 768],
-        channels=256,
+        channels=4,
         num_classes=19,
         in_index=[0, 1, 2, 3],
         input_transform='multiple_select',
@@ -7,17 +7,17 @@ from .utils import to_cuda

 def test_emanet_head():
     head = EMAHead(
-        in_channels=32,
-        ema_channels=24,
-        channels=16,
+        in_channels=4,
+        ema_channels=3,
+        channels=2,
         num_stages=3,
-        num_bases=16,
+        num_bases=2,
         num_classes=19)
     for param in head.ema_mid_conv.parameters():
         assert not param.requires_grad
     assert hasattr(head, 'ema_module')
-    inputs = [torch.randn(1, 32, 45, 45)]
+    inputs = [torch.randn(1, 4, 23, 23)]
     if torch.cuda.is_available():
         head, inputs = to_cuda(head, inputs)
     outputs = head(inputs)
-    assert outputs.shape == (1, head.num_classes, 45, 45)
+    assert outputs.shape == (1, head.num_classes, 23, 23)
@@ -7,9 +7,8 @@ from .utils import to_cuda

 def test_enc_head():
     # with se_loss, w.o. lateral
-    inputs = [torch.randn(1, 32, 21, 21)]
-    head = EncHead(
-        in_channels=[32], channels=16, num_classes=19, in_index=[-1])
+    inputs = [torch.randn(1, 8, 21, 21)]
+    head = EncHead(in_channels=[8], channels=4, num_classes=19, in_index=[-1])
     if torch.cuda.is_available():
         head, inputs = to_cuda(head, inputs)
     outputs = head(inputs)
@@ -18,10 +17,10 @@ def test_enc_head():
     assert outputs[1].shape == (1, head.num_classes)

     # w.o se_loss, w.o. lateral
-    inputs = [torch.randn(1, 32, 21, 21)]
+    inputs = [torch.randn(1, 8, 21, 21)]
     head = EncHead(
-        in_channels=[32],
-        channels=16,
+        in_channels=[8],
+        channels=4,
         use_se_loss=False,
         num_classes=19,
         in_index=[-1])
@@ -31,10 +30,10 @@ def test_enc_head():
     assert outputs.shape == (1, head.num_classes, 21, 21)

     # with se_loss, with lateral
-    inputs = [torch.randn(1, 16, 45, 45), torch.randn(1, 32, 21, 21)]
+    inputs = [torch.randn(1, 4, 45, 45), torch.randn(1, 8, 21, 21)]
     head = EncHead(
-        in_channels=[16, 32],
-        channels=16,
+        in_channels=[4, 8],
+        channels=4,
         add_lateral=True,
         num_classes=19,
         in_index=[-2, -1])
@@ -15,15 +15,15 @@ def test_fcn_head():
         FCNHead(num_classes=19, num_convs=-1)

     # test no norm_cfg
-    head = FCNHead(in_channels=32, channels=16, num_classes=19)
+    head = FCNHead(in_channels=8, channels=4, num_classes=19)
     for m in head.modules():
         if isinstance(m, ConvModule):
             assert not m.with_norm

     # test with norm_cfg
     head = FCNHead(
-        in_channels=32,
-        channels=16,
+        in_channels=8,
+        channels=4,
         num_classes=19,
         norm_cfg=dict(type='SyncBN'))
     for m in head.modules():
@@ -31,64 +31,64 @@ def test_fcn_head():
             assert m.with_norm and isinstance(m.bn, SyncBatchNorm)

     # test concat_input=False
-    inputs = [torch.randn(1, 32, 45, 45)]
+    inputs = [torch.randn(1, 8, 23, 23)]
     head = FCNHead(
-        in_channels=32, channels=16, num_classes=19, concat_input=False)
+        in_channels=8, channels=4, num_classes=19, concat_input=False)
     if torch.cuda.is_available():
         head, inputs = to_cuda(head, inputs)
     assert len(head.convs) == 2
     assert not head.concat_input and not hasattr(head, 'conv_cat')
     outputs = head(inputs)
-    assert outputs.shape == (1, head.num_classes, 45, 45)
+    assert outputs.shape == (1, head.num_classes, 23, 23)

     # test concat_input=True
-    inputs = [torch.randn(1, 32, 45, 45)]
+    inputs = [torch.randn(1, 8, 23, 23)]
     head = FCNHead(
-        in_channels=32, channels=16, num_classes=19, concat_input=True)
+        in_channels=8, channels=4, num_classes=19, concat_input=True)
     if torch.cuda.is_available():
         head, inputs = to_cuda(head, inputs)
     assert len(head.convs) == 2
     assert head.concat_input
-    assert head.conv_cat.in_channels == 48
+    assert head.conv_cat.in_channels == 12
     outputs = head(inputs)
-    assert outputs.shape == (1, head.num_classes, 45, 45)
+    assert outputs.shape == (1, head.num_classes, 23, 23)

     # test kernel_size=3
-    inputs = [torch.randn(1, 32, 45, 45)]
-    head = FCNHead(in_channels=32, channels=16, num_classes=19)
+    inputs = [torch.randn(1, 8, 23, 23)]
+    head = FCNHead(in_channels=8, channels=4, num_classes=19)
     if torch.cuda.is_available():
         head, inputs = to_cuda(head, inputs)
     for i in range(len(head.convs)):
         assert head.convs[i].kernel_size == (3, 3)
         assert head.convs[i].padding == 1
     outputs = head(inputs)
-    assert outputs.shape == (1, head.num_classes, 45, 45)
+    assert outputs.shape == (1, head.num_classes, 23, 23)

     # test kernel_size=1
-    inputs = [torch.randn(1, 32, 45, 45)]
-    head = FCNHead(in_channels=32, channels=16, num_classes=19, kernel_size=1)
+    inputs = [torch.randn(1, 8, 23, 23)]
+    head = FCNHead(in_channels=8, channels=4, num_classes=19, kernel_size=1)
     if torch.cuda.is_available():
         head, inputs = to_cuda(head, inputs)
     for i in range(len(head.convs)):
         assert head.convs[i].kernel_size == (1, 1)
         assert head.convs[i].padding == 0
     outputs = head(inputs)
-    assert outputs.shape == (1, head.num_classes, 45, 45)
+    assert outputs.shape == (1, head.num_classes, 23, 23)

     # test num_conv
-    inputs = [torch.randn(1, 32, 45, 45)]
-    head = FCNHead(in_channels=32, channels=16, num_classes=19, num_convs=1)
+    inputs = [torch.randn(1, 8, 23, 23)]
+    head = FCNHead(in_channels=8, channels=4, num_classes=19, num_convs=1)
     if torch.cuda.is_available():
         head, inputs = to_cuda(head, inputs)
     assert len(head.convs) == 1
     outputs = head(inputs)
-    assert outputs.shape == (1, head.num_classes, 45, 45)
+    assert outputs.shape == (1, head.num_classes, 23, 23)

     # test num_conv = 0
-    inputs = [torch.randn(1, 32, 45, 45)]
+    inputs = [torch.randn(1, 8, 23, 23)]
     head = FCNHead(
-        in_channels=32,
-        channels=32,
+        in_channels=8,
+        channels=8,
         num_classes=19,
         num_convs=0,
         concat_input=False)
@@ -96,7 +96,7 @@ def test_fcn_head():
         head, inputs = to_cuda(head, inputs)
     assert isinstance(head.convs, torch.nn.Identity)
     outputs = head(inputs)
-    assert outputs.shape == (1, head.num_classes, 45, 45)
+    assert outputs.shape == (1, head.num_classes, 23, 23)


 def test_sep_fcn_head():
@@ -108,9 +108,9 @@ def test_sep_fcn_head():
         num_classes=19,
         in_index=-1,
         norm_cfg=dict(type='BN', requires_grad=True, momentum=0.01))
-    x = [torch.rand(2, 128, 32, 32)]
+    x = [torch.rand(2, 128, 8, 8)]
     output = head(x)
-    assert output.shape == (2, head.num_classes, 32, 32)
+    assert output.shape == (2, head.num_classes, 8, 8)
     assert not head.concat_input
     assert isinstance(head.convs[0], DepthwiseSeparableConvModule)
     assert isinstance(head.convs[1], DepthwiseSeparableConvModule)
@@ -123,9 +123,9 @@ def test_sep_fcn_head():
         num_classes=19,
         in_index=-1,
         norm_cfg=dict(type='BN', requires_grad=True, momentum=0.01))
-    x = [torch.rand(3, 64, 32, 32)]
+    x = [torch.rand(3, 64, 8, 8)]
     output = head(x)
-    assert output.shape == (3, head.num_classes, 32, 32)
+    assert output.shape == (3, head.num_classes, 8, 8)
     assert head.concat_input
     assert isinstance(head.convs[0], DepthwiseSeparableConvModule)
     assert isinstance(head.convs[1], DepthwiseSeparableConvModule)
@@ -6,11 +6,11 @@ from .utils import to_cuda


 def test_gc_head():
-    head = GCHead(in_channels=32, channels=16, num_classes=19)
+    head = GCHead(in_channels=4, channels=4, num_classes=19)
     assert len(head.convs) == 2
     assert hasattr(head, 'gc_block')
-    inputs = [torch.randn(1, 32, 45, 45)]
+    inputs = [torch.randn(1, 4, 23, 23)]
     if torch.cuda.is_available():
         head, inputs = to_cuda(head, inputs)
     outputs = head(inputs)
-    assert outputs.shape == (1, head.num_classes, 45, 45)
+    assert outputs.shape == (1, head.num_classes, 23, 23)
@@ -7,14 +7,14 @@ from .utils import to_cuda

 def test_isa_head():

-    inputs = [torch.randn(1, 32, 45, 45)]
+    inputs = [torch.randn(1, 8, 23, 23)]
     isa_head = ISAHead(
-        in_channels=32,
-        channels=16,
+        in_channels=8,
+        channels=4,
         num_classes=19,
-        isa_channels=16,
+        isa_channels=4,
         down_factor=(8, 8))
     if torch.cuda.is_available():
         isa_head, inputs = to_cuda(isa_head, inputs)
     output = isa_head(inputs)
-    assert output.shape == (1, isa_head.num_classes, 45, 45)
+    assert output.shape == (1, isa_head.num_classes, 23, 23)
@@ -9,9 +9,9 @@ def test_lraspp_head():
     with pytest.raises(ValueError):
         # check invalid input_transform
         LRASPPHead(
-            in_channels=(16, 16, 576),
+            in_channels=(4, 4, 123),
             in_index=(0, 1, 2),
-            channels=128,
+            channels=32,
             input_transform='resize_concat',
             dropout_ratio=0.1,
             num_classes=19,
@@ -24,9 +24,9 @@ def test_lraspp_head():
     with pytest.raises(AssertionError):
         # check invalid branch_channels
         LRASPPHead(
-            in_channels=(16, 16, 576),
+            in_channels=(4, 4, 123),
             in_index=(0, 1, 2),
-            channels=128,
+            channels=32,
             branch_channels=64,
             input_transform='multiple_select',
             dropout_ratio=0.1,
@@ -39,9 +39,9 @@ def test_lraspp_head():

     # test with default settings
     lraspp_head = LRASPPHead(
-        in_channels=(16, 16, 576),
+        in_channels=(4, 4, 123),
         in_index=(0, 1, 2),
-        channels=128,
+        channels=32,
         input_transform='multiple_select',
         dropout_ratio=0.1,
         num_classes=19,
@@ -51,18 +51,18 @@ def test_lraspp_head():
         loss_decode=dict(
             type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))
     inputs = [
-        torch.randn(2, 16, 45, 45),
-        torch.randn(2, 16, 28, 28),
-        torch.randn(2, 576, 14, 14)
+        torch.randn(2, 4, 45, 45),
+        torch.randn(2, 4, 28, 28),
+        torch.randn(2, 123, 14, 14)
     ]
     with pytest.raises(RuntimeError):
         # check invalid inputs
         output = lraspp_head(inputs)

     inputs = [
-        torch.randn(2, 16, 111, 111),
-        torch.randn(2, 16, 77, 77),
-        torch.randn(2, 576, 55, 55)
+        torch.randn(2, 4, 111, 111),
+        torch.randn(2, 4, 77, 77),
+        torch.randn(2, 123, 55, 55)
     ]
     output = lraspp_head(inputs)
     assert output.shape == (2, 19, 111, 111)
@@ -6,11 +6,11 @@ from .utils import to_cuda


 def test_nl_head():
-    head = NLHead(in_channels=32, channels=16, num_classes=19)
+    head = NLHead(in_channels=8, channels=4, num_classes=19)
     assert len(head.convs) == 2
     assert hasattr(head, 'nl_block')
-    inputs = [torch.randn(1, 32, 45, 45)]
+    inputs = [torch.randn(1, 8, 23, 23)]
     if torch.cuda.is_available():
         head, inputs = to_cuda(head, inputs)
     outputs = head(inputs)
-    assert outputs.shape == (1, head.num_classes, 45, 45)
+    assert outputs.shape == (1, head.num_classes, 23, 23)
@@ -7,13 +7,13 @@ from .utils import to_cuda

 def test_ocr_head():

-    inputs = [torch.randn(1, 32, 45, 45)]
+    inputs = [torch.randn(1, 8, 23, 23)]
     ocr_head = OCRHead(
-        in_channels=32, channels=16, num_classes=19, ocr_channels=8)
-    fcn_head = FCNHead(in_channels=32, channels=16, num_classes=19)
+        in_channels=8, channels=4, num_classes=19, ocr_channels=8)
+    fcn_head = FCNHead(in_channels=8, channels=4, num_classes=19)
     if torch.cuda.is_available():
         head, inputs = to_cuda(ocr_head, inputs)
         head, inputs = to_cuda(fcn_head, inputs)
     prev_output = fcn_head(inputs)
     output = ocr_head(inputs, prev_output)
-    assert output.shape == (1, ocr_head.num_classes, 45, 45)
+    assert output.shape == (1, ocr_head.num_classes, 23, 23)
@@ -11,112 +11,112 @@ def test_psa_head():
     with pytest.raises(AssertionError):
         # psa_type must be in 'bi-direction', 'collect', 'distribute'
         PSAHead(
-            in_channels=32,
-            channels=16,
+            in_channels=4,
+            channels=2,
             num_classes=19,
-            mask_size=(39, 39),
+            mask_size=(13, 13),
             psa_type='gather')

     # test no norm_cfg
     head = PSAHead(
-        in_channels=32, channels=16, num_classes=19, mask_size=(39, 39))
+        in_channels=4, channels=2, num_classes=19, mask_size=(13, 13))
     assert not _conv_has_norm(head, sync_bn=False)

     # test with norm_cfg
     head = PSAHead(
-        in_channels=32,
-        channels=16,
+        in_channels=4,
+        channels=2,
         num_classes=19,
-        mask_size=(39, 39),
+        mask_size=(13, 13),
         norm_cfg=dict(type='SyncBN'))
     assert _conv_has_norm(head, sync_bn=True)

     # test 'bi-direction' psa_type
-    inputs = [torch.randn(1, 32, 39, 39)]
+    inputs = [torch.randn(1, 4, 13, 13)]
     head = PSAHead(
-        in_channels=32, channels=16, num_classes=19, mask_size=(39, 39))
+        in_channels=4, channels=2, num_classes=19, mask_size=(13, 13))
     if torch.cuda.is_available():
         head, inputs = to_cuda(head, inputs)
     outputs = head(inputs)
-    assert outputs.shape == (1, head.num_classes, 39, 39)
+    assert outputs.shape == (1, head.num_classes, 13, 13)

     # test 'bi-direction' psa_type, shrink_factor=1
-    inputs = [torch.randn(1, 32, 39, 39)]
+    inputs = [torch.randn(1, 4, 13, 13)]
     head = PSAHead(
-        in_channels=32,
-        channels=16,
+        in_channels=4,
+        channels=2,
         num_classes=19,
-        mask_size=(39, 39),
+        mask_size=(13, 13),
         shrink_factor=1)
     if torch.cuda.is_available():
         head, inputs = to_cuda(head, inputs)
     outputs = head(inputs)
-    assert outputs.shape == (1, head.num_classes, 39, 39)
+    assert outputs.shape == (1, head.num_classes, 13, 13)

     # test 'bi-direction' psa_type with soft_max
-    inputs = [torch.randn(1, 32, 39, 39)]
+    inputs = [torch.randn(1, 4, 13, 13)]
     head = PSAHead(
-        in_channels=32,
-        channels=16,
+        in_channels=4,
+        channels=2,
         num_classes=19,
-        mask_size=(39, 39),
+        mask_size=(13, 13),
         psa_softmax=True)
     if torch.cuda.is_available():
         head, inputs = to_cuda(head, inputs)
     outputs = head(inputs)
-    assert outputs.shape == (1, head.num_classes, 39, 39)
+    assert outputs.shape == (1, head.num_classes, 13, 13)

     # test 'collect' psa_type
-    inputs = [torch.randn(1, 32, 39, 39)]
+    inputs = [torch.randn(1, 4, 13, 13)]
     head = PSAHead(
-        in_channels=32,
-        channels=16,
+        in_channels=4,
+        channels=2,
         num_classes=19,
-        mask_size=(39, 39),
+        mask_size=(13, 13),
         psa_type='collect')
     if torch.cuda.is_available():
         head, inputs = to_cuda(head, inputs)
     outputs = head(inputs)
-    assert outputs.shape == (1, head.num_classes, 39, 39)
+    assert outputs.shape == (1, head.num_classes, 13, 13)

     # test 'collect' psa_type, shrink_factor=1
-    inputs = [torch.randn(1, 32, 39, 39)]
+    inputs = [torch.randn(1, 4, 13, 13)]
     head = PSAHead(
-        in_channels=32,
-        channels=16,
+        in_channels=4,
+        channels=2,
         num_classes=19,
-        mask_size=(39, 39),
+        mask_size=(13, 13),
         shrink_factor=1,
         psa_type='collect')
     if torch.cuda.is_available():
         head, inputs = to_cuda(head, inputs)
     outputs = head(inputs)
-    assert outputs.shape == (1, head.num_classes, 39, 39)
+    assert outputs.shape == (1, head.num_classes, 13, 13)

     # test 'collect' psa_type, shrink_factor=1, compact=True
-    inputs = [torch.randn(1, 32, 39, 39)]
+    inputs = [torch.randn(1, 4, 13, 13)]
     head = PSAHead(
-        in_channels=32,
-        channels=16,
+        in_channels=4,
+        channels=2,
         num_classes=19,
-        mask_size=(39, 39),
+        mask_size=(13, 13),
         psa_type='collect',
         shrink_factor=1,
         compact=True)
     if torch.cuda.is_available():
         head, inputs = to_cuda(head, inputs)
     outputs = head(inputs)
-    assert outputs.shape == (1, head.num_classes, 39, 39)
+    assert outputs.shape == (1, head.num_classes, 13, 13)

     # test 'distribute' psa_type
-    inputs = [torch.randn(1, 32, 39, 39)]
+    inputs = [torch.randn(1, 4, 13, 13)]
     head = PSAHead(
-        in_channels=32,
-        channels=16,
+        in_channels=4,
+        channels=2,
         num_classes=19,
-        mask_size=(39, 39),
+        mask_size=(13, 13),
         psa_type='distribute')
     if torch.cuda.is_available():
         head, inputs = to_cuda(head, inputs)
     outputs = head(inputs)
-    assert outputs.shape == (1, head.num_classes, 39, 39)
+    assert outputs.shape == (1, head.num_classes, 13, 13)
@@ -10,27 +10,27 @@ def test_psp_head():

     with pytest.raises(AssertionError):
         # pool_scales must be list|tuple
-        PSPHead(in_channels=32, channels=16, num_classes=19, pool_scales=1)
+        PSPHead(in_channels=4, channels=2, num_classes=19, pool_scales=1)

     # test no norm_cfg
-    head = PSPHead(in_channels=32, channels=16, num_classes=19)
+    head = PSPHead(in_channels=4, channels=2, num_classes=19)
     assert not _conv_has_norm(head, sync_bn=False)

     # test with norm_cfg
     head = PSPHead(
-        in_channels=32,
-        channels=16,
+        in_channels=4,
+        channels=2,
         num_classes=19,
         norm_cfg=dict(type='SyncBN'))
     assert _conv_has_norm(head, sync_bn=True)

-    inputs = [torch.randn(1, 32, 45, 45)]
+    inputs = [torch.randn(1, 4, 23, 23)]
     head = PSPHead(
-        in_channels=32, channels=16, num_classes=19, pool_scales=(1, 2, 3))
+        in_channels=4, channels=2, num_classes=19, pool_scales=(1, 2, 3))
     if torch.cuda.is_available():
         head, inputs = to_cuda(head, inputs)
     assert head.psp_modules[0][0].output_size == 1
     assert head.psp_modules[1][0].output_size == 2
     assert head.psp_modules[2][0].output_size == 3
     outputs = head(inputs)
-    assert outputs.shape == (1, head.num_classes, 45, 45)
+    assert outputs.shape == (1, head.num_classes, 23, 23)
@@ -10,29 +10,29 @@ def test_setr_mla_head(capsys):

     with pytest.raises(AssertionError):
         # MLA requires input multiple stage feature information.
-        SETRMLAHead(in_channels=32, channels=16, num_classes=19, in_index=1)
+        SETRMLAHead(in_channels=8, channels=4, num_classes=19, in_index=1)

     with pytest.raises(AssertionError):
         # multiple in_indexs requires multiple in_channels.
         SETRMLAHead(
-            in_channels=32, channels=16, num_classes=19, in_index=(0, 1, 2, 3))
+            in_channels=8, channels=4, num_classes=19, in_index=(0, 1, 2, 3))

     with pytest.raises(AssertionError):
         # channels should be len(in_channels) * mla_channels
         SETRMLAHead(
-            in_channels=(32, 32, 32, 32),
-            channels=32,
-            mla_channels=16,
+            in_channels=(8, 8, 8, 8),
+            channels=8,
+            mla_channels=4,
             in_index=(0, 1, 2, 3),
             num_classes=19)

     # test inference of MLA head
-    img_size = (32, 32)
-    patch_size = 16
+    img_size = (8, 8)
+    patch_size = 4
     head = SETRMLAHead(
-        in_channels=(32, 32, 32, 32),
-        channels=64,
-        mla_channels=16,
+        in_channels=(8, 8, 8, 8),
+        channels=16,
+        mla_channels=4,
         in_index=(0, 1, 2, 3),
         num_classes=19,
         norm_cfg=dict(type='BN'))
@@ -40,10 +40,10 @@ def test_setr_mla_head(capsys):
     h, w = img_size[0] // patch_size, img_size[1] // patch_size
     # Input square NCHW format feature information
     x = [
-        torch.randn(1, 32, h, w),
-        torch.randn(1, 32, h, w),
-        torch.randn(1, 32, h, w),
-        torch.randn(1, 32, h, w)
+        torch.randn(1, 8, h, w),
+        torch.randn(1, 8, h, w),
+        torch.randn(1, 8, h, w),
+        torch.randn(1, 8, h, w)
     ]
     if torch.cuda.is_available():
         head, x = to_cuda(head, x)
@@ -52,10 +52,10 @@ def test_setr_mla_head(capsys):
# Input non-square NCHW format feature information
x = [
torch.randn(1, 32, h, w * 2),
torch.randn(1, 32, h, w * 2),
torch.randn(1, 32, h, w * 2),
torch.randn(1, 32, h, w * 2)
torch.randn(1, 8, h, w * 2),
torch.randn(1, 8, h, w * 2),
torch.randn(1, 8, h, w * 2),
torch.randn(1, 8, h, w * 2)
]
if torch.cuda.is_available():
head, x = to_cuda(head, x)

@@ -15,12 +15,12 @@ def test_setr_up_head(capsys):
with pytest.raises(AssertionError):
# in_channels must be int type and in_channels must be same
# as embed_dim.
SETRUPHead(in_channels=(32, 32), channels=16, num_classes=19)
SETRUPHead(in_channels=(4, 4), channels=2, num_classes=19)

# test init_cfg of head
head = SETRUPHead(
in_channels=32,
channels=16,
in_channels=4,
channels=2,
norm_cfg=dict(type='SyncBN'),
num_classes=19,
init_cfg=dict(type='Kaiming'))

@@ -28,11 +28,11 @@ def test_setr_up_head(capsys):
# test inference of Naive head
# the auxiliary head of Naive head is same as Naive head
img_size = (32, 32)
patch_size = 16
img_size = (4, 4)
patch_size = 2
head = SETRUPHead(
in_channels=32,
channels=16,
in_channels=4,
channels=2,
num_classes=19,
num_convs=1,
up_scale=4,

@@ -42,14 +42,14 @@ def test_setr_up_head(capsys):
h, w = img_size[0] // patch_size, img_size[1] // patch_size

# Input square NCHW format feature information
x = [torch.randn(1, 32, h, w)]
x = [torch.randn(1, 4, h, w)]
if torch.cuda.is_available():
head, x = to_cuda(head, x)
out = head(x)
assert out.shape == (1, head.num_classes, h * 4, w * 4)

# Input non-square NCHW format feature information
x = [torch.randn(1, 32, h, w * 2)]
x = [torch.randn(1, 4, h, w * 2)]
if torch.cuda.is_available():
head, x = to_cuda(head, x)
out = head(x)

@@ -10,25 +10,25 @@ def test_uper_head():
with pytest.raises(AssertionError):
# fpn_in_channels must be list|tuple
UPerHead(in_channels=32, channels=16, num_classes=19)
UPerHead(in_channels=4, channels=2, num_classes=19)

# test no norm_cfg
head = UPerHead(
in_channels=[32, 16], channels=16, num_classes=19, in_index=[-2, -1])
in_channels=[4, 2], channels=2, num_classes=19, in_index=[-2, -1])
assert not _conv_has_norm(head, sync_bn=False)

# test with norm_cfg
head = UPerHead(
in_channels=[32, 16],
channels=16,
in_channels=[4, 2],
channels=2,
num_classes=19,
norm_cfg=dict(type='SyncBN'),
in_index=[-2, -1])
assert _conv_has_norm(head, sync_bn=True)

inputs = [torch.randn(1, 32, 45, 45), torch.randn(1, 16, 21, 21)]
inputs = [torch.randn(1, 4, 45, 45), torch.randn(1, 2, 21, 21)]
head = UPerHead(
in_channels=[32, 16], channels=16, num_classes=19, in_index=[-2, -1])
in_channels=[4, 2], channels=2, num_classes=19, in_index=[-2, -1])
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)

@@ -5,15 +5,15 @@ from mmseg.models import FPN

def test_fpn():
in_channels = [256, 512, 1024, 2048]
in_channels = [64, 128, 256, 512]
inputs = [
torch.randn(1, c, 56 // 2**i, 56 // 2**i)
for i, c in enumerate(in_channels)
]

fpn = FPN(in_channels, 256, len(in_channels))
fpn = FPN(in_channels, 64, len(in_channels))
outputs = fpn(inputs)
assert outputs[0].shape == torch.Size([1, 256, 56, 56])
assert outputs[1].shape == torch.Size([1, 256, 28, 28])
assert outputs[2].shape == torch.Size([1, 256, 14, 14])
assert outputs[3].shape == torch.Size([1, 256, 7, 7])
assert outputs[0].shape == torch.Size([1, 64, 56, 56])
assert outputs[1].shape == torch.Size([1, 64, 28, 28])
assert outputs[2].shape == torch.Size([1, 64, 14, 14])
assert outputs[3].shape == torch.Size([1, 64, 7, 7])

@@ -10,44 +10,44 @@ from ..test_heads.utils import _conv_has_norm, to_cuda
def test_ic_neck():
# test with norm_cfg
neck = ICNeck(
in_channels=(64, 256, 256),
out_channels=128,
in_channels=(4, 16, 16),
out_channels=8,
norm_cfg=dict(type='SyncBN'),
align_corners=False)
assert _conv_has_norm(neck, sync_bn=True)

inputs = [
torch.randn(1, 64, 128, 256),
torch.randn(1, 256, 65, 129),
torch.randn(1, 256, 32, 64)
torch.randn(1, 4, 32, 64),
torch.randn(1, 16, 16, 32),
torch.randn(1, 16, 8, 16)
]
neck = ICNeck(
in_channels=(64, 256, 256),
out_channels=128,
in_channels=(4, 16, 16),
out_channels=4,
norm_cfg=dict(type='BN', requires_grad=True),
align_corners=False)
if torch.cuda.is_available():
neck, inputs = to_cuda(neck, inputs)

outputs = neck(inputs)
assert outputs[0].shape == (1, 128, 65, 129)
assert outputs[1].shape == (1, 128, 128, 256)
assert outputs[1].shape == (1, 128, 128, 256)
assert outputs[0].shape == (1, 4, 16, 32)
assert outputs[1].shape == (1, 4, 32, 64)
assert outputs[1].shape == (1, 4, 32, 64)


def test_ic_neck_cascade_feature_fusion():
cff = CascadeFeatureFusion(256, 256, 128)
assert cff.conv_low.in_channels == 256
assert cff.conv_low.out_channels == 128
assert cff.conv_high.in_channels == 256
assert cff.conv_high.out_channels == 128
cff = CascadeFeatureFusion(64, 64, 32)
assert cff.conv_low.in_channels == 64
assert cff.conv_low.out_channels == 32
assert cff.conv_high.in_channels == 64
assert cff.conv_high.out_channels == 32


def test_ic_neck_input_channels():
with pytest.raises(AssertionError):
# ICNet Neck input channel constraints.
ICNeck(
in_channels=(64, 256, 256, 256),
out_channels=128,
in_channels=(16, 64, 64, 64),
out_channels=32,
norm_cfg=dict(type='BN', requires_grad=True),
align_corners=False)

@@ -7,34 +7,40 @@ from mmseg.models.necks import JPU
def test_fastfcn_neck():
# Test FastFCN Standard Forward
model = JPU()
model = JPU(
in_channels=(64, 128, 256),
mid_channels=64,
start_level=0,
end_level=-1,
dilations=(1, 2, 4, 8),
)
model.init_weights()
model.train()
batch_size = 1
input = [
torch.randn(batch_size, 512, 64, 128),
torch.randn(batch_size, 1024, 32, 64),
torch.randn(batch_size, 2048, 16, 32)
torch.randn(batch_size, 64, 64, 128),
torch.randn(batch_size, 128, 32, 64),
torch.randn(batch_size, 256, 16, 32)
]
feat = model(input)

assert len(feat) == 3
assert feat[0].shape == torch.Size([batch_size, 512, 64, 128])
assert feat[1].shape == torch.Size([batch_size, 1024, 32, 64])
assert feat[2].shape == torch.Size([batch_size, 2048, 64, 128])
assert feat[0].shape == torch.Size([batch_size, 64, 64, 128])
assert feat[1].shape == torch.Size([batch_size, 128, 32, 64])
assert feat[2].shape == torch.Size([batch_size, 256, 64, 128])

with pytest.raises(AssertionError):
# FastFCN input and in_channels constraints.
JPU(in_channels=(256, 512, 1024), start_level=0, end_level=5)
JPU(in_channels=(256, 64, 128), start_level=0, end_level=5)

# Test not default start_level
model = JPU(in_channels=(512, 1024, 2048), start_level=1, end_level=-1)
model = JPU(in_channels=(64, 128, 256), start_level=1, end_level=-1)
input = [
torch.randn(batch_size, 512, 64, 128),
torch.randn(batch_size, 1024, 32, 64),
torch.randn(batch_size, 2048, 16, 32)
torch.randn(batch_size, 64, 64, 128),
torch.randn(batch_size, 128, 32, 64),
torch.randn(batch_size, 256, 16, 32)
]
feat = model(input)
assert len(feat) == 2
assert feat[0].shape == torch.Size([batch_size, 1024, 32, 64])
assert feat[0].shape == torch.Size([batch_size, 128, 32, 64])
assert feat[1].shape == torch.Size([batch_size, 2048, 32, 64])

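One detail worth noting in the JPU hunk above: the last output is asserted at 64x128, the same spatial size as the first retained level, even though the deepest input enters at 16x32. That reflects the joint-upsampling idea of bringing the deeper maps up to the shallowest retained resolution before fusing them. A minimal, self-contained illustration of that spatial behaviour only; it is not the mmseg JPU code, and the real module's output channel count additionally depends on mid_channels and the dilation branches, which this sketch does not model:

# Illustration only: upsample deeper feature maps to the resolution of the
# shallowest level and concatenate, which is why the last JPU output above
# shares the 64x128 spatial size of the first one.
import torch
import torch.nn.functional as F

feats = [
    torch.randn(1, 64, 64, 128),   # shallow level, target resolution
    torch.randn(1, 128, 32, 64),
    torch.randn(1, 256, 16, 32),
]
target = feats[0].shape[2:]
upsampled = [
    F.interpolate(f, size=target, mode='bilinear', align_corners=False)
    for f in feats
]
fused = torch.cat(upsampled, dim=1)
print(fused.shape)  # torch.Size([1, 448, 64, 128])
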
@@ -5,12 +5,12 @@ from mmseg.models import MLANeck

def test_mla():
in_channels = [1024, 1024, 1024, 1024]
mla = MLANeck(in_channels, 256)
in_channels = [4, 4, 4, 4]
mla = MLANeck(in_channels, 32)

inputs = [torch.randn(1, c, 24, 24) for i, c in enumerate(in_channels)]
inputs = [torch.randn(1, c, 12, 12) for i, c in enumerate(in_channels)]
outputs = mla(inputs)
assert outputs[0].shape == torch.Size([1, 256, 24, 24])
assert outputs[1].shape == torch.Size([1, 256, 24, 24])
assert outputs[2].shape == torch.Size([1, 256, 24, 24])
assert outputs[3].shape == torch.Size([1, 256, 24, 24])
assert outputs[0].shape == torch.Size([1, 32, 12, 12])
assert outputs[1].shape == torch.Size([1, 32, 12, 12])
assert outputs[2].shape == torch.Size([1, 32, 12, 12])
assert outputs[3].shape == torch.Size([1, 32, 12, 12])

@@ -7,26 +7,26 @@ from mmseg.models import MultiLevelNeck
def test_multilevel_neck():

# Test init_weights
MultiLevelNeck([266], 256).init_weights()
MultiLevelNeck([266], 32).init_weights()

# Test multi feature maps
in_channels = [256, 512, 1024, 2048]
in_channels = [32, 64, 128, 256]
inputs = [torch.randn(1, c, 14, 14) for i, c in enumerate(in_channels)]

neck = MultiLevelNeck(in_channels, 256)
neck = MultiLevelNeck(in_channels, 32)
outputs = neck(inputs)
assert outputs[0].shape == torch.Size([1, 256, 7, 7])
assert outputs[1].shape == torch.Size([1, 256, 14, 14])
assert outputs[2].shape == torch.Size([1, 256, 28, 28])
assert outputs[3].shape == torch.Size([1, 256, 56, 56])
assert outputs[0].shape == torch.Size([1, 32, 7, 7])
assert outputs[1].shape == torch.Size([1, 32, 14, 14])
assert outputs[2].shape == torch.Size([1, 32, 28, 28])
assert outputs[3].shape == torch.Size([1, 32, 56, 56])

# Test one feature map
in_channels = [768]
inputs = [torch.randn(1, 768, 14, 14)]

neck = MultiLevelNeck(in_channels, 256)
neck = MultiLevelNeck(in_channels, 32)
outputs = neck(inputs)
assert outputs[0].shape == torch.Size([1, 256, 7, 7])
assert outputs[1].shape == torch.Size([1, 256, 14, 14])
assert outputs[2].shape == torch.Size([1, 256, 28, 28])
assert outputs[3].shape == torch.Size([1, 256, 56, 56])
assert outputs[0].shape == torch.Size([1, 32, 7, 7])
assert outputs[1].shape == torch.Size([1, 32, 14, 14])
assert outputs[2].shape == torch.Size([1, 32, 28, 28])
assert outputs[3].shape == torch.Size([1, 32, 56, 56])

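A quick sanity check of the MultiLevelNeck asserts above: with 14x14 inputs, the four output resolutions 7, 14, 28 and 56 correspond to rescale factors of 0.5, 1, 2 and 4, which I take to be the neck's default scales; a two-line check under that assumption:

# Assuming MultiLevelNeck rescales its outputs by (0.5, 1, 2, 4) by default,
# a 14x14 input yields the 7/14/28/56 spatial sizes asserted above.
scales = (0.5, 1, 2, 4)
print([int(14 * s) for s in scales])  # [7, 14, 28, 56]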