Mirror of https://github.com/open-mmlab/mmsegmentation.git, synced 2025-06-03 22:03:48 +08:00.
Latest commit (squashed):
* delete markdownlint
* Support MobileNetV3
* fix import
* add mobilenetv3 head and configs
* Modify MobileNetV3 to semantic segmentation version
* modify mobilenetv3 configs
* add std configs
* fix Conv2dAdaptivePadding bug
* add configs
* add unitest and fix bugs
* fix lraspp unitest bugs
* restore
* fix unitest
* add MobileNetV3 docstring
* add mmcv
* add mmcv
* fix syntax bug
* fix unitest bug
* fix unitest bug
* fix unitest bugs
* fix docstring
* add configs
* restore
* delete unnecessary assert
* modify unitest
* delete benchmark
import mmcv
import pytest
import torch

from mmseg.models.utils.se_layer import SELayer


def test_se_layer():
    with pytest.raises(AssertionError):
        # act_cfg must not be a single-element tuple; this should raise.
        SELayer(32, act_cfg=(dict(type='ReLU'), ))

    # Test the default config with channels = 16.
    se_layer = SELayer(16)
    assert se_layer.conv1.conv.kernel_size == (1, 1)
    assert se_layer.conv1.conv.stride == (1, 1)
    assert se_layer.conv1.conv.padding == (0, 0)
    assert isinstance(se_layer.conv1.activate, torch.nn.ReLU)
    assert se_layer.conv2.conv.kernel_size == (1, 1)
    assert se_layer.conv2.conv.stride == (1, 1)
    assert se_layer.conv2.conv.padding == (0, 0)
    assert isinstance(se_layer.conv2.activate, mmcv.cnn.HSigmoid)

    # The layer rescales channels, so the output shape matches the input.
    x = torch.rand(1, 16, 64, 64)
    output = se_layer(x)
    assert output.shape == (1, 16, 64, 64)

    # Test config with channels = 16 and act_cfg = dict(type='ReLU'),
    # which applies ReLU after both convolutions.
    se_layer = SELayer(16, act_cfg=dict(type='ReLU'))
    assert se_layer.conv1.conv.kernel_size == (1, 1)
    assert se_layer.conv1.conv.stride == (1, 1)
    assert se_layer.conv1.conv.padding == (0, 0)
    assert isinstance(se_layer.conv1.activate, torch.nn.ReLU)
    assert se_layer.conv2.conv.kernel_size == (1, 1)
    assert se_layer.conv2.conv.stride == (1, 1)
    assert se_layer.conv2.conv.padding == (0, 0)
    assert isinstance(se_layer.conv2.activate, torch.nn.ReLU)

    x = torch.rand(1, 16, 64, 64)
    output = se_layer(x)
    assert output.shape == (1, 16, 64, 64)
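
For context beyond the test, here is a minimal usage sketch (not part of the original file) of SELayer as a standalone squeeze-and-excitation block. It assumes mmsegmentation and mmcv are installed and relies only on behaviour the test above asserts: the default config activates conv1 with ReLU and conv2 with mmcv's HSigmoid, and the output keeps the input shape.

import torch

from mmseg.models.utils.se_layer import SELayer

# Sketch only: channel-wise recalibration of a 16-channel feature map.
# The block rescales channels, so the output shape equals the input shape.
se = SELayer(16)
feat = torch.rand(2, 16, 32, 32)
out = se(feat)
assert out.shape == feat.shape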