mirror of https://github.com/open-mmlab/mmclassification.git
* Refactor MobileNetV3 structure and add ConvClsHead.
* Change model's name from 'MobileNetv3' to 'MobileNetV3'.
* Modify configs for MobileNetV3 on CIFAR10, and add MobileNetV3 configs for ImageNet.
* Fix activation setting bugs in MobileNetV3, and remove bias in SELayer.
* Modify unit tests.
* Remove useless config and file.
* Fix mobilenetv3-large arch setting.
* Add dropout option in ConvClsHead.
* Fix MobileNetV3 structure according to the torchvision version:
  1. Remove the `with_expand_conv` option in InvertedResidual; it should be decided by the channels.
  2. Revert the activation function; it should come before the SE layer.
* Format code.
* Rename MobileNetV3 arch "big" to "large".
* Add mobilenetv3_small torchvision training recipe.
* Modify default `out_indices` of MobileNetV3; now it changes according to `arch` if not specified.
* Add MobileNetV3 large config.
* Add MobileNetV3 README.
* Modify InvertedResidual unit test.
* Refactor ConvClsHead to StackedLinearClsHead, and add unit tests.
* Add unit test for `simple_test` of `StackedLinearClsHead`.
* Fix typo.

Co-authored-by: Yidi Shao <ydshao@smail.nju.edu.cn>
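For context, the StackedLinearClsHead introduced by this commit is the piece a model config would reference. Below is a minimal sketch, assuming the standard mmcls `ImageClassifier` config layout; the channel sizes (960 in, 1280 hidden) and the other hyperparameters are illustrative assumptions, not values taken from this commit.

    # Hypothetical config sketch: MobileNetV3-large with StackedLinearClsHead.
    # All numbers below are assumptions for illustration only.
    model = dict(
        type='ImageClassifier',
        backbone=dict(type='MobileNetV3', arch='large'),
        neck=dict(type='GlobalAveragePooling'),
        head=dict(
            type='StackedLinearClsHead',
            num_classes=1000,
            in_channels=960,
            mid_channels=[1280],
            dropout_rate=0.2,
            act_cfg=dict(type='HSwish'),
            loss=dict(type='CrossEntropyLoss', loss_weight=1.0)))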
98 lines · 2.7 KiB · Python
from unittest.mock import patch

import pytest
import torch

from mmcls.models.heads import (ClsHead, LinearClsHead, MultiLabelClsHead,
                                MultiLabelLinearClsHead, StackedLinearClsHead)

def test_cls_head():
    # test ClsHead with cal_acc=False
    head = ClsHead()
    fake_cls_score = torch.rand(4, 3)
    fake_gt_label = torch.randint(0, 2, (4, ))

    losses = head.loss(fake_cls_score, fake_gt_label)
    assert losses['loss'].item() > 0

    # test ClsHead with cal_acc=True
    head = ClsHead(cal_acc=True)
    fake_cls_score = torch.rand(4, 3)
    fake_gt_label = torch.randint(0, 2, (4, ))

    losses = head.loss(fake_cls_score, fake_gt_label)
    assert losses['loss'].item() > 0

    # test LinearClsHead
    head = LinearClsHead(10, 100)
    fake_cls_score = torch.rand(4, 10)
    fake_gt_label = torch.randint(0, 10, (4, ))

    losses = head.loss(fake_cls_score, fake_gt_label)
    assert losses['loss'].item() > 0

def test_multilabel_head():
    head = MultiLabelClsHead()
    fake_cls_score = torch.rand(4, 3)
    fake_gt_label = torch.randint(0, 2, (4, 3))

    losses = head.loss(fake_cls_score, fake_gt_label)
    assert losses['loss'].item() > 0

def test_multilabel_linear_head():
    head = MultiLabelLinearClsHead(3, 5)
    fake_cls_score = torch.rand(4, 3)
    fake_gt_label = torch.randint(0, 2, (4, 3))

    head.init_weights()
    losses = head.loss(fake_cls_score, fake_gt_label)
    assert losses['loss'].item() > 0

def test_stacked_linear_cls_head():
    # test assertion: `mid_channels` is expected to be a sequence, not an int
    with pytest.raises(AssertionError):
        StackedLinearClsHead(num_classes=3, in_channels=5, mid_channels=10)

    # test assertion: `num_classes` must be positive
    with pytest.raises(AssertionError):
        StackedLinearClsHead(num_classes=-1, in_channels=5, mid_channels=[10])

    fake_img = torch.rand(4, 5)  # (B, channel)
    fake_gt_label = torch.randint(0, 2, (4, ))  # (B, ), class indices

    # test forward with default setting
    head = StackedLinearClsHead(
        num_classes=3, in_channels=5, mid_channels=[10])
    head.init_weights()

    losses = head.forward_train(fake_img, fake_gt_label)
    assert losses['loss'].item() > 0

    # test simple_test
    pred = head.simple_test(fake_img)
    assert len(pred) == 4

    # test simple_test while tracing: when exporting to ONNX, simple_test
    # should return the raw score tensor instead of a per-sample list
    p = patch('torch.onnx.is_in_onnx_export', lambda: True)
    p.start()
    pred = head.simple_test(fake_img)
    assert pred.shape == torch.Size((4, 3))
    p.stop()

    # test forward with all options enabled
    head = StackedLinearClsHead(
        num_classes=3,
        in_channels=5,
        mid_channels=[8, 10],
        dropout_rate=0.2,
        norm_cfg=dict(type='BN1d'),
        act_cfg=dict(type='HSwish'))
    head.init_weights()

    losses = head.forward_train(fake_img, fake_gt_label)
    assert losses['loss'].item() > 0
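# The tests above can be run directly with pytest; the file path below is
# an assumption, adjust it to where this file lives in your checkout:
#     pytest tests/test_heads.py -v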