# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import patch
import pytest
import torch
from mmcls.models.heads import (ClsHead, LinearClsHead, MultiLabelClsHead,
                                MultiLabelLinearClsHead, StackedLinearClsHead,
                                VisionTransformerClsHead)


@pytest.mark.parametrize('feat', [torch.rand(4, 3), (torch.rand(4, 3), )])
|
|
|
|
def test_cls_head(feat):
|
2021-04-13 13:52:14 +08:00
|
|
|
|
2021-05-10 14:56:55 +08:00
|
|
|
# test ClsHead with cal_acc=False
|
2021-04-13 13:52:14 +08:00
|
|
|
head = ClsHead()
|
|
|
|
fake_gt_label = torch.randint(0, 2, (4, ))
|
|
|
|
|
2021-09-08 10:38:57 +08:00
|
|
|
losses = head.forward_train(feat, fake_gt_label)
|
2021-04-13 13:52:14 +08:00
|
|
|
assert losses['loss'].item() > 0
|
|
|
|
|
2021-05-10 14:56:55 +08:00
|
|
|
# test ClsHead with cal_acc=True
|
|
|
|
head = ClsHead(cal_acc=True)
|
2021-09-08 10:38:57 +08:00
|
|
|
feat = torch.rand(4, 3)
|
2021-04-13 13:52:14 +08:00
|
|
|
fake_gt_label = torch.randint(0, 2, (4, ))
|
|
|
|
|
2021-09-08 10:38:57 +08:00
|
|
|
losses = head.forward_train(feat, fake_gt_label)
|
2021-04-13 13:52:14 +08:00
|
|
|
assert losses['loss'].item() > 0


@pytest.mark.parametrize('feat', [torch.rand(4, 3), (torch.rand(4, 3), )])
|
|
|
|
def test_linear_head(feat):
|
2021-07-08 22:49:05 +08:00
|
|
|
|
2021-05-10 14:56:55 +08:00
|
|
|
fake_gt_label = torch.randint(0, 10, (4, ))
|
|
|
|
|
2021-07-08 22:49:05 +08:00
|
|
|
# test LinearClsHead forward
|
2021-09-08 10:38:57 +08:00
|
|
|
head = LinearClsHead(10, 3)
|
|
|
|
losses = head.forward_train(feat, fake_gt_label)
|
2021-05-10 14:56:55 +08:00
|
|
|
assert losses['loss'].item() > 0
|
|
|
|
|
2021-07-08 22:49:05 +08:00
|
|
|
# test init weights
|
2021-09-08 10:38:57 +08:00
|
|
|
head = LinearClsHead(10, 3)
|
2021-07-08 22:49:05 +08:00
|
|
|
head.init_weights()
|
|
|
|
assert abs(head.fc.weight).sum() > 0
|
|
|
|
|
|
|
|
# test simple_test
|
2021-09-08 10:38:57 +08:00
|
|
|
head = LinearClsHead(10, 3)
|
|
|
|
pred = head.simple_test(feat)
|
2021-07-08 22:49:05 +08:00
|
|
|
assert isinstance(pred, list) and len(pred) == 4
|
|
|
|
|
|
|
|
with patch('torch.onnx.is_in_onnx_export', return_value=True):
|
2021-09-08 10:38:57 +08:00
|
|
|
head = LinearClsHead(10, 3)
|
|
|
|
pred = head.simple_test(feat)
|
2021-07-08 22:49:05 +08:00
|
|
|
assert pred.shape == (4, 10)


@pytest.mark.parametrize('feat', [torch.rand(4, 3), (torch.rand(4, 3), )])
|
|
|
|
def test_multilabel_head(feat):
|
2021-01-25 18:10:14 +08:00
|
|
|
head = MultiLabelClsHead()
|
|
|
|
fake_gt_label = torch.randint(0, 2, (4, 3))
|
|
|
|
|
2021-09-08 10:38:57 +08:00
|
|
|
losses = head.forward_train(feat, fake_gt_label)
|
2021-01-25 18:10:14 +08:00
|
|
|
assert losses['loss'].item() > 0


@pytest.mark.parametrize('feat', [torch.rand(4, 5), (torch.rand(4, 5), )])
|
|
|
|
def test_multilabel_linear_head(feat):
|
2021-01-25 18:10:14 +08:00
|
|
|
head = MultiLabelLinearClsHead(3, 5)
|
|
|
|
fake_gt_label = torch.randint(0, 2, (4, 3))
|
|
|
|
|
|
|
|
head.init_weights()
|
2021-09-08 10:38:57 +08:00
|
|
|
losses = head.forward_train(feat, fake_gt_label)
|
2021-01-25 18:10:14 +08:00
|
|
|
assert losses['loss'].item() > 0


@pytest.mark.parametrize('feat', [torch.rand(4, 5), (torch.rand(4, 5), )])
|
|
|
|
def test_stacked_linear_cls_head(feat):
|
2021-06-27 23:19:36 +08:00
|
|
|
# test assertion
|
|
|
|
with pytest.raises(AssertionError):
|
|
|
|
StackedLinearClsHead(num_classes=3, in_channels=5, mid_channels=10)
|
|
|
|
|
|
|
|
with pytest.raises(AssertionError):
|
|
|
|
StackedLinearClsHead(num_classes=-1, in_channels=5, mid_channels=[10])
|
|
|
|
|
|
|
|
fake_gt_label = torch.randint(0, 2, (4, )) # B, num_classes
|
|
|
|
|
|
|
|
# test forward with default setting
|
|
|
|
head = StackedLinearClsHead(
|
|
|
|
num_classes=3, in_channels=5, mid_channels=[10])
|
|
|
|
head.init_weights()
|
|
|
|
|
2021-09-08 10:38:57 +08:00
|
|
|
losses = head.forward_train(feat, fake_gt_label)
|
2021-06-27 23:19:36 +08:00
|
|
|
assert losses['loss'].item() > 0
|
|
|
|
|
|
|
|
# test simple test
|
2021-09-08 10:38:57 +08:00
|
|
|
pred = head.simple_test(feat)
|
2021-06-27 23:19:36 +08:00
|
|
|
assert len(pred) == 4
|
|
|
|
|
|
|
|
# test simple test in tracing
|
2021-07-08 22:49:05 +08:00
|
|
|
with patch('torch.onnx.is_in_onnx_export', return_value=True):
|
2021-09-08 10:38:57 +08:00
|
|
|
pred = head.simple_test(feat)
|
2021-07-08 22:49:05 +08:00
|
|
|
assert pred.shape == torch.Size((4, 3))
|
2021-06-27 23:19:36 +08:00
|
|
|
|
|
|
|
# test forward with full function
|
|
|
|
head = StackedLinearClsHead(
|
|
|
|
num_classes=3,
|
|
|
|
in_channels=5,
|
|
|
|
mid_channels=[8, 10],
|
|
|
|
dropout_rate=0.2,
|
|
|
|
norm_cfg=dict(type='BN1d'),
|
|
|
|
act_cfg=dict(type='HSwish'))
|
|
|
|
head.init_weights()
|
|
|
|
|
2021-09-08 10:38:57 +08:00
|
|
|
losses = head.forward_train(feat, fake_gt_label)
|
2021-06-27 23:19:36 +08:00
|
|
|
assert losses['loss'].item() > 0


def test_vit_head():
|
|
|
|
fake_features = torch.rand(4, 100)
|
|
|
|
fake_gt_label = torch.randint(0, 10, (4, ))
|
|
|
|
|
|
|
|
# test vit head forward
|
|
|
|
head = VisionTransformerClsHead(10, 100)
|
|
|
|
losses = head.forward_train(fake_features, fake_gt_label)
|
|
|
|
assert not hasattr(head.layers, 'pre_logits')
|
|
|
|
assert not hasattr(head.layers, 'act')
|
|
|
|
assert losses['loss'].item() > 0
|
|
|
|
|
|
|
|
# test vit head forward with hidden layer
|
|
|
|
head = VisionTransformerClsHead(10, 100, hidden_dim=20)
|
|
|
|
losses = head.forward_train(fake_features, fake_gt_label)
|
|
|
|
assert hasattr(head.layers, 'pre_logits') and hasattr(head.layers, 'act')
|
|
|
|
assert losses['loss'].item() > 0
|
|
|
|
|
|
|
|
# test vit head init_weights
|
|
|
|
head = VisionTransformerClsHead(10, 100, hidden_dim=20)
|
|
|
|
head.init_weights()
|
|
|
|
assert abs(head.layers.pre_logits.weight).sum() > 0
|
|
|
|
|
|
|
|
# test simple_test
|
|
|
|
head = VisionTransformerClsHead(10, 100, hidden_dim=20)
|
|
|
|
pred = head.simple_test(fake_features)
|
|
|
|
assert isinstance(pred, list) and len(pred) == 4
|
|
|
|
|
|
|
|
with patch('torch.onnx.is_in_onnx_export', return_value=True):
|
|
|
|
head = VisionTransformerClsHead(10, 100, hidden_dim=20)
|
|
|
|
pred = head.simple_test(fake_features)
|
|
|
|
assert pred.shape == (4, 10)
|
|
|
|
|
|
|
|
# test assertion
|
|
|
|
with pytest.raises(ValueError):
|
|
|
|
VisionTransformerClsHead(-1, 100)