mirror of https://github.com/open-mmlab/mmocr.git
commit 717460055c (parent 05c4bc3c88)
@@ -0,0 +1,23 @@
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase

from mmocr.evaluation.functional import compute_hmean


class TestHmean(TestCase):

    def test_compute_hmean(self):
        with self.assertRaises(AssertionError):
            compute_hmean(0, 0, 0.0, 0)
        with self.assertRaises(AssertionError):
            compute_hmean(0, 0, 0, 0.0)
        with self.assertRaises(AssertionError):
            compute_hmean([1], 0, 0, 0)
        with self.assertRaises(AssertionError):
            compute_hmean(0, [1], 0, 0)

        _, _, hmean = compute_hmean(2, 2, 2, 2)
        self.assertEqual(hmean, 1)

        _, _, hmean = compute_hmean(0, 0, 2, 2)
        self.assertEqual(hmean, 0)
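
The expected values asserted above follow from the harmonic mean of recall and precision. A minimal sketch of that arithmetic, assuming compute_hmean takes the accumulated recall hits, the accumulated precision hits, the ground-truth count and the prediction count, and returns (recall, precision, hmean); the helper below is hypothetical and for illustration only:

def hmean_sketch(accum_hit_recall, accum_hit_prec, gt_num, pred_num):
    """Hypothetical stand-in for mmocr's compute_hmean (illustration only)."""
    recall = 0.0 if gt_num == 0 else accum_hit_recall / gt_num
    precision = 0.0 if pred_num == 0 else accum_hit_prec / pred_num
    denom = recall + precision
    # Harmonic mean of recall and precision, defined as 0 when both are 0.
    hmean = 0.0 if denom == 0 else 2.0 * recall * precision / denom
    return recall, precision, hmean


assert hmean_sketch(2, 2, 2, 2)[2] == 1.0  # recall = precision = 1
assert hmean_sketch(0, 0, 2, 2)[2] == 0.0  # no hits at all
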
@ -0,0 +1,37 @@
|
|||
# Copyright (c) OpenMMLab. All rights reserved.
|
||||
from unittest import TestCase
|
||||
|
||||
import torch
|
||||
|
||||
from mmocr.models.common.layers.transformer_layers import (TFDecoderLayer,
|
||||
TFEncoderLayer)
|
||||
|
||||
|
||||
class TestTFEncoderLayer(TestCase):
|
||||
|
||||
def test_forward(self):
|
||||
encoder_layer = TFEncoderLayer()
|
||||
in_enc = torch.rand(1, 20, 512)
|
||||
out_enc = encoder_layer(in_enc)
|
||||
self.assertEqual(out_enc.shape, torch.Size([1, 20, 512]))
|
||||
|
||||
encoder_layer = TFEncoderLayer(
|
||||
operation_order=('self_attn', 'norm', 'ffn', 'norm'))
|
||||
out_enc = encoder_layer(in_enc)
|
||||
self.assertEqual(out_enc.shape, torch.Size([1, 20, 512]))
|
||||
|
||||
|
||||
class TestTFDecoderLayer(TestCase):
|
||||
|
||||
def test_forward(self):
|
||||
decoder_layer = TFDecoderLayer()
|
||||
in_dec = torch.rand(1, 30, 512)
|
||||
out_enc = torch.rand(1, 128, 512)
|
||||
out_dec = decoder_layer(in_dec, out_enc)
|
||||
self.assertEqual(out_dec.shape, torch.Size([1, 30, 512]))
|
||||
|
||||
decoder_layer = TFDecoderLayer(
|
||||
operation_order=('self_attn', 'norm', 'enc_dec_attn', 'norm',
|
||||
'ffn', 'norm'))
|
||||
out_dec = decoder_layer(in_dec, out_enc)
|
||||
self.assertEqual(out_dec.shape, torch.Size([1, 30, 512]))
|
|
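
Both layers preserve the query sequence shape: the encoder layer maps (1, 20, 512) back to (1, 20, 512), and the decoder layer keeps the target length of 30 while cross-attending over an encoder memory of length 128. A rough sketch of the self_attn -> norm -> ffn -> norm ordering built from plain torch modules, assuming a 512-dimensional model and otherwise arbitrary sizes; this is illustrative only, not the mmocr implementation:

import torch
import torch.nn as nn


class TinyEncoderLayer(nn.Module):
    """Illustrative block with operation order self_attn -> norm -> ffn -> norm."""

    def __init__(self, d_model=512, n_head=8, d_inner=2048):
        super().__init__()
        self.attn = nn.MultiheadAttention(d_model, n_head, batch_first=True)
        self.ffn = nn.Sequential(
            nn.Linear(d_model, d_inner), nn.ReLU(), nn.Linear(d_inner, d_model))
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)

    def forward(self, x):
        # Residual self-attention followed by a residual feed-forward block;
        # neither sublayer changes the (batch, length, d_model) shape.
        x = self.norm1(x + self.attn(x, x, x)[0])
        x = self.norm2(x + self.ffn(x))
        return x


x = torch.rand(1, 20, 512)
assert TinyEncoderLayer()(x).shape == torch.Size([1, 20, 512])
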
@ -0,0 +1,15 @@
|
|||
# Copyright (c) OpenMMLab. All rights reserved.
|
||||
from unittest import TestCase
|
||||
|
||||
import torch
|
||||
|
||||
from mmocr.models.common.modules import PositionalEncoding
|
||||
|
||||
|
||||
class TestPositionalEncoding(TestCase):
|
||||
|
||||
def test_forward(self):
|
||||
pos_encoder = PositionalEncoding()
|
||||
x = torch.rand(1, 30, 512)
|
||||
out = pos_encoder(x)
|
||||
assert out.size() == x.size()
|
|
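
The test only checks that adding positional information preserves the input shape. A minimal sinusoidal sketch in the style of the original Transformer, assuming a 512-dimensional embedding and a precomputed position table; it is illustrative and not necessarily mmocr's exact PositionalEncoding:

import torch


def sinusoidal_position_table(n_position=200, d_hid=512):
    """PE[pos, 2i] = sin(pos / 10000^(2i/d)); PE[pos, 2i + 1] uses cos."""
    position = torch.arange(n_position, dtype=torch.float32).unsqueeze(1)
    div_term = torch.pow(
        10000.0, torch.arange(0, d_hid, 2, dtype=torch.float32) / d_hid)
    table = torch.zeros(n_position, d_hid)
    table[:, 0::2] = torch.sin(position / div_term)
    table[:, 1::2] = torch.cos(position / div_term)
    return table  # shape (n_position, d_hid)


x = torch.rand(1, 30, 512)
out = x + sinusoidal_position_table()[:x.size(1)].unsqueeze(0)
assert out.size() == x.size()  # adding the table keeps the shape
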
@ -0,0 +1,42 @@
|
|||
# Copyright (c) OpenMMLab. All rights reserved.
|
||||
from unittest import TestCase
|
||||
|
||||
import torch
|
||||
|
||||
from mmocr.models.textrecog.layers.conv_layer import (BasicBlock, Bottleneck,
|
||||
conv1x1, conv3x3)
|
||||
|
||||
|
||||
class TestUtils(TestCase):
|
||||
|
||||
def test_conv3x3(self):
|
||||
conv = conv3x3(3, 6)
|
||||
self.assertEqual(conv.in_channels, 3)
|
||||
self.assertEqual(conv.out_channels, 6)
|
||||
self.assertEqual(conv.kernel_size, (3, 3))
|
||||
|
||||
def test_conv1x1(self):
|
||||
conv = conv1x1(3, 6)
|
||||
self.assertEqual(conv.in_channels, 3)
|
||||
self.assertEqual(conv.out_channels, 6)
|
||||
self.assertEqual(conv.kernel_size, (1, 1))
|
||||
|
||||
|
||||
class TestBasicBlock(TestCase):
|
||||
|
||||
def test_forward(self):
|
||||
x = torch.rand(1, 64, 224, 224)
|
||||
basic_block = BasicBlock(64, 64)
|
||||
self.assertEqual(basic_block.expansion, 1)
|
||||
out = basic_block(x)
|
||||
self.assertEqual(out.shape, torch.Size([1, 64, 224, 224]))
|
||||
|
||||
|
||||
class TestBottleneck(TestCase):
|
||||
|
||||
def test_forward(self):
|
||||
x = torch.rand(1, 64, 224, 224)
|
||||
bottle_neck = Bottleneck(64, 64, downsample=True)
|
||||
self.assertEqual(bottle_neck.expansion, 4)
|
||||
out = bottle_neck(x)
|
||||
self.assertEqual(out.shape, torch.Size([1, 256, 224, 224]))
|
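
The channel and kernel checks match the usual ResNet-style helpers, and the expansion values (1 for BasicBlock, 4 for Bottleneck) explain why the bottleneck output has 256 channels for 64 planes. A hedged sketch of what conv3x3 and conv1x1 typically wrap; the bias=False default here is an assumption, not taken from mmocr:

import torch.nn as nn


def conv3x3_sketch(in_planes, out_planes, stride=1):
    # 3x3 convolution with padding; bias=False is an assumed default.
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, bias=False)


def conv1x1_sketch(in_planes, out_planes, stride=1):
    # Pointwise convolution, typically used for channel projection.
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                     bias=False)


conv = conv3x3_sketch(3, 6)
assert (conv.in_channels, conv.out_channels, conv.kernel_size) == (3, 6, (3, 3))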