Experimenting with tiny test models: how small can they go and still be useful for regression tests?
parent 5517b054dd
commit 66a0eb4673
@@ -1834,6 +1834,18 @@ model_cfgs = dict(
         stem_type='one',
         stem_chs=64,
     ),
+
+    test_tiny_resnet=ByoModelCfg(
+        blocks=(
+            ByoBlockCfg(type='basic', d=1, c=24, s=1, gs=1, br=0.25),
+            ByoBlockCfg(type='basic', d=1, c=32, s=2, gs=1, br=0.25),
+            ByoBlockCfg(type='basic', d=1, c=64, s=2, gs=1, br=0.25),
+            ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=1, br=0.25),
+        ),
+        stem_chs=32,
+        stem_pool='maxpool',
+        act_layer='relu',
+    ),
 )
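For orientation, test_tiny_resnet keeps a single basic block per stage at widths 24/32/64/128 with stage strides 1/2/2/2. A back-of-the-envelope sketch (not part of the diff; it assumes the stem conv plus the 'maxpool' stem pool downsample by 4, as in the other BYOB configs) of the resulting output stride at the 160x160 test resolution:

# Rough arithmetic check, not from the diff.
stem_stride = 4                  # assumption: conv stem + maxpool, like the other BYOB stems
stage_strides = [1, 2, 2, 2]     # the s= values in the ByoBlockCfg entries above
reduction = stem_stride
for s in stage_strides:
    reduction *= s
print(reduction)         # 32
print(160 // reduction)  # 5 -> consistent with the pool_size=(5, 5) used for this model below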
@@ -2035,6 +2047,11 @@ default_cfgs = generate_default_cfgs({
         crop_pct=0.9,
         first_conv=('stem.conv_kxk.0.conv', 'stem.conv_scale.conv'),
     ),
+
+    'test_tiny_byobnet.untrained': _cfgr(
+        # hf_hub_id='timm/',
+        input_size=(3, 160, 160), crop_pct=0.875, pool_size=(5, 5),
+    ),
 })
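The .untrained cfg above pins the test resolution to 160x160 with crop_pct=0.875 and a 5x5 pooled feature map. A hedged sketch of how that cfg would drive preprocessing (assumes a recent timm with resolve_model_data_config, and that this branch is installed so the new model name resolves):

import timm
from timm.data import resolve_model_data_config, create_transform

model = timm.create_model('test_tiny_byobnet', pretrained=False)
data_cfg = resolve_model_data_config(model)          # pulls input_size / crop_pct from the cfg above
print(data_cfg['input_size'], data_cfg['crop_pct'])  # (3, 160, 160) 0.875
eval_tf = create_transform(**data_cfg, is_training=False)
print(eval_tf)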
@@ -2337,3 +2354,10 @@ def mobileone_s4(pretrained=False, **kwargs) -> ByobNet:
     """
     """
     return _create_byobnet('mobileone_s4', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def test_tiny_byobnet(pretrained=False, **kwargs) -> ByobNet:
+    """ Minimal test ResNet (BYOB based) model.
+    """
+    return _create_byobnet('test_tiny_byobnet', pretrained=pretrained, **kwargs)
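A minimal smoke-test sketch for the new registration (assumes this branch is installed so 'test_tiny_byobnet' is in the registry; the shape comment follows from the default 1000-class head):

import torch
import timm

model = timm.create_model('test_tiny_byobnet', pretrained=False)
x = torch.randn(1, 3, 160, 160)   # the 160x160 size from the .untrained cfg above
out = model(x)
print(out.shape)                  # torch.Size([1, 1000])
print(f'{sum(p.numel() for p in model.parameters()) / 1e6:.2f}M params')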
@@ -1056,6 +1056,31 @@ def _gen_tinynet(
     return model
 
 
+def _gen_test_efficientnet(
+        variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs):
+    """ Minimal test EfficientNet generator.
+    """
+    arch_def = [
+        ['cn_r1_k3_s1_e1_c16_skip'],
+        ['er_r1_k3_s2_e4_c24'],
+        ['er_r1_k3_s2_e4_c32'],
+        ['ir_r1_k3_s2_e4_c48_se0.25'],
+        ['ir_r1_k3_s2_e4_c64_se0.25'],
+    ]
+    round_chs_fn = partial(round_channels, multiplier=channel_multiplier, round_limit=0.)
+    model_kwargs = dict(
+        block_args=decode_arch_def(arch_def, depth_multiplier),
+        num_features=round_chs_fn(256),
+        stem_size=24,
+        round_chs_fn=round_chs_fn,
+        norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
+        act_layer=resolve_act_layer(kwargs, 'silu'),
+        **kwargs,
+    )
+    model = _create_effnet(variant, pretrained, **model_kwargs)
+    return model
+
+
 def _cfg(url='', **kwargs):
     return {
         'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
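The arch_def strings in the generator above follow the usual timm block-definition shorthand; a hedged reading of one entry (illustration only, not the library's actual parser):

# 'ir_r1_k3_s2_e4_c48_se0.25' -> inverted residual (MBConv) block, repeated once,
# 3x3 depthwise kernel, stride 2, expansion ratio 4, 48 output channels, SE ratio 0.25.
# 'cn' denotes a plain conv block and 'er' an edge/fused residual block in the same scheme.
block = 'ir_r1_k3_s2_e4_c48_se0.25'
print(block.split('_'))   # ['ir', 'r1', 'k3', 's2', 'e4', 'c48', 'se0.25']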
@@ -1584,6 +1609,10 @@ default_cfgs = generate_default_cfgs({
         input_size=(3, 106, 106), pool_size=(4, 4),  # int(224 * 0.475)
         url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_e.pth',
         hf_hub_id='timm/'),
+
+    "test_tiny_efficientnet.untrained": _cfg(
+        # hf_hub_id='timm/'
+        input_size=(3, 160, 160), pool_size=(5, 5)),
 })
@@ -2510,6 +2539,13 @@ def tinynet_e(pretrained=False, **kwargs) -> EfficientNet:
     return model
 
 
+@register_model
+def test_tiny_efficientnet(pretrained=False, **kwargs) -> EfficientNet:
+    model = _gen_test_efficientnet('test_tiny_efficientnet', pretrained=pretrained, **kwargs)
+    return model
+
+
 register_model_deprecations(__name__, {
     'tf_efficientnet_b0_ap': 'tf_efficientnet_b0.ap_in1k',
     'tf_efficientnet_b1_ap': 'tf_efficientnet_b1.ap_in1k',
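A hedged sketch of inspecting the new EfficientNet variant through features_only mode (assumes this branch is installed; the channel list in the comment is what the c16..c64 block defs above suggest, not a verified output):

import torch
import timm

model = timm.create_model('test_tiny_efficientnet', pretrained=False, features_only=True)
print(model.feature_info.channels())   # expected along the lines of [16, 24, 32, 48, 64]
x = torch.randn(1, 3, 160, 160)
for feat in model(x):
    print(feat.shape)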
@@ -1929,13 +1929,16 @@ default_cfgs = {
         hf_hub_id='timm/',
         num_classes=11821,
         input_size=(3, 256, 256), crop_pct=0.95),
-    'vit_base_patch16_reg4_gap_256': _cfg(
+    'vit_base_patch16_reg4_gap_256.untrained': _cfg(
         input_size=(3, 256, 256)),
 
-    'vit_so150m_patch16_reg4_gap_256': _cfg(
+    'vit_so150m_patch16_reg4_gap_256.untrained': _cfg(
         input_size=(3, 256, 256)),
-    'vit_so150m_patch16_reg4_map_256': _cfg(
+    'vit_so150m_patch16_reg4_map_256.untrained': _cfg(
         input_size=(3, 256, 256)),
+
+    'test_tiny_vit.untrained': _cfg(
+        input_size=(3, 160, 160), crop_pct=0.875),
 }
 
 _quick_gelu_cfgs = [
@@ -3106,6 +3109,15 @@ def vit_so150m_patch16_reg4_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
     return model
 
 
+@register_model
+def test_tiny_vit(pretrained: bool = False, **kwargs) -> VisionTransformer:
+    """ ViT-TestTiny
+    """
+    model_args = dict(patch_size=16, embed_dim=64, depth=4, num_heads=1, mlp_ratio=3)
+    model = _create_vision_transformer('test_tiny_vit', pretrained=pretrained, **dict(model_args, **kwargs))
+    return model
+
+
 register_model_deprecations(__name__, {
     'vit_tiny_patch16_224_in21k': 'vit_tiny_patch16_224.augreg_in21k',
     'vit_small_patch32_224_in21k': 'vit_small_patch32_224.augreg_in21k',
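Tying the three additions together, a sketch of the kind of regression check these tiny models are aimed at: one forward/backward pass per model (again assuming this branch is installed; test_tiny_vit above does not pass img_size, so it should default to 224, and 224 also works for the two conv nets, which is why 224 rather than the 160 cfg size is used here):

import torch
import timm

for name in ('test_tiny_byobnet', 'test_tiny_efficientnet', 'test_tiny_vit'):
    model = timm.create_model(name, pretrained=False, num_classes=10)
    x = torch.randn(2, 3, 224, 224)
    out = model(x)
    out.mean().backward()            # exercise the backward pass too
    assert out.shape == (2, 10)
    n_params = sum(p.numel() for p in model.parameters())
    print(f'{name}: {n_params / 1e6:.2f}M params')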