Add native PyTorch weights for MixNet-Medium, no SAME padding necessary. Remove an unused block of code.
commit 857f33015a (parent e7c8a37334)
@@ -75,6 +75,7 @@ I've leveraged the training scripts in this repository to train a few of the mod
 | resnext50d_32x4d | 79.674 (20.326) | 94.868 (5.132) | 25.1M | bicubic |
 | resnext50_32x4d | 78.512 (21.488) | 94.042 (5.958) | 25M | bicubic |
 | resnet50 | 78.470 (21.530) | 94.266 (5.734) | 25.6M | bicubic |
+| mixnet_m | 77.256 (22.744) | 93.418 (6.582) | 5.01M | bicubic |
 | seresnext26_32x4d | 77.104 (22.896) | 93.316 (6.684) | 16.8M | bicubic |
 | efficientnet_b0 | 76.912 (23.088) | 93.210 (6.790) | 5.29M | bicubic |
 | resnet26d | 76.68 (23.32) | 93.166 (6.834) | 16M | bicubic |
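As a rough sanity check of the new mixnet_m row above, the parameter count can be confirmed with a few lines. This is only a sketch: the import path is an assumption about this repo's layout, while the `mixnet_m` factory and its signature appear in the diff further below.

```python
# Sketch: confirm the ~5.01M parameter count listed for mixnet_m above.
# The module path is assumed; adjust it to wherever the factory lives in your checkout.
from timm.models.gen_efficientnet import mixnet_m

model = mixnet_m(pretrained=False, num_classes=1000, in_chans=3)
n_params = sum(p.numel() for p in model.parameters())
print(f'mixnet_m parameters: {n_params / 1e6:.2f}M')  # expected to be about 5.01M
```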
@@ -103,7 +103,8 @@ default_cfgs = {
         url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5-c6949ce9.pth',
         input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934),
     'mixnet_s': _cfg(url=''),
-    'mixnet_m': _cfg(url=''),
+    'mixnet_m': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_m-4647fc68.pth'),
     'mixnet_l': _cfg(url=''),
     'tf_mixnet_s': _cfg(
         url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_s-89d3354b.pth'),
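The new mixnet_m entry points `_cfg` at a natively trained checkpoint. As an illustration only (the repo's own `load_pretrained` helper is what actually consumes these configs, and it may differ), a URL like the one registered above can be resolved to a state dict with PyTorch's standard download-and-cache utility:

```python
import torch.utils.model_zoo as model_zoo

# Illustrative sketch, not the repo's load_pretrained: fetch and cache the
# checkpoint registered in default_cfgs above.
url = 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_m-4647fc68.pth'
state_dict = model_zoo.load_url(url, map_location='cpu')  # cached under TORCH_HOME after the first download
# model.load_state_dict(state_dict)  # assuming `model` was built with matching num_classes / in_chans
```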
@@ -271,13 +272,6 @@ def _decode_block_str(block_str, depth_multiplier=1.0):
     return [deepcopy(block_args) for _ in range(num_repeat)]
 
 
-def _decode_arch_args(string_list):
-    block_args = []
-    for block_str in string_list:
-        block_args.append(_decode_block_str(block_str))
-    return block_args
-
-
 def _decode_arch_def(arch_def, depth_multiplier=1.0):
     arch_args = []
     for stack_idx, block_strings in enumerate(arch_def):
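The removed `_decode_arch_args` only decoded a flat list of block strings and, per the commit message, had no callers; `_decode_arch_def`, kept directly below it, walks the nested per-stack structure instead. A minimal sketch of that shape difference, with a stub standing in for `_decode_block_str` and made-up block strings for illustration:

```python
def stub_decode_block_str(block_str, depth_multiplier=1.0):
    # Stand-in for the real _decode_block_str: just splits the option tokens.
    return block_str.split('_')

# What the deleted helper did: decode a flat list of block strings.
flat = [stub_decode_block_str(s) for s in ['ds_r1_k3_s1_e1_c16', 'ir_r2_k3_s2_e6_c24']]

# What _decode_arch_def handles: one list of block strings per stack (stage).
arch_def = [['ds_r1_k3_s1_e1_c16'], ['ir_r2_k3_s2_e6_c24', 'ir_r1_k5_s1_e6_c24']]
nested = [[stub_decode_block_str(s) for s in stack] for stack in arch_def]
```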
@@ -1612,8 +1606,8 @@ def mixnet_m(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
     model = _gen_mixnet_m(
         channel_multiplier=1.0, num_classes=num_classes, in_chans=in_chans, **kwargs)
     model.default_cfg = default_cfg
-    #if pretrained:
-    #    load_pretrained(model, default_cfg, num_classes, in_chans)
+    if pretrained:
+        load_pretrained(model, default_cfg, num_classes, in_chans)
     return model
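With the pretrained path now enabled, a quick smoke test could look like the sketch below; the import path is an assumption about the repo layout, while the factory name and signature match the hunk above.

```python
import torch
from timm.models.gen_efficientnet import mixnet_m  # assumed module path

model = mixnet_m(pretrained=True)  # downloads mixnet_m-4647fc68.pth on first use
model.eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))
print(logits.shape)  # torch.Size([1, 1000])
```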