Update README, tweak fine-tune effv2 model names.
parent c4f482a08b
commit 328249f11a
@@ -23,6 +23,15 @@ I'm fortunate to be able to dedicate significant time and money of my own suppor
 
 ## What's New
 
+### May 14, 2021
+* Add EfficientNet-V2 official model defs w/ ported weights from official [Tensorflow/Keras](https://github.com/google/automl/tree/master/efficientnetv2) impl.
+  * 1k trained variants: `tf_efficientnetv2_s/m/l`
+  * 21k trained variants: `tf_efficientnetv2_s/m/l_21k`
+  * 21k pretrained -> 1k fine-tuned: `tf_efficientnetv2_s/m/l_21ft1k`
+  * v2 models w/ v1 scaling: `tf_efficientnetv2_b0` through `b3`
+  * Rename my prev V2 guess `efficientnet_v2s` -> `efficientnetv2_rw_s`
+  * Some blank `efficientnetv2_*` models in-place for future native PyTorch training
+
 ### May 5, 2021
 * Add MLP-Mixer models and port pretrained weights from [Google JAX impl](https://github.com/google-research/vision_transformer/tree/linen)
 * Add CaiT models and pretrained weights from [FB](https://github.com/facebookresearch/deit)
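For illustration, the renamed fine-tuned variants are used like any other `timm` model. A minimal sketch, assuming an install that includes this commit (0.4.9+); the dummy input and printed shape are only for demonstration:

```python
import torch
import timm

# Create one of the renamed 21k-pretrained -> 1k fine-tuned variants.
# Note the shorter `_21ft1k` suffix introduced by this commit.
model = timm.create_model('tf_efficientnetv2_s_21ft1k', pretrained=True)
model.eval()

# Per the default_cfg in this commit, the S variant trains at 300x300
# and is evaluated at 384x384.
x = torch.randn(1, 3, 384, 384)
with torch.no_grad():
    logits = model(x)
print(logits.shape)  # torch.Size([1, 1000]) -- ImageNet-1k classes after fine-tuning
```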
@@ -1,4 +1,4 @@
-""" PyTorch EfficientNet Family
+""" The EfficientNet Family in PyTorch
 
 An implementation of EfficientNet that covers a variety of related models with efficient architectures:
 
@@ -25,6 +25,10 @@ An implementation of EfficientNet that covers a variety of related models with effi
 * And likely more...
 
+The majority of the above models (EfficientNet*, MixNet, MnasNet) and original weights were made available
+by Mingxing Tan, Quoc Le, and other members of their Google Brain team. Thanks for consistently releasing
+the models and weights open source!
+
 Hacked together by / Copyright 2021 Ross Wightman
 """
 from functools import partial
@@ -328,16 +332,16 @@ default_cfgs = {
         mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
         input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0),
 
-    'tf_efficientnetv2_s_21kft1k': _cfg(
-        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s_21kft1k-d7dafa41.pth',
+    'tf_efficientnetv2_s_21ft1k': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s_21ft1k-d7dafa41.pth',
         mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
         input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0),
-    'tf_efficientnetv2_m_21kft1k': _cfg(
-        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m_21kft1k-bf41664a.pth',
+    'tf_efficientnetv2_m_21ft1k': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m_21ft1k-bf41664a.pth',
         mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
         input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0),
-    'tf_efficientnetv2_l_21kft1k': _cfg(
-        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l_21kft1k-60127a9d.pth',
+    'tf_efficientnetv2_l_21ft1k': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l_21ft1k-60127a9d.pth',
         mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
         input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0),
 
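As a side note, the `_cfg(...)` entries above end up attached to each model as its `default_cfg`, so the train/test resolutions and normalization stats can be read back at runtime. A hedged sketch, with field names taken from the entries above; `pretrained=False` just avoids a weight download:

```python
import timm

# Instantiate without weights just to inspect the config attached via default_cfgs.
model = timm.create_model('tf_efficientnetv2_m_21ft1k', pretrained=False)
cfg = model.default_cfg

print(cfg['input_size'])        # (3, 384, 384) -- train-time resolution
print(cfg['test_input_size'])   # (3, 480, 480) -- eval-time resolution
print(cfg['mean'], cfg['std'])  # (0.5, 0.5, 0.5) each, i.e. 0.5-centered normalization
```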
@@ -1925,35 +1929,39 @@ def tf_efficientnetv2_l(pretrained=False, **kwargs):
 
 
 @register_model
-def tf_efficientnetv2_s_21kft1k(pretrained=False, **kwargs):
-    """ EfficientNet-V2 Small. Tensorflow compatible variant """
+def tf_efficientnetv2_s_21ft1k(pretrained=False, **kwargs):
+    """ EfficientNet-V2 Small. Pretrained on ImageNet-21k, fine-tuned on 1k. Tensorflow compatible variant
+    """
     kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
     kwargs['pad_type'] = 'same'
-    model = _gen_efficientnetv2_s('tf_efficientnetv2_s_21kft1k', pretrained=pretrained, **kwargs)
+    model = _gen_efficientnetv2_s('tf_efficientnetv2_s_21ft1k', pretrained=pretrained, **kwargs)
     return model
 
 
 @register_model
-def tf_efficientnetv2_m_21kft1k(pretrained=False, **kwargs):
-    """ EfficientNet-V2 Medium. Tensorflow compatible variant """
+def tf_efficientnetv2_m_21ft1k(pretrained=False, **kwargs):
+    """ EfficientNet-V2 Medium. Pretrained on ImageNet-21k, fine-tuned on 1k. Tensorflow compatible variant
+    """
     kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
     kwargs['pad_type'] = 'same'
-    model = _gen_efficientnetv2_m('tf_efficientnetv2_m_21kft1k', pretrained=pretrained, **kwargs)
+    model = _gen_efficientnetv2_m('tf_efficientnetv2_m_21ft1k', pretrained=pretrained, **kwargs)
     return model
 
 
 @register_model
-def tf_efficientnetv2_l_21kft1k(pretrained=False, **kwargs):
-    """ EfficientNet-V2 Large. Tensorflow compatible variant """
+def tf_efficientnetv2_l_21ft1k(pretrained=False, **kwargs):
+    """ EfficientNet-V2 Large. Pretrained on ImageNet-21k, fine-tuned on 1k. Tensorflow compatible variant
+    """
     kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
     kwargs['pad_type'] = 'same'
-    model = _gen_efficientnetv2_l('tf_efficientnetv2_l_21kft1k', pretrained=pretrained, **kwargs)
+    model = _gen_efficientnetv2_l('tf_efficientnetv2_l_21ft1k', pretrained=pretrained, **kwargs)
     return model
 
 
 @register_model
 def tf_efficientnetv2_s_21k(pretrained=False, **kwargs):
-    """ EfficientNet-V2 Small w/ ImageNet-21k pretrained weights. Tensorflow compatible variant """
+    """ EfficientNet-V2 Small w/ ImageNet-21k pretrained weights. Tensorflow compatible variant
+    """
     kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
     kwargs['pad_type'] = 'same'
     model = _gen_efficientnetv2_s('tf_efficientnetv2_s_21k', pretrained=pretrained, **kwargs)
@@ -1962,7 +1970,8 @@ def tf_efficientnetv2_s_21k(pretrained=False, **kwargs):
 
 
 @register_model
 def tf_efficientnetv2_m_21k(pretrained=False, **kwargs):
-    """ EfficientNet-V2 Medium w/ ImageNet-21k pretrained weights. Tensorflow compatible variant """
+    """ EfficientNet-V2 Medium w/ ImageNet-21k pretrained weights. Tensorflow compatible variant
+    """
     kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
     kwargs['pad_type'] = 'same'
     model = _gen_efficientnetv2_m('tf_efficientnetv2_m_21k', pretrained=pretrained, **kwargs)
@@ -1971,7 +1980,8 @@ def tf_efficientnetv2_m_21k(pretrained=False, **kwargs):
 
 
 @register_model
 def tf_efficientnetv2_l_21k(pretrained=False, **kwargs):
-    """ EfficientNet-V2 Large w/ ImageNet-21k pretrained weights. Tensorflow compatible variant """
+    """ EfficientNet-V2 Large w/ ImageNet-21k pretrained weights. Tensorflow compatible variant
+    """
     kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
     kwargs['pad_type'] = 'same'
     model = _gen_efficientnetv2_l('tf_efficientnetv2_l_21k', pretrained=pretrained, **kwargs)
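Since the functions above go through `@register_model`, the rename is visible in timm's model registry. A small sketch of how one might confirm which names resolve after this change; the wildcard pattern is illustrative:

```python
import timm

# List registered EfficientNet-V2 names that carry a 21k tag.
names = timm.list_models('tf_efficientnetv2_*21*')
print(names)
# Expected to include e.g. 'tf_efficientnetv2_s_21k' and 'tf_efficientnetv2_s_21ft1k';
# the old '*_21kft1k' names no longer resolve after this commit.
```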
@@ -1 +1 @@
-__version__ = '0.4.8'
+__version__ = '0.4.9'