From a9d0615f42189f4cd5a77121b84f21d4b096173e Mon Sep 17 00:00:00 2001
From: Ross Wightman
Date: Wed, 26 Jul 2023 20:46:27 -0700
Subject: [PATCH] Fix ijepa vit issue with 448 model, minor formatting fixes

---
 timm/models/vision_transformer.py | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/timm/models/vision_transformer.py b/timm/models/vision_transformer.py
index c072f13a..025a01a8 100644
--- a/timm/models/vision_transformer.py
+++ b/timm/models/vision_transformer.py
@@ -1466,6 +1466,7 @@ default_cfgs = generate_default_cfgs({
         url='https://dl.fbaipublicfiles.com/ijepa/IN1K-vit.h.16-448px-300e.pth.tar',
         # hf_hub_id='timm/',
         license='cc-by-nc-4.0',
+        input_size=(3, 448, 448), crop_pct=1.0,
         mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0),
     'vit_gigantic_patch16_224_ijepa.in22k': _cfg(
         url='https://dl.fbaipublicfiles.com/ijepa/IN22K-vit.g.16-600e.pth.tar',
@@ -2066,22 +2067,28 @@ def vit_giant_patch14_dinov2(pretrained=False, **kwargs) -> VisionTransformer:
         'vit_giant_patch14_dinov2', pretrained=pretrained, **dict(model_args, **kwargs))
     return model
 
+
 @register_model
 def vit_huge_patch14_224_ijepa(pretrained=False, **kwargs) -> VisionTransformer:
     """ ViT-Huge model (ViT-H/14) from `I-JEPA` - https://arxiv.org/abs/2301.08243
     """
     model_args = dict(patch_size=14, embed_dim=1280, depth=32, num_heads=16, class_token=False, global_pool='avg')
-    model = _create_vision_transformer('vit_huge_patch14_224_ijepa', pretrained=pretrained, **dict(model_args, **kwargs))
+    model = _create_vision_transformer(
+        'vit_huge_patch14_224_ijepa', pretrained=pretrained, **dict(model_args, **kwargs))
     return model
 
+
 @register_model
 def vit_huge_patch16_448_ijepa(pretrained=False, **kwargs) -> VisionTransformer:
     """ ViT-Huge model (ViT-H/16) from `I-JEPA` - https://arxiv.org/abs/2301.08243
     """
-    model_args = dict(patch_size=16, embed_dim=1280, depth=32, num_heads=16, class_token=False, global_pool='avg', img_size=448)
-    model = _create_vision_transformer('vit_huge_patch16_448_ijepa', pretrained=pretrained, **dict(model_args, **kwargs))
+    model_args = dict(
+        patch_size=16, embed_dim=1280, depth=32, num_heads=16, class_token=False, global_pool='avg', img_size=448)
+    model = _create_vision_transformer(
+        'vit_huge_patch16_448_ijepa', pretrained=pretrained, **dict(model_args, **kwargs))
     return model
 
+
 @register_model
 def vit_gigantic_patch16_224_ijepa(pretrained=False, **kwargs) -> VisionTransformer:
     """ ViT-Gigantic (big-G) model (ViT-G/16) from `I-JEPA - https://arxiv.org/abs/2301.08243
@@ -2091,6 +2098,7 @@ def vit_gigantic_patch16_224_ijepa(pretrained=False, **kwargs) -> VisionTransfor
         'vit_gigantic_patch16_224_ijepa', pretrained=pretrained, **dict(model_args, **kwargs))
     return model
 
+
 register_model_deprecations(__name__, {
     'vit_tiny_patch16_224_in21k': 'vit_tiny_patch16_224.augreg_in21k',
     'vit_small_patch32_224_in21k': 'vit_small_patch32_224.augreg_in21k',