diff --git a/timm/models/byobnet.py b/timm/models/byobnet.py
index a368a4b0..105a55b9 100644
--- a/timm/models/byobnet.py
+++ b/timm/models/byobnet.py
@@ -2375,7 +2375,7 @@ default_cfgs = generate_default_cfgs({
         input_size=(3, 224, 224), pool_size=(7, 7),
     ),
     'resnet50_clip_gap.yfcc15m': _cfgr(
-        hf_hub_id='timm/resnet50_clip.cc12m',
+        hf_hub_id='timm/resnet50_clip.yfcc15m',
         hf_hub_filename='open_clip_pytorch_model.bin',
         num_classes=0, mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
         input_size=(3, 224, 224), pool_size=(7, 7),
diff --git a/timm/models/vision_transformer.py b/timm/models/vision_transformer.py
index ba5d1958..8bc09e94 100644
--- a/timm/models/vision_transformer.py
+++ b/timm/models/vision_transformer.py
@@ -1657,50 +1657,42 @@ default_cfgs = {
         crop_pct=1.0, input_size=(3, 378, 378), num_classes=1024),
     'vit_base_patch32_clip_224.metaclip_2pt5b': _cfg(
-        hf_hub_id='timm/',
-        hf_hub_filename='open_clip_pytorch_model.bin',
+        hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin',
         license='cc-by-nc-4.0',
         notes=('natively QuickGELU, use quickgelu model variant for original results',),
         mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512),
     'vit_base_patch16_clip_224.metaclip_2pt5b': _cfg(
-        hf_hub_id='timm/',
-        hf_hub_filename='open_clip_pytorch_model.bin',
+        hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin',
         license='cc-by-nc-4.0',
         notes=('natively QuickGELU, use quickgelu model variant for original results',),
         mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512),
     'vit_large_patch14_clip_224.metaclip_2pt5b': _cfg(
-        hf_hub_id='timm/',
-        hf_hub_filename='open_clip_pytorch_model.bin',
+        hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin',
         license='cc-by-nc-4.0',
         notes=('natively QuickGELU, use quickgelu model variant for original results',),
         mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=768),
     'vit_huge_patch14_clip_224.metaclip_2pt5b': _cfg(
-        hf_hub_id='timm/',
-        hf_hub_filename='open_clip_pytorch_model.bin',
+        hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin',
         license='cc-by-nc-4.0',
         notes=('natively QuickGELU, use quickgelu model variant for original results',),
         mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1024),
     'vit_gigantic_patch14_clip_224.metaclip_2pt5b': _cfg(
-        hf_hub_id='timm/',
-        hf_hub_filename='open_clip_pytorch_model.bin',
+        hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin',
         license='cc-by-nc-4.0',
         notes=('natively QuickGELU, use quickgelu model variant for original results',),
         mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1280),
     'vit_base_patch32_clip_224.metaclip_400m': _cfg(
-        hf_hub_id='timm/',
-        hf_hub_filename='open_clip_pytorch_model.bin',
+        hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin',
         license='cc-by-nc-4.0',
         notes=('natively QuickGELU, use quickgelu model variant for original results',),
         mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512),
     'vit_base_patch16_clip_224.metaclip_400m': _cfg(
-        hf_hub_id='timm/',
-        hf_hub_filename='open_clip_pytorch_model.bin',
+        hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin',
         license='cc-by-nc-4.0',
         notes=('natively QuickGELU, use quickgelu model variant for original results',),
         mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512),
     'vit_large_patch14_clip_224.metaclip_400m': _cfg(
-        hf_hub_id='timm/',
-        hf_hub_filename='open_clip_pytorch_model.bin',
+        hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin',
         license='cc-by-nc-4.0',
         notes=('natively QuickGELU, use quickgelu model variant for original results',),
         mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=768),
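For reference, a minimal usage sketch of the configs this diff touches, assuming a timm build that includes them. The `quickgelu` tag below follows the naming pattern the `notes=` field points to and is an assumption, not something taken from this diff:

```python
import timm

# With the byobnet.py fix, the GAP variant resolves to the matching
# YFCC-15M CLIP weights on the Hub instead of the CC-12M ones.
model = timm.create_model('resnet50_clip_gap.yfcc15m', pretrained=True)

# The MetaCLIP ViT configs are natively QuickGELU; per the notes= field,
# the quickgelu model variant reproduces the original activations
# (tag name assumed to mirror the non-quickgelu config above).
model_qg = timm.create_model(
    'vit_base_patch16_clip_quickgelu_224.metaclip_2pt5b',
    pretrained=True,
)
```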