mirror of
https://github.com/huggingface/pytorch-image-models.git
synced 2025-06-03 15:01:08 +08:00
Add vit_little in12k + in12k-ft-in1k weights
This commit is contained in:
parent
3c0283f9ef
commit
5dce710101
@ -1872,6 +1872,13 @@ default_cfgs = {
|
||||
'vit_pwee_patch16_reg1_gap_256.sbb_in1k': _cfg(
|
||||
hf_hub_id='timm/',
|
||||
input_size=(3, 256, 256), crop_pct=0.95),
|
||||
'vit_little_patch16_reg1_gap_256.sbb_in12k_ft_in1k': _cfg(
|
||||
hf_hub_id='timm/',
|
||||
input_size=(3, 256, 256), crop_pct=0.95),
|
||||
'vit_little_patch16_reg1_gap_256.sbb_in12k': _cfg(
|
||||
hf_hub_id='timm/',
|
||||
num_classes=11821,
|
||||
input_size=(3, 256, 256), crop_pct=0.95),
|
||||
'vit_little_patch16_reg4_gap_256.sbb_in1k': _cfg(
|
||||
hf_hub_id='timm/',
|
||||
input_size=(3, 256, 256), crop_pct=0.95),
|
||||
@ -2975,6 +2982,17 @@ def vit_pwee_patch16_reg1_gap_256(pretrained: bool = False, **kwargs) -> VisionT
|
||||
return model
|
||||
|
||||
|
||||
@register_model
def vit_little_patch16_reg1_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ViT-Little: patch16 ViT variant with 1 register token and global average pooling.

    Architecture (per model_args): embed_dim=320, depth=14, num_heads=5,
    mlp_ratio=5.6, layer-scale init_values=1e-5, no class token.
    """
    defaults = dict(
        patch_size=16,
        embed_dim=320,
        depth=14,
        num_heads=5,
        init_values=1e-5,
        mlp_ratio=5.6,
        class_token=False,
        no_embed_class=True,
        reg_tokens=1,
        global_pool='avg',
    )
    # Caller-supplied kwargs take precedence over the architecture defaults.
    return _create_vision_transformer(
        'vit_little_patch16_reg1_gap_256', pretrained=pretrained, **{**defaults, **kwargs})
|
||||
|
||||
|
||||
@register_model
|
||||
def vit_little_patch16_reg4_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
|
||||
model_args = dict(
|
||||
|
Loading…
x
Reference in New Issue
Block a user