Mirror of https://github.com/huggingface/pytorch-image-models.git (synced 2025-06-03 15:01:08 +08:00)
Remove duplicate so400m/16 @ 256 model def
parent 9758e0b8b0
commit f63a11cf81
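Why the duplicate is worth removing beyond tidiness: both defs share the same function name in the same module, so Python rebinds the name to the second def, and timm's @register_model decorator keys its registry on the function's __name__, so the later registration (here, the one adding act_layer='gelu_tanh') presumably wins silently. A minimal sketch of that shadowing behaviour, using a stand-in registry rather than timm's actual internals:

_model_registry = {}

def register_model(fn):
    # A timm-style registry keyed on the plain function name.
    _model_registry[fn.__name__] = fn
    return fn

@register_model
def vit_so400m_patch16_siglip_gap_256():
    return 'first def (no act_layer)'

@register_model
def vit_so400m_patch16_siglip_gap_256():  # same name: rebinds and re-registers
    return "second def (act_layer='gelu_tanh')"

print(_model_registry['vit_so400m_patch16_siglip_gap_256']())
# -> second def (act_layer='gelu_tanh')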
@@ -3499,18 +3499,6 @@ def vit_so400m_patch14_siglip_gap_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
     return model
 
 
-@register_model
-def vit_so400m_patch16_siglip_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
-    """ A SigLIP variant of ViT with global average pooling (GAP) instead of attention pooling (MAP)."""
-    model_args = dict(
-        patch_size=16, embed_dim=1152, depth=27, num_heads=16, mlp_ratio=3.7362,
-        class_token=False, global_pool='avg', fc_norm=False,
-    )
-    model = _create_vision_transformer(
-        'vit_so400m_patch16_siglip_gap_256', pretrained=pretrained, **dict(model_args, **kwargs))
-    return model
-
-
 @register_model
 def vit_so400m_patch14_siglip_gap_378(pretrained: bool = False, **kwargs) -> VisionTransformer:
     """ A SigLIP variant of ViT with global average pooling (GAP) instead of attention pooling (MAP)."""
@@ -3561,9 +3549,10 @@ def vit_so400m_patch14_siglip_gap_896(pretrained: bool = False, **kwargs) -> VisionTransformer:
 
 @register_model
 def vit_so400m_patch16_siglip_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
+    """ A SigLIP variant of ViT with global average pooling (GAP) instead of attention pooling (MAP)."""
     model_args = dict(
-        patch_size=16, embed_dim=1152, depth=27, num_heads=16, mlp_ratio=3.7362, class_token=False,
-        global_pool='avg', fc_norm=False, act_layer='gelu_tanh'
+        patch_size=16, embed_dim=1152, depth=27, num_heads=16, mlp_ratio=3.7362,
+        class_token=False, global_pool='avg', fc_norm=False, act_layer='gelu_tanh',
     )
     model = _create_vision_transformer(
         'vit_so400m_patch16_siglip_gap_256', pretrained=pretrained, **dict(model_args, **kwargs))
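With the duplicate gone, a single registration remains and creating the model by name is unambiguous. A short usage sketch via timm's public create_model API (pretrained=False builds randomly initialized weights, so no checkpoint download is involved):

import timm

# Instantiate the surviving so400m/16 @ 256 SigLIP-GAP variant by its registered name.
model = timm.create_model('vit_so400m_patch16_siglip_gap_256', pretrained=False)
print(type(model).__name__)  # VisionTransformer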