From d81f75b461b0f32106614404d9ee8ce1ba375556 Mon Sep 17 00:00:00 2001
From: Ross Wightman
Date: Wed, 13 Sep 2023 15:47:51 -0700
Subject: [PATCH] Remove patch dropout layer as it should be integrated into
 packing

---
 timm/models/vision_transformer_packed.py | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/timm/models/vision_transformer_packed.py b/timm/models/vision_transformer_packed.py
index 382113b2..860f300a 100644
--- a/timm/models/vision_transformer_packed.py
+++ b/timm/models/vision_transformer_packed.py
@@ -603,12 +603,6 @@ class VisionTransformerPacked(nn.Module):
         self.pos_embed_h = nn.Parameter(torch.randn(grid_h, embed_dim) * .02)
         self.pos_embed_w = nn.Parameter(torch.randn(grid_w, embed_dim) * .02)
         self.pos_drop = nn.Dropout(p=pos_drop_rate)
-        if patch_drop_rate > 0:
-            self.patch_drop = PatchDropout(
-                patch_drop_rate,
-            )
-        else:
-            self.patch_drop = nn.Identity()
         self.norm_pre = norm_layer(embed_dim) if pre_norm else nn.Identity()
         dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
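
Note: the commit message says only that patch dropout "should be integrated into packing"; the patch itself does not show how. One plausible reading is that, in a packed (variable-resolution) sequence layout, tokens are easier to drop per image while the packed sequence is being built than via a standalone PatchDropout module applied afterwards. The snippet below is a minimal sketch of that idea under this assumption; the function name pack_patches_with_dropout and its signature are hypothetical and are not part of timm.

    import torch

    def pack_patches_with_dropout(patch_tokens, patch_drop_rate=0.0, training=True):
        # patch_tokens: (num_patches, embed_dim) tokens for one image, before packing.
        # Returns the kept tokens plus their indices, so matching position
        # embeddings can be gathered with the same indices.
        num_patches = patch_tokens.shape[0]
        if not training or patch_drop_rate <= 0.:
            keep_indices = torch.arange(num_patches, device=patch_tokens.device)
            return patch_tokens, keep_indices
        num_keep = max(1, int(num_patches * (1. - patch_drop_rate)))
        keep_indices = torch.randperm(num_patches, device=patch_tokens.device)[:num_keep]
        return patch_tokens[keep_indices], keep_indices

In this sketch, the caller would invoke the function once per image while concatenating tokens into the packed batch, so the per-image sequence lengths and attention masks are computed from the already-reduced token counts rather than being patched up by a later dropout layer.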