fix: resolve a DeprecationWarning & style issues

pull/1347/head
gaotingquan 2021-09-30 10:37:32 +00:00 committed by Tingquan Gao
parent ba2dd01a13
commit 5025d09fd8
1 changed file with 10 additions and 28 deletions

View File

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Callable
from collections.abc import Callable
import numpy as np
import paddle
@ -331,9 +331,7 @@ def _load_pretrained(pretrained, model, model_url, use_ssld=False):
)
def ViT_small_patch16_224(pretrained=False,
use_ssld=False,
**kwargs):
def ViT_small_patch16_224(pretrained=False, use_ssld=False, **kwargs):
model = VisionTransformer(
patch_size=16,
embed_dim=768,
@ -350,9 +348,7 @@ def ViT_small_patch16_224(pretrained=False,
return model
def ViT_base_patch16_224(pretrained=False,
use_ssld=False,
**kwargs):
def ViT_base_patch16_224(pretrained=False, use_ssld=False, **kwargs):
model = VisionTransformer(
patch_size=16,
embed_dim=768,
@ -370,9 +366,7 @@ def ViT_base_patch16_224(pretrained=False,
return model
def ViT_base_patch16_384(pretrained=False,
use_ssld=False,
**kwargs):
def ViT_base_patch16_384(pretrained=False, use_ssld=False, **kwargs):
model = VisionTransformer(
img_size=384,
patch_size=16,
@ -391,9 +385,7 @@ def ViT_base_patch16_384(pretrained=False,
return model
def ViT_base_patch32_384(pretrained=False,
use_ssld=False,
**kwargs):
def ViT_base_patch32_384(pretrained=False, use_ssld=False, **kwargs):
model = VisionTransformer(
img_size=384,
patch_size=32,
@ -412,9 +404,7 @@ def ViT_base_patch32_384(pretrained=False,
return model
def ViT_large_patch16_224(pretrained=False,
use_ssld=False,
**kwargs):
def ViT_large_patch16_224(pretrained=False, use_ssld=False, **kwargs):
model = VisionTransformer(
patch_size=16,
embed_dim=1024,
@ -432,9 +422,7 @@ def ViT_large_patch16_224(pretrained=False,
return model
def ViT_large_patch16_384(pretrained=False,
use_ssld=False,
**kwargs):
def ViT_large_patch16_384(pretrained=False, use_ssld=False, **kwargs):
model = VisionTransformer(
img_size=384,
patch_size=16,
@ -453,9 +441,7 @@ def ViT_large_patch16_384(pretrained=False,
return model
def ViT_large_patch32_384(pretrained=False,
use_ssld=False,
**kwargs):
def ViT_large_patch32_384(pretrained=False, use_ssld=False, **kwargs):
model = VisionTransformer(
img_size=384,
patch_size=32,
@ -474,9 +460,7 @@ def ViT_large_patch32_384(pretrained=False,
return model
def ViT_huge_patch16_224(pretrained=False,
use_ssld=False,
**kwargs):
def ViT_huge_patch16_224(pretrained=False, use_ssld=False, **kwargs):
model = VisionTransformer(
patch_size=16,
embed_dim=1280,
@ -492,9 +476,7 @@ def ViT_huge_patch16_224(pretrained=False,
return model
def ViT_huge_patch32_384(pretrained=False,
use_ssld=False,
**kwargs):
def ViT_huge_patch32_384(pretrained=False, use_ssld=False, **kwargs):
model = VisionTransformer(
img_size=384,
patch_size=32,