pre-commit check

pull/1727/head
zeyuanyin 2023-07-26 17:55:50 +04:00
parent b6117a4c18
commit 92a87a8848
19 changed files with 84 additions and 91 deletions
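
All 19 files below receive the same class of mechanical fixes from pre-commit hooks: import sorting and de-duplication in isort style (third-party mmcv, mmengine and torch ahead of the first-party mmpretrain group), yapf-style line wrapping, and a trailing newline added at the end of each file. As a minimal sketch, the first hunk's fix can be reproduced with isort's Python API; the known_first_party setting below mirrors a typical OpenMMLab isort config and is an assumption, not read from this commit:

# Sketch only: assumes isort >= 5 is installed.
import isort

messy = (
    "from mmpretrain.datasets import (ImageNet, ColorJitter, GaussianBlur, ImageNet,\n"
    "                                 MultiView, PackInputs, RandomResizedCrop)\n"
)

# isort.code() returns the snippet with the names sorted alphabetically and
# the duplicate ImageNet dropped, which should match the '+' line in the
# first hunk below.
print(isort.code(messy, line_length=79, known_first_party=["mmpretrain"]))

Running pre-commit run --all-files locally applies the same hooks to every file before committing.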

View File

@@ -4,7 +4,7 @@ from mmcv.transforms import (LoadImageFromFile, RandomApply, RandomFlip,
                              RandomGrayscale)
 from mmengine.dataset import DefaultSampler, default_collate
-from mmpretrain.datasets import (ImageNet, ColorJitter, GaussianBlur, ImageNet,
+from mmpretrain.datasets import (ColorJitter, GaussianBlur, ImageNet,
                                  MultiView, PackInputs, RandomResizedCrop)
 from mmpretrain.models import SelfSupDataPreprocessor

View File

@@ -1,13 +1,11 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 # This is a BETA new format config file, and the usage may change recently.
+from mmcv.transforms import LoadImageFromFile, RandomFlip
 from mmengine.dataset import DefaultSampler
-from mmcv.transforms import (LoadImageFromFile, RandomFlip)
-from mmpretrain.datasets import (ImageNet, CenterCrop, LoadImageFromFile,
+
+from mmpretrain.datasets import (CenterCrop, ImageNet, LoadImageFromFile,
                                  PackInputs, RandomFlip, RandomResizedCrop,
                                  ResizeEdge)
 from mmpretrain.evaluation import Accuracy

 # dataset settings
@@ -60,4 +58,4 @@ val_evaluator = dict(type=Accuracy, topk=(1, 5))

 # If you want standard test, please manually configure the test dataset
 test_dataloader = val_dataloader
-test_evaluator = val_evaluator
\ No newline at end of file
+test_evaluator = val_evaluator

View File

@@ -1,17 +1,14 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 # This is a BETA new format config file, and the usage may change recently.
+from mmcv.transforms import LoadImageFromFile
 from mmengine.dataset import DefaultSampler
-from mmcv.transforms import LoadImageFromFile
-from mmpretrain.datasets import (ImageNet, CenterCrop, LoadImageFromFile,
+
+from mmpretrain.datasets import (CenterCrop, ImageNet, LoadImageFromFile,
                                  PackInputs, RandomFlip, RandomResizedCrop,
                                  ResizeEdge)
 from mmpretrain.datasets.transforms import AutoAugment
 from mmpretrain.evaluation import Accuracy

 # dataset settings
 dataset_type = ImageNet
 data_preprocessor = dict(
@@ -79,4 +76,4 @@ val_evaluator = dict(type=Accuracy, topk=(1, 5))

 # If you want standard test, please manually configure the test dataset
 test_dataloader = val_dataloader
-test_evaluator = val_evaluator
\ No newline at end of file
+test_evaluator = val_evaluator

View File

@@ -1,9 +1,10 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 # This is a BETA new format config file, and the usage may change recently.
 from mmengine.dataset import DefaultSampler
-from mmpretrain.datasets import (ImageNet, LoadImageFromFile, PackInputs,
-                                 RandomFlip, RandomResizedCrop, CenterCrop, ResizeEdge,
-                                 RandAugment, RandomErasing)
+
+from mmpretrain.datasets import (CenterCrop, ImageNet, LoadImageFromFile,
+                                 PackInputs, RandAugment, RandomErasing,
+                                 RandomFlip, RandomResizedCrop, ResizeEdge)
 from mmpretrain.evaluation import Accuracy

 # dataset settings
@@ -85,4 +86,4 @@ val_evaluator = dict(type=Accuracy, topk=(1, 5))

 # If you want standard test, please manually configure the test dataset
 test_dataloader = val_dataloader
-test_evaluator = val_evaluator
\ No newline at end of file
+test_evaluator = val_evaluator

View File

@@ -1,9 +1,10 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 # This is a BETA new format config file, and the usage may change recently.
-from mmpretrain.models import (VisionTransformer, ImageClassifier, VisionTransformerClsHead, CrossEntropyLoss)
 from mmengine.model.weight_init import KaimingInit
+
+from mmpretrain.models import (CrossEntropyLoss, ImageClassifier,
+                               VisionTransformer, VisionTransformerClsHead)

 # model settings
 model = dict(
     type=ImageClassifier,
@@ -27,4 +28,4 @@ model = dict(
         in_channels=1024,
         loss=dict(type=CrossEntropyLoss, loss_weight=1.0),
         topk=(1, 5),
-    ))
\ No newline at end of file
+    ))

View File

@@ -1,8 +1,10 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 # This is a BETA new format config file, and the usage may change recently.
-from mmpretrain.models import (VisionTransformer, ImageClassifier, VisionTransformerClsHead, CrossEntropyLoss)
 from mmengine.model.weight_init import KaimingInit
+
+from mmpretrain.models import (CrossEntropyLoss, ImageClassifier,
+                               VisionTransformer, VisionTransformerClsHead)

 # model settings
 model = dict(
     type=ImageClassifier,
@@ -26,4 +28,4 @@ model = dict(
         in_channels=768,
         loss=dict(type=CrossEntropyLoss, loss_weight=1.0),
         topk=(1, 5),
-    ))
\ No newline at end of file
+    ))

View File

@@ -1,8 +1,10 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 # This is a BETA new format config file, and the usage may change recently.
-from mmpretrain.models import (VisionTransformer, ImageClassifier, VisionTransformerClsHead, CrossEntropyLoss)
 from mmengine.model.weight_init import KaimingInit
+
+from mmpretrain.models import (CrossEntropyLoss, ImageClassifier,
+                               VisionTransformer, VisionTransformerClsHead)

 # model settings
 model = dict(
     type=ImageClassifier,
@@ -26,4 +28,4 @@ model = dict(
         in_channels=1024,
         loss=dict(type=CrossEntropyLoss, loss_weight=1.0),
         topk=(1, 5),
-    ))
\ No newline at end of file
+    ))

View File

@@ -1,8 +1,10 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 # This is a BETA new format config file, and the usage may change recently.
-from mmpretrain.models import (VisionTransformer, ImageClassifier, VisionTransformerClsHead, CrossEntropyLoss)
 from mmengine.model.weight_init import KaimingInit
+
+from mmpretrain.models import (CrossEntropyLoss, ImageClassifier,
+                               VisionTransformer, VisionTransformerClsHead)

 # model settings
 model = dict(
     type=ImageClassifier,
@@ -26,4 +28,4 @@ model = dict(
         in_channels=1024,
         loss=dict(type=CrossEntropyLoss, loss_weight=1.0),
         topk=(1, 5),
-    ))
\ No newline at end of file
+    ))

View File

@@ -1,7 +1,6 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 # This is a BETA new format config file, and the usage may change recently.
 from mmengine.optim import CosineAnnealingLR, LinearLR
 from torch.optim import AdamW

-
 # optimizer
@@ -42,4 +41,4 @@ test_cfg = dict()

 # NOTE: `auto_scale_lr` is for automatically scaling LR,
 # based on the actual training batch size.
-auto_scale_lr = dict(base_batch_size=4096)
\ No newline at end of file
+auto_scale_lr = dict(base_batch_size=4096)

View File

@@ -1,17 +1,17 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 # This is a BETA new format config file, and the usage may change recently.
 from mmengine.config import read_base
-from mmpretrain.models import Mixup, CutMix
-from mmpretrain.models import (VisionTransformer, ImageClassifier, VisionTransformerClsHead, LabelSmoothLoss,
-                               TruncNormalInit, ConstantInit)
-from mmpretrain.engine import EMAHook
 from torch.optim import AdamW
+
+from mmpretrain.engine import EMAHook
+from mmpretrain.models import (ConstantInit, CutMix, ImageClassifier,
+                               LabelSmoothLoss, Mixup, TruncNormalInit,
+                               VisionTransformer, VisionTransformerClsHead)

 with read_base():
     from .._base_.datasets.imagenet_bs64_swin_224 import *
-    from .._base_.schedules.imagenet_bs1024_adamw_swin import *
     from .._base_.default_runtime import *
+    from .._base_.schedules.imagenet_bs1024_adamw_swin import *

 # model settings
 model = dict(
@@ -27,17 +27,15 @@ model = dict(
         type=VisionTransformerClsHead,
         num_classes=1000,
         in_channels=768,
-        loss=dict(
-            type=LabelSmoothLoss, label_smooth_val=0.1, mode='original'),
+        loss=dict(type=LabelSmoothLoss, label_smooth_val=0.1, mode='original'),
     ),
     init_cfg=[
         dict(type=TruncNormalInit, layer='Linear', std=.02),
         dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.),
     ],
-    train_cfg=dict(augments=[
-        dict(type=Mixup, alpha=0.8),
-        dict(type=CutMix, alpha=1.0)
-    ]))
+    train_cfg=dict(
+        augments=[dict(type=Mixup, alpha=0.8),
+                  dict(type=CutMix, alpha=1.0)]))

 # dataset settings
 train_dataloader.update(batch_size=128)
@@ -64,4 +62,4 @@ custom_hooks = [dict(type=EMAHook, momentum=1e-4)]
 # NOTE: `auto_scale_lr` is for automatically scaling LR
 # based on the actual training batch size.
 # base_batch_size = (32 GPUs) x (128 samples per GPU)
-auto_scale_lr.update(base_batch_size=4096)
\ No newline at end of file
+auto_scale_lr.update(base_batch_size=4096)

View File

@@ -1,18 +1,19 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 # This is a BETA new format config file, and the usage may change recently.
+from mmcv.transforms import (CenterCrop, ImageToTensor, Normalize, Resize,
+                             ToTensor)
 from mmengine.config import read_base
 from mmengine.model import PretrainedInit
 from mmengine.optim import CosineAnnealingLR, LinearLR
-from mmcv.transforms import (Normalize, ImageToTensor, ToTensor, Resize, CenterCrop)
-from mmengine.runner import IterBasedRunner, CheckpointHook
+from mmengine.runner import CheckpointHook, IterBasedRunner
+from torch.optim import SGD

 from mmpretrain.datasets import Collect
-from torch.optim import SGD

 with read_base():
     from .._base_.datasets.imagenet_bs64_pil_resize_autoaug import *
-    from .._base_.models.vit_base_p16 import *
     from .._base_.default_runtime import *
+    from .._base_.models.vit_base_p16 import *

 # specific to vit pretrain
 paramwise_cfg = dict(custom_keys={
@@ -121,4 +122,4 @@ runner = dict(

 default_hooks = dict(checkpoint=dict(type=CheckpointHook, interval=1000))

-fp16 = dict(loss_scale=256.0, velocity_accum_type='half', accum_type='half')
\ No newline at end of file
+fp16 = dict(loss_scale=256.0, velocity_accum_type='half', accum_type='half')

View File

@@ -5,10 +5,10 @@ from mmengine.config import read_base
 from mmpretrain.models import Mixup

 with read_base():
-    from .._base_.models.vit_base_p16 import *
     from .._base_.datasets.imagenet_bs64_pil_resize_autoaug import *
-    from .._base_.schedules.imagenet_bs4096_adamw import *
     from .._base_.default_runtime import *
+    from .._base_.models.vit_base_p16 import *
+    from .._base_.schedules.imagenet_bs4096_adamw import *

 # model setting
 model.update(
@@ -17,4 +17,4 @@ model.update(
 )

 # schedule setting
-optim_wrapper.update(clip_grad=dict(max_norm=1.0))
\ No newline at end of file
+optim_wrapper.update(clip_grad=dict(max_norm=1.0))

View File

@@ -1,19 +1,16 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 # This is a BETA new format config file, and the usage may change recently.
+from mmcv.transforms import LoadImageFromFile, RandomFlip
 from mmengine.config import read_base
-from mmcv.transforms import (LoadImageFromFile, RandomFlip)
-from mmpretrain.datasets import (CenterCrop, LoadImageFromFile,
-                                 PackInputs, RandomFlip, RandomResizedCrop,
-                                 ResizeEdge)
+
+from mmpretrain.datasets import (CenterCrop, LoadImageFromFile, PackInputs,
+                                 RandomFlip, RandomResizedCrop, ResizeEdge)

 with read_base():
-    from .._base_.models.vit_base_p16 import *
     from .._base_.datasets.imagenet_bs64_pil_resize import *
-    from .._base_.schedules.imagenet_bs4096_adamw import *
     from .._base_.default_runtime import *
+    from .._base_.models.vit_base_p16 import *
+    from .._base_.schedules.imagenet_bs4096_adamw import *

 # model setting
 model.update(backbone=dict(img_size=384))
@@ -45,4 +42,4 @@ val_dataloader.update(dataset=dict(pipeline=test_pipeline))
 test_dataloader.update(dataset=dict(pipeline=test_pipeline))

 # schedule setting
-optim_wrapper.update(clip_grad=dict(max_norm=1.0))
\ No newline at end of file
+optim_wrapper.update(clip_grad=dict(max_norm=1.0))

View File

@@ -5,11 +5,10 @@ from mmengine.config import read_base
 from mmpretrain.models import Mixup

 with read_base():
-    from .._base_.models.vit_base_p32 import *
     from .._base_.datasets.imagenet_bs64_pil_resize_autoaug import *
-    from .._base_.schedules.imagenet_bs4096_adamw import *
     from .._base_.default_runtime import *
+    from .._base_.models.vit_base_p32 import *
+    from .._base_.schedules.imagenet_bs4096_adamw import *

 # model setting
 model.update(
@@ -18,4 +17,4 @@ model.update(
 )

 # schedule setting
-optim_wrapper.update(clip_grad=dict(max_norm=1.0))
\ No newline at end of file
+optim_wrapper.update(clip_grad=dict(max_norm=1.0))

View File

@@ -1,18 +1,16 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 # This is a BETA new format config file, and the usage may change recently.
+from mmcv.transforms import LoadImageFromFile, RandomFlip
 from mmengine.config import read_base
-from mmcv.transforms import (LoadImageFromFile, RandomFlip)
-from mmpretrain.datasets import (CenterCrop, LoadImageFromFile,
-                                 PackInputs, RandomFlip, RandomResizedCrop,
-                                 ResizeEdge)
+
+from mmpretrain.datasets import (CenterCrop, LoadImageFromFile, PackInputs,
+                                 RandomFlip, RandomResizedCrop, ResizeEdge)

 with read_base():
-    from .._base_.models.vit_base_p32 import *
     from .._base_.datasets.imagenet_bs64_pil_resize import *
-    from .._base_.schedules.imagenet_bs4096_adamw import *
     from .._base_.default_runtime import *
+    from .._base_.models.vit_base_p32 import *
+    from .._base_.schedules.imagenet_bs4096_adamw import *

 # model setting
 model.update(backbone=dict(img_size=384))
@@ -44,4 +42,4 @@ val_dataloader.update(dataset=dict(pipeline=test_pipeline))
 test_dataloader.update(dataset=dict(pipeline=test_pipeline))

 # schedule setting
-optim_wrapper.update(clip_grad=dict(max_norm=1.0))
\ No newline at end of file
+optim_wrapper.update(clip_grad=dict(max_norm=1.0))

View File

@@ -1,14 +1,14 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 # This is a BETA new format config file, and the usage may change recently.
 from mmengine.config import read_base
 from mmpretrain.models import Mixup

 with read_base():
-    from .._base_.models.vit_large_p16 import *
     from .._base_.datasets.imagenet_bs64_pil_resize_autoaug import *
-    from .._base_.schedules.imagenet_bs4096_adamw import *
     from .._base_.default_runtime import *
+    from .._base_.models.vit_large_p16 import *
+    from .._base_.schedules.imagenet_bs4096_adamw import *

 # model setting
 model.update(
@@ -17,4 +17,4 @@ model.update(
 )

 # schedule setting
-optim_wrapper.update(clip_grad=dict(max_norm=1.0))
\ No newline at end of file
+optim_wrapper.update(clip_grad=dict(max_norm=1.0))

View File

@@ -1,19 +1,16 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 # This is a BETA new format config file, and the usage may change recently.
+from mmcv.transforms import LoadImageFromFile, RandomFlip
 from mmengine.config import read_base
-from mmcv.transforms import (LoadImageFromFile, RandomFlip)
-from mmpretrain.datasets import (CenterCrop, LoadImageFromFile,
-                                 PackInputs, RandomFlip, RandomResizedCrop,
-                                 ResizeEdge)
+
+from mmpretrain.datasets import (CenterCrop, LoadImageFromFile, PackInputs,
+                                 RandomFlip, RandomResizedCrop, ResizeEdge)

 with read_base():
-    from .._base_.models.vit_large_p16 import *
     from .._base_.datasets.imagenet_bs64_pil_resize import *
-    from .._base_.schedules.imagenet_bs4096_adamw import *
     from .._base_.default_runtime import *
+    from .._base_.models.vit_large_p16 import *
+    from .._base_.schedules.imagenet_bs4096_adamw import *

 # model setting
 model.update(backbone=dict(img_size=384))
@@ -45,4 +42,4 @@ val_dataloader.update(dataset=dict(pipeline=test_pipeline))
 test_dataloader.update(dataset=dict(pipeline=test_pipeline))

 # schedule setting
-optim_wrapper.update(clip_grad=dict(max_norm=1.0))
\ No newline at end of file
+optim_wrapper.update(clip_grad=dict(max_norm=1.0))

View File

@@ -1,13 +1,14 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 # This is a BETA new format config file, and the usage may change recently.
 from mmengine.config import read_base
 from mmpretrain.models import Mixup

 with read_base():
-    from .._base_.models.vit_large_p32 import *
     from .._base_.datasets.imagenet_bs64_pil_resize_autoaug import *
-    from .._base_.schedules.imagenet_bs4096_adamw import *
     from .._base_.default_runtime import *
+    from .._base_.models.vit_large_p32 import *
+    from .._base_.schedules.imagenet_bs4096_adamw import *

 # model setting
 model.update(
@@ -16,4 +17,4 @@ model.update(
 )

 # schedule setting
-optim_wrapper.update(clip_grad=dict(max_norm=1.0))
\ No newline at end of file
+optim_wrapper.update(clip_grad=dict(max_norm=1.0))

View File

@@ -1,16 +1,16 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 # This is a BETA new format config file, and the usage may change recently.
+from mmcv.transforms import LoadImageFromFile, RandomFlip
 from mmengine.config import read_base
-from mmcv.transforms import (LoadImageFromFile, RandomFlip)
-from mmpretrain.datasets import (CenterCrop, LoadImageFromFile,
-                                 PackInputs, RandomFlip, RandomResizedCrop,
-                                 ResizeEdge)
+
+from mmpretrain.datasets import (CenterCrop, LoadImageFromFile, PackInputs,
+                                 RandomFlip, RandomResizedCrop, ResizeEdge)

 with read_base():
-    from .._base_.models.vit_large_p32 import *
     from .._base_.datasets.imagenet_bs64_pil_resize import *
-    from .._base_.schedules.imagenet_bs4096_adamw import *
     from .._base_.default_runtime import *
+    from .._base_.models.vit_large_p32 import *
+    from .._base_.schedules.imagenet_bs4096_adamw import *

 # model setting
 model.update(backbone=dict(img_size=384))
@@ -42,4 +42,4 @@ val_dataloader.update(dataset=dict(pipeline=test_pipeline))
 test_dataloader.update(dataset=dict(pipeline=test_pipeline))

 # schedule setting
-optim_wrapper.update(clip_grad=dict(max_norm=1.0))
\ No newline at end of file
+optim_wrapper.update(clip_grad=dict(max_norm=1.0))