Adapt ResNet configs to the new config format
commit 58630571ed (parent a4c219e05d)
@@ -0,0 +1,60 @@
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.dataset import DefaultSampler

from mmpretrain.datasets import (CenterCrop, ImageNet, LoadImageFromFile,
                                 PackInputs, RandomFlip, RandomResizedCrop,
                                 ResizeEdge)
from mmpretrain.evaluation import Accuracy

# dataset settings
dataset_type = ImageNet
data_preprocessor = dict(
    num_classes=1000,
    # RGB format normalization parameters
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    # convert image from BGR to RGB
    to_rgb=True,
)

train_pipeline = [
    dict(type=LoadImageFromFile),
    dict(type=RandomResizedCrop, scale=224),
    dict(type=RandomFlip, prob=0.5, direction='horizontal'),
    dict(type=PackInputs),
]

test_pipeline = [
    dict(type=LoadImageFromFile),
    dict(type=ResizeEdge, scale=256, edge='short'),
    dict(type=CenterCrop, crop_size=224),
    dict(type=PackInputs),
]

train_dataloader = dict(
    batch_size=64,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        split='train',
        pipeline=train_pipeline),
    sampler=dict(type=DefaultSampler, shuffle=True),
)

val_dataloader = dict(
    batch_size=64,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet',
        split='val',
        pipeline=test_pipeline),
    sampler=dict(type=DefaultSampler, shuffle=False),
)
val_evaluator = dict(type=Accuracy, topk=(1, 5))

# If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator

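For context, a minimal sketch of loading one of these new-format (pure Python) configs programmatically. The file path below is an assumption for illustration, not part of this diff, and this requires an mmengine version recent enough to support the new config format.

# Minimal sketch (assumed path, recent mmengine); not part of the diff itself.
from mmengine.config import Config

cfg = Config.fromfile('mmpretrain/configs/_base_/datasets/imagenet_bs64.py')
print(cfg.train_dataloader.batch_size)  # -> 64
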
@@ -0,0 +1,23 @@
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmpretrain.models import (CrossEntropyLoss, GlobalAveragePooling,
                               ImageClassifier, LinearClsHead, ResNet)

# model settings
model = dict(
    type=ImageClassifier,
    backbone=dict(
        type=ResNet,
        depth=101,
        num_stages=4,
        out_indices=(3, ),
        style='pytorch'),
    neck=dict(type=GlobalAveragePooling),
    head=dict(
        type=LinearClsHead,
        num_classes=1000,
        in_channels=2048,
        loss=dict(type=CrossEntropyLoss, loss_weight=1.0),
        topk=(1, 5),
    ))

@@ -0,0 +1,22 @@
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmpretrain.models import (CrossEntropyLoss, GlobalAveragePooling,
                               ImageClassifier, LinearClsHead, ResNet_CIFAR)

# model settings
model = dict(
    type=ImageClassifier,
    backbone=dict(
        type=ResNet_CIFAR,
        depth=101,
        num_stages=4,
        out_indices=(3, ),
        style='pytorch'),
    neck=dict(type=GlobalAveragePooling),
    head=dict(
        type=LinearClsHead,
        num_classes=1000,
        in_channels=2048,
        loss=dict(type=CrossEntropyLoss, loss_weight=1.0),
    ))

@@ -0,0 +1,22 @@
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmpretrain.models import (CrossEntropyLoss, GlobalAveragePooling,
                               ImageClassifier, LinearClsHead, ResNet)

# model settings
model = dict(
    type=ImageClassifier,
    backbone=dict(
        type=ResNet,
        depth=152,
        num_stages=4,
        out_indices=(3, ),
        style='pytorch'),
    neck=dict(type=GlobalAveragePooling),
    head=dict(
        type=LinearClsHead,
        num_classes=1000,
        in_channels=2048,
        loss=dict(type=CrossEntropyLoss, loss_weight=1.0),
        topk=(1, 5),
    ))

@@ -0,0 +1,21 @@
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmpretrain.models import (CrossEntropyLoss, GlobalAveragePooling,
                               ImageClassifier, LinearClsHead, ResNet_CIFAR)

# model settings
model = dict(
    type=ImageClassifier,
    backbone=dict(
        type=ResNet_CIFAR,
        depth=152,
        num_stages=4,
        out_indices=(3, ),
        style='pytorch'),
    neck=dict(type=GlobalAveragePooling),
    head=dict(
        type=LinearClsHead,
        num_classes=1000,
        in_channels=2048,
        loss=dict(type=CrossEntropyLoss, loss_weight=1.0),
    ))

@@ -0,0 +1,21 @@
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmpretrain.models import (CrossEntropyLoss, GlobalAveragePooling,
                               ImageClassifier, LinearClsHead, ResNet_CIFAR)

# model settings
model = dict(
    type=ImageClassifier,
    backbone=dict(
        type=ResNet_CIFAR,
        depth=18,
        num_stages=4,
        out_indices=(3, ),
        style='pytorch'),
    neck=dict(type=GlobalAveragePooling),
    head=dict(
        type=LinearClsHead,
        num_classes=10,
        in_channels=512,
        loss=dict(type=CrossEntropyLoss, loss_weight=1.0),
    ))

@@ -0,0 +1,22 @@
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmpretrain.models import (CrossEntropyLoss, GlobalAveragePooling,
                               ImageClassifier, LinearClsHead, ResNet)

# model settings
model = dict(
    type=ImageClassifier,
    backbone=dict(
        type=ResNet,
        depth=34,
        num_stages=4,
        out_indices=(3, ),
        style='pytorch'),
    neck=dict(type=GlobalAveragePooling),
    head=dict(
        type=LinearClsHead,
        num_classes=1000,
        in_channels=512,
        loss=dict(type=CrossEntropyLoss, loss_weight=1.0),
        topk=(1, 5),
    ))

@@ -0,0 +1,21 @@
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmpretrain.models import (CrossEntropyLoss, GlobalAveragePooling,
                               ImageClassifier, LinearClsHead, ResNet_CIFAR)

# model settings
model = dict(
    type=ImageClassifier,
    backbone=dict(
        type=ResNet_CIFAR,
        depth=34,
        num_stages=4,
        out_indices=(3, ),
        style='pytorch'),
    neck=dict(type=GlobalAveragePooling),
    head=dict(
        type=LinearClsHead,
        num_classes=10,
        in_channels=512,
        loss=dict(type=CrossEntropyLoss, loss_weight=1.0),
    ))

@@ -0,0 +1,22 @@
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmpretrain.models import (CrossEntropyLoss, GeneralizedMeanPooling,
                               ImageClassifier, LinearClsHead, ResNet)

# model settings
model = dict(
    type=ImageClassifier,
    backbone=dict(
        type=ResNet,
        depth=34,
        num_stages=4,
        out_indices=(3, ),
        style='pytorch'),
    neck=dict(type=GeneralizedMeanPooling),
    head=dict(
        type=LinearClsHead,
        num_classes=1000,
        in_channels=512,
        loss=dict(type=CrossEntropyLoss, loss_weight=1.0),
        topk=(1, 5),
    ))

@@ -0,0 +1,22 @@
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmpretrain.models import (CrossEntropyLoss, GlobalAveragePooling,
                               ImageClassifier, LinearClsHead, ResNet)

# model settings
model = dict(
    type=ImageClassifier,
    backbone=dict(
        type=ResNet,
        depth=50,
        num_stages=4,
        out_indices=(3, ),
        style='pytorch'),
    neck=dict(type=GlobalAveragePooling),
    head=dict(
        type=LinearClsHead,
        num_classes=1000,
        in_channels=2048,
        loss=dict(type=CrossEntropyLoss, loss_weight=1.0),
        topk=(1, 5),
    ))

@@ -0,0 +1,21 @@
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmpretrain.models import (CrossEntropyLoss, GlobalAveragePooling,
                               ImageClassifier, LinearClsHead, ResNet_CIFAR)

# model settings
model = dict(
    type=ImageClassifier,
    backbone=dict(
        type=ResNet_CIFAR,
        depth=50,
        num_stages=4,
        out_indices=(3, ),
        style='pytorch'),
    neck=dict(type=GlobalAveragePooling),
    head=dict(
        type=LinearClsHead,
        num_classes=10,
        in_channels=2048,
        loss=dict(type=CrossEntropyLoss, loss_weight=1.0),
    ))

@@ -0,0 +1,22 @@
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmpretrain.models import (CrossEntropyLoss, GlobalAveragePooling,
                               ImageClassifier, MultiLabelLinearClsHead,
                               ResNet_CIFAR, Mixup)

# model settings
model = dict(
    type=ImageClassifier,
    backbone=dict(
        type=ResNet_CIFAR,
        depth=50,
        num_stages=4,
        out_indices=(3, ),
        style='pytorch'),
    neck=dict(type=GlobalAveragePooling),
    head=dict(
        type=MultiLabelLinearClsHead,
        num_classes=10,
        in_channels=2048,
        loss=dict(type=CrossEntropyLoss, loss_weight=1.0, use_soft=True)),
    train_cfg=dict(augments=dict(type=Mixup, alpha=1.)),
)

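As a side note on why the head above uses a soft (multi-label style) loss: Mixup blends pairs of images and their one-hot labels, so the training targets are no longer hard class indices. A rough, self-contained illustration of that blending (plain PyTorch, not this repo's implementation):

# Illustrative only; mmpretrain's Mixup batch augment does the equivalent internally.
import torch
import torch.nn.functional as F

lam = 0.7                                           # in practice sampled from Beta(alpha, alpha)
imgs = torch.randn(4, 3, 32, 32)
labels = F.one_hot(torch.tensor([0, 1, 2, 3]), 10).float()
index = torch.randperm(imgs.size(0))                # random pairing within the batch
mixed_imgs = lam * imgs + (1 - lam) * imgs[index]
soft_labels = lam * labels + (1 - lam) * labels[index]  # soft targets for the soft CE loss
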
@@ -0,0 +1,23 @@
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmpretrain.models import (CrossEntropyLoss, GlobalAveragePooling,
                               ImageClassifier, MultiLabelLinearClsHead,
                               ResNet, CutMix)

# model settings
model = dict(
    type=ImageClassifier,
    backbone=dict(
        type=ResNet,
        depth=50,
        num_stages=4,
        out_indices=(3, ),
        style='pytorch'),
    neck=dict(type=GlobalAveragePooling),
    head=dict(
        type=MultiLabelLinearClsHead,
        num_classes=1000,
        in_channels=2048,
        loss=dict(type=CrossEntropyLoss, loss_weight=1.0, use_soft=True)),
    train_cfg=dict(augments=dict(type=CutMix, alpha=1.0)),
)

@@ -0,0 +1,26 @@
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.optim import MultiStepLR, LinearLR
from torch.optim import SGD

# optimizer
optim_wrapper = dict(
    optimizer=dict(
        type=SGD, lr=0.8, momentum=0.9, weight_decay=0.0001, nesterov=True))

# learning policy
param_scheduler = [
    dict(
        type=LinearLR, start_factor=0.25, by_epoch=False, begin=0, end=2500),
    dict(
        type=MultiStepLR, by_epoch=True, milestones=[30, 60, 90], gamma=0.1)
]

# train, val, test setting
train_cfg = dict(by_epoch=True, max_epochs=100, val_interval=1)
val_cfg = dict()
test_cfg = dict()

# NOTE: `auto_scale_lr` is for automatically scaling LR,
# based on the actual training batch size.
auto_scale_lr = dict(base_batch_size=2048)

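As a rough illustration of what that note means in numbers: when automatic LR scaling is enabled on the training command line, mmengine scales the base LR linearly by the ratio of the actual total batch size to `base_batch_size`. The launch setup below (8 GPUs x 64 samples each) is an assumption for the sake of the arithmetic, not part of this diff:

# Illustration only; mmengine performs this scaling when auto_scale_lr is enabled.
base_lr, base_batch_size = 0.8, 2048
actual_batch_size = 8 * 64                     # assumed: 8 GPUs x 64 samples per GPU
scaled_lr = base_lr * actual_batch_size / base_batch_size
print(scaled_lr)                               # -> 0.2
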
@@ -0,0 +1,21 @@
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.optim import CosineAnnealingLR
from torch.optim import SGD

# optimizer
optim_wrapper = dict(
    optimizer=dict(type=SGD, lr=0.1, momentum=0.9, weight_decay=0.0001))

# learning policy
param_scheduler = dict(
    type=CosineAnnealingLR, T_max=100, by_epoch=True, begin=0, end=100)

# train, val, test setting
train_cfg = dict(by_epoch=True, max_epochs=100, val_interval=1)
val_cfg = dict()
test_cfg = dict()

# NOTE: `auto_scale_lr` is for automatically scaling LR,
# based on the actual training batch size.
auto_scale_lr = dict(base_batch_size=256)

@@ -0,0 +1,9 @@
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.config import read_base

with read_base():
    from .._base_.datasets.cifar10_bs16 import *
    from .._base_.default_runtime import *
    from .._base_.models.resnet101_cifar import *
    from .._base_.schedules.cifar10_bs128 import *

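Because the values pulled in under `read_base()` are ordinary Python objects, a derived config can tweak them with plain Python after the imports. A hypothetical sketch (not part of this PR) that reuses the same bases but overrides the head's class count:

# Hypothetical derived config, for illustration only.
from mmengine.config import read_base

with read_base():
    from .._base_.datasets.cifar10_bs16 import *
    from .._base_.default_runtime import *
    from .._base_.models.resnet101_cifar import *
    from .._base_.schedules.cifar10_bs128 import *

# The inherited `model` is a plain dict, so direct mutation works; switching the
# dataset to match (e.g. CIFAR-100) would be done the same way.
model['head']['num_classes'] = 100
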
@@ -0,0 +1,9 @@
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.config import read_base

with read_base():
    from .._base_.datasets.imagenet_bs32 import *
    from .._base_.default_runtime import *
    from .._base_.models.resnet101 import *
    from .._base_.schedules.imagenet_bs256 import *

@@ -0,0 +1,9 @@
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.config import read_base

with read_base():
    from .._base_.datasets.cifar10_bs16 import *
    from .._base_.default_runtime import *
    from .._base_.models.resnet152_cifar import *
    from .._base_.schedules.cifar10_bs128 import *

@@ -0,0 +1,9 @@
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.config import read_base

with read_base():
    from .._base_.datasets.imagenet_bs32 import *
    from .._base_.default_runtime import *
    from .._base_.models.resnet152 import *
    from .._base_.schedules.imagenet_bs256 import *

@@ -0,0 +1,9 @@
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.config import read_base

with read_base():
    from .._base_.datasets.cifar10_bs16 import *
    from .._base_.default_runtime import *
    from .._base_.models.resnet18_cifar import *
    from .._base_.schedules.cifar10_bs128 import *

@@ -0,0 +1,10 @@
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.config import read_base

with read_base():
    from .._base_.datasets.cifar10_bs16 import *
    from .._base_.default_runtime import *
    from .._base_.models.resnet34_cifar import *
    from .._base_.schedules.cifar10_bs128 import *

@@ -0,0 +1,9 @@
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.config import read_base

with read_base():
    from .._base_.datasets.imagenet_bs32 import *
    from .._base_.default_runtime import *
    from .._base_.models.resnet34 import *
    from .._base_.schedules.imagenet_bs256 import *

@@ -0,0 +1,9 @@
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.config import read_base

with read_base():
    from .._base_.datasets.imagenet_bs64 import *
    from .._base_.default_runtime import *
    from .._base_.models.resnet50 import *
    from .._base_.schedules.imagenet_bs2048 import *

@@ -0,0 +1,9 @@
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.config import read_base

with read_base():
    from .._base_.datasets.cifar10_bs16 import *
    from .._base_.default_runtime import *
    from .._base_.models.resnet50_cifar_mixup import *
    from .._base_.schedules.cifar10_bs128 import *

@@ -0,0 +1,9 @@
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.config import read_base

with read_base():
    from .._base_.datasets.cifar10_bs16 import *
    from .._base_.default_runtime import *
    from .._base_.models.resnet50_cifar import *
    from .._base_.schedules.cifar10_bs128 import *

@@ -0,0 +1,23 @@
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.config import read_base

from mmpretrain.engine import PreciseBNHook

with read_base():
    from .._base_.datasets.imagenet_bs32 import *
    from .._base_.default_runtime import *
    from .._base_.models.resnet50 import *
    from .._base_.schedules.imagenet_bs256_coslr import *

# Precise BN hook will update the bn stats, so this hook should be executed
# before CheckpointHook (priority 'VERY_LOW') and
# EMAHook (priority 'NORMAL'). Therefore, set the priority of PreciseBNHook
# to 'ABOVE_NORMAL' here.
custom_hooks = [
    dict(
        type=PreciseBNHook,
        num_samples=8192,
        interval=1,
        priority='ABOVE_NORMAL')
]

@@ -0,0 +1,9 @@
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.config import read_base

with read_base():
    from .._base_.datasets.imagenet_bs32 import *
    from .._base_.default_runtime import *
    from .._base_.models.resnet50 import *
    from .._base_.schedules.imagenet_bs256_coslr import *

@@ -0,0 +1,9 @@
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.config import read_base

with read_base():
    from .._base_.datasets.imagenet_bs32 import *
    from .._base_.default_runtime import *
    from .._base_.models.resnet50_cutmix import *
    from .._base_.schedules.imagenet_bs256 import *

@@ -0,0 +1,9 @@
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.config import read_base

with read_base():
    from .._base_.datasets.imagenet_bs32 import *
    from .._base_.default_runtime import *
    from .._base_.models.resnet50 import *
    from .._base_.schedules.imagenet_bs256 import *