[CodeCamp2023-337] New Version of config Adapting ConvNeXt Algorithm (#1760)

* add configs/_base_/datasets/imagenet21k_bs128.py

* update convnext_base_32xb128_in1k_384px.py

* add convnext-base_32xb128_in1k.py

* add convnext-base_32xb128_in21k.py

* add convnext-large_64xb64_in1k-384px.py

* add convnext-large_64xb64_in1k.py

* add convnext-large_64xb64_in21k.py

* add convnext-small_32xb128_in1k-384px.py

* add convnext-small_32xb128_in1k.py

* add convnext-tiny_32xb128_in1k-384px.py

* add convnext-tiny_32xb128_in1k.py

* add convnext-xlarge_64xb64_in1k-384px.py

* add convnext-xlarge_64xb64_in1k.py

* add convnext-xlarge_64xb64_in21k.py

* pre-commit check
AzulaFire 2023-08-14 15:25:59 +08:00 committed by GitHub
parent 29d706248c
commit 1be28ea7c4
14 changed files with 361 additions and 1 deletion
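
The configs below use MMEngine's new pure-Python config format. As a quick orientation (this snippet is not part of the change), one of the added configs could be loaded and run roughly as follows; the config path and work directory are assumptions for illustration:

# Minimal sketch (assumption, not from this PR): load one of the new-format
# configs and launch training with MMEngine's Runner.
from mmengine.config import Config
from mmengine.runner import Runner

# Hypothetical path to one of the configs added here.
cfg = Config.fromfile('mmpretrain/configs/convnext/convnext-base_32xb128_in1k.py')
cfg.work_dir = 'work_dirs/convnext-base_32xb128_in1k'  # illustrative output dir

runner = Runner.from_cfg(cfg)
runner.train()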


@@ -0,0 +1,35 @@
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.dataset import DefaultSampler
from mmpretrain.datasets import (ImageNet21k, LoadImageFromFile, PackInputs,
                                 RandomFlip, RandomResizedCrop)

# dataset settings
dataset_type = ImageNet21k
data_preprocessor = dict(
    num_classes=21842,
    # RGB format normalization parameters
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    # convert image from BGR to RGB
    to_rgb=True,
)

train_pipeline = [
    dict(type=LoadImageFromFile),
    dict(type=RandomResizedCrop, scale=224),
    dict(type=RandomFlip, prob=0.5, direction='horizontal'),
    dict(type=PackInputs),
]

train_dataloader = dict(
    batch_size=128,
    num_workers=5,
    dataset=dict(
        type=dataset_type,
        data_root='data/imagenet21k',
        split='train',
        pipeline=train_pipeline),
    sampler=dict(type=DefaultSampler, shuffle=True),
)
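
The train_pipeline above is declarative: each dict names a transform class and its arguments. A rough sketch (not part of this config) of applying the same transforms to a single sample, with a placeholder image path and label:

# Sketch (assumption): build and run the pipeline defined above on one sample.
from mmengine.dataset import Compose
from mmpretrain.datasets import (LoadImageFromFile, PackInputs, RandomFlip,
                                 RandomResizedCrop)

pipeline = Compose([
    LoadImageFromFile(),                           # 'img_path' -> decoded 'img'
    RandomResizedCrop(scale=224),                  # random crop resized to 224x224
    RandomFlip(prob=0.5, direction='horizontal'),  # 50% chance of horizontal flip
    PackInputs(),                                  # pack into 'inputs'/'data_samples'
])

sample = pipeline(dict(img_path='demo/demo.JPEG', gt_label=0))  # placeholder inputs
print(sample['inputs'].shape)  # e.g. torch.Size([3, 224, 224])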


@@ -0,0 +1,28 @@
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.config import read_base
with read_base():
    from .._base_.datasets.imagenet_bs64_swin_224 import *
    from .._base_.default_runtime import *
    from .._base_.models.convnext_base import *
    from .._base_.schedules.imagenet_bs1024_adamw_swin import *

from mmpretrain.engine import EMAHook

# dataset setting
train_dataloader.update(batch_size=128)

# schedule setting
optim_wrapper.update(
    optimizer=dict(lr=4e-3),
    clip_grad=None,
)

# runtime setting
custom_hooks = [dict(type=EMAHook, momentum=4e-5, priority='ABOVE_NORMAL')]

# NOTE: `auto_scale_lr` is for automatically scaling LR
# based on the actual training batch size.
# base_batch_size = (32 GPUs) x (128 samples per GPU)
auto_scale_lr.update(base_batch_size=4096)
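
The base_batch_size noted above is what MMEngine's LR auto-scaling compares against: when enabled (e.g. with the --auto-scale-lr flag of the training script), the configured learning rate is scaled linearly by the ratio of the actual total batch size to base_batch_size. A back-of-the-envelope sketch (not code from this PR):

# Linear scaling rule applied by `auto_scale_lr` (sketch, values for illustration).
base_lr = 4e-3          # lr configured in optim_wrapper above
base_batch_size = 4096  # (32 GPUs) x (128 samples per GPU)

actual_batch_size = 8 * 128  # hypothetical run: 8 GPUs x 128 samples per GPU
scaled_lr = base_lr * actual_batch_size / base_batch_size
print(scaled_lr)  # 1e-3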


@@ -0,0 +1,27 @@
# Copyright (c) OpenMMLab. All rights reserved.
# This is a BETA new format config file, and the usage may change recently.
from mmengine.config import read_base
with read_base():
    from .._base_.datasets.imagenet21k_bs128 import *
    from .._base_.default_runtime import *
    from .._base_.models.convnext_base import *
    from .._base_.schedules.imagenet_bs1024_adamw_swin import *

# model setting
model.update(head=dict(num_classes=21841))

# dataset setting
data_preprocessor.update(num_classes=21841)
train_dataloader.update(batch_size=128)

# schedule setting
optim_wrapper.update(
    optimizer=dict(lr=4e-3),
    clip_grad=dict(max_norm=5.0),
)

# NOTE: `auto_scale_lr` is for automatically scaling LR
# based on the actual training batch size.
# base_batch_size = (32 GPUs) x (128 samples per GPU)
auto_scale_lr.update(base_batch_size=4096)


@@ -0,0 +1,27 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.config import read_base
from mmpretrain.engine import EMAHook
with read_base():
    from .._base_.datasets.imagenet_bs64_swin_384 import *
    from .._base_.default_runtime import *
    from .._base_.models.convnext_base import *
    from .._base_.schedules.imagenet_bs1024_adamw_swin import *

# dataset setting
train_dataloader.update(batch_size=64)

# schedule setting
optim_wrapper.update(
    optimizer=dict(lr=4e-3),
    clip_grad=dict(max_norm=5.0),
)

# runtime setting
custom_hooks = [dict(type=EMAHook, momentum=4e-5, priority='ABOVE_NORMAL')]

# NOTE: `auto_scale_lr` is for automatically scaling LR
# based on the actual training batch size.
# base_batch_size = (64 GPUs) x (64 samples per GPU)
auto_scale_lr.update(base_batch_size=4096)
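
The EMAHook registered above maintains an exponential moving average of the model weights during training. In MMEngine's convention, momentum is the weight given to the current parameters at each update, which is why such small values (4e-5, 1e-4) are used. A toy sketch (not part of this config) of the update rule:

# EMA update as maintained by EMAHook (scalar toy example, assumption).
momentum = 4e-5
averaged = 0.0  # running average of a single "parameter"
for param in [1.00, 1.05, 0.95]:  # toy parameter values over three steps
    averaged = (1 - momentum) * averaged + momentum * param
print(averaged)  # still close to 0.0: the average moves very slowly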


@@ -0,0 +1,27 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.config import read_base
from mmpretrain.engine import EMAHook
with read_base():
    from .._base_.datasets.imagenet_bs64_swin_384 import *
    from .._base_.default_runtime import *
    from .._base_.models.convnext_base import *
    from .._base_.schedules.imagenet_bs1024_adamw_swin import *

# dataset setting
train_dataloader.update(batch_size=64)

# schedule setting
optim_wrapper.update(
    optimizer=dict(lr=4e-3),
    clip_grad=None,
)

# runtime setting
custom_hooks = [dict(type=EMAHook, momentum=1e-4, priority='ABOVE_NORMAL')]

# NOTE: `auto_scale_lr` is for automatically scaling LR
# based on the actual training batch size.
# base_batch_size = (64 GPUs) x (64 samples per GPU)
auto_scale_lr.update(base_batch_size=4096)


@@ -0,0 +1,26 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.config import read_base
with read_base():
    from .._base_.datasets.imagenet21k_bs128 import *
    from .._base_.default_runtime import *
    from .._base_.models.convnext_base import *
    from .._base_.schedules.imagenet_bs1024_adamw_swin import *

# model setting
model.update(head=dict(num_classes=21841))

# dataset setting
data_preprocessor.update(num_classes=21841)
train_dataloader.update(batch_size=64)

# schedule setting
optim_wrapper.update(
    optimizer=dict(lr=4e-3),
    clip_grad=dict(max_norm=5.0),
)

# NOTE: `auto_scale_lr` is for automatically scaling LR
# based on the actual training batch size.
# base_batch_size = (32 GPUs) x (128 samples per GPU)
auto_scale_lr.update(base_batch_size=4096)


@@ -0,0 +1,27 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.config import read_base
from mmpretrain.engine import EMAHook
with read_base():
    from .._base_.datasets.imagenet_bs64_swin_384 import *
    from .._base_.default_runtime import *
    from .._base_.models.convnext_base import *
    from .._base_.schedules.imagenet_bs1024_adamw_swin import *

# dataset setting
train_dataloader.update(batch_size=128)

# schedule setting
optim_wrapper.update(
    optimizer=dict(lr=4e-3),
    clip_grad=dict(max_norm=5.0),
)

# runtime setting
custom_hooks = [dict(type=EMAHook, momentum=4e-5, priority='ABOVE_NORMAL')]

# NOTE: `auto_scale_lr` is for automatically scaling LR
# based on the actual training batch size.
# base_batch_size = (32 GPUs) x (128 samples per GPU)
auto_scale_lr.update(base_batch_size=4096)


@@ -0,0 +1,27 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.config import read_base
from mmpretrain.engine import EMAHook
with read_base():
    from .._base_.datasets.imagenet_bs64_swin_224 import *
    from .._base_.default_runtime import *
    from .._base_.models.convnext_base import *
    from .._base_.schedules.imagenet_bs1024_adamw_swin import *

# dataset setting
train_dataloader.update(batch_size=128)

# schedule setting
optim_wrapper.update(
    optimizer=dict(lr=4e-3),
    clip_grad=None,
)

# runtime setting
custom_hooks = [dict(type=EMAHook, momentum=1e-4, priority='ABOVE_NORMAL')]

# NOTE: `auto_scale_lr` is for automatically scaling LR
# based on the actual training batch size.
# base_batch_size = (32 GPUs) x (128 samples per GPU)
auto_scale_lr.update(base_batch_size=4096)


@@ -0,0 +1,27 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.config import read_base
from mmpretrain.engine import EMAHook
with read_base():
    from .._base_.datasets.imagenet_bs64_swin_384 import *
    from .._base_.default_runtime import *
    from .._base_.models.convnext_base import *
    from .._base_.schedules.imagenet_bs1024_adamw_swin import *

# dataset setting
train_dataloader.update(batch_size=128)

# schedule setting
optim_wrapper.update(
    optimizer=dict(lr=4e-3),
    clip_grad=dict(max_norm=5.0),
)

# runtime setting
custom_hooks = [dict(type=EMAHook, momentum=4e-5, priority='ABOVE_NORMAL')]

# NOTE: `auto_scale_lr` is for automatically scaling LR
# based on the actual training batch size.
# base_batch_size = (32 GPUs) x (128 samples per GPU)
auto_scale_lr.update(base_batch_size=4096)


@@ -0,0 +1,27 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.config import read_base
from mmpretrain.engine import EMAHook
with read_base():
    from .._base_.datasets.imagenet_bs64_swin_224 import *
    from .._base_.default_runtime import *
    from .._base_.models.convnext_base import *
    from .._base_.schedules.imagenet_bs1024_adamw_swin import *

# dataset setting
train_dataloader.update(batch_size=128)

# schedule setting
optim_wrapper.update(
    optimizer=dict(lr=4e-3),
    clip_grad=None,
)

# runtime setting
custom_hooks = [dict(type=EMAHook, momentum=1e-4, priority='ABOVE_NORMAL')]

# NOTE: `auto_scale_lr` is for automatically scaling LR
# based on the actual training batch size.
# base_batch_size = (32 GPUs) x (128 samples per GPU)
auto_scale_lr.update(base_batch_size=4096)


@@ -0,0 +1,27 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.config import read_base
from mmpretrain.engine import EMAHook
with read_base():
    from .._base_.datasets.imagenet_bs64_swin_384 import *
    from .._base_.default_runtime import *
    from .._base_.models.convnext_base import *
    from .._base_.schedules.imagenet_bs1024_adamw_swin import *

# dataset setting
train_dataloader.update(batch_size=64)

# schedule setting
optim_wrapper.update(
    optimizer=dict(lr=4e-3),
    clip_grad=dict(max_norm=5.0),
)

# runtime setting
custom_hooks = [dict(type=EMAHook, momentum=4e-5, priority='ABOVE_NORMAL')]

# NOTE: `auto_scale_lr` is for automatically scaling LR
# based on the actual training batch size.
# base_batch_size = (64 GPUs) x (64 samples per GPU)
auto_scale_lr.update(base_batch_size=4096)


@@ -0,0 +1,27 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.config import read_base
from mmpretrain.engine import EMAHook
with read_base():
    from .._base_.datasets.imagenet_bs64_swin_224 import *
    from .._base_.default_runtime import *
    from .._base_.models.convnext_base import *
    from .._base_.schedules.imagenet_bs1024_adamw_swin import *

# dataset setting
train_dataloader.update(batch_size=64)

# schedule setting
optim_wrapper.update(
    optimizer=dict(lr=4e-3),
    clip_grad=None,
)

# runtime setting
custom_hooks = [dict(type=EMAHook, momentum=1e-4, priority='ABOVE_NORMAL')]

# NOTE: `auto_scale_lr` is for automatically scaling LR
# based on the actual training batch size.
# base_batch_size = (64 GPUs) x (64 samples per GPU)
auto_scale_lr.update(base_batch_size=4096)


@@ -0,0 +1,28 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.config import read_base
from mmpretrain.engine import EMAHook
with read_base():
    from .._base_.datasets.imagenet21k_bs128 import *
    from .._base_.default_runtime import *
    from .._base_.models.convnext_base import *
    from .._base_.schedules.imagenet_bs1024_adamw_swin import *

# model setting
model.update(head=dict(num_classes=21841))

# dataset setting
data_preprocessor.update(num_classes=21841)
train_dataloader.update(batch_size=64)

# schedule setting
optim_wrapper.update(
    optimizer=dict(lr=4e-3),
    clip_grad=dict(max_norm=5.0),
)

# NOTE: `auto_scale_lr` is for automatically scaling LR
# based on the actual training batch size.
# base_batch_size = (32 GPUs) x (128 samples per GPU)
auto_scale_lr.update(base_batch_size=4096)


@@ -25,4 +25,4 @@ custom_hooks = [dict(type=EMAHook, momentum=4e-5, priority='ABOVE_NORMAL')]
# NOTE: `auto_scale_lr` is for automatically scaling LR
# based on the actual training batch size.
# base_batch_size = (32 GPUs) x (128 samples per GPU)
-auto_scale_lr = dict(base_batch_size=4096)
+auto_scale_lr.update(base_batch_size=4096)
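
This last hunk switches the existing config from rebinding auto_scale_lr to calling .update() on the dict inherited via read_base(), which overrides base_batch_size while keeping any other keys the base config may define. A rough sketch of the difference with plain dicts (the extra `enable` key is hypothetical):

# Why `.update()` is preferred over rebinding for inherited settings (sketch).
inherited = dict(base_batch_size=1024, enable=False)  # hypothetical base values

rebound = dict(base_batch_size=4096)    # rebinding would drop `enable`
inherited.update(base_batch_size=4096)  # update keeps `enable`, overrides the size
print(rebound)    # {'base_batch_size': 4096}
print(inherited)  # {'base_batch_size': 4096, 'enable': False}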