From 4a309ce998938b91ec8374782970b1cb34f5d5c3 Mon Sep 17 00:00:00 2001
From: Mashiro <57566630+HAOCHENYE@users.noreply.github.com>
Date: Mon, 5 Jun 2023 11:54:31 +0800
Subject: [PATCH] [Feature] New config type (#539)

* update test.py

* Update new config

* Update new config

* Update new config
---
 mmrazor/configs/distill/mmdet/pkd/test.py     | 88 +++++++++++++++++++
 ...er_act_deploy_retinanet_r50_fpn_1x_coco.py | 75 ++++++++++++++++
 ..._rtmpose-s_8xb256-420e_aic-coco-256x192.py | 56 ++++++++++++
 setup.cfg                                     |  3 +
 4 files changed, 222 insertions(+)
 create mode 100644 mmrazor/configs/distill/mmdet/pkd/test.py
 create mode 100644 mmrazor/configs/pruning/mmdet/group_fisher/retinanet/group_fisher_act_deploy_retinanet_r50_fpn_1x_coco.py
 create mode 100644 mmrazor/configs/pruning/mmpose/group_fisher/group_fisher_deploy_rtmpose-s_8xb256-420e_aic-coco-256x192.py

diff --git a/mmrazor/configs/distill/mmdet/pkd/test.py b/mmrazor/configs/distill/mmdet/pkd/test.py
new file mode 100644
index 00000000..ae0b483c
--- /dev/null
+++ b/mmrazor/configs/distill/mmdet/pkd/test.py
@@ -0,0 +1,88 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+if '_base_':
+    from mmyolo.configs.rtmdet.rtmdet_tiny_syncbn_fast_8xb32_300e_coco import *
+
+from mmrazor.models import (ChannelWiseDivergence, ConfigurableDistiller,
+                            ConvModuleConnector, FpnTeacherDistill,
+                            NormConnector)
+
+default_scope = None
+teacher_ckpt = 'https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_x_syncbn_fast_8xb32-300e_coco/rtmdet_x_syncbn_fast_8xb32-300e_coco_20221231_100345-b85cd476.pth'  # noqa: E501
+
+norm_cfg = dict(type='BN', affine=False, track_running_stats=False)
+
+model = dict(
+    type=FpnTeacherDistill,
+    architecture=dict(
+        cfg_path='mmyolo::rtmdet/rtmdet_l_syncbn_fast_8xb32-300e_coco.py'),
+    teacher=dict(
+        cfg_path='mmyolo::rtmdet/rtmdet_x_syncbn_fast_8xb32-300e_coco.py'),
+    # teacher_ckpt=teacher_ckpt,
+    distiller=dict(
+        type=ConfigurableDistiller,
+        # `recorders` are used to record various intermediate results during
+        # the model's forward pass.
+        student_recorders=dict(
+            fpn0=dict(type='ModuleOutputs', source='neck.out_layers.0.conv'),
+            fpn1=dict(type='ModuleOutputs', source='neck.out_layers.1.conv'),
+            fpn2=dict(type='ModuleOutputs', source='neck.out_layers.2.conv'),
+        ),
+        teacher_recorders=dict(
+            fpn0=dict(type='ModuleOutputs', source='neck.out_layers.0.conv'),
+            fpn1=dict(type='ModuleOutputs', source='neck.out_layers.1.conv'),
+            fpn2=dict(type='ModuleOutputs', source='neck.out_layers.2.conv')),
+        # `connectors` are adaptive layers that usually map the teacher's and
+        # the student's features to the same dimension.
+        connectors=dict(
+            fpn0_s=dict(
+                type=ConvModuleConnector,
+                in_channel=256,
+                out_channel=320,
+                bias=False,
+                norm_cfg=norm_cfg,
+                act_cfg=None),
+            fpn0_t=dict(
+                type=NormConnector, in_channels=320, norm_cfg=norm_cfg),
+            fpn1_s=dict(
+                type=ConvModuleConnector,
+                in_channel=256,
+                out_channel=320,
+                bias=False,
+                norm_cfg=norm_cfg,
+                act_cfg=None),
+            fpn1_t=dict(
+                type=NormConnector, in_channels=320, norm_cfg=norm_cfg),
+            fpn2_s=dict(
+                type=ConvModuleConnector,
+                in_channel=256,
+                out_channel=320,
+                bias=False,
+                norm_cfg=norm_cfg,
+                act_cfg=None),
+            fpn2_t=dict(
+                type=NormConnector, in_channels=320, norm_cfg=norm_cfg)),
+        distill_losses=dict(
+            loss_fpn0=dict(type=ChannelWiseDivergence, loss_weight=1),
+            loss_fpn1=dict(type=ChannelWiseDivergence, loss_weight=1),
+            loss_fpn2=dict(type=ChannelWiseDivergence, loss_weight=1)),
+        # `loss_forward_mappings` map each distill loss's forward arguments
+        # to the recorded features.
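The first file exercises the new config type end to end: `if '_base_':` replaces the old `_base_ = [...]` string list, the wildcard import pulls every variable of the base config into this file's namespace, and `type=` fields reference the imported classes directly instead of registry strings. A minimal sketch of loading such a config, assuming an MMEngine version recent enough to auto-detect the pure-Python format (`Config.fromfile` is MMEngine's standard entry point; the rest is illustrative):

from mmengine.config import Config

# The new-style config is parsed lazily: imported classes such as
# FpnTeacherDistill stay as lazy references until the model is built.
cfg = Config.fromfile('mmrazor/configs/distill/mmdet/pkd/test.py')
print(cfg.model['type'])            # lazy reference to FpnTeacherDistill
print(cfg.find_unused_parameters)   # True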
+        loss_forward_mappings=dict(
+            loss_fpn0=dict(
+                preds_S=dict(
+                    from_student=True, recorder='fpn0', connector='fpn0_s'),
+                preds_T=dict(
+                    from_student=False, recorder='fpn0', connector='fpn0_t')),
+            loss_fpn1=dict(
+                preds_S=dict(
+                    from_student=True, recorder='fpn1', connector='fpn1_s'),
+                preds_T=dict(
+                    from_student=False, recorder='fpn1', connector='fpn1_t')),
+            loss_fpn2=dict(
+                preds_S=dict(
+                    from_student=True, recorder='fpn2', connector='fpn2_s'),
+                preds_T=dict(
+                    from_student=False, recorder='fpn2',
+                    connector='fpn2_t')))))
+
+find_unused_parameters = True

diff --git a/mmrazor/configs/pruning/mmdet/group_fisher/retinanet/group_fisher_act_deploy_retinanet_r50_fpn_1x_coco.py b/mmrazor/configs/pruning/mmdet/group_fisher/retinanet/group_fisher_act_deploy_retinanet_r50_fpn_1x_coco.py
new file mode 100644
index 00000000..7884b229
--- /dev/null
+++ b/mmrazor/configs/pruning/mmdet/group_fisher/retinanet/group_fisher_act_deploy_retinanet_r50_fpn_1x_coco.py
@@ -0,0 +1,75 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+#############################################################################
+"""You have to fill in these args.

+_base_ (str): The path to the config of your pretrained model.
+fix_subnet (Union[dict, str]): The dict that stores the pruning structure,
+    or the path to a json file containing it.
+divisor (int): The divisor that makes the channel numbers divisible.
+"""
+if '_base_':
+    from mmdet.configs.retinanet.retinanet_r50_fpn_1x_coco import *
+
+from mmrazor.implementations.pruning.group_fisher import \
+    GroupFisherDeploySubModel
+
+fix_subnet = {
+    'backbone.conv1_(0, 64)_64': 60,
+    'backbone.layer1.0.conv1_(0, 64)_64': 48,
+    'backbone.layer1.0.conv2_(0, 64)_64': 44,
+    'backbone.layer1.0.conv3_(0, 256)_256': 250,
+    'backbone.layer1.1.conv1_(0, 64)_64': 40,
+    'backbone.layer1.1.conv2_(0, 64)_64': 41,
+    'backbone.layer1.2.conv1_(0, 64)_64': 48,
+    'backbone.layer1.2.conv2_(0, 64)_64': 62,
+    'backbone.layer2.0.conv1_(0, 128)_128': 115,
+    'backbone.layer2.0.conv2_(0, 128)_128': 127,
+    'backbone.layer2.0.conv3_(0, 512)_512': 511,
+    'backbone.layer2.1.conv1_(0, 128)_128': 69,
+    'backbone.layer2.1.conv2_(0, 128)_128': 83,
+    'backbone.layer2.2.conv1_(0, 128)_128': 111,
+    'backbone.layer2.2.conv2_(0, 128)_128': 121,
+    'backbone.layer2.3.conv1_(0, 128)_128': 122,
+    'backbone.layer2.3.conv2_(0, 128)_128': 128,
+    'backbone.layer3.0.conv1_(0, 256)_256': 255,
+    'backbone.layer3.0.conv2_(0, 256)_256': 256,
+    'backbone.layer3.0.conv3_(0, 1024)_1024': 1024,
+    'backbone.layer3.1.conv1_(0, 256)_256': 216,
+    'backbone.layer3.1.conv2_(0, 256)_256': 223,
+    'backbone.layer3.2.conv1_(0, 256)_256': 229,
+    'backbone.layer3.2.conv2_(0, 256)_256': 247,
+    'backbone.layer3.3.conv1_(0, 256)_256': 239,
+    'backbone.layer3.3.conv2_(0, 256)_256': 246,
+    'backbone.layer3.4.conv1_(0, 256)_256': 237,
+    'backbone.layer3.4.conv2_(0, 256)_256': 239,
+    'backbone.layer3.5.conv1_(0, 256)_256': 233,
+    'backbone.layer3.5.conv2_(0, 256)_256': 221,
+    'backbone.layer4.0.conv1_(0, 512)_512': 499,
+    'backbone.layer4.0.conv2_(0, 512)_512': 494,
+    'backbone.layer4.0.conv3_(0, 2048)_2048': 2031,
+    'backbone.layer4.1.conv1_(0, 512)_512': 451,
+    'backbone.layer4.1.conv2_(0, 512)_512': 401,
+    'backbone.layer4.2.conv1_(0, 512)_512': 396,
+    'backbone.layer4.2.conv2_(0, 512)_512': 237,
+    'neck.lateral_convs.0.conv_(0, 256)_256': 237,
+    'neck.fpn_convs.0.conv_(0, 256)_256': 241,
+    'bbox_head.cls_convs.0.conv_(0, 256)_256': 133,
+    'bbox_head.cls_convs.1.conv_(0, 256)_256': 134,
+    'bbox_head.cls_convs.2.conv_(0, 256)_256': 139,
+    'bbox_head.cls_convs.3.conv_(0, 256)_256': 79,
+    'bbox_head.reg_convs.0.conv_(0, 256)_256': 89,
+    'bbox_head.reg_convs.1.conv_(0, 256)_256': 92,
+    'bbox_head.reg_convs.2.conv_(0, 256)_256': 82,
+    'bbox_head.reg_convs.3.conv_(0, 256)_256': 117
+}
+divisor = 16
+
+##############################################################################
+
+architecture = model
+model = dict(
+    type=GroupFisherDeploySubModel,
+    architecture=architecture,
+    fix_subnet=fix_subnet,
+    divisor=divisor,
+)
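Each `fix_subnet` key above encodes one prunable unit as `'<module path>_(<start>, <end>)_<original channels>'`, and the value is the number of channels the pruned subnet keeps; `divisor` then rounds those kept counts to a hardware-friendly multiple at deploy time. A rough sketch of such rounding, stated as an assumption for illustration rather than mmrazor's actual implementation (the helper name `make_divisible` is hypothetical):

def make_divisible(channels: int, divisor: int) -> int:
    """Round a kept-channel count to the nearest positive multiple of divisor."""
    return max(divisor, round(channels / divisor) * divisor)

# With divisor = 16: 'backbone.conv1_(0, 64)_64': 60 would deploy as 64
# channels, and 'backbone.layer1.0.conv2_(0, 64)_64': 44 as 48.
assert make_divisible(60, 16) == 64
assert make_divisible(44, 16) == 48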
diff --git a/mmrazor/configs/pruning/mmpose/group_fisher/group_fisher_deploy_rtmpose-s_8xb256-420e_aic-coco-256x192.py b/mmrazor/configs/pruning/mmpose/group_fisher/group_fisher_deploy_rtmpose-s_8xb256-420e_aic-coco-256x192.py
new file mode 100644
index 00000000..9ae0e361
--- /dev/null
+++ b/mmrazor/configs/pruning/mmpose/group_fisher/group_fisher_deploy_rtmpose-s_8xb256-420e_aic-coco-256x192.py
@@ -0,0 +1,56 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+#############################################################################
+"""You have to fill in these args.

+_base_ (str): The path to the config of your pretrained model.
+fix_subnet (Union[dict, str]): The dict that stores the pruning structure,
+    or the path to a json file containing it.
+divisor (int): The divisor that makes the channel numbers divisible.
+"""
+if '_base_':
+    from mmpose.configs.body_2d_keypoint.rtmpose.coco.rtmpose_s_8xb256_420e_aic_coco_256x192 import *  # noqa: E501
+
+from mmrazor.implementations.pruning.group_fisher import \
+    GroupFisherDeploySubModel
+
+fix_subnet = {
+    'backbone.stem.0.conv_(0, 16)_16': 8,
+    'backbone.stem.1.conv_(0, 16)_16': 9,
+    'backbone.stem.2.conv_(0, 32)_32': 9,
+    'backbone.stage1.0.conv_(0, 64)_64': 32,
+    'backbone.stage1.1.short_conv.conv_(0, 32)_32': 30,
+    'backbone.stage1.1.main_conv.conv_(0, 32)_32': 29,
+    'backbone.stage1.1.blocks.0.conv1.conv_(0, 32)_32': 24,
+    'backbone.stage1.1.final_conv.conv_(0, 64)_64': 27,
+    'backbone.stage2.0.conv_(0, 128)_128': 62,
+    'backbone.stage2.1.short_conv.conv_(0, 64)_64': 63,
+    'backbone.stage2.1.main_conv.conv_(0, 64)_64': 64,
+    'backbone.stage2.1.blocks.0.conv1.conv_(0, 64)_64': 56,
+    'backbone.stage2.1.blocks.1.conv1.conv_(0, 64)_64': 62,
+    'backbone.stage2.1.final_conv.conv_(0, 128)_128': 65,
+    'backbone.stage3.0.conv_(0, 256)_256': 167,
+    'backbone.stage3.1.short_conv.conv_(0, 128)_128': 127,
+    'backbone.stage3.1.main_conv.conv_(0, 128)_128': 128,
+    'backbone.stage3.1.blocks.0.conv1.conv_(0, 128)_128': 124,
+    'backbone.stage3.1.blocks.1.conv1.conv_(0, 128)_128': 123,
+    'backbone.stage3.1.final_conv.conv_(0, 256)_256': 172,
+    'backbone.stage4.0.conv_(0, 512)_512': 337,
+    'backbone.stage4.1.conv1.conv_(0, 256)_256': 256,
+    'backbone.stage4.1.conv2.conv_(0, 512)_512': 379,
+    'backbone.stage4.2.short_conv.conv_(0, 256)_256': 188,
+    'backbone.stage4.2.main_conv.conv_(0, 256)_256': 227,
+    'backbone.stage4.2.blocks.0.conv1.conv_(0, 256)_256': 238,
+    'backbone.stage4.2.blocks.0.conv2.pointwise_conv.conv_(0, 256)_256': 195,
+    'backbone.stage4.2.final_conv.conv_(0, 512)_512': 163
+}
+divisor = 8
+##############################################################################
+
+architecture = model
+
+model = dict(
+    type=GroupFisherDeploySubModel,
+    architecture=architecture,
+    fix_subnet=fix_subnet,
+    divisor=divisor,
+)
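Both deploy configs close with the same rebinding pattern: because a pure-Python config executes top-down as ordinary Python, the `model` pulled in by the wildcard import is just a module-level variable, so it can be captured as `architecture` and shadowed by the `GroupFisherDeploySubModel` wrapper; any tool that reads the top-level `model` key then gets the pruned sub-model transparently. A stripped-down, standalone illustration of that shadowing (placeholder dicts and strings, not real mmrazor classes):

# What the base config effectively provides after `from ... import *`:
model = dict(type='RTMPose', backbone=dict(type='CSPNeXt'))

# The deploy config then captures and re-wraps it:
architecture = model
model = dict(
    type='GroupFisherDeploySubModel',  # placeholder for the real class
    architecture=architecture,
    fix_subnet={'backbone.stem.0.conv_(0, 16)_16': 8},  # truncated
    divisor=8,
)
assert model['architecture']['type'] == 'RTMPose'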
diff --git a/setup.cfg b/setup.cfg
index f3eceac2..bac5116f 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -22,3 +22,6 @@ default_section = THIRDPARTY
 skip = *.ipynb
 quiet-level = 3
 ignore-words-list = patten,confectionary,nd,ty,formating
+
+[flake8]
+per-file-ignores = mmrazor/configs/*: F401,F403,F405
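The flake8 exemption exists because the new-style configs lean on patterns flake8 flags by design: F401 (a base-config name imported but never used), F403 (`from module import *`), and F405 (a name that may be undefined because it comes from a star import). Lines like these from the configs above would otherwise be rejected:

from mmdet.configs.retinanet.retinanet_r50_fpn_1x_coco import *  # F403
architecture = model  # F405: `model` is defined only by the star import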