# encoding: utf-8
# based on:
# https://github.com/zhanghang1989/ResNeSt/blob/master/resnest/torch/resnest.py
"""ResNeSt models"""

import logging
import math

import torch
from torch import nn

from fastreid.layers import (
    IBN,
    Non_local,
    SplAtConv2d,
    get_norm,
)
from fastreid.utils.checkpoint import get_unexpected_parameters_message, get_missing_parameters_message
from .build import BACKBONE_REGISTRY

_url_format = 'https://hangzh.s3.amazonaws.com/encoding/models/{}-{}.pth'

_model_sha256 = {name: checksum for checksum, name in [
    ('528c19ca', 'resnest50'),
    ('22405ba7', 'resnest101'),
    ('75117900', 'resnest200'),
    ('0cc87c48', 'resnest269'),
]}


def short_hash(name):
    if name not in _model_sha256:
        raise ValueError('Pretrained model for {name} is not available.'.format(name=name))
    return _model_sha256[name][:8]


model_urls = {name: _url_format.format(name, short_hash(name)) for
              name in _model_sha256.keys()
              }


class Bottleneck(nn.Module):
    """ResNet Bottleneck
    """
    # pylint: disable=unused-argument
    expansion = 4

    def __init__(self, inplanes, planes, bn_norm, num_splits, with_ibn=False,
                 stride=1, downsample=None,
                 radix=1, cardinality=1, bottleneck_width=64,
                 avd=False, avd_first=False, dilation=1, is_first=False,
                 rectified_conv=False, rectify_avg=False,
                 dropblock_prob=0.0, last_gamma=False):
        super(Bottleneck, self).__init__()
        group_width = int(planes * (bottleneck_width / 64.)) * cardinality
        self.conv1 = nn.Conv2d(inplanes, group_width, kernel_size=1, bias=False)
        if with_ibn:
            self.bn1 = IBN(group_width, bn_norm, num_splits)
        else:
            self.bn1 = get_norm(bn_norm, group_width, num_splits)
        # NOTE: forward() expects self.dropblock1/2/3 to exist whenever
        # dropblock_prob > 0, but this file never defines them; keep
        # dropblock_prob at 0 unless DropBlock layers are attached externally.
        self.dropblock_prob = dropblock_prob
        self.radix = radix
        self.avd = avd and (stride > 1 or is_first)
        self.avd_first = avd_first

        if self.avd:
            self.avd_layer = nn.AvgPool2d(3, stride, padding=1)
            stride = 1

        if radix > 1:
            self.conv2 = SplAtConv2d(
                group_width, group_width, kernel_size=3,
                stride=stride, padding=dilation,
                dilation=dilation, groups=cardinality, bias=False,
                radix=radix, rectify=rectified_conv,
                rectify_avg=rectify_avg,
                norm_layer=bn_norm, num_splits=num_splits,
                dropblock_prob=dropblock_prob)
        elif rectified_conv:
            from rfconv import RFConv2d
            self.conv2 = RFConv2d(
                group_width, group_width, kernel_size=3, stride=stride,
                padding=dilation, dilation=dilation,
                groups=cardinality, bias=False,
                average_mode=rectify_avg)
            self.bn2 = get_norm(bn_norm, group_width, num_splits)
        else:
            self.conv2 = nn.Conv2d(
                group_width, group_width, kernel_size=3, stride=stride,
                padding=dilation, dilation=dilation,
                groups=cardinality, bias=False)
            self.bn2 = get_norm(bn_norm, group_width, num_splits)

        self.conv3 = nn.Conv2d(
            group_width, planes * 4, kernel_size=1, bias=False)
        self.bn3 = get_norm(bn_norm, planes * 4, num_splits)

        if last_gamma:
            from torch.nn.init import zeros_
            zeros_(self.bn3.weight)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.dilation = dilation
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        if self.dropblock_prob > 0.0:
            out = self.dropblock1(out)
        out = self.relu(out)

        if self.avd and self.avd_first:
            out = self.avd_layer(out)

        out = self.conv2(out)
        if self.radix == 1:
            out = self.bn2(out)
            if self.dropblock_prob > 0.0:
                out = self.dropblock2(out)
            out = self.relu(out)

        if self.avd and not self.avd_first:
            out = self.avd_layer(out)

        out = self.conv3(out)
        out = self.bn3(out)
        if self.dropblock_prob > 0.0:
            out = self.dropblock3(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out
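

# A minimal usage sketch (illustrative, not part of the upstream file): a shape
# check for a stride-1 Bottleneck with the default ResNeSt block settings
# (radix=2, avd=True, "BN" norm). The helper name `_bottleneck_shape_check`
# is hypothetical and safe to remove.
def _bottleneck_shape_check():
    block = Bottleneck(256, 64, "BN", 1, radix=2, avd=True, avd_first=False)
    x = torch.randn(2, 256, 56, 56)
    out = block(x)
    # planes * expansion = 64 * 4 = 256 channels; stride 1 keeps the 56x56 map
    assert out.shape == (2, 256, 56, 56)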


class ResNest(nn.Module):
    """ResNeSt variants of ResNet.

    Parameters
    ----------
    block : Block
        Class for the residual block. Options are BasicBlockV1, BottleneckV1.
    layers : list of int
        Numbers of layers in each block.
    classes : int, default 1000
        Number of classification classes.
    dilated : bool, default False
        Whether to apply the dilation strategy to a pretrained ResNet, yielding
        a stride-8 model, typically used in semantic segmentation.
    norm_layer : object
        Normalization layer used in the backbone network
        (default: :class:`mxnet.gluon.nn.BatchNorm`; use a synchronized variant
        for cross-GPU batch normalization).

    Reference:
        - He, Kaiming, et al. "Deep residual learning for image recognition."
          Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. 2016.
        - Yu, Fisher, and Vladlen Koltun. "Multi-scale context aggregation by dilated convolutions."
    """

    # pylint: disable=unused-variable
    def __init__(self, last_stride, bn_norm, num_splits, with_ibn, with_nl, block, layers, non_layers,
                 radix=1, groups=1, bottleneck_width=64,
                 dilated=False, dilation=1,
                 deep_stem=False, stem_width=64, avg_down=False,
                 rectified_conv=False, rectify_avg=False,
                 avd=False, avd_first=False,
                 final_drop=0.0, dropblock_prob=0,
                 last_gamma=False):
        self.cardinality = groups
        self.bottleneck_width = bottleneck_width
        # ResNet-D params
        self.inplanes = stem_width * 2 if deep_stem else 64
        self.avg_down = avg_down
        self.last_gamma = last_gamma
        # ResNeSt params
        self.radix = radix
        self.avd = avd
        self.avd_first = avd_first

        super().__init__()
        self.rectified_conv = rectified_conv
        self.rectify_avg = rectify_avg
        if rectified_conv:
            from rfconv import RFConv2d
            conv_layer = RFConv2d
        else:
            conv_layer = nn.Conv2d
        conv_kwargs = {'average_mode': rectify_avg} if rectified_conv else {}
        if deep_stem:
            self.conv1 = nn.Sequential(
                conv_layer(3, stem_width, kernel_size=3, stride=2, padding=1, bias=False, **conv_kwargs),
                get_norm(bn_norm, stem_width, num_splits),
                nn.ReLU(inplace=True),
                conv_layer(stem_width, stem_width, kernel_size=3, stride=1, padding=1, bias=False, **conv_kwargs),
                get_norm(bn_norm, stem_width, num_splits),
                nn.ReLU(inplace=True),
                conv_layer(stem_width, stem_width * 2, kernel_size=3, stride=1, padding=1, bias=False, **conv_kwargs),
            )
        else:
            self.conv1 = conv_layer(3, 64, kernel_size=7, stride=2, padding=3,
                                    bias=False, **conv_kwargs)
        self.bn1 = get_norm(bn_norm, self.inplanes, num_splits)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], 1, bn_norm, num_splits, with_ibn=with_ibn, is_first=False)
        self.layer2 = self._make_layer(block, 128, layers[1], 2, bn_norm, num_splits, with_ibn=with_ibn)
        if dilated or dilation == 4:
            self.layer3 = self._make_layer(block, 256, layers[2], 1, bn_norm, num_splits, with_ibn=with_ibn,
                                           dilation=2, dropblock_prob=dropblock_prob)
            self.layer4 = self._make_layer(block, 512, layers[3], 1, bn_norm, num_splits, with_ibn=with_ibn,
                                           dilation=4, dropblock_prob=dropblock_prob)
        elif dilation == 2:
            self.layer3 = self._make_layer(block, 256, layers[2], 2, bn_norm, num_splits, with_ibn=with_ibn,
                                           dilation=1, dropblock_prob=dropblock_prob)
            self.layer4 = self._make_layer(block, 512, layers[3], 1, bn_norm, num_splits, with_ibn=with_ibn,
                                           dilation=2, dropblock_prob=dropblock_prob)
        else:
            self.layer3 = self._make_layer(block, 256, layers[2], 2, bn_norm, num_splits, with_ibn=with_ibn,
                                           dropblock_prob=dropblock_prob)
            self.layer4 = self._make_layer(block, 512, layers[3], last_stride, bn_norm, num_splits,
                                           with_ibn=with_ibn, dropblock_prob=dropblock_prob)
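        # Resulting overall output stride (illustrative note, not upstream):
        # stem (2) * maxpool (2) * layer2 (2) * layer3 (2) * layer4 (last_stride)
        # gives stride 16 for the common re-id setting last_stride=1, while
        # dilated=True keeps layer3/4 at stride 1 for an overall stride of 8.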

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

        if with_nl:
            self._build_nonlocal(layers, non_layers, bn_norm, num_splits)
        else:
            self.NL_1_idx = self.NL_2_idx = self.NL_3_idx = self.NL_4_idx = []

    def _make_layer(self, block, planes, blocks, stride=1, bn_norm="BN", num_splits=1, with_ibn=False,
                    dilation=1, dropblock_prob=0.0, is_first=True):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            down_layers = []
            if self.avg_down:
                if dilation == 1:
                    down_layers.append(nn.AvgPool2d(kernel_size=stride, stride=stride,
                                                    ceil_mode=True, count_include_pad=False))
                else:
                    down_layers.append(nn.AvgPool2d(kernel_size=1, stride=1,
                                                    ceil_mode=True, count_include_pad=False))
                down_layers.append(nn.Conv2d(self.inplanes, planes * block.expansion,
                                             kernel_size=1, stride=1, bias=False))
            else:
                down_layers.append(nn.Conv2d(self.inplanes, planes * block.expansion,
                                             kernel_size=1, stride=stride, bias=False))
            down_layers.append(get_norm(bn_norm, planes * block.expansion, num_splits))
            downsample = nn.Sequential(*down_layers)

        layers = []
        if planes == 512:
            with_ibn = False
        if dilation == 1 or dilation == 2:
            layers.append(block(self.inplanes, planes, bn_norm, num_splits, with_ibn, stride,
                                downsample=downsample,
                                radix=self.radix, cardinality=self.cardinality,
                                bottleneck_width=self.bottleneck_width,
                                avd=self.avd, avd_first=self.avd_first,
                                dilation=1, is_first=is_first, rectified_conv=self.rectified_conv,
                                rectify_avg=self.rectify_avg,
                                dropblock_prob=dropblock_prob,
                                last_gamma=self.last_gamma))
        elif dilation == 4:
            layers.append(block(self.inplanes, planes, bn_norm, num_splits, with_ibn, stride,
                                downsample=downsample,
                                radix=self.radix, cardinality=self.cardinality,
                                bottleneck_width=self.bottleneck_width,
                                avd=self.avd, avd_first=self.avd_first,
                                dilation=2, is_first=is_first, rectified_conv=self.rectified_conv,
                                rectify_avg=self.rectify_avg,
                                dropblock_prob=dropblock_prob,
                                last_gamma=self.last_gamma))
        else:
            raise RuntimeError("=> unknown dilation size: {}".format(dilation))

        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, bn_norm, num_splits, with_ibn,
                                radix=self.radix, cardinality=self.cardinality,
                                bottleneck_width=self.bottleneck_width,
                                avd=self.avd, avd_first=self.avd_first,
                                dilation=dilation, rectified_conv=self.rectified_conv,
                                rectify_avg=self.rectify_avg,
                                dropblock_prob=dropblock_prob,
                                last_gamma=self.last_gamma))

        return nn.Sequential(*layers)

    def _build_nonlocal(self, layers, non_layers, bn_norm, num_splits):
        self.NL_1 = nn.ModuleList(
            [Non_local(256, bn_norm, num_splits) for _ in range(non_layers[0])])
        self.NL_1_idx = sorted([layers[0] - (i + 1) for i in range(non_layers[0])])
        self.NL_2 = nn.ModuleList(
            [Non_local(512, bn_norm, num_splits) for _ in range(non_layers[1])])
        self.NL_2_idx = sorted([layers[1] - (i + 1) for i in range(non_layers[1])])
        self.NL_3 = nn.ModuleList(
            [Non_local(1024, bn_norm, num_splits) for _ in range(non_layers[2])])
        self.NL_3_idx = sorted([layers[2] - (i + 1) for i in range(non_layers[2])])
        self.NL_4 = nn.ModuleList(
            [Non_local(2048, bn_norm, num_splits) for _ in range(non_layers[3])])
        self.NL_4_idx = sorted([layers[3] - (i + 1) for i in range(non_layers[3])])
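
    # Illustrative note (not in the upstream file): with resnest50's
    # layers=[3, 4, 6, 3] and non_layers=[0, 2, 3, 0], the indices above give
    # NL_2_idx = sorted([4 - 1, 4 - 2]) = [2, 3], i.e. Non-local blocks are
    # applied after the last two residual blocks of layer2.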

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        # Layer 1
        NL1_counter = 0
        if len(self.NL_1_idx) == 0:
            self.NL_1_idx = [-1]
        for i in range(len(self.layer1)):
            x = self.layer1[i](x)
            if i == self.NL_1_idx[NL1_counter]:
                _, C, H, W = x.shape
                x = self.NL_1[NL1_counter](x)
                NL1_counter += 1
        # Layer 2
        NL2_counter = 0
        if len(self.NL_2_idx) == 0:
            self.NL_2_idx = [-1]
        for i in range(len(self.layer2)):
            x = self.layer2[i](x)
            if i == self.NL_2_idx[NL2_counter]:
                _, C, H, W = x.shape
                x = self.NL_2[NL2_counter](x)
                NL2_counter += 1
        # Layer 3
        NL3_counter = 0
        if len(self.NL_3_idx) == 0:
            self.NL_3_idx = [-1]
        for i in range(len(self.layer3)):
            x = self.layer3[i](x)
            if i == self.NL_3_idx[NL3_counter]:
                _, C, H, W = x.shape
                x = self.NL_3[NL3_counter](x)
                NL3_counter += 1
        # Layer 4
        NL4_counter = 0
        if len(self.NL_4_idx) == 0:
            self.NL_4_idx = [-1]
        for i in range(len(self.layer4)):
            x = self.layer4[i](x)
            if i == self.NL_4_idx[NL4_counter]:
                _, C, H, W = x.shape
                x = self.NL_4[NL4_counter](x)
                NL4_counter += 1

        return x


@BACKBONE_REGISTRY.register()
def build_resnest_backbone(cfg):
    """
    Create a ResNeSt instance from config.
    Returns:
        ResNest: a :class:`ResNest` instance.
    """

    # fmt: off
    pretrain      = cfg.MODEL.BACKBONE.PRETRAIN
    last_stride   = cfg.MODEL.BACKBONE.LAST_STRIDE
    bn_norm       = cfg.MODEL.BACKBONE.NORM
    num_splits    = cfg.MODEL.BACKBONE.NORM_SPLIT
    with_ibn      = cfg.MODEL.BACKBONE.WITH_IBN
    with_se       = cfg.MODEL.BACKBONE.WITH_SE  # read for config parity; unused by ResNeSt
    with_nl       = cfg.MODEL.BACKBONE.WITH_NL
    depth         = cfg.MODEL.BACKBONE.DEPTH
    # fmt: on

    num_blocks_per_stage = {50: [3, 4, 6, 3], 101: [3, 4, 23, 3],
                            200: [3, 24, 36, 3], 269: [3, 30, 48, 8]}[depth]
    nl_layers_per_stage = {50: [0, 2, 3, 0], 101: [0, 2, 3, 0],
                           200: [0, 2, 3, 0], 269: [0, 2, 3, 0]}[depth]
    stem_width = {50: 32, 101: 64, 200: 64, 269: 64}[depth]
    model = ResNest(last_stride, bn_norm, num_splits, with_ibn, with_nl, Bottleneck,
                    num_blocks_per_stage, nl_layers_per_stage, radix=2, groups=1,
                    bottleneck_width=64, deep_stem=True, stem_width=stem_width,
                    avg_down=True, avd=True, avd_first=False)
    if pretrain:
        # if not with_ibn:
        # original resnet
        state_dict = torch.hub.load_state_dict_from_url(
            model_urls['resnest' + str(depth)], progress=True, check_hash=True)
        # else:
        #     raise KeyError('IBN is not implemented in resnest')
        #     # ibn resnet
        #     state_dict = torch.load(pretrain_path)['state_dict']
        #     # remove the module prefix in parameter names
        #     new_state_dict = {}
        #     for k in state_dict:
        #         new_k = '.'.join(k.split('.')[1:])
        #         if new_k in model.state_dict() and (model.state_dict()[new_k].shape == state_dict[k].shape):
        #             new_state_dict[new_k] = state_dict[k]
        #     state_dict = new_state_dict
        incompatible = model.load_state_dict(state_dict, strict=False)
        logger = logging.getLogger(__name__)
        if incompatible.missing_keys:
            logger.info(
                get_missing_parameters_message(incompatible.missing_keys)
            )
        if incompatible.unexpected_keys:
            logger.info(
                get_unexpected_parameters_message(incompatible.unexpected_keys)
            )

    return model
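

# A minimal usage sketch (illustrative, not part of the upstream file): building
# the backbone directly with the resnest50 settings used above, bypassing the
# config system. The helper name `_build_resnest50_directly` is hypothetical.
def _build_resnest50_directly():
    model = ResNest(1, "BN", 1, False, False, Bottleneck,
                    [3, 4, 6, 3], [0, 2, 3, 0],
                    radix=2, groups=1, bottleneck_width=64,
                    deep_stem=True, stem_width=32, avg_down=True,
                    avd=True, avd_first=False)
    model.eval()
    with torch.no_grad():
        feat = model(torch.randn(2, 3, 256, 128))  # typical re-id input size
    # 2048 channels out of layer4; last_stride=1 leaves a 16x8 feature map
    assert feat.shape == (2, 2048, 16, 8)
    return model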