mmsegmentation/mmseg/models/utils/ckpt_convert.py

from collections import OrderedDict


def swin_convert(ckpt):
    """Convert an official Swin Transformer checkpoint to MMSegmentation
    key names."""
    new_ckpt = OrderedDict()

    def correct_unfold_reduction_order(x):
        # Reorder the patch-merging reduction weight from the official
        # unfold channel order to the (0, 2, 1, 3) order used here.
        out_channel, in_channel = x.shape
        x = x.reshape(out_channel, 4, in_channel // 4)
        x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(
            out_channel, in_channel)
        return x

    def correct_unfold_norm_order(x):
        # Apply the same channel reordering to the patch-merging norm weight.
        in_channel = x.shape[0]
        x = x.reshape(4, in_channel // 4)
        x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
        return x

    for k, v in ckpt.items():
        if k.startswith('head'):
            # Drop the classification head; it is not used for segmentation.
            continue
        elif k.startswith('layers'):
            new_v = v
            if 'attn.' in k:
                new_k = k.replace('attn.', 'attn.w_msa.')
            elif 'mlp.' in k:
                if 'mlp.fc1.' in k:
                    new_k = k.replace('mlp.fc1.', 'ffn.layers.0.0.')
                elif 'mlp.fc2.' in k:
                    new_k = k.replace('mlp.fc2.', 'ffn.layers.1.')
                else:
                    new_k = k.replace('mlp.', 'ffn.')
            elif 'downsample' in k:
                new_k = k
                if 'reduction.' in k:
                    new_v = correct_unfold_reduction_order(v)
                elif 'norm.' in k:
                    new_v = correct_unfold_norm_order(v)
            else:
                new_k = k
            new_k = new_k.replace('layers', 'stages', 1)
        elif k.startswith('patch_embed'):
            new_v = v
            if 'proj' in k:
                new_k = k.replace('proj', 'projection')
            else:
                new_k = k
        else:
            new_v = v
            new_k = k

        new_ckpt[new_k] = new_v

    return new_ckpt
def vit_convert(ckpt):
    """Convert an official ViT checkpoint to MMSegmentation key names."""
    new_ckpt = OrderedDict()

    for k, v in ckpt.items():
        if k.startswith('head'):
            # Drop the classification head; it is not used for segmentation.
            continue
        if k.startswith('norm'):
            new_k = k.replace('norm.', 'ln1.')
        elif k.startswith('patch_embed'):
            if 'proj' in k:
                new_k = k.replace('proj', 'projection')
            else:
                new_k = k
        elif k.startswith('blocks'):
            if 'norm' in k:
                new_k = k.replace('norm', 'ln')
            elif 'mlp.fc1' in k:
                new_k = k.replace('mlp.fc1', 'ffn.layers.0.0')
            elif 'mlp.fc2' in k:
                new_k = k.replace('mlp.fc2', 'ffn.layers.1')
            elif 'attn.qkv' in k:
                new_k = k.replace('attn.qkv.', 'attn.attn.in_proj_')
            elif 'attn.proj' in k:
                new_k = k.replace('attn.proj', 'attn.attn.out_proj')
            else:
                new_k = k
            new_k = new_k.replace('blocks.', 'layers.')
        else:
            new_k = k
        new_ckpt[new_k] = v

    return new_ckpt
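

# A minimal usage sketch, not part of the upstream file: it assumes PyTorch is
# available and that a local path 'swin_tiny_patch4_window7_224.pth'
# (hypothetical) points to an official Swin checkpoint. Official checkpoints
# commonly nest the weights under a 'model' or 'state_dict' key, but the exact
# layout may differ per release.
if __name__ == '__main__':
    import torch

    checkpoint = torch.load(
        'swin_tiny_patch4_window7_224.pth', map_location='cpu')
    if 'state_dict' in checkpoint:
        state_dict = checkpoint['state_dict']
    elif 'model' in checkpoint:
        state_dict = checkpoint['model']
    else:
        state_dict = checkpoint

    # Remap the official keys to the MMSegmentation naming scheme and save
    # the result so it can be passed as `pretrained` in a config.
    converted = swin_convert(state_dict)
    torch.save(converted, 'swin_tiny_patch4_window7_224_mmseg.pth')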