yolov7/tools/reparameterization.ipynb

{
"cells": [
{
"cell_type": "markdown",
"id": "d7cbe5ee",
"metadata": {},
"source": [
"# Reparameterization"
]
},
{
"cell_type": "markdown",
"id": "9725e211",
"metadata": {},
"source": [
"\n",
"### What is Reparameterization ?\n",
"Reparameterization is used to reduce trainable BoF modules into deploy model for fast inference. For example merge BN to conv, merge YOLOR to conv, ..etc\n",
"However, before reparameterization, the model has more parameters and computation cost.reparameterized model (cfg/deploy) used for deployment purpose\n",
"\n",
"\n",
"\n",
"### Steps required for model conversion.\n",
"1.train custom model & you will get your own weight i.e custom_weight.pt / use (pretrained weight which is available i.e yolov7_traing.pt)\n",
"\n",
"2.Converting this weight using Reparameterization method.\n",
"\n",
"3.Trained model (cfg/training) and reparameterized model (cfg/deploy) will get same prediction results.\n",
"However, before reparameterization, the model has more parameters and computation cost.\n",
"\n",
"4.Convert reparameterized weight into onnx & tensorrt\n",
"For faster inference & deployment purpose."
]
},
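{
"cell_type": "markdown",
"id": "bnfusemd1",
"metadata": {},
"source": [
"The 'merge BN to conv' idea mentioned above can be shown in isolation. The next cell is a minimal, self-contained sketch (illustration only, not part of the YOLOv7 conversion code) that folds a `BatchNorm2d` into the preceding `Conv2d` and checks that the fused convolution reproduces the original Conv+BN output."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bnfusecd1",
"metadata": {},
"outputs": [],
"source": [
"# Standalone sketch: fold BatchNorm2d into Conv2d (illustration only, not YOLOv7 code)\n",
"import torch\n",
"import torch.nn as nn\n",
"\n",
"def fuse_conv_bn(conv, bn):\n",
"    # w_fused = w * gamma / sqrt(var + eps);  b_fused = (b - mean) * gamma / sqrt(var + eps) + beta\n",
"    fused = nn.Conv2d(conv.in_channels, conv.out_channels, conv.kernel_size,\n",
"                      conv.stride, conv.padding, bias=True)\n",
"    scale = bn.weight.data / torch.sqrt(bn.running_var + bn.eps)\n",
"    fused.weight.data = conv.weight.data * scale.reshape(-1, 1, 1, 1)\n",
"    conv_bias = conv.bias.data if conv.bias is not None else torch.zeros(conv.out_channels)\n",
"    fused.bias.data = (conv_bias - bn.running_mean) * scale + bn.bias.data\n",
"    return fused\n",
"\n",
"conv = nn.Conv2d(3, 8, 3, 1, 1, bias=False).eval()\n",
"bn = nn.BatchNorm2d(8).eval()\n",
"# give BN non-trivial statistics so the check is meaningful\n",
"bn.running_mean.uniform_(-0.5, 0.5); bn.running_var.uniform_(0.5, 1.5)\n",
"bn.weight.data.uniform_(0.5, 1.5); bn.bias.data.uniform_(-0.5, 0.5)\n",
"\n",
"x = torch.randn(1, 3, 32, 32)\n",
"with torch.no_grad():\n",
"    print(torch.allclose(bn(conv(x)), fuse_conv_bn(conv, bn)(x), atol=1e-5))  # expect True\n"
]
},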
{
"cell_type": "markdown",
"id": "13393b70",
"metadata": {},
"source": [
"## YOLOv7 reparameterization"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bf53becf",
"metadata": {},
"outputs": [],
"source": [
"# import\n",
"from copy import deepcopy\n",
"from models.yolo import Model\n",
"import torch\n",
"from utils.torch_utils import select_device, is_parallel\n",
"import yaml\n",
"\n",
"device = select_device('0', batch_size=1)\n",
"# model trained by cfg/training/*.yaml\n",
"ckpt = torch.load('cfg/training/yolov7_training.pt', map_location=device)\n",
"# reparameterized model in cfg/deploy/*.yaml\n",
"model = Model('cfg/deploy/yolov7.yaml', ch=3, nc=80).to(device)\n",
"\n",
"with open('cfg/deploy/yolov7.yaml') as f:\n",
" yml = yaml.load(f, Loader=yaml.SafeLoader)\n",
"anchors = len(yml['anchors'][0]) // 2\n",
"\n",
"# copy intersect weights\n",
"state_dict = ckpt['model'].float().state_dict()\n",
"exclude = []\n",
"intersect_state_dict = {k: v for k, v in state_dict.items() if k in model.state_dict() and not any(x in k for x in exclude) and v.shape == model.state_dict()[k].shape}\n",
"model.load_state_dict(intersect_state_dict, strict=False)\n",
"model.names = ckpt['model'].names\n",
"model.nc = ckpt['model'].nc\n",
"\n",
"# reparametrized YOLOR\n",
"for i in range((model.nc+5)*anchors):\n",
" model.state_dict()['model.105.m.0.weight'].data[i, :, :, :] *= state_dict['model.105.im.0.implicit'].data[:, i, : :].squeeze()\n",
" model.state_dict()['model.105.m.1.weight'].data[i, :, :, :] *= state_dict['model.105.im.1.implicit'].data[:, i, : :].squeeze()\n",
" model.state_dict()['model.105.m.2.weight'].data[i, :, :, :] *= state_dict['model.105.im.2.implicit'].data[:, i, : :].squeeze()\n",
"model.state_dict()['model.105.m.0.bias'].data += state_dict['model.105.m.0.weight'].mul(state_dict['model.105.ia.0.implicit']).sum(1).squeeze()\n",
"model.state_dict()['model.105.m.1.bias'].data += state_dict['model.105.m.1.weight'].mul(state_dict['model.105.ia.1.implicit']).sum(1).squeeze()\n",
"model.state_dict()['model.105.m.2.bias'].data += state_dict['model.105.m.2.weight'].mul(state_dict['model.105.ia.2.implicit']).sum(1).squeeze()\n",
"model.state_dict()['model.105.m.0.bias'].data *= state_dict['model.105.im.0.implicit'].data.squeeze()\n",
"model.state_dict()['model.105.m.1.bias'].data *= state_dict['model.105.im.1.implicit'].data.squeeze()\n",
"model.state_dict()['model.105.m.2.bias'].data *= state_dict['model.105.im.2.implicit'].data.squeeze()\n",
"\n",
"# model to be saved\n",
"ckpt = {'model': deepcopy(model.module if is_parallel(model) else model).half(),\n",
" 'optimizer': None,\n",
" 'training_results': None,\n",
" 'epoch': -1}\n",
"\n",
"# save reparameterized model\n",
"torch.save(ckpt, 'cfg/deploy/yolov7.pt')\n"
]
},
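{
"cell_type": "markdown",
"id": "chkmd01",
"metadata": {},
"source": [
"As a quick sanity check for step 3, the following cell is a minimal sketch (assuming both the training checkpoint used above and the freshly saved `cfg/deploy/yolov7.pt` are available) that feeds the same random input to the trained and the reparameterized model and reports the largest difference between their raw detections. It should be small; since the deploy checkpoint is stored in FP16, expect differences around half-precision level rather than exact zeros."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "chkcd01",
"metadata": {},
"outputs": [],
"source": [
"# Sanity-check sketch: compare trained vs. reparameterized predictions on the same input\n",
"import torch\n",
"from utils.torch_utils import select_device\n",
"\n",
"device = select_device('0', batch_size=1)\n",
"trained = torch.load('cfg/training/yolov7_training.pt', map_location=device)['model'].float().eval()\n",
"deployed = torch.load('cfg/deploy/yolov7.pt', map_location=device)['model'].float().eval()\n",
"\n",
"x = torch.rand(1, 3, 640, 640, device=device)\n",
"with torch.no_grad():\n",
"    y_trained = trained(x)[0]   # [0] is the inference output of the detection head\n",
"    y_deployed = deployed(x)[0]\n",
"print('max abs difference:', (y_trained - y_deployed).abs().max().item())\n"
]
},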
{
"cell_type": "markdown",
"id": "5b396a53",
"metadata": {},
"source": [
"## YOLOv7x reparameterization"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9d54d17f",
"metadata": {},
"outputs": [],
"source": [
"# import\n",
"from copy import deepcopy\n",
"from models.yolo import Model\n",
"import torch\n",
"from utils.torch_utils import select_device, is_parallel\n",
"import yaml\n",
"\n",
"device = select_device('0', batch_size=1)\n",
"# model trained by cfg/training/*.yaml\n",
"ckpt = torch.load('cfg/training/yolov7x_trainig.pt', map_location=device)\n",
"# reparameterized model in cfg/deploy/*.yaml\n",
"model = Model('cfg/deploy/yolov7x.yaml', ch=3, nc=80).to(device)\n",
"\n",
"with open('cfg/deploy/yolov7x.yaml') as f:\n",
" yml = yaml.load(f, Loader=yaml.SafeLoader)\n",
"anchors = len(yml['anchors'][0]) // 2\n",
"\n",
"# copy intersect weights\n",
"state_dict = ckpt['model'].float().state_dict()\n",
"exclude = []\n",
"intersect_state_dict = {k: v for k, v in state_dict.items() if k in model.state_dict() and not any(x in k for x in exclude) and v.shape == model.state_dict()[k].shape}\n",
"model.load_state_dict(intersect_state_dict, strict=False)\n",
"model.names = ckpt['model'].names\n",
"model.nc = ckpt['model'].nc\n",
"\n",
"# reparametrized YOLOR\n",
"for i in range((model.nc+5)*anchors):\n",
" model.state_dict()['model.121.m.0.weight'].data[i, :, :, :] *= state_dict['model.121.im.0.implicit'].data[:, i, : :].squeeze()\n",
" model.state_dict()['model.121.m.1.weight'].data[i, :, :, :] *= state_dict['model.121.im.1.implicit'].data[:, i, : :].squeeze()\n",
" model.state_dict()['model.121.m.2.weight'].data[i, :, :, :] *= state_dict['model.121.im.2.implicit'].data[:, i, : :].squeeze()\n",
"model.state_dict()['model.121.m.0.bias'].data += state_dict['model.121.m.0.weight'].mul(state_dict['model.121.ia.0.implicit']).sum(1).squeeze()\n",
"model.state_dict()['model.121.m.1.bias'].data += state_dict['model.121.m.1.weight'].mul(state_dict['model.121.ia.1.implicit']).sum(1).squeeze()\n",
"model.state_dict()['model.121.m.2.bias'].data += state_dict['model.121.m.2.weight'].mul(state_dict['model.121.ia.2.implicit']).sum(1).squeeze()\n",
"model.state_dict()['model.121.m.0.bias'].data *= state_dict['model.121.im.0.implicit'].data.squeeze()\n",
"model.state_dict()['model.121.m.1.bias'].data *= state_dict['model.121.im.1.implicit'].data.squeeze()\n",
"model.state_dict()['model.121.m.2.bias'].data *= state_dict['model.121.im.2.implicit'].data.squeeze()\n",
"\n",
"# model to be saved\n",
"ckpt = {'model': deepcopy(model.module if is_parallel(model) else model).half(),\n",
" 'optimizer': None,\n",
" 'training_results': None,\n",
" 'epoch': -1}\n",
"\n",
"# save reparameterized model\n",
"torch.save(ckpt, 'cfg/deploy/yolov7x.pt')\n"
]
},
{
"cell_type": "markdown",
"id": "11a9108e",
"metadata": {},
"source": [
"## YOLOv7-W6 reparameterization"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d032c629",
"metadata": {},
"outputs": [],
"source": [
"# import\n",
"from copy import deepcopy\n",
"from models.yolo import Model\n",
"import torch\n",
"from utils.torch_utils import select_device, is_parallel\n",
"import yaml\n",
"\n",
"device = select_device('0', batch_size=1)\n",
"# model trained by cfg/training/*.yaml\n",
"ckpt = torch.load('cfg/training/yolov7-w6_trainig.pt', map_location=device)\n",
"# reparameterized model in cfg/deploy/*.yaml\n",
"model = Model('cfg/deploy/yolov7-w6.yaml', ch=3, nc=80).to(device)\n",
"\n",
"with open('cfg/deploy/yolov7-w6.yaml') as f:\n",
" yml = yaml.load(f, Loader=yaml.SafeLoader)\n",
"anchors = len(yml['anchors'][0]) // 2\n",
"\n",
"# copy intersect weights\n",
"state_dict = ckpt['model'].float().state_dict()\n",
"exclude = []\n",
"intersect_state_dict = {k: v for k, v in state_dict.items() if k in model.state_dict() and not any(x in k for x in exclude) and v.shape == model.state_dict()[k].shape}\n",
"model.load_state_dict(intersect_state_dict, strict=False)\n",
"model.names = ckpt['model'].names\n",
"model.nc = ckpt['model'].nc\n",
"\n",
"idx = 118\n",
"idx2 = 122\n",
"\n",
"# copy weights of lead head\n",
"model.state_dict()['model.{}.m.0.weight'.format(idx)].data -= model.state_dict()['model.{}.m.0.weight'.format(idx)].data\n",
"model.state_dict()['model.{}.m.1.weight'.format(idx)].data -= model.state_dict()['model.{}.m.1.weight'.format(idx)].data\n",
"model.state_dict()['model.{}.m.2.weight'.format(idx)].data -= model.state_dict()['model.{}.m.2.weight'.format(idx)].data\n",
"model.state_dict()['model.{}.m.3.weight'.format(idx)].data -= model.state_dict()['model.{}.m.3.weight'.format(idx)].data\n",
"model.state_dict()['model.{}.m.0.weight'.format(idx)].data += state_dict['model.{}.m.0.weight'.format(idx2)].data\n",
"model.state_dict()['model.{}.m.1.weight'.format(idx)].data += state_dict['model.{}.m.1.weight'.format(idx2)].data\n",
"model.state_dict()['model.{}.m.2.weight'.format(idx)].data += state_dict['model.{}.m.2.weight'.format(idx2)].data\n",
"model.state_dict()['model.{}.m.3.weight'.format(idx)].data += state_dict['model.{}.m.3.weight'.format(idx2)].data\n",
"model.state_dict()['model.{}.m.0.bias'.format(idx)].data -= model.state_dict()['model.{}.m.0.bias'.format(idx)].data\n",
"model.state_dict()['model.{}.m.1.bias'.format(idx)].data -= model.state_dict()['model.{}.m.1.bias'.format(idx)].data\n",
"model.state_dict()['model.{}.m.2.bias'.format(idx)].data -= model.state_dict()['model.{}.m.2.bias'.format(idx)].data\n",
"model.state_dict()['model.{}.m.3.bias'.format(idx)].data -= model.state_dict()['model.{}.m.3.bias'.format(idx)].data\n",
"model.state_dict()['model.{}.m.0.bias'.format(idx)].data += state_dict['model.{}.m.0.bias'.format(idx2)].data\n",
"model.state_dict()['model.{}.m.1.bias'.format(idx)].data += state_dict['model.{}.m.1.bias'.format(idx2)].data\n",
"model.state_dict()['model.{}.m.2.bias'.format(idx)].data += state_dict['model.{}.m.2.bias'.format(idx2)].data\n",
"model.state_dict()['model.{}.m.3.bias'.format(idx)].data += state_dict['model.{}.m.3.bias'.format(idx2)].data\n",
"\n",
"# reparametrized YOLOR\n",
"for i in range((model.nc+5)*anchors):\n",
" model.state_dict()['model.{}.m.0.weight'.format(idx)].data[i, :, :, :] *= state_dict['model.{}.im.0.implicit'.format(idx2)].data[:, i, : :].squeeze()\n",
" model.state_dict()['model.{}.m.1.weight'.format(idx)].data[i, :, :, :] *= state_dict['model.{}.im.1.implicit'.format(idx2)].data[:, i, : :].squeeze()\n",
" model.state_dict()['model.{}.m.2.weight'.format(idx)].data[i, :, :, :] *= state_dict['model.{}.im.2.implicit'.format(idx2)].data[:, i, : :].squeeze()\n",
" model.state_dict()['model.{}.m.3.weight'.format(idx)].data[i, :, :, :] *= state_dict['model.{}.im.3.implicit'.format(idx2)].data[:, i, : :].squeeze()\n",
"model.state_dict()['model.{}.m.0.bias'.format(idx)].data += state_dict['model.{}.m.0.weight'.format(idx2)].mul(state_dict['model.{}.ia.0.implicit'.format(idx2)]).sum(1).squeeze()\n",
"model.state_dict()['model.{}.m.1.bias'.format(idx)].data += state_dict['model.{}.m.1.weight'.format(idx2)].mul(state_dict['model.{}.ia.1.implicit'.format(idx2)]).sum(1).squeeze()\n",
"model.state_dict()['model.{}.m.2.bias'.format(idx)].data += state_dict['model.{}.m.2.weight'.format(idx2)].mul(state_dict['model.{}.ia.2.implicit'.format(idx2)]).sum(1).squeeze()\n",
"model.state_dict()['model.{}.m.3.bias'.format(idx)].data += state_dict['model.{}.m.3.weight'.format(idx2)].mul(state_dict['model.{}.ia.3.implicit'.format(idx2)]).sum(1).squeeze()\n",
"model.state_dict()['model.{}.m.0.bias'.format(idx)].data *= state_dict['model.{}.im.0.implicit'.format(idx2)].data.squeeze()\n",
"model.state_dict()['model.{}.m.1.bias'.format(idx)].data *= state_dict['model.{}.im.1.implicit'.format(idx2)].data.squeeze()\n",
"model.state_dict()['model.{}.m.2.bias'.format(idx)].data *= state_dict['model.{}.im.2.implicit'.format(idx2)].data.squeeze()\n",
"model.state_dict()['model.{}.m.3.bias'.format(idx)].data *= state_dict['model.{}.im.3.implicit'.format(idx2)].data.squeeze()\n",
"\n",
"# model to be saved\n",
"ckpt = {'model': deepcopy(model.module if is_parallel(model) else model).half(),\n",
" 'optimizer': None,\n",
" 'training_results': None,\n",
" 'epoch': -1}\n",
"\n",
"# save reparameterized model\n",
"torch.save(ckpt, 'cfg/deploy/yolov7-w6.pt')\n"
]
},
{
"cell_type": "markdown",
"id": "5f093d43",
"metadata": {},
"source": [
"## YOLOv7-E6 reparameterization"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "aa2b2142",
"metadata": {},
"outputs": [],
"source": [
"# import\n",
"from copy import deepcopy\n",
"from models.yolo import Model\n",
"import torch\n",
"from utils.torch_utils import select_device, is_parallel\n",
"import yaml\n",
"\n",
"device = select_device('0', batch_size=1)\n",
"# model trained by cfg/training/*.yaml\n",
"ckpt = torch.load('cfg/training/yolov7-e6.pt', map_location=device)\n",
"# reparameterized model in cfg/deploy/*.yaml\n",
"model = Model('cfg/deploy/yolov7-e6.yaml', ch=3, nc=80).to(device)\n",
"\n",
"with open('cfg/deploy/yolov7-e6.yaml') as f:\n",
" yml = yaml.load(f, Loader=yaml.SafeLoader)\n",
"anchors = len(yml['anchors'][0]) // 2\n",
"\n",
"# copy intersect weights\n",
"state_dict = ckpt['model'].float().state_dict()\n",
"exclude = []\n",
"intersect_state_dict = {k: v for k, v in state_dict.items() if k in model.state_dict() and not any(x in k for x in exclude) and v.shape == model.state_dict()[k].shape}\n",
"model.load_state_dict(intersect_state_dict, strict=False)\n",
"model.names = ckpt['model'].names\n",
"model.nc = ckpt['model'].nc\n",
"\n",
"idx = 140\n",
"idx2 = 144\n",
"\n",
"# copy weights of lead head\n",
"model.state_dict()['model.{}.m.0.weight'.format(idx)].data -= model.state_dict()['model.{}.m.0.weight'.format(idx)].data\n",
"model.state_dict()['model.{}.m.1.weight'.format(idx)].data -= model.state_dict()['model.{}.m.1.weight'.format(idx)].data\n",
"model.state_dict()['model.{}.m.2.weight'.format(idx)].data -= model.state_dict()['model.{}.m.2.weight'.format(idx)].data\n",
"model.state_dict()['model.{}.m.3.weight'.format(idx)].data -= model.state_dict()['model.{}.m.3.weight'.format(idx)].data\n",
"model.state_dict()['model.{}.m.0.weight'.format(idx)].data += state_dict['model.{}.m.0.weight'.format(idx2)].data\n",
"model.state_dict()['model.{}.m.1.weight'.format(idx)].data += state_dict['model.{}.m.1.weight'.format(idx2)].data\n",
"model.state_dict()['model.{}.m.2.weight'.format(idx)].data += state_dict['model.{}.m.2.weight'.format(idx2)].data\n",
"model.state_dict()['model.{}.m.3.weight'.format(idx)].data += state_dict['model.{}.m.3.weight'.format(idx2)].data\n",
"model.state_dict()['model.{}.m.0.bias'.format(idx)].data -= model.state_dict()['model.{}.m.0.bias'.format(idx)].data\n",
"model.state_dict()['model.{}.m.1.bias'.format(idx)].data -= model.state_dict()['model.{}.m.1.bias'.format(idx)].data\n",
"model.state_dict()['model.{}.m.2.bias'.format(idx)].data -= model.state_dict()['model.{}.m.2.bias'.format(idx)].data\n",
"model.state_dict()['model.{}.m.3.bias'.format(idx)].data -= model.state_dict()['model.{}.m.3.bias'.format(idx)].data\n",
"model.state_dict()['model.{}.m.0.bias'.format(idx)].data += state_dict['model.{}.m.0.bias'.format(idx2)].data\n",
"model.state_dict()['model.{}.m.1.bias'.format(idx)].data += state_dict['model.{}.m.1.bias'.format(idx2)].data\n",
"model.state_dict()['model.{}.m.2.bias'.format(idx)].data += state_dict['model.{}.m.2.bias'.format(idx2)].data\n",
"model.state_dict()['model.{}.m.3.bias'.format(idx)].data += state_dict['model.{}.m.3.bias'.format(idx2)].data\n",
"\n",
"# reparametrized YOLOR\n",
"for i in range((model.nc+5)*anchors):\n",
" model.state_dict()['model.{}.m.0.weight'.format(idx)].data[i, :, :, :] *= state_dict['model.{}.im.0.implicit'.format(idx2)].data[:, i, : :].squeeze()\n",
" model.state_dict()['model.{}.m.1.weight'.format(idx)].data[i, :, :, :] *= state_dict['model.{}.im.1.implicit'.format(idx2)].data[:, i, : :].squeeze()\n",
" model.state_dict()['model.{}.m.2.weight'.format(idx)].data[i, :, :, :] *= state_dict['model.{}.im.2.implicit'.format(idx2)].data[:, i, : :].squeeze()\n",
" model.state_dict()['model.{}.m.3.weight'.format(idx)].data[i, :, :, :] *= state_dict['model.{}.im.3.implicit'.format(idx2)].data[:, i, : :].squeeze()\n",
"model.state_dict()['model.{}.m.0.bias'.format(idx)].data += state_dict['model.{}.m.0.weight'.format(idx2)].mul(state_dict['model.{}.ia.0.implicit'.format(idx2)]).sum(1).squeeze()\n",
"model.state_dict()['model.{}.m.1.bias'.format(idx)].data += state_dict['model.{}.m.1.weight'.format(idx2)].mul(state_dict['model.{}.ia.1.implicit'.format(idx2)]).sum(1).squeeze()\n",
"model.state_dict()['model.{}.m.2.bias'.format(idx)].data += state_dict['model.{}.m.2.weight'.format(idx2)].mul(state_dict['model.{}.ia.2.implicit'.format(idx2)]).sum(1).squeeze()\n",
"model.state_dict()['model.{}.m.3.bias'.format(idx)].data += state_dict['model.{}.m.3.weight'.format(idx2)].mul(state_dict['model.{}.ia.3.implicit'.format(idx2)]).sum(1).squeeze()\n",
"model.state_dict()['model.{}.m.0.bias'.format(idx)].data *= state_dict['model.{}.im.0.implicit'.format(idx2)].data.squeeze()\n",
"model.state_dict()['model.{}.m.1.bias'.format(idx)].data *= state_dict['model.{}.im.1.implicit'.format(idx2)].data.squeeze()\n",
"model.state_dict()['model.{}.m.2.bias'.format(idx)].data *= state_dict['model.{}.im.2.implicit'.format(idx2)].data.squeeze()\n",
"model.state_dict()['model.{}.m.3.bias'.format(idx)].data *= state_dict['model.{}.im.3.implicit'.format(idx2)].data.squeeze()\n",
"\n",
"# model to be saved\n",
"ckpt = {'model': deepcopy(model.module if is_parallel(model) else model).half(),\n",
" 'optimizer': None,\n",
" 'training_results': None,\n",
" 'epoch': -1}\n",
"\n",
"# save reparameterized model\n",
"torch.save(ckpt, 'cfg/deploy/yolov7-e6.pt')\n"
]
},
{
"cell_type": "markdown",
"id": "a3bccf89",
"metadata": {},
"source": [
"## YOLOv7-D6 reparameterization"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e5216b70",
"metadata": {},
"outputs": [],
"source": [
"# import\n",
"from copy import deepcopy\n",
"from models.yolo import Model\n",
"import torch\n",
"from utils.torch_utils import select_device, is_parallel\n",
"import yaml\n",
"\n",
"device = select_device('0', batch_size=1)\n",
"# model trained by cfg/training/*.yaml\n",
"ckpt = torch.load('cfg/training/yolov7-d6_trainig.pt', map_location=device)\n",
"# reparameterized model in cfg/deploy/*.yaml\n",
"model = Model('cfg/deploy/yolov7-d6.yaml', ch=3, nc=80).to(device)\n",
"\n",
"with open('cfg/deploy/yolov7-d6.yaml') as f:\n",
" yml = yaml.load(f, Loader=yaml.SafeLoader)\n",
"anchors = len(yml['anchors'][0]) // 2\n",
"\n",
"# copy intersect weights\n",
"state_dict = ckpt['model'].float().state_dict()\n",
"exclude = []\n",
"intersect_state_dict = {k: v for k, v in state_dict.items() if k in model.state_dict() and not any(x in k for x in exclude) and v.shape == model.state_dict()[k].shape}\n",
"model.load_state_dict(intersect_state_dict, strict=False)\n",
"model.names = ckpt['model'].names\n",
"model.nc = ckpt['model'].nc\n",
"\n",
"idx = 162\n",
"idx2 = 166\n",
"\n",
"# copy weights of lead head\n",
"model.state_dict()['model.{}.m.0.weight'.format(idx)].data -= model.state_dict()['model.{}.m.0.weight'.format(idx)].data\n",
"model.state_dict()['model.{}.m.1.weight'.format(idx)].data -= model.state_dict()['model.{}.m.1.weight'.format(idx)].data\n",
"model.state_dict()['model.{}.m.2.weight'.format(idx)].data -= model.state_dict()['model.{}.m.2.weight'.format(idx)].data\n",
"model.state_dict()['model.{}.m.3.weight'.format(idx)].data -= model.state_dict()['model.{}.m.3.weight'.format(idx)].data\n",
"model.state_dict()['model.{}.m.0.weight'.format(idx)].data += state_dict['model.{}.m.0.weight'.format(idx2)].data\n",
"model.state_dict()['model.{}.m.1.weight'.format(idx)].data += state_dict['model.{}.m.1.weight'.format(idx2)].data\n",
"model.state_dict()['model.{}.m.2.weight'.format(idx)].data += state_dict['model.{}.m.2.weight'.format(idx2)].data\n",
"model.state_dict()['model.{}.m.3.weight'.format(idx)].data += state_dict['model.{}.m.3.weight'.format(idx2)].data\n",
"model.state_dict()['model.{}.m.0.bias'.format(idx)].data -= model.state_dict()['model.{}.m.0.bias'.format(idx)].data\n",
"model.state_dict()['model.{}.m.1.bias'.format(idx)].data -= model.state_dict()['model.{}.m.1.bias'.format(idx)].data\n",
"model.state_dict()['model.{}.m.2.bias'.format(idx)].data -= model.state_dict()['model.{}.m.2.bias'.format(idx)].data\n",
"model.state_dict()['model.{}.m.3.bias'.format(idx)].data -= model.state_dict()['model.{}.m.3.bias'.format(idx)].data\n",
"model.state_dict()['model.{}.m.0.bias'.format(idx)].data += state_dict['model.{}.m.0.bias'.format(idx2)].data\n",
"model.state_dict()['model.{}.m.1.bias'.format(idx)].data += state_dict['model.{}.m.1.bias'.format(idx2)].data\n",
"model.state_dict()['model.{}.m.2.bias'.format(idx)].data += state_dict['model.{}.m.2.bias'.format(idx2)].data\n",
"model.state_dict()['model.{}.m.3.bias'.format(idx)].data += state_dict['model.{}.m.3.bias'.format(idx2)].data\n",
"\n",
"# reparametrized YOLOR\n",
"for i in range((model.nc+5)*anchors):\n",
" model.state_dict()['model.{}.m.0.weight'.format(idx)].data[i, :, :, :] *= state_dict['model.{}.im.0.implicit'.format(idx2)].data[:, i, : :].squeeze()\n",
" model.state_dict()['model.{}.m.1.weight'.format(idx)].data[i, :, :, :] *= state_dict['model.{}.im.1.implicit'.format(idx2)].data[:, i, : :].squeeze()\n",
" model.state_dict()['model.{}.m.2.weight'.format(idx)].data[i, :, :, :] *= state_dict['model.{}.im.2.implicit'.format(idx2)].data[:, i, : :].squeeze()\n",
" model.state_dict()['model.{}.m.3.weight'.format(idx)].data[i, :, :, :] *= state_dict['model.{}.im.3.implicit'.format(idx2)].data[:, i, : :].squeeze()\n",
"model.state_dict()['model.{}.m.0.bias'.format(idx)].data += state_dict['model.{}.m.0.weight'.format(idx2)].mul(state_dict['model.{}.ia.0.implicit'.format(idx2)]).sum(1).squeeze()\n",
"model.state_dict()['model.{}.m.1.bias'.format(idx)].data += state_dict['model.{}.m.1.weight'.format(idx2)].mul(state_dict['model.{}.ia.1.implicit'.format(idx2)]).sum(1).squeeze()\n",
"model.state_dict()['model.{}.m.2.bias'.format(idx)].data += state_dict['model.{}.m.2.weight'.format(idx2)].mul(state_dict['model.{}.ia.2.implicit'.format(idx2)]).sum(1).squeeze()\n",
"model.state_dict()['model.{}.m.3.bias'.format(idx)].data += state_dict['model.{}.m.3.weight'.format(idx2)].mul(state_dict['model.{}.ia.3.implicit'.format(idx2)]).sum(1).squeeze()\n",
"model.state_dict()['model.{}.m.0.bias'.format(idx)].data *= state_dict['model.{}.im.0.implicit'.format(idx2)].data.squeeze()\n",
"model.state_dict()['model.{}.m.1.bias'.format(idx)].data *= state_dict['model.{}.im.1.implicit'.format(idx2)].data.squeeze()\n",
"model.state_dict()['model.{}.m.2.bias'.format(idx)].data *= state_dict['model.{}.im.2.implicit'.format(idx2)].data.squeeze()\n",
"model.state_dict()['model.{}.m.3.bias'.format(idx)].data *= state_dict['model.{}.im.3.implicit'.format(idx2)].data.squeeze()\n",
"\n",
"# model to be saved\n",
"ckpt = {'model': deepcopy(model.module if is_parallel(model) else model).half(),\n",
" 'optimizer': None,\n",
" 'training_results': None,\n",
" 'epoch': -1}\n",
"\n",
"# save reparameterized model\n",
"torch.save(ckpt, 'cfg/deploy/yolov7-d6.pt')\n"
]
},
{
"cell_type": "markdown",
"id": "334c273b",
"metadata": {},
"source": [
"## YOLOv7-E6E reparameterization"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "635fd8d2",
"metadata": {},
"outputs": [],
"source": [
"# import\n",
"from copy import deepcopy\n",
"from models.yolo import Model\n",
"import torch\n",
"from utils.torch_utils import select_device, is_parallel\n",
"import yaml\n",
"\n",
"device = select_device('0', batch_size=1)\n",
"# model trained by cfg/training/*.yaml\n",
"ckpt = torch.load('cfg/training/yolov7-e6e_trainig.pt', map_location=device)\n",
"# reparameterized model in cfg/deploy/*.yaml\n",
"model = Model('cfg/deploy/yolov7-e6e.yaml', ch=3, nc=80).to(device)\n",
"\n",
"with open('cfg/deploy/yolov7-e6e.yaml') as f:\n",
" yml = yaml.load(f, Loader=yaml.SafeLoader)\n",
"anchors = len(yml['anchors'][0]) // 2)\n",
"\n",
"# copy intersect weights\n",
"state_dict = ckpt['model'].float().state_dict()\n",
"exclude = []\n",
"intersect_state_dict = {k: v for k, v in state_dict.items() if k in model.state_dict() and not any(x in k for x in exclude) and v.shape == model.state_dict()[k].shape}\n",
"model.load_state_dict(intersect_state_dict, strict=False)\n",
"model.names = ckpt['model'].names\n",
"model.nc = ckpt['model'].nc\n",
"\n",
"idx = 261\n",
"idx2 = 265\n",
"\n",
"# copy weights of lead head\n",
"model.state_dict()['model.{}.m.0.weight'.format(idx)].data -= model.state_dict()['model.{}.m.0.weight'.format(idx)].data\n",
"model.state_dict()['model.{}.m.1.weight'.format(idx)].data -= model.state_dict()['model.{}.m.1.weight'.format(idx)].data\n",
"model.state_dict()['model.{}.m.2.weight'.format(idx)].data -= model.state_dict()['model.{}.m.2.weight'.format(idx)].data\n",
"model.state_dict()['model.{}.m.3.weight'.format(idx)].data -= model.state_dict()['model.{}.m.3.weight'.format(idx)].data\n",
"model.state_dict()['model.{}.m.0.weight'.format(idx)].data += state_dict['model.{}.m.0.weight'.format(idx2)].data\n",
"model.state_dict()['model.{}.m.1.weight'.format(idx)].data += state_dict['model.{}.m.1.weight'.format(idx2)].data\n",
"model.state_dict()['model.{}.m.2.weight'.format(idx)].data += state_dict['model.{}.m.2.weight'.format(idx2)].data\n",
"model.state_dict()['model.{}.m.3.weight'.format(idx)].data += state_dict['model.{}.m.3.weight'.format(idx2)].data\n",
"model.state_dict()['model.{}.m.0.bias'.format(idx)].data -= model.state_dict()['model.{}.m.0.bias'.format(idx)].data\n",
"model.state_dict()['model.{}.m.1.bias'.format(idx)].data -= model.state_dict()['model.{}.m.1.bias'.format(idx)].data\n",
"model.state_dict()['model.{}.m.2.bias'.format(idx)].data -= model.state_dict()['model.{}.m.2.bias'.format(idx)].data\n",
"model.state_dict()['model.{}.m.3.bias'.format(idx)].data -= model.state_dict()['model.{}.m.3.bias'.format(idx)].data\n",
"model.state_dict()['model.{}.m.0.bias'.format(idx)].data += state_dict['model.{}.m.0.bias'.format(idx2)].data\n",
"model.state_dict()['model.{}.m.1.bias'.format(idx)].data += state_dict['model.{}.m.1.bias'.format(idx2)].data\n",
"model.state_dict()['model.{}.m.2.bias'.format(idx)].data += state_dict['model.{}.m.2.bias'.format(idx2)].data\n",
"model.state_dict()['model.{}.m.3.bias'.format(idx)].data += state_dict['model.{}.m.3.bias'.format(idx2)].data\n",
"\n",
"# reparametrized YOLOR\n",
"for i in range((model.nc+5)*anchors):\n",
" model.state_dict()['model.{}.m.0.weight'.format(idx)].data[i, :, :, :] *= state_dict['model.{}.im.0.implicit'.format(idx2)].data[:, i, : :].squeeze()\n",
" model.state_dict()['model.{}.m.1.weight'.format(idx)].data[i, :, :, :] *= state_dict['model.{}.im.1.implicit'.format(idx2)].data[:, i, : :].squeeze()\n",
" model.state_dict()['model.{}.m.2.weight'.format(idx)].data[i, :, :, :] *= state_dict['model.{}.im.2.implicit'.format(idx2)].data[:, i, : :].squeeze()\n",
" model.state_dict()['model.{}.m.3.weight'.format(idx)].data[i, :, :, :] *= state_dict['model.{}.im.3.implicit'.format(idx2)].data[:, i, : :].squeeze()\n",
"model.state_dict()['model.{}.m.0.bias'.format(idx)].data += state_dict['model.{}.m.0.weight'.format(idx2)].mul(state_dict['model.{}.ia.0.implicit'.format(idx2)]).sum(1).squeeze()\n",
"model.state_dict()['model.{}.m.1.bias'.format(idx)].data += state_dict['model.{}.m.1.weight'.format(idx2)].mul(state_dict['model.{}.ia.1.implicit'.format(idx2)]).sum(1).squeeze()\n",
"model.state_dict()['model.{}.m.2.bias'.format(idx)].data += state_dict['model.{}.m.2.weight'.format(idx2)].mul(state_dict['model.{}.ia.2.implicit'.format(idx2)]).sum(1).squeeze()\n",
"model.state_dict()['model.{}.m.3.bias'.format(idx)].data += state_dict['model.{}.m.3.weight'.format(idx2)].mul(state_dict['model.{}.ia.3.implicit'.format(idx2)]).sum(1).squeeze()\n",
"model.state_dict()['model.{}.m.0.bias'.format(idx)].data *= state_dict['model.{}.im.0.implicit'.format(idx2)].data.squeeze()\n",
"model.state_dict()['model.{}.m.1.bias'.format(idx)].data *= state_dict['model.{}.im.1.implicit'.format(idx2)].data.squeeze()\n",
"model.state_dict()['model.{}.m.2.bias'.format(idx)].data *= state_dict['model.{}.im.2.implicit'.format(idx2)].data.squeeze()\n",
"model.state_dict()['model.{}.m.3.bias'.format(idx)].data *= state_dict['model.{}.im.3.implicit'.format(idx2)].data.squeeze()\n",
"\n",
"# model to be saved\n",
"ckpt = {'model': deepcopy(model.module if is_parallel(model) else model).half(),\n",
" 'optimizer': None,\n",
" 'training_results': None,\n",
" 'epoch': -1}\n",
"\n",
"# save reparameterized model\n",
"torch.save(ckpt, 'cfg/deploy/yolov7-e6e.pt')\n"
]
},
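{
"cell_type": "markdown",
"id": "onnxmd01",
"metadata": {},
"source": [
"Step 4 of the workflow converts the reparameterized weights to ONNX/TensorRT. The cell below is a minimal sketch using plain `torch.onnx.export` on one of the deploy checkpoints saved above (the repository also provides an export script for this purpose); the input size, opset, and output file name are assumptions. The resulting ONNX file can then be turned into a TensorRT engine, e.g. with `trtexec`."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "onnxcd01",
"metadata": {},
"outputs": [],
"source": [
"# Sketch of step 4: ONNX export of a reparameterized checkpoint (paths and opset are assumptions)\n",
"import torch\n",
"\n",
"ckpt = torch.load('cfg/deploy/yolov7.pt', map_location='cpu')\n",
"model = ckpt['model'].float().eval()\n",
"\n",
"dummy = torch.zeros(1, 3, 640, 640)\n",
"torch.onnx.export(model, dummy, 'yolov7.onnx', opset_version=12,\n",
"                  input_names=['images'], output_names=['output'])\n",
"\n",
"# The ONNX file can then be converted to a TensorRT engine, e.g.:\n",
"#   trtexec --onnx=yolov7.onnx --saveEngine=yolov7.engine --fp16\n"
]
},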
{
"cell_type": "code",
"execution_count": null,
"id": "63a62625",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.7"
},
"vscode": {
"interpreter": {
"hash": "73080970ff6fd25f9fcdf9c6f9e85b950a97864bb936ee53fb633f473cbfae4b"
}
}
},
"nbformat": 4,
"nbformat_minor": 5
}