mmdeploy/tests/test_codebase/test_mmdet/test_mmdet_models.py

# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os
import random
from typing import Dict, List

import mmcv
import numpy as np
import pytest
import torch

from mmdeploy.codebase import import_codebase
from mmdeploy.utils import Backend, Codebase
from mmdeploy.utils.config_utils import get_ir_config
from mmdeploy.utils.test import (WrapModel, check_backend, get_model_outputs,
                                 get_rewrite_outputs)

import_codebase(Codebase.MMDET)


def seed_everything(seed=1029):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.
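    # Make cuDNN deterministic (and finally disable it) so repeated runs with
    # the same seed produce identical outputs.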
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.enabled = False


def convert_to_list(rewrite_output: Dict, output_names: List[str]) -> List:
"""Converts output from a dictionary to a list.
The new list will contain only those output values, whose names are in list
'output_names'.
"""
outputs = [
value for name, value in rewrite_output.items() if name in output_names
]
    return outputs


def get_anchor_head_model():
"""AnchorHead Config."""
test_cfg = mmcv.Config(
dict(
deploy_nms_pre=0,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
from mmdet.models.dense_heads import AnchorHead
model = AnchorHead(num_classes=4, in_channels=1, test_cfg=test_cfg)
model.requires_grad_(False)
    return model


def get_ssd_head_model():
"""SSDHead Config."""
test_cfg = mmcv.Config(
dict(
nms_pre=1000,
nms=dict(type='nms', iou_threshold=0.45),
min_bbox_size=0,
score_thr=0.02,
max_per_img=200))
from mmdet.models import SSDHead
model = SSDHead(
in_channels=(96, 1280, 512, 256, 256, 128),
num_classes=4,
use_depthwise=True,
norm_cfg=dict(type='BN', eps=0.001, momentum=0.03),
act_cfg=dict(type='ReLU6'),
init_cfg=dict(type='Normal', layer='Conv2d', std=0.001),
anchor_generator=dict(
type='SSDAnchorGenerator',
scale_major=False,
strides=[16, 32, 64, 107, 160, 320],
ratios=[[2, 3], [2, 3], [2, 3], [2, 3], [2, 3], [2, 3]],
min_sizes=[48, 100, 150, 202, 253, 304],
max_sizes=[100, 150, 202, 253, 304, 320]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
test_cfg=test_cfg)
model.requires_grad_(False)
    return model


def get_fcos_head_model():
"""FCOS Head Config."""
test_cfg = mmcv.Config(
dict(
deploy_nms_pre=0,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
from mmdet.models.dense_heads import FCOSHead
model = FCOSHead(num_classes=4, in_channels=1, test_cfg=test_cfg)
model.requires_grad_(False)
    return model


def get_focus_backbone_model():
"""Backbone Focus Config."""
from mmdet.models.backbones.csp_darknet import Focus
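    # Focus slices the input into four interleaved patches and stacks them
    # along the channel dimension before a conv (YOLOX/YOLOv5-style stem).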
model = Focus(3, 32)
model.requires_grad_(False)
    return model


def get_l2norm_forward_model():
"""L2Norm Neck Config."""
from mmdet.models.necks.ssd_neck import L2Norm
model = L2Norm(16)
torch.nn.init.uniform_(model.weight)
model.requires_grad_(False)
    return model


def get_rpn_head_model():
"""RPN Head Config."""
test_cfg = mmcv.Config(
dict(
deploy_nms_pre=0,
nms_pre=0,
max_per_img=100,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0))
from mmdet.models.dense_heads import RPNHead
model = RPNHead(in_channels=1, test_cfg=test_cfg)
model.requires_grad_(False)
    return model


def get_reppoints_head_model():
"""Reppoints Head Config."""
test_cfg = mmcv.Config(
dict(
deploy_nms_pre=0,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
from mmdet.models.dense_heads import RepPointsHead
model = RepPointsHead(num_classes=4, in_channels=1, test_cfg=test_cfg)
model.requires_grad_(False)
    return model


def get_single_roi_extractor():
"""SingleRoIExtractor Config."""
from mmdet.models.roi_heads import SingleRoIExtractor
roi_layer = dict(type='RoIAlign', output_size=7, sampling_ratio=2)
out_channels = 1
featmap_strides = [4, 8, 16, 32]
model = SingleRoIExtractor(roi_layer, out_channels, featmap_strides).eval()
    return model


def get_gfl_head_model():
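    """GFL Head Config."""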
test_cfg = mmcv.Config(
dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
anchor_generator = dict(
type='AnchorGenerator',
scales_per_octave=1,
octave_base_scale=8,
ratios=[1.0],
strides=[8, 16, 32, 64, 128])
from mmdet.models.dense_heads import GFLHead
model = GFLHead(
num_classes=3,
in_channels=256,
reg_max=3,
test_cfg=test_cfg,
anchor_generator=anchor_generator)
model.requires_grad_(False)
    return model


@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME, Backend.NCNN])
def test_focus_forward(backend_type):
check_backend(backend_type)
focus_model = get_focus_backbone_model()
focus_model.cpu().eval()
s = 128
seed_everything(1234)
x = torch.rand(1, 3, s, s)
model_outputs = [focus_model.forward(x)]
wrapped_model = WrapModel(focus_model, 'forward')
rewrite_inputs = {
'x': x,
}
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),
onnx_config=dict(input_shape=None)))
rewrite_outputs, is_backend_output = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg)
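    # The rewritten Focus forward should match the original PyTorch output
    # within a small numerical tolerance.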
for model_output, rewrite_output in zip(model_outputs[0], rewrite_outputs):
model_output = model_output.squeeze()
rewrite_output = rewrite_output.squeeze()
torch.testing.assert_allclose(
            model_output, rewrite_output, rtol=1e-03, atol=1e-05)


@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
def test_l2norm_forward(backend_type):
check_backend(backend_type)
l2norm_neck = get_l2norm_forward_model()
l2norm_neck.cpu().eval()
s = 128
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),
onnx_config=dict(input_shape=None)))
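    # Sketch of the remaining checks, mirroring test_focus_forward above; the
    # 1x16x128x128 feature shape and the tolerances are assumptions rather
    # than code taken from the original file.
    feat = torch.rand(1, 16, s, s)
    model_outputs = [l2norm_neck.forward(feat)]
    wrapped_model = WrapModel(l2norm_neck, 'forward')
    rewrite_outputs, _ = get_rewrite_outputs(
        wrapped_model=wrapped_model,
        model_inputs={'x': feat},
        deploy_cfg=deploy_cfg)
    for model_output, rewrite_output in zip(model_outputs, rewrite_outputs):
        torch.testing.assert_allclose(
            model_output.squeeze(),
            rewrite_output.squeeze(),
            rtol=1e-03,
            atol=1e-05)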
2022-04-01 18:14:23 +08:00
seed_everything(1234)
feat = torch.rand(1, 16, s, s)
model_outputs = [l2norm_neck.forward(feat)]
wrapped_model = WrapModel(l2norm_neck, 'forward')
rewrite_inputs = {
'x': feat,
}
rewrite_outputs, is_backend_output = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg)
if is_backend_output:
for model_output, rewrite_output in zip(model_outputs[0],
rewrite_outputs[0]):
model_output = model_output.squeeze().cpu().numpy()
rewrite_output = rewrite_output.squeeze()
assert np.allclose(
model_output, rewrite_output, rtol=1e-03, atol=1e-05)
else:
for model_output, rewrite_output in zip(model_outputs[0],
rewrite_outputs[0]):
model_output = model_output.squeeze().cpu().numpy()
rewrite_output = rewrite_output.squeeze()
assert np.allclose(
model_output[0], rewrite_output, rtol=1e-03, atol=1e-05)
def test_get_bboxes_of_fcos_head_ncnn():
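    """Test get_bboxes rewrite of FCOS head for the ncnn end2end model."""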
backend_type = Backend.NCNN
check_backend(backend_type)
fcos_head = get_fcos_head_model()
fcos_head.cpu().eval()
s = 128
img_metas = [{
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3),
'img_shape': (s, s, 3)
}]
output_names = ['detection_output']
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),
onnx_config=dict(output_names=output_names, input_shape=None),
codebase_config=dict(
type='mmdet',
task='ObjectDetection',
model_type='ncnn_end2end',
post_processing=dict(
score_threshold=0.05,
iou_threshold=0.5,
max_output_boxes_per_class=200,
pre_top_k=5000,
keep_top_k=100,
background_label_id=-1,
))))
    # cls_scores: five levels of shape (1, num_classes, 2**i, 2**i), i=5..1.
    # bbox_preds: five levels of shape (1, 4, 2**i, 2**i).
    # centernesses: five levels of shape (1, 1, 2**i, 2**i).
seed_everything(1234)
cls_score = [
torch.rand(1, fcos_head.num_classes, pow(2, i), pow(2, i))
for i in range(5, 0, -1)
]
seed_everything(5678)
bboxes = [torch.rand(1, 4, pow(2, i), pow(2, i)) for i in range(5, 0, -1)]
seed_everything(9101)
centernesses = [
torch.rand(1, 1, pow(2, i), pow(2, i)) for i in range(5, 0, -1)
]
# to get outputs of onnx model after rewrite
img_metas[0]['img_shape'] = torch.Tensor([s, s])
wrapped_model = WrapModel(
fcos_head, 'get_bboxes', img_metas=img_metas, with_nms=True)
rewrite_inputs = {
'cls_scores': cls_score,
'bbox_preds': bboxes,
'centernesses': centernesses
}
rewrite_outputs, is_backend_output = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg)
# output should be of shape [1, N, 6]
if is_backend_output:
assert rewrite_outputs[0].shape[-1] == 6
else:
assert rewrite_outputs.shape[-1] == 6
@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME, Backend.NCNN])
def test_get_bboxes_of_rpn_head(backend_type: Backend):
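    """Test get_bboxes rewrite of RPN head."""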
check_backend(backend_type)
head = get_rpn_head_model()
head.cpu().eval()
s = 4
img_metas = [{
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3),
'img_shape': (s, s, 3)
}]
output_names = ['dets']
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),
onnx_config=dict(output_names=output_names, input_shape=None),
codebase_config=dict(
type='mmdet',
task='ObjectDetection',
post_processing=dict(
score_threshold=0.05,
iou_threshold=0.5,
max_output_boxes_per_class=200,
pre_top_k=5000,
keep_top_k=100,
background_label_id=-1,
))))
    # cls_scores: five levels of shape (1, 9, 2**i, 2**i), i=5..1.
    # bbox_preds: five levels of shape (1, 36, 2**i, 2**i).
seed_everything(1234)
cls_score = [
torch.rand(1, 9, pow(2, i), pow(2, i)) for i in range(5, 0, -1)
]
seed_everything(5678)
bboxes = [torch.rand(1, 36, pow(2, i), pow(2, i)) for i in range(5, 0, -1)]
# to get outputs of onnx model after rewrite
img_metas[0]['img_shape'] = torch.Tensor([s, s])
wrapped_model = WrapModel(
head, 'get_bboxes', img_metas=img_metas, with_nms=True)
rewrite_inputs = {
'cls_scores': cls_score,
'bbox_preds': bboxes,
}
    # do not run with ncnn backend
    run_with_backend = backend_type not in [Backend.NCNN]
rewrite_outputs, is_backend_output = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg,
run_with_backend=run_with_backend)
assert rewrite_outputs is not None
@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
def test_get_bboxes_of_gfl_head(backend_type):
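    """Test get_bboxes rewrite of GFL head."""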
check_backend(backend_type)
head = get_gfl_head_model()
head.cpu().eval()
s = 4
img_metas = [{
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3),
'img_shape': (s, s, 3)
}]
output_names = ['dets']
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),
onnx_config=dict(output_names=output_names, input_shape=None),
codebase_config=dict(
type='mmdet',
task='ObjectDetection',
model_type='ncnn_end2end',
post_processing=dict(
score_threshold=0.05,
iou_threshold=0.5,
max_output_boxes_per_class=200,
pre_top_k=5000,
keep_top_k=100,
background_label_id=-1,
))))
seed_everything(1234)
cls_score = [
torch.rand(1, 3, pow(2, i), pow(2, i)) for i in range(5, 0, -1)
]
seed_everything(5678)
bboxes = [torch.rand(1, 16, pow(2, i), pow(2, i)) for i in range(5, 0, -1)]
# to get outputs of onnx model after rewrite
img_metas[0]['img_shape'] = torch.Tensor([s, s])
wrapped_model = WrapModel(
head, 'get_bboxes', img_metas=img_metas, with_nms=True)
rewrite_inputs = {
'cls_scores': cls_score,
'bbox_preds': bboxes,
}
    # do not run with ncnn backend
    run_with_backend = backend_type not in [Backend.NCNN]
rewrite_outputs, is_backend_output = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg,
run_with_backend=run_with_backend)
assert rewrite_outputs is not None
@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
def test_forward_of_gfl_head(backend_type):
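    """Test forward rewrite of GFL head."""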
check_backend(backend_type)
head = get_gfl_head_model()
head.cpu().eval()
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),
onnx_config=dict(input_shape=None)))
feats = [torch.rand(1, 256, pow(2, i), pow(2, i)) for i in range(5, 0, -1)]
model_outputs = [head.forward(feats)]
wrapped_model = WrapModel(head, 'forward')
rewrite_inputs = {
'feats': feats,
}
rewrite_outputs, is_backend_output = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg)
model_outputs[0] = [*model_outputs[0][0], *model_outputs[0][1]]
for model_output, rewrite_output in zip(model_outputs[0],
rewrite_outputs[0]):
model_output = model_output.squeeze().cpu().numpy()
rewrite_output = rewrite_output.squeeze()
assert np.allclose(
model_output, rewrite_output, rtol=1e-03, atol=1e-05)
def _replace_r50_with_r18(model):
"""Replace ResNet50 with ResNet18 in config."""
model = copy.deepcopy(model)
if model.backbone.type == 'ResNet':
model.backbone.depth = 18
model.backbone.base_channels = 2
model.neck.in_channels = [2, 4, 8, 16]
return model
@pytest.mark.parametrize('backend', [Backend.ONNXRUNTIME])
@pytest.mark.parametrize('model_cfg_path', [
'tests/test_codebase/test_mmdet/data/single_stage_model.json',
'tests/test_codebase/test_mmdet/data/mask_model.json'
])
def test_forward_of_base_detector(model_cfg_path, backend):
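    """Test forward rewrite of base detector."""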
check_backend(backend)
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend.value),
onnx_config=dict(
output_names=['dets', 'labels'], input_shape=None),
codebase_config=dict(
type='mmdet',
task='ObjectDetection',
post_processing=dict(
score_threshold=0.05,
iou_threshold=0.5,
max_output_boxes_per_class=200,
pre_top_k=-1,
keep_top_k=100,
background_label_id=-1,
))))
model_cfg = mmcv.Config(dict(model=mmcv.load(model_cfg_path)))
model_cfg.model = _replace_r50_with_r18(model_cfg.model)
from mmdet.apis import init_detector
model = init_detector(model_cfg, None, 'cpu')
img = torch.randn(1, 3, 64, 64)
rewrite_inputs = {'img': img}
rewrite_outputs, _ = get_rewrite_outputs(
wrapped_model=model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg)
assert rewrite_outputs is not None
@pytest.mark.parametrize('backend_type',
[Backend.ONNXRUNTIME, Backend.OPENVINO])
def test_single_roi_extractor(backend_type: Backend):
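    """Test forward rewrite of SingleRoIExtractor."""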
check_backend(backend_type)
single_roi_extractor = get_single_roi_extractor()
output_names = ['roi_feat']
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),
onnx_config=dict(output_names=output_names, input_shape=None),
codebase_config=dict(
type='mmdet',
task='ObjectDetection',
)))
seed_everything(1234)
out_channels = single_roi_extractor.out_channels
feats = [
torch.rand((1, out_channels, 200, 336)),
torch.rand((1, out_channels, 100, 168)),
torch.rand((1, out_channels, 50, 84)),
torch.rand((1, out_channels, 25, 42)),
]
seed_everything(5678)
rois = torch.tensor([[0.0000, 587.8285, 52.1405, 886.2484, 341.5644]])
model_inputs = {
'feats': feats,
'rois': rois,
}
model_outputs = get_model_outputs(single_roi_extractor, 'forward',
model_inputs)
backend_outputs, _ = get_rewrite_outputs(
wrapped_model=single_roi_extractor,
model_inputs=model_inputs,
deploy_cfg=deploy_cfg)
if isinstance(backend_outputs, dict):
backend_outputs = backend_outputs.values()
for model_output, backend_output in zip(model_outputs[0], backend_outputs):
model_output = model_output.squeeze().cpu().numpy()
backend_output = backend_output.squeeze()
assert np.allclose(
model_output, backend_output, rtol=1e-03, atol=1e-05)
def get_cascade_roi_head(is_instance_seg=False):
"""CascadeRoIHead Config."""
num_stages = 3
stage_loss_weights = [1, 0.5, 0.25]
bbox_roi_extractor = {
'type': 'SingleRoIExtractor',
'roi_layer': {
'type': 'RoIAlign',
'output_size': 7,
'sampling_ratio': 0
},
'out_channels': 64,
'featmap_strides': [4, 8, 16, 32]
}
all_target_stds = [[0.1, 0.1, 0.2, 0.2], [0.05, 0.05, 0.1, 0.1],
[0.033, 0.033, 0.067, 0.067]]
bbox_head = [{
'type': 'Shared2FCBBoxHead',
'in_channels': 64,
'fc_out_channels': 1024,
'roi_feat_size': 7,
'num_classes': 80,
'bbox_coder': {
'type': 'DeltaXYWHBBoxCoder',
'target_means': [0.0, 0.0, 0.0, 0.0],
'target_stds': target_stds
},
'reg_class_agnostic': True,
'loss_cls': {
'type': 'CrossEntropyLoss',
'use_sigmoid': False,
'loss_weight': 1.0
},
'loss_bbox': {
'type': 'SmoothL1Loss',
'beta': 1.0,
'loss_weight': 1.0
}
} for target_stds in all_target_stds]
mask_roi_extractor = {
'type': 'SingleRoIExtractor',
'roi_layer': {
'type': 'RoIAlign',
'output_size': 14,
'sampling_ratio': 0
},
'out_channels': 64,
'featmap_strides': [4, 8, 16, 32]
}
mask_head = {
'type': 'FCNMaskHead',
'num_convs': 4,
'in_channels': 64,
'conv_out_channels': 64,
'num_classes': 80,
'loss_mask': {
'type': 'CrossEntropyLoss',
'use_mask': True,
'loss_weight': 1.0
}
}
test_cfg = mmcv.Config(
dict(
score_thr=0.05,
nms=mmcv.Config(dict(type='nms', iou_threshold=0.5)),
max_per_img=100,
mask_thr_binary=0.5))
args = [num_stages, stage_loss_weights, bbox_roi_extractor, bbox_head]
kwargs = {'test_cfg': test_cfg}
if is_instance_seg:
args += [mask_roi_extractor, mask_head]
from mmdet.models.roi_heads import CascadeRoIHead
model = CascadeRoIHead(*args, **kwargs).eval()
return model
@pytest.mark.parametrize('backend_type',
[Backend.ONNXRUNTIME, Backend.OPENVINO])
def test_cascade_roi_head(backend_type: Backend):
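    """Test simple_test rewrite of CascadeRoIHead."""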
check_backend(backend_type)
cascade_roi_head = get_cascade_roi_head()
seed_everything(1234)
x = [
torch.rand((1, 64, 200, 304)),
torch.rand((1, 64, 100, 152)),
torch.rand((1, 64, 50, 76)),
torch.rand((1, 64, 25, 38)),
]
proposals = torch.tensor([[587.8285, 52.1405, 886.2484, 341.5644, 0.5]])
img_metas = {
'img_shape': torch.tensor([800, 1216]),
'ori_shape': torch.tensor([800, 1216]),
'scale_factor': torch.tensor([1, 1, 1, 1])
}
model_inputs = {
'x': x,
'proposal_list': [proposals],
'img_metas': [img_metas]
}
output_names = ['results']
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),
onnx_config=dict(output_names=output_names, input_shape=None),
codebase_config=dict(
type='mmdet',
task='ObjectDetection',
post_processing=dict(
score_threshold=0.05,
iou_threshold=0.5,
max_output_boxes_per_class=200,
pre_top_k=-1,
keep_top_k=100,
background_label_id=-1))))
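    # the rewritten simple_test takes a batched 'proposals' tensor
    # instead of the 'proposal_list' used above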
model_inputs = {'x': x, 'proposals': proposals.unsqueeze(0)}
wrapped_model = WrapModel(
cascade_roi_head, 'simple_test', img_metas=[img_metas])
backend_outputs, _ = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=model_inputs,
deploy_cfg=deploy_cfg)
assert backend_outputs is not None
def get_fovea_head_model():
"""FoveaHead Config."""
test_cfg = mmcv.Config(
dict(
deploy_nms_pre=0,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
from mmdet.models.dense_heads import FoveaHead
model = FoveaHead(num_classes=4, in_channels=1, test_cfg=test_cfg)
model.requires_grad_(False)
return model
@pytest.mark.parametrize('backend_type',
[Backend.ONNXRUNTIME, Backend.OPENVINO])
def test_get_bboxes_of_fovea_head(backend_type: Backend):
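    """Test get_bboxes rewrite of FoveaHead."""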
check_backend(backend_type)
fovea_head = get_fovea_head_model()
fovea_head.cpu().eval()
s = 128
img_metas = [{
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3),
'img_shape': (s, s, 3)
}]
output_names = ['dets', 'labels']
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),
onnx_config=dict(output_names=output_names, input_shape=None),
codebase_config=dict(
type='mmdet',
task='ObjectDetection',
post_processing=dict(
score_threshold=0.05,
iou_threshold=0.5,
max_output_boxes_per_class=200,
pre_top_k=-1,
keep_top_k=100,
background_label_id=-1,
))))
    # cls_scores: five levels of shape (1, num_classes, 2**i, 2**i), i=5..1.
    # bbox_preds: five levels of shape (1, 4, 2**i, 2**i).
seed_everything(1234)
cls_score = [
torch.rand(1, fovea_head.num_classes, pow(2, i), pow(2, i))
for i in range(5, 0, -1)
]
seed_everything(5678)
bboxes = [torch.rand(1, 4, pow(2, i), pow(2, i)) for i in range(5, 0, -1)]
model_inputs = {
'cls_scores': cls_score,
'bbox_preds': bboxes,
'img_metas': img_metas
}
model_outputs = get_model_outputs(fovea_head, 'get_bboxes', model_inputs)
# to get outputs of onnx model after rewrite
img_metas[0]['img_shape'] = torch.Tensor([s, s])
wrapped_model = WrapModel(fovea_head, 'get_bboxes', img_metas=img_metas)
rewrite_inputs = {
'cls_scores': cls_score,
'bbox_preds': bboxes,
}
rewrite_outputs, is_backend_output = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg)
if is_backend_output:
if isinstance(rewrite_outputs, dict):
rewrite_outputs = convert_to_list(rewrite_outputs, output_names)
for model_output, rewrite_output in zip(model_outputs[0],
rewrite_outputs):
model_output = model_output.squeeze().cpu().numpy()
rewrite_output = rewrite_output.squeeze()
            # truncate to a common length before comparing: the rewritten
            # and original code apply different NMS strategies, so they may
            # keep different numbers of boxes
assert np.allclose(
model_output[:rewrite_output.shape[0]],
rewrite_output,
rtol=1e-03,
atol=1e-05)
else:
assert rewrite_outputs is not None
@pytest.mark.parametrize('backend_type', [Backend.OPENVINO])
def test_cascade_roi_head_with_mask(backend_type: Backend):
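    """Test simple_test rewrite of CascadeRoIHead with a mask head."""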
check_backend(backend_type)
cascade_roi_head = get_cascade_roi_head(is_instance_seg=True)
seed_everything(1234)
x = [
torch.rand((1, 64, 200, 304)),
torch.rand((1, 64, 100, 152)),
torch.rand((1, 64, 50, 76)),
torch.rand((1, 64, 25, 38)),
]
proposals = torch.tensor([[587.8285, 52.1405, 886.2484, 341.5644, 0.5]])
img_metas = {
'img_shape': torch.tensor([800, 1216]),
'ori_shape': torch.tensor([800, 1216]),
'scale_factor': torch.tensor([1, 1, 1, 1])
}
output_names = ['bbox_results', 'segm_results']
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),
onnx_config=dict(output_names=output_names, input_shape=None),
codebase_config=dict(
type='mmdet',
task='ObjectDetection',
post_processing=dict(
score_threshold=0.05,
iou_threshold=0.5,
max_output_boxes_per_class=200,
pre_top_k=-1,
keep_top_k=100,
background_label_id=-1))))
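    # the rewritten simple_test takes a batched 'proposals' tensor
    # instead of the per-image proposal list of the original method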
model_inputs = {'x': x, 'proposals': proposals.unsqueeze(0)}
wrapped_model = WrapModel(
cascade_roi_head, 'simple_test', img_metas=[img_metas])
backend_outputs, _ = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=model_inputs,
deploy_cfg=deploy_cfg)
bbox_results = backend_outputs[0]
segm_results = backend_outputs[1]
assert bbox_results is not None
assert segm_results is not None
def get_yolov3_head_model():
"""yolov3 Head Config."""
test_cfg = mmcv.Config(
dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
conf_thr=0.005,
nms=dict(type='nms', iou_threshold=0.45),
max_per_img=100))
from mmdet.models.dense_heads import YOLOV3Head
model = YOLOV3Head(
num_classes=4,
in_channels=[16, 8, 4],
out_channels=[32, 16, 8],
test_cfg=test_cfg)
model.requires_grad_(False)
return model
@pytest.mark.parametrize('backend_type',
[Backend.ONNXRUNTIME, Backend.OPENVINO])
def test_yolov3_head_get_bboxes(backend_type):
"""Test get_bboxes rewrite of yolov3 head."""
check_backend(backend_type)
yolov3_head = get_yolov3_head_model()
yolov3_head.cpu().eval()
s = 128
img_metas = [{
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3),
'img_shape': (s, s, 3)
}]
output_names = ['dets', 'labels']
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),
onnx_config=dict(output_names=output_names, input_shape=None),
codebase_config=dict(
type='mmdet',
task='ObjectDetection',
post_processing=dict(
score_threshold=0.05,
iou_threshold=0.45,
confidence_threshold=0.005,
max_output_boxes_per_class=200,
pre_top_k=-1,
keep_top_k=100,
background_label_id=-1,
))))
seed_everything(1234)
pred_maps = [
torch.rand(1, 27, 5, 5),
torch.rand(1, 27, 10, 10),
torch.rand(1, 27, 20, 20)
]
# to get outputs of pytorch model
model_inputs = {'pred_maps': pred_maps, 'img_metas': img_metas}
model_outputs = get_model_outputs(yolov3_head, 'get_bboxes', model_inputs)
# to get outputs of onnx model after rewrite
wrapped_model = WrapModel(
yolov3_head, 'get_bboxes', img_metas=img_metas, with_nms=True)
rewrite_inputs = {
'pred_maps': pred_maps,
}
rewrite_outputs, is_backend_output = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg)
if is_backend_output:
if isinstance(rewrite_outputs, dict):
rewrite_outputs = convert_to_list(rewrite_outputs, output_names)
for model_output, rewrite_output in zip(model_outputs[0],
rewrite_outputs):
model_output = model_output.squeeze().cpu().numpy()
rewrite_output = rewrite_output.squeeze()
            # truncate to a common length before comparing: the rewritten
            # and original code apply different NMS strategies, so they may
            # keep different numbers of boxes
assert np.allclose(
model_output[:rewrite_output.shape[0]],
rewrite_output,
rtol=1e-03,
atol=1e-05)
else:
assert rewrite_outputs is not None
def test_yolov3_head_get_bboxes_ncnn():
"""Test get_bboxes rewrite of yolov3 head."""
backend_type = Backend.NCNN
check_backend(backend_type)
yolov3_head = get_yolov3_head_model()
yolov3_head.cpu().eval()
s = 128
img_metas = [{
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3),
'img_shape': (s, s, 3)
}]
output_names = ['detection_output']
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),
onnx_config=dict(output_names=output_names, input_shape=None),
codebase_config=dict(
type='mmdet',
model_type='ncnn_end2end',
task='ObjectDetection',
post_processing=dict(
score_threshold=0.05,
iou_threshold=0.45,
confidence_threshold=0.005,
max_output_boxes_per_class=200,
pre_top_k=-1,
keep_top_k=10,
background_label_id=-1,
))))
seed_everything(1234)
pred_maps = [
torch.rand(1, 27, 5, 5),
torch.rand(1, 27, 10, 10),
torch.rand(1, 27, 20, 20)
]
# to get outputs of onnx model after rewrite
wrapped_model = WrapModel(
yolov3_head, 'get_bboxes', img_metas=img_metas[0], with_nms=True)
rewrite_inputs = {
'pred_maps': pred_maps,
}
rewrite_outputs, is_backend_output = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg)
# output should be of shape [1, N, 6]
if is_backend_output:
assert rewrite_outputs[0].shape[-1] == 6
else:
assert rewrite_outputs.shape[-1] == 6
def get_yolox_head_model():
"""YOLOX Head Config."""
test_cfg = mmcv.Config(
dict(
deploy_nms_pre=0,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
from mmdet.models.dense_heads import YOLOXHead
model = YOLOXHead(num_classes=4, in_channels=1, test_cfg=test_cfg)
model.requires_grad_(False)
return model
@pytest.mark.parametrize('backend_type',
[Backend.ONNXRUNTIME, Backend.OPENVINO])
def test_yolox_head_get_bboxes(backend_type: Backend):
"""Test get_bboxes rewrite of YOLOXHead."""
check_backend(backend_type)
yolox_head = get_yolox_head_model()
yolox_head.cpu().eval()
s = 128
img_metas = [{
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3),
'img_shape': (s, s, 3)
}]
output_names = ['dets', 'labels']
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),
onnx_config=dict(output_names=output_names, input_shape=None),
codebase_config=dict(
type='mmdet',
task='ObjectDetection',
post_processing=dict(
score_threshold=0.05,
iou_threshold=0.5,
max_output_boxes_per_class=20,
pre_top_k=-1,
keep_top_k=10,
background_label_id=-1,
))))
seed_everything(1234)
cls_scores = [
torch.rand(1, yolox_head.num_classes, pow(2, i), pow(2, i))
for i in range(3, 0, -1)
]
seed_everything(5678)
bbox_preds = [
torch.rand(1, 4, pow(2, i), pow(2, i)) for i in range(3, 0, -1)
]
seed_everything(9101)
objectnesses = [
torch.rand(1, 1, pow(2, i), pow(2, i)) for i in range(3, 0, -1)
]
# to get outputs of pytorch model
model_inputs = {
'cls_scores': cls_scores,
'bbox_preds': bbox_preds,
'objectnesses': objectnesses,
'img_metas': img_metas
}
model_outputs = get_model_outputs(yolox_head, 'get_bboxes', model_inputs)
# to get outputs of onnx model after rewrite
wrapped_model = WrapModel(
yolox_head, 'get_bboxes', img_metas=img_metas, with_nms=True)
rewrite_inputs = {
'cls_scores': cls_scores,
'bbox_preds': bbox_preds,
'objectnesses': objectnesses,
}
rewrite_outputs, is_backend_output = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg)
if is_backend_output:
if isinstance(rewrite_outputs, dict):
rewrite_outputs = convert_to_list(rewrite_outputs, output_names)
for model_output, rewrite_output in zip(model_outputs[0],
rewrite_outputs):
model_output = model_output.squeeze().cpu().numpy()
rewrite_output = rewrite_output.squeeze().cpu().numpy()
            # truncate to a common length before comparing: the rewritten
            # and original code apply different NMS strategies, so they may
            # keep different numbers of boxes
min_shape = min(model_output.shape[0], rewrite_output.shape[0], 5)
assert np.allclose(
model_output[:min_shape],
rewrite_output[:min_shape],
rtol=1e-03,
atol=1e-05)
else:
assert rewrite_outputs is not None
def test_yolox_head_get_bboxes_ncnn():
"""Test get_bboxes rewrite of yolox head for ncnn."""
backend_type = Backend.NCNN
check_backend(backend_type)
yolox_head = get_yolox_head_model()
yolox_head.cpu().eval()
s = 128
img_metas = [{
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3),
'img_shape': (s, s, 3)
}]
output_names = ['detection_output']
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),
onnx_config=dict(output_names=output_names, input_shape=None),
codebase_config=dict(
type='mmdet',
task='ObjectDetection',
post_processing=dict(
score_threshold=0.05,
iou_threshold=0.5,
max_output_boxes_per_class=20,
pre_top_k=5000,
keep_top_k=10,
background_label_id=0,
))))
seed_everything(1234)
cls_scores = [
torch.rand(1, yolox_head.num_classes, pow(2, i), pow(2, i))
for i in range(3, 0, -1)
]
seed_everything(5678)
bbox_preds = [
torch.rand(1, 4, pow(2, i), pow(2, i)) for i in range(3, 0, -1)
]
seed_everything(9101)
objectnesses = [
torch.rand(1, 1, pow(2, i), pow(2, i)) for i in range(3, 0, -1)
]
# to get outputs of onnx model after rewrite
wrapped_model = WrapModel(yolox_head, 'get_bboxes', img_metas=img_metas)
rewrite_inputs = {
'cls_scores': cls_scores,
'bbox_preds': bbox_preds,
'objectnesses': objectnesses,
}
rewrite_outputs, is_backend_output = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg)
# output should be of shape [1, N, 6]
if is_backend_output:
assert rewrite_outputs[0].shape[-1] == 6
else:
assert rewrite_outputs.shape[-1] == 6
def get_vfnet_head_model():
"""VFNet Head Config."""
test_cfg = mmcv.Config(
dict(
deploy_nms_pre=0,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
from mmdet.models.dense_heads import VFNetHead
model = VFNetHead(num_classes=4, in_channels=1, test_cfg=test_cfg)
model.requires_grad_(False)
model.cpu().eval()
return model
@pytest.mark.parametrize('backend_type',
[Backend.OPENVINO, Backend.ONNXRUNTIME])
def test_get_bboxes_of_vfnet_head(backend_type: Backend):
"""Test get_bboxes rewrite of VFNet head."""
check_backend(backend_type)
vfnet_head = get_vfnet_head_model()
vfnet_head.cpu().eval()
s = 16
img_metas = [{
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3),
'img_shape': (s, s, 3)
}]
output_names = ['dets', 'labels']
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),
onnx_config=dict(output_names=output_names, input_shape=None),
codebase_config=dict(
type='mmdet',
task='ObjectDetection',
post_processing=dict(
score_threshold=0.05,
iou_threshold=0.5,
max_output_boxes_per_class=200,
pre_top_k=-1,
keep_top_k=100,
background_label_id=-1,
))))
seed_everything(1234)
cls_score = [
torch.rand(1, vfnet_head.num_classes, pow(2, i), pow(2, i))
for i in range(5, 0, -1)
]
seed_everything(5678)
bboxes = [torch.rand(1, 4, pow(2, i), pow(2, i)) for i in range(5, 0, -1)]
seed_everything(9101)
model_inputs = {
'cls_scores': cls_score,
'bbox_preds': bboxes,
'img_metas': img_metas
}
model_outputs = get_model_outputs(vfnet_head, 'get_bboxes', model_inputs)
img_metas[0]['img_shape'] = torch.Tensor([s, s])
wrapped_model = WrapModel(
vfnet_head, 'get_bboxes', img_metas=img_metas, with_nms=True)
rewrite_inputs = {'cls_scores': cls_score, 'bbox_preds': bboxes}
rewrite_outputs, is_backend_output = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg)
if is_backend_output:
if isinstance(rewrite_outputs, dict):
rewrite_outputs = convert_to_list(rewrite_outputs, output_names)
for model_output, rewrite_output in zip(model_outputs[0],
rewrite_outputs):
model_output = model_output.squeeze().cpu().numpy()
rewrite_output = rewrite_output.squeeze()
min_shape = min(model_output.shape[0], rewrite_output.shape[0])
assert np.allclose(
model_output[:min_shape],
rewrite_output[:min_shape],
rtol=1e-03,
atol=1e-05)
else:
assert rewrite_outputs is not None
def get_deploy_cfg(backend_type: Backend, ir_type: str):
    """Build a minimal mmdet deployment config for the given backend and IR."""
    return mmcv.Config(
        dict(
            backend_config=dict(type=backend_type.value),
            onnx_config=dict(
                type=ir_type,
                output_names=['dets', 'labels'],
                input_shape=None),
            codebase_config=dict(
                type='mmdet',
                task='ObjectDetection',
                post_processing=dict(
                    score_threshold=0.05,
                    iou_threshold=0.5,
                    max_output_boxes_per_class=200,
                    pre_top_k=5000,
                    keep_top_k=100,
                    background_label_id=-1,
                ))))
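

# Usage sketch (illustrative only, not one of the original tests): build a
# deploy config with the helper above and check a couple of its fields.
# The function name `_example_deploy_cfg_usage` and the choice of
# Backend.ONNXRUNTIME with the 'onnx' IR type are assumptions made for
# demonstration, not values taken from this file.
def _example_deploy_cfg_usage():
    deploy_cfg = get_deploy_cfg(Backend.ONNXRUNTIME, 'onnx')
    # mmcv.Config exposes nested dicts via attribute access.
    assert deploy_cfg.codebase_config.task == 'ObjectDetection'
    assert deploy_cfg.onnx_config.output_names == ['dets', 'labels']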
<83800577+hanrui1sensetime@users.noreply.github.com> Co-authored-by: Johannes L <tehkillerbee@users.noreply.github.com> Co-authored-by: RunningLeon <mnsheng@yeah.net> Co-authored-by: VVsssssk <88368822+VVsssssk@users.noreply.github.com> Co-authored-by: lvhan028 <lvhan_028@163.com> Co-authored-by: AllentDan <41138331+AllentDan@users.noreply.github.com> Co-authored-by: Yifan Zhou <singlezombie@163.com> Co-authored-by: 杨培文 (Yang Peiwen) <915505626@qq.com> Co-authored-by: Semyon Bevzyuk <semen.bevzuk@gmail.com> * configs for all tasks * use torchvision roi align * remote unnecessary code * fix ut * fix ut * export * det dynamic * det dynamic * add ut * fix ut * add ut and docs * fix ut * skip torchscript ut if no ops available * add torchscript option to build.md * update benchmark and resolve comments * resolve conflicts * rename configs * fix mrcnn cuda test * remove useless * add version requirements to docs and comments to codes * enable empty image exporting for torchscript and accelerate ORT inference for MRCNN * rebase * update example for torchscript.md * update FAQs for torchscript.md * resolve comments * only use torchvision roi_align for torchscript * fix ut * use torchvision roi align when pool model is avg * resolve comments Co-authored-by: grimoire <streetyao@live.com> Co-authored-by: grimoire <yaoqian@sensetime.com> Co-authored-by: hanrui1sensetime <83800577+hanrui1sensetime@users.noreply.github.com> Co-authored-by: Johannes L <tehkillerbee@users.noreply.github.com> Co-authored-by: RunningLeon <mnsheng@yeah.net> Co-authored-by: VVsssssk <88368822+VVsssssk@users.noreply.github.com> Co-authored-by: lvhan028 <lvhan_028@163.com> Co-authored-by: Yifan Zhou <singlezombie@163.com> Co-authored-by: 杨培文 (Yang Peiwen) <915505626@qq.com> Co-authored-by: Semyon Bevzyuk <semen.bevzuk@gmail.com> * Update supported mmseg models (#181) * fix ocrnet cascade decoder * update mmseg support models * update mmseg configs * support emanet and icnet * set max K of TopK for tensorrt * update supported models for mmseg in docs * add test for emamodule * add configs and update docs * Update docs * update benchmark * [Features]Support mmdet3d (#103) * add mmdet3d code * add code * update code * [log]This commit finish pointpillar export and evaluate on onnxruntime.The model is sample with nvidia repo model * add tensorrt config * fix config * update * support for tensorrt * add config * fix config` * fix apis about torch2onnx * update * mmdet3d deploy version1.0 * map is ok * fix code * version1.0 * fix code * fix visual * fix bug * tensorrt support success * add docstring * add docs * fix docs * fix comments * fix comment * fix comment * fix openvino wrapper * add unit test * fix device about cpu * fix comment * fix show_result * fix lint * fix requirments * remove ci about det3d * fix ut * add ut data * support for new version pointpillars * fix comment * fix support_list * fix comments * fix config name * [Enhancement] Additional arguments support for OpenVINO Model Optimizer (#178) * Add mo args. * [Docs]: update docs and argument descriptions (#196) * bump version to v0.4.0 * update docs and argument descriptions * revert version change * fix unnecessary change of config for dynamic exportation (#199) * fix mmcls get classes (#215) * fix mmcls get classes * resolve comment * resolve comment * Add ModelOptimizerOptions. * Fix merge bugs. 
* Update mmpose.md (#224) * [Dostring]add example in apis docstring (#214) * add example in apis docstring * add backend example in docstring * rm blank line * Fixed get_mo_options_from_cfg args * fix l2norm test Co-authored-by: RunningLeon <mnsheng@yeah.net> Co-authored-by: Haofan Wang <frankmiracle@outlook.com> Co-authored-by: VVsssssk <88368822+VVsssssk@users.noreply.github.com> Co-authored-by: grimoire <yaoqian@sensetime.com> * add-mmpose-codebase * fix ci * fix img_shape after TopDownAffine * rename TopDown module -> XheadDecode & implement regression decode * align keypoints_from_heatmap * remove hardcode keypoint_head, need refactor, current only support topdown config * add mmpose python api * update mmpose-python code * can't clip fake box * fix rebase error * fix rebase error * link mspn decoder to base decoder * fix ci * compile with gcc7.5 * remove no use code * fix * fix prompt * remove unnecessary cv::parallel_for_ * rewrite TopdownHeatmapMultiStageHead.inference_model * add comment * add more detail docstring why use _cs2xyxy in sdk backend * fix Registry name * remove no use param & add comment of output result Co-authored-by: AllentDan <41138331+AllentDan@users.noreply.github.com> Co-authored-by: grimoire <streetyao@live.com> Co-authored-by: grimoire <yaoqian@sensetime.com> Co-authored-by: hanrui1sensetime <83800577+hanrui1sensetime@users.noreply.github.com> Co-authored-by: Johannes L <tehkillerbee@users.noreply.github.com> Co-authored-by: RunningLeon <mnsheng@yeah.net> Co-authored-by: VVsssssk <88368822+VVsssssk@users.noreply.github.com> Co-authored-by: lvhan028 <lvhan_028@163.com> Co-authored-by: Yifan Zhou <singlezombie@163.com> Co-authored-by: 杨培文 (Yang Peiwen) <915505626@qq.com> Co-authored-by: Semyon Bevzyuk <semen.bevzuk@gmail.com> Co-authored-by: Haofan Wang <frankmiracle@outlook.com> * update faq about WinError 1455 (#297) * update faq about WinError 1455 * Update faq.md * Update faq.md * fix ci Co-authored-by: chenxin2 <chenxin2@sensetime.com> * [Feature]Support centerpoint (#252) * bump version to v0.4.0 * [Enhancement] Make rewriter more powerful (#150) * Finish function tests * lint * resolve comments * Fix tests * docstring & fix * Complement informations * lint * Add example * Fix version * Remove todo Co-authored-by: RunningLeon <mnsheng@yeah.net> * Torchscript support (#159) * support torchscript * add nms * add torchscript configs and update deploy process and dump-info * typescript -> torchscript * add torchscript custom extension support * add ts custom ops again * support mmseg unet * [WIP] add optimizer for torchscript (#119) * add passes * add python api * Torchscript optimizer python api (#121) * add passes * add python api * use python api instead of executable * Merge Master, update optimizer (#151) * [Feature] add yolox ncnn (#29) * add yolox ncnn * add ncnn android performance of yolox * add ut * fix lint * fix None bugs for ncnn * test codecov * test codecov * add device * fix yapf * remove if-else for img shape * use channelshuffle optimize * change benchmark after channelshuffle * fix yapf * fix yapf * fuse continuous reshape * fix static shape deploy * fix code * drop pad * only static shape * fix static * fix docstring * Added mask overlay to output image, changed fprintf info messages to … (#55) * Added mask overlay to output image, changed fprintf info messages to stdout * Improved box filtering (filter area/score), make sure roi coordinates stay within bounds * clang-format * Support UNet in mmseg (#77) * Repeatdataset in train has no 
CLASSES & PALETTE * update result for unet * update docstring for mmdet * remove ppl for unet in docs * fix ort wrap about input type (#81) * Fix memleak (#86) * delete [] * fix build error when enble MMDEPLOY_ACTIVE_LEVEL * fix lint * [Doc] Nano benchmark and tutorial (#71) * add cls benchmark * add nano zh-cn benchmark and en tutorial * add device row * add doc path to index.rst * fix typo * [Fix] fix missing deploy_core (#80) * fix missing deploy_core * mv flag to demo * target link * [Docs] Fix links in Chinese doc (#84) * Fix docs in Chinese link * Fix links * Delete symbolic link and add links to html * delete files * Fix link * [Feature] Add docker files (#67) * add gpu and cpu dockerfile * fix lint * fix cpu docker and remove redundant * use pip instead * add build arg and readme * fix grammar * update readme * add chinese doc for dockerfile and add docker build to build.md * grammar * refine dockerfiles * add FAQs * update Dpplcv_DIR for SDK building * remove mmcls * add sdk demos * fix typo and lint * update FAQs * [Fix]fix check_env (#101) * fix check_env * update * Replace convert_syncbatchnorm in mmseg (#93) * replace convert_syncbatchnorm with revert_sync_batchnorm from mmcv * change logger * [Doc] Update FAQ for TensorRT (#96) * update FAQ * comment * [Docs]: Update doc for openvino installation (#102) * fix docs * fix docs * fix docs * fix mmcv version * fix docs * rm blank line * simplify non batch nms (#99) * [Enhacement] Allow test.py to save evaluation results (#108) * Add log file * Delete debug code * Rename logger * resolve comments * [Enhancement] Support mmocr v0.4+ (#115) * support mmocr v0.4+ * 0.4.0 -> 0.4.1 * fix onnxruntime wrapper for gpu inference (#123) * fix ncnn wrapper for ort-gpu * resolve comment * fix lint * Fix typo (#132) * lock mmcls version (#131) * [Enhancement] upgrade isort in pre-commit config (#141) * [Enhancement] upgrade isort in pre-commit config by refering to mmflow pr #87 * fix lint * remove .isort.cfg and put its known_third_party to setup.cfg * Fix ci for mmocr (#144) * fix mmocr unittests * remove useless * lock mmdet maximum version to 2.20 * pip install -U numpy * Fix capture_output (#125) Co-authored-by: hanrui1sensetime <83800577+hanrui1sensetime@users.noreply.github.com> Co-authored-by: Johannes L <tehkillerbee@users.noreply.github.com> Co-authored-by: RunningLeon <mnsheng@yeah.net> Co-authored-by: VVsssssk <88368822+VVsssssk@users.noreply.github.com> Co-authored-by: lvhan028 <lvhan_028@163.com> Co-authored-by: AllentDan <41138331+AllentDan@users.noreply.github.com> Co-authored-by: Yifan Zhou <singlezombie@163.com> Co-authored-by: 杨培文 (Yang Peiwen) <915505626@qq.com> Co-authored-by: Semyon Bevzyuk <semen.bevzuk@gmail.com> * configs for all tasks * use torchvision roi align * remote unnecessary code * fix ut * fix ut * export * det dynamic * det dynamic * add ut * fix ut * add ut and docs * fix ut * skip torchscript ut if no ops available * add torchscript option to build.md * update benchmark and resolve comments * resolve conflicts * rename configs * fix mrcnn cuda test * remove useless * add version requirements to docs and comments to codes * enable empty image exporting for torchscript and accelerate ORT inference for MRCNN * rebase * update example for torchscript.md * update FAQs for torchscript.md * resolve comments * only use torchvision roi_align for torchscript * fix ut * use torchvision roi align when pool model is avg * resolve comments Co-authored-by: grimoire <streetyao@live.com> Co-authored-by: grimoire 
<yaoqian@sensetime.com> Co-authored-by: hanrui1sensetime <83800577+hanrui1sensetime@users.noreply.github.com> Co-authored-by: Johannes L <tehkillerbee@users.noreply.github.com> Co-authored-by: RunningLeon <mnsheng@yeah.net> Co-authored-by: VVsssssk <88368822+VVsssssk@users.noreply.github.com> Co-authored-by: lvhan028 <lvhan_028@163.com> Co-authored-by: Yifan Zhou <singlezombie@163.com> Co-authored-by: 杨培文 (Yang Peiwen) <915505626@qq.com> Co-authored-by: Semyon Bevzyuk <semen.bevzuk@gmail.com> * Update supported mmseg models (#181) * fix ocrnet cascade decoder * update mmseg support models * update mmseg configs * support emanet and icnet * set max K of TopK for tensorrt * update supported models for mmseg in docs * add test for emamodule * add configs and update docs * Update docs * update benchmark * [Features]Support mmdet3d (#103) * add mmdet3d code * add code * update code * [log]This commit finish pointpillar export and evaluate on onnxruntime.The model is sample with nvidia repo model * add tensorrt config * fix config * update * support for tensorrt * add config * fix config` * fix apis about torch2onnx * update * mmdet3d deploy version1.0 * map is ok * fix code * version1.0 * fix code * fix visual * fix bug * tensorrt support success * add docstring * add docs * fix docs * fix comments * fix comment * fix comment * fix openvino wrapper * add unit test * fix device about cpu * fix comment * fix show_result * fix lint * fix requirments * remove ci about det3d * fix ut * add ut data * support for new version pointpillars * fix comment * fix support_list * fix comments * fix config name * [Enhancement] Update pad logic in detection heads (#168) * pad with register * fix lint Co-authored-by: AllentDan <dongchunyu@sensetime.com> * [Enhancement] Additional arguments support for OpenVINO Model Optimizer (#178) * Add mo args. * [Docs]: update docs and argument descriptions (#196) * bump version to v0.4.0 * update docs and argument descriptions * revert version change * fix unnecessary change of config for dynamic exportation (#199) * fix mmcls get classes (#215) * fix mmcls get classes * resolve comment * resolve comment * Add ModelOptimizerOptions. * Fix merge bugs. 
* Update mmpose.md (#224) * [Dostring]add example in apis docstring (#214) * add example in apis docstring * add backend example in docstring * rm blank line * Fixed get_mo_options_from_cfg args * fix l2norm test Co-authored-by: RunningLeon <mnsheng@yeah.net> Co-authored-by: Haofan Wang <frankmiracle@outlook.com> Co-authored-by: VVsssssk <88368822+VVsssssk@users.noreply.github.com> Co-authored-by: grimoire <yaoqian@sensetime.com> * [Enhancement] Switch to statically typed Value::Any (#209) * replace std::any with StaticAny * fix __compare_typeid * remove fallback id support * constraint on traits::TypeId<T>::value * fix includes * support for centerpoint * [Enhancement] TensorRT DCN support (#205) * add tensorrt dcn support * fix lint * add docstring and dcn model support * add centerpoint ut and docs * add config and fix input rank * fix merge error * fix a bug * fix comment * [Doc] update benchmark add supported-model-list (#286) * update benchmark add supported-model-list * fix lint * fix lint * loc mmocr maximum version * fix ut Co-authored-by: maningsheng <mnsheng@yeah.net> Co-authored-by: Yifan Zhou <singlezombie@163.com> Co-authored-by: AllentDan <41138331+AllentDan@users.noreply.github.com> Co-authored-by: grimoire <streetyao@live.com> Co-authored-by: grimoire <yaoqian@sensetime.com> Co-authored-by: hanrui1sensetime <83800577+hanrui1sensetime@users.noreply.github.com> Co-authored-by: Johannes L <tehkillerbee@users.noreply.github.com> Co-authored-by: lvhan028 <lvhan_028@163.com> Co-authored-by: 杨培文 (Yang Peiwen) <915505626@qq.com> Co-authored-by: Semyon Bevzyuk <semen.bevzuk@gmail.com> Co-authored-by: AllentDan <dongchunyu@sensetime.com> Co-authored-by: Haofan Wang <frankmiracle@outlook.com> Co-authored-by: lzhangzz <lzhang329@gmail.com> Co-authored-by: maningsheng <mnsheng@yeah.net> Co-authored-by: Yifan Zhou <singlezombie@163.com> Co-authored-by: AllentDan <41138331+AllentDan@users.noreply.github.com> Co-authored-by: grimoire <streetyao@live.com> Co-authored-by: grimoire <yaoqian@sensetime.com> Co-authored-by: hanrui1sensetime <83800577+hanrui1sensetime@users.noreply.github.com> Co-authored-by: Johannes L <tehkillerbee@users.noreply.github.com> Co-authored-by: VVsssssk <88368822+VVsssssk@users.noreply.github.com> Co-authored-by: 杨培文 (Yang Peiwen) <915505626@qq.com> Co-authored-by: Semyon Bevzyuk <semen.bevzuk@gmail.com> Co-authored-by: AllentDan <dongchunyu@sensetime.com> Co-authored-by: Haofan Wang <frankmiracle@outlook.com> Co-authored-by: lzhangzz <lzhang329@gmail.com> Co-authored-by: Chen Xin <xinchen.tju@gmail.com> Co-authored-by: chenxin2 <chenxin2@sensetime.com>
2022-04-01 18:14:23 +08:00
@pytest.mark.parametrize('backend_type, ir_type',
[(Backend.ONNXRUNTIME, 'onnx'),
(Backend.OPENVINO, 'onnx'),
(Backend.TORCHSCRIPT, 'torchscript')])
def test_base_dense_head_get_bboxes(backend_type: Backend, ir_type: str):
"""Test get_bboxes rewrite of base dense head."""
check_backend(backend_type)
anchor_head = get_anchor_head_model()
anchor_head.cpu().eval()
s = 128
img_metas = [{
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3),
'img_shape': (s, s, 3)
}]
deploy_cfg = get_deploy_cfg(backend_type, ir_type)
output_names = get_ir_config(deploy_cfg).get('output_names', None)
# the cls_score's size: (1, 36, 32, 32), (1, 36, 16, 16),
# (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2).
# the bboxes's size: (1, 36, 32, 32), (1, 36, 16, 16),
# (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2)
seed_everything(1234)
cls_score = [
torch.rand(1, 36, pow(2, i), pow(2, i)) for i in range(5, 0, -1)
]
seed_everything(5678)
bboxes = [torch.rand(1, 36, pow(2, i), pow(2, i)) for i in range(5, 0, -1)]
# to get outputs of pytorch model
model_inputs = {
'cls_scores': cls_score,
'bbox_preds': bboxes,
'img_metas': img_metas
}
model_outputs = get_model_outputs(anchor_head, 'get_bboxes', model_inputs)
    # to get outputs of the rewritten model (ONNX or TorchScript IR)
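    # img_shape is replaced with a tensor so the exported graph treats the
    # image size as traced data rather than hard-coded Python constants.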
img_metas[0]['img_shape'] = torch.Tensor([s, s])
wrapped_model = WrapModel(
anchor_head, 'get_bboxes', img_metas=img_metas, with_nms=True)
rewrite_inputs = {
'cls_scores': cls_score,
'bbox_preds': bboxes,
}
rewrite_outputs, is_backend_output = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg)
if is_backend_output:
if isinstance(rewrite_outputs, dict):
rewrite_outputs = convert_to_list(rewrite_outputs, output_names)
for model_output, rewrite_output in zip(model_outputs[0],
rewrite_outputs):
model_output = model_output.squeeze().cpu().numpy()
rewrite_output = rewrite_output.squeeze()
            # Hard-code the comparison to the rewritten output's length:
            # the rewrite and the original code apply different NMS
            # strategies, so they may keep different numbers of boxes.
assert np.allclose(
model_output[:rewrite_output.shape[0]],
rewrite_output,
rtol=1e-03,
atol=1e-05)
else:
assert rewrite_outputs is not None
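

# The next two ncnn tests export the head together with its detection
# post-processing, so instead of comparing against PyTorch outputs they only
# sanity-check the shape of the single packed detection output.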
def test_base_dense_head_get_bboxes__ncnn():
"""Test get_bboxes rewrite of base dense head."""
backend_type = Backend.NCNN
check_backend(backend_type)
anchor_head = get_anchor_head_model()
anchor_head.cpu().eval()
s = 128
img_metas = [{
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3),
'img_shape': (s, s, 3)
}]
output_names = ['output']
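    # Deploy config for the ncnn end2end path: model_type='ncnn_end2end'
    # exports the detection post-processing (including NMS) into the
    # converted model. In post_processing, score_threshold drops
    # low-confidence boxes before NMS, iou_threshold is the NMS overlap
    # threshold, pre_top_k/keep_top_k bound the number of candidates kept
    # before/after NMS, and background_label_id=-1 means there is no
    # dedicated background class.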
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),
onnx_config=dict(output_names=output_names, input_shape=None),
codebase_config=dict(
type='mmdet',
task='ObjectDetection',
model_type='ncnn_end2end',
post_processing=dict(
score_threshold=0.05,
iou_threshold=0.5,
max_output_boxes_per_class=200,
pre_top_k=5000,
keep_top_k=100,
background_label_id=-1,
))))
# the cls_score's size: (1, 36, 32, 32), (1, 36, 16, 16),
# (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2).
# the bboxes's size: (1, 36, 32, 32), (1, 36, 16, 16),
# (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2)
seed_everything(1234)
cls_score = [
torch.rand(1, 36, pow(2, i), pow(2, i)) for i in range(5, 0, -1)
]
seed_everything(5678)
bboxes = [torch.rand(1, 36, pow(2, i), pow(2, i)) for i in range(5, 0, -1)]
# to get outputs of onnx model after rewrite
img_metas[0]['img_shape'] = torch.Tensor([s, s])
wrapped_model = WrapModel(
anchor_head, 'get_bboxes', img_metas=img_metas, with_nms=True)
rewrite_inputs = {
'cls_scores': cls_score,
'bbox_preds': bboxes,
}
rewrite_outputs, is_backend_output = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg)
# output should be of shape [1, N, 6]
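    # Each of the N detections carries six values: the four box coordinates
    # plus a confidence score and a class label. Only that trailing
    # dimension is asserted here.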
if is_backend_output:
rewrite_outputs = rewrite_outputs[0]
assert rewrite_outputs.shape[-1] == 6


@pytest.mark.parametrize('is_dynamic', [True, False])
def test_ssd_head_get_bboxes__ncnn(is_dynamic: bool):
"""Test get_bboxes rewrite of ssd head for ncnn."""
check_backend(Backend.NCNN)
ssd_head = get_ssd_head_model()
ssd_head.cpu().eval()
s = 128
img_metas = [{
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3),
'img_shape': (s, s, 3)
}]
output_names = ['output']
input_names = []
for i in range(6):
input_names.append('cls_scores_' + str(i))
input_names.append('bbox_preds_' + str(i))
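    # In the dynamic case, mark the number of detections and the per-level
    # feature-map height/width as symbolic ONNX axes; otherwise all shapes
    # stay static.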
dynamic_axes = None
if is_dynamic:
dynamic_axes = {
output_names[0]: {
1: 'num_dets',
}
}
for input_name in input_names:
dynamic_axes[input_name] = {2: 'height', 3: 'width'}
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=Backend.NCNN.value),
onnx_config=dict(
input_names=input_names,
output_names=output_names,
input_shape=None,
dynamic_axes=dynamic_axes),
codebase_config=dict(
type='mmdet',
task='ObjectDetection',
model_type='ncnn_end2end',
post_processing=dict(
score_threshold=0.05,
iou_threshold=0.5,
max_output_boxes_per_class=200,
pre_top_k=5000,
keep_top_k=100,
background_label_id=-1,
))))
# For the ssd_head:
# the cls_score's size: (1, 30, 20, 20), (1, 30, 10, 10),
# (1, 30, 5, 5), (1, 30, 3, 3), (1, 30, 2, 2), (1, 30, 1, 1)
# the bboxes's size: (1, 24, 20, 20), (1, 24, 10, 10),
# (1, 24, 5, 5), (1, 24, 3, 3), (1, 24, 2, 2), (1, 24, 1, 1)
feat_shape = [20, 10, 5, 3, 2, 1]
num_prior = 6
seed_everything(1234)
cls_score = [
torch.rand(1, 30, feat_shape[i], feat_shape[i])
for i in range(num_prior)
]
seed_everything(5678)
bboxes = [
torch.rand(1, 24, feat_shape[i], feat_shape[i])
for i in range(num_prior)
]
# to get outputs of onnx model after rewrite
img_metas[0]['img_shape'] = torch.tensor([s, s]) if is_dynamic else [s, s]
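    # For the dynamic export the image shape must be a tensor so that it is
    # traced as data; for the static export plain ints suffice and are
    # folded into constants.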
wrapped_model = WrapModel(
ssd_head, 'get_bboxes', img_metas=img_metas, with_nms=True)
rewrite_inputs = {
'cls_scores': cls_score,
'bbox_preds': bboxes,
}
rewrite_outputs, is_backend_output = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg)
# output should be of shape [1, N, 6]
if is_backend_output:
rewrite_outputs = rewrite_outputs[0]
assert rewrite_outputs.shape[-1] == 6


@pytest.mark.parametrize('backend_type, ir_type', [(Backend.OPENVINO, 'onnx')])
def test_reppoints_head_get_bboxes(backend_type: Backend, ir_type: str):
"""Test get_bboxes rewrite of base dense head."""
check_backend(backend_type)
dense_head = get_reppoints_head_model()
dense_head.cpu().eval()
s = 128
img_metas = [{
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3),
'img_shape': (s, s, 3)
}]
deploy_cfg = get_deploy_cfg(backend_type, ir_type)
output_names = get_ir_config(deploy_cfg).get('output_names', None)
# the cls_score's size: (1, 4, 32, 32), (1, 4, 16, 16),
# (1, 4, 8, 8), (1, 4, 4, 4), (1, 4, 2, 2).
# the bboxes's size: (1, 4, 32, 32), (1, 4, 16, 16),
# (1, 4, 8, 8), (1, 4, 4, 4), (1, 4, 2, 2)
seed_everything(1234)
cls_score = [
torch.rand(1, 4, pow(2, i), pow(2, i)) for i in range(5, 0, -1)
]
seed_everything(5678)
bboxes = [torch.rand(1, 4, pow(2, i), pow(2, i)) for i in range(5, 0, -1)]
# to get outputs of pytorch model
model_inputs = {
'cls_scores': cls_score,
'bbox_preds': bboxes,
'img_metas': img_metas
}
model_outputs = get_model_outputs(dense_head, 'get_bboxes', model_inputs)
# to get outputs of onnx model after rewrite
img_metas[0]['img_shape'] = torch.Tensor([s, s])
wrapped_model = WrapModel(
dense_head, 'get_bboxes', img_metas=img_metas, with_nms=True)
rewrite_inputs = {
'cls_scores': cls_score,
'bbox_preds': bboxes,
}
rewrite_outputs, is_backend_output = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg)
if is_backend_output:
if isinstance(rewrite_outputs, dict):
rewrite_outputs = convert_to_list(rewrite_outputs, output_names)
for model_output, rewrite_output in zip(model_outputs[0],
rewrite_outputs):
model_output = model_output.squeeze().cpu().numpy()
rewrite_output = rewrite_output.squeeze()
            # Hard-code the comparison to the rewritten output's length:
            # the rewrite and the original code apply different NMS
            # strategies, so they may keep different numbers of boxes.
assert np.allclose(
model_output[:rewrite_output.shape[0]],
rewrite_output,
rtol=1e-03,
atol=1e-05)
else:
assert rewrite_outputs is not None


@pytest.mark.parametrize('backend_type, ir_type', [(Backend.OPENVINO, 'onnx')])
def test_reppoints_head_points2bbox(backend_type: Backend, ir_type: str):
"""Test get_bboxes rewrite of base dense head."""
check_backend(backend_type)
dense_head = get_reppoints_head_model()
dense_head.cpu().eval()
output_names = ['output']
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(type=backend_type.value),
onnx_config=dict(
input_shape=None,
input_names=['pts'],
output_names=output_names)))
    # The input to points2bbox is a point-offset map with 18 channels
    # (2 coordinates for each of the 9 rep-points per location) on a
    # 16x16 feature map.
seed_everything(1234)
pts = torch.rand(1, 18, 16, 16)
# to get outputs of onnx model after rewrite
wrapped_model = WrapModel(dense_head, 'points2bbox', y_first=True)
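    # y_first=True: the offsets in `pts` are interpreted as (y, x) pairs,
    # which is the default RepPoints convention.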
rewrite_inputs = {'pts': pts}
_ = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg)


@pytest.mark.skipif(
    reason='Requires CUDA GPU', condition=not torch.cuda.is_available())
@pytest.mark.parametrize('backend_type', [(Backend.TENSORRT)])
def test_windows_msa(backend_type: Backend):
check_backend(backend_type)
from mmdet.models.backbones.swin import WindowMSA
model = WindowMSA(96, 3, (7, 7))
model.cuda().eval()
output_names = ['output']
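    # TensorRT settings: fp16_mode builds a half-precision engine,
    # max_workspace_size (1 MiB here) caps the builder workspace, and the
    # identical min/opt/max shapes pin the optimization profile to the fixed
    # window-attention inputs used below.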
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(
type=backend_type.value,
common_config=dict(fp16_mode=True, max_workspace_size=1 << 20),
model_inputs=[
dict(
input_shapes=dict(
x=dict(
min_shape=[12, 49, 96],
opt_shape=[12, 49, 96],
max_shape=[12, 49, 96]),
mask=dict(
min_shape=[12, 49, 49],
opt_shape=[12, 49, 49],
max_shape=[12, 49, 49])))
]),
onnx_config=dict(
input_shape=None,
input_names=['x', 'mask'],
output_names=output_names)))
x = torch.randn([12, 49, 96]).cuda()
mask = torch.randn([12, 49, 49]).cuda()
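    # x: (num_windows * batch, window_size**2, embed_dims) = (12, 7 * 7, 96);
    # mask: per-window pairwise attention mask of shape (12, 49, 49).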
wrapped_model = WrapModel(model, 'forward')
rewrite_inputs = {'x': x, 'mask': mask}
_ = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg)


@pytest.mark.skipif(
    reason='Requires CUDA GPU', condition=not torch.cuda.is_available())
@pytest.mark.parametrize('backend_type', [(Backend.TENSORRT)])
def test_shift_windows_msa(backend_type: Backend):
check_backend(backend_type)
from mmdet.models.backbones.swin import ShiftWindowMSA
model = ShiftWindowMSA(96, 3, 7)
model.cuda().eval()
output_names = ['output']
deploy_cfg = mmcv.Config(
dict(
backend_config=dict(
type=backend_type.value,
model_inputs=[
dict(
input_shapes=dict(
query=dict(
min_shape=[1, 60800, 96],
opt_shape=[1, 60800, 96],
max_shape=[1, 60800, 96])))
]),
onnx_config=dict(
input_shape=None,
input_names=['query'],
output_names=output_names)))
query = torch.randn([1, 60800, 96]).cuda()
hw_shape = (torch.tensor(200), torch.tensor(304))
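    # The flattened query length must match hw_shape: 200 * 304 = 60800
    # tokens, each with 96 embedding dims.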
wrapped_model = WrapModel(model, 'forward')
rewrite_inputs = {'query': query, 'hw_shape': hw_shape}
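    # run_with_backend=False: only the rewrite/export path is exercised;
    # building and running an actual TensorRT engine is skipped (this is an
    # assumption about the shared get_rewrite_outputs test helper).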
_ = get_rewrite_outputs(
wrapped_model=wrapped_model,
model_inputs=rewrite_inputs,
deploy_cfg=deploy_cfg,
run_with_backend=False)