From 4d8ea40f55ae9a7ab39d208870fb791de9d46bf9 Mon Sep 17 00:00:00 2001
From: RunningLeon
Date: Fri, 19 Aug 2022 09:30:13 +0800
Subject: [PATCH] Sync v0.7.0 to dev-1.x (#907)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* make -install -> make install (#621)
  change `make -install` to `make install`
  https://github.com/open-mmlab/mmdeploy/issues/618

* [Fix] fix csharp api detector release result (#620)
  * fix csharp api detector release result
  * fix wrong count arg of xxx_release_result in c# api

* [Enhancement] Support two-stage rotated detector TensorRT. (#530)
  * upload
  * add fake_multiclass_nms_rotated
  * delete unused code
  * align with pytorch
  * Update delta_midpointoffset_rbbox_coder.py
  * add trt rotated roi align
  * add index feature in nms
  * not good
  * fix index
  * add ut
  * add benchmark
  * move to csrc/mmdeploy
  * update unit test
  Co-authored-by: zytx121 <592267829@qq.com>

* Reduce mmcls version dependency (#635)

* fix shufflenetv2 with trt (#645)
  * fix shufflenetv2 and pspnet
  * fix ci
  * remove print

* ' -> " (#654)
  If there is a variable in the string, single quotes leave it unexpanded, while double quotes expand the variable into the string during parsing (e.g. in a shell script, echo '$VERSION' prints the literal text $VERSION, whereas echo "$VERSION" prints its value).

* ' -> " (#655)
  Same as https://github.com/open-mmlab/mmdeploy/pull/654

* Support deployment of Segmenter (#587)
  * support segmentor with ncnn
  * update regression yml
  * replace chunk with split to support ts (see the sketch below)
  * update regression yml
  * update docs
  * fix segmenter ncnn inference failure brought by #477
  * add test
  * fix test for ncnn and trt
  * fix lint
  * export nn.linear to Gemm op in onnx for ncnn
  * fix ci
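For reference, the "replace chunk with split" change above is a common TorchScript-friendly rewrite; a minimal sketch of the idea in plain PyTorch (illustrative only, not the actual Segmenter patch):

    import torch

    x = torch.rand(1, 8, 16)

    # torch.chunk can be awkward to export through TorchScript/ONNX;
    # torch.split with an explicit section size expresses the same computation.
    a, b = torch.chunk(x, 2, dim=1)                # original pattern
    c, d = torch.split(x, x.shape[1] // 2, dim=1)  # export-friendly rewrite

    assert torch.equal(a, c) and torch.equal(b, d)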
* simplify `Expand` (#617)

* Fix typo (#625)
  * Add make install in en docs
  * Add make install in zh docs
  * Fix typo
  * Merge and add windows build
  Co-authored-by: tripleMu <865626@163.com>

* [Enhancement] Fix ncnn unittest (#626)
  * optimize-csp-darknet
  * replace floordiv with torch.div
  * update csp_darknet default implement
  * fix test

* [Enhancement] TensorRT Anchor generator plugin (#646)
  * custom trt anchor generator
  * add ut
  * add docstring, update doc

* Add partition doc and sample code (#599)
  * update torch2onnx tool to support onnx partition
  * add model partition of yolov3
  * add cn doc
  * update torch2onnx tool to support onnx partition
  * add model partition of yolov3
  * add cn doc
  * add to index.rst
  * resolve comment
  * resolve comments
  * fix lint
  * change caption level in docs

* update docs (#624)

* Add java apis and demos (#563)
  * add java classifier detector
  * add segmentor
  * fix lint
  * add ImageRestorer java apis and demo
  * remove useless count parameter for Segmentor and Restorer, add PoseDetector
  * add RotatedDetection java api and demo
  * add Ocr java demo and apis
  * remove mmrotate ncnn java api and demo
  * fix lint
  * sync java api folder after rebase to master
  * fix include
  * remove record
  * fix java apis dir path in cmake
  * add java demo readme
  * fix lint mdformat
  * add test javaapi ci
  * fix lint
  * fix flake8
  * fix test javaapi ci
  * refactor readme.md
  * fix install opencv for ci
  * fix install opencv : add permission
  * add all codebases and mmcv install
  * add torch
  * install mmdeploy
  * fix image path
  * fix picture path
  * fix import ncnn
  * fix import ncnn
  * add submodule of pybind
  * fix pybind submodule
  * change download to git clone for submodule
  * fix ncnn dir
  * fix README error
  * simplify the github ci
  * fix ci
  * fix yapf
  * add JNI as required
  * fix Capitalize
  * fix Capitalize
  * fix copyright
  * ignore .class changed
  * add OpenJDK installation docs
  * install target of javaapi
  * simplify ci
  * add jar
  * fix ci
  * fix ci
  * fix test java command
  * debugging what failed
  * debugging what failed
  * debugging what failed
  * add java version info
  * install openjdk
  * add java env var
  * fix export
  * fix export
  * fix export
  * fix export
  * fix picture path
  * fix picture path
  * fix file name
  * fix file name
  * fix README
  * remove java_api strategy
  * fix python version
  * format task name
  * move args position
  * extract common utils code
  * show image class result
  * add detector result
  * segmentation result format
  * add ImageRestorer result
  * add PoseDetection java result format
  * fix ci
  * stage ocr
  * add visualize
  * move utils
  * fix lint
  * fix ocr bugs
  * fix ci demo
  * fix java classpath for ci
  * fix popd
  * fix ocr demo text garbled
  * fix ci
  * fix ci
  * fix ci
  * fix path of utils ci

* update the circleci config file by adding workflows both for linux, windows and linux-gpu (#368)
  * update circleci by adding more workflows
  * fix test workflow failure on windows platform
  * fix docker exec command for SDK unittests

* Fixed tensorrt plugin not found in Windows (#672)

* update introduction.png (#674)

* [Enhancement] Add fuse select assign pass (#589)
  * Add fuse select assign pass
  * move code to csrc
  * add config flag
  * remove bool cast

* fix export sdk info of input shape (#667)

* Update get_started.md (#675)
  Fix backend model assignment

* Update get_started.md (#676)
  Fix backend model assignment

* [Fix] fix clang build (#677)
  * fix clang build
  * fix ndk build
  * fix ndk build
  * switch to `std::filesystem` for clang-7 and later

* Deploy the Swin Transformer on TensorRT. (#652)
  * resolve conflicts
  * update ut and docs
  * fix ut
  * refine docstring
  * add comments and refine UT
  * resolve comments
  * resolve comments
  * update doc
  * add roll export (see the sketch below)
  * check backend
  * update regression test
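The "add roll export" item above concerns Swin's shifted-window attention, which cyclically shifts feature maps with torch.roll; this patch adds an export rule for the op (mmdeploy/pytorch/ops/roll.py in the diffstat). A small sketch of the pattern being exported, in plain PyTorch rather than the registered symbolic:

    import torch

    x = torch.arange(16).reshape(1, 4, 4)

    # Swin-style cyclic shift before window attention, inverse shift afterwards.
    shifted = torch.roll(x, shifts=(-1, -1), dims=(1, 2))
    restored = torch.roll(shifted, shifts=(1, 1), dims=(1, 2))

    assert torch.equal(x, restored)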
* bump version to 0.6.0 (#680)
  * bump version to 0.6.0
  * update version

* pass img_metas while exporting to onnx (#681)
  * pass img_metas while exporting to onnx
  * remove try-catch in tools for better debugging
  * use get
  * fix typo

* [Fix] fix ssd ncnn ut (#692)
  * fix ssd ncnn ut
  * fix yapf

* fix passing img_metas to pytorch2onnx for mmedit (#700)

* fix passing img_metas for mmdet3d (#707)

* [Fix] Fix android build (#698)
  * fix android build
  * fix cmake
  * fix url link

* fix wrong exit code in pipeline_manager (#715)
  * fix exit
  * change to general exit errorcode=1

* fix passing wrong backend type (#719)

* Rename onnx2ncnn to mmdeploy_onnx2ncnn (#694)
  * improvement(tools/onnx2ncnn.py): rename to mmdeploy_onnx2ncnn
  * format(tools/deploy.py): clean code
  * fix(init_plugins.py): improve if condition
  * fix(CI): update target
  * fix(test_onnx2ncnn.py): update desc
  * Update init_plugins.py

* [Fix] Fix mmdet ort static shape bug (#687)
  * fix shape
  * add device
  * fix yapf
  * fix rewriter for transforms
  * reverse image shape
  * fix ut of distance2bbox
  * fix rewriter name

* fix c4 for torchscript (#724)

* [Enhancement] Standardize C API (#634)
  * unify C API naming
  * fix demo and move apis/c/* -> apis/c/mmdeploy/*
  * fix lint
  * fix C# project
  * fix Java API

* [Enhancement] Support Slide Vertex TRT (#650)
  * reorganize mmrotate
  * fix
  * add hbb2obb
  * add ut
  * fix rotated nms
  * update docs
  * update benchmark
  * update test
  * remove ort regression test, remove comment

* Fix get-started rendering issues in readthedocs (#740)
  * fix mermaid markdown rendering issue in readthedocs
  * fix error in C++ example
  * fix error in c++ example in zh_cn get_started doc

* [Fix] set default topk for dump info (#702)
  * set default topk for dump info
  * remove redundant docstrings
  * add ci densenet
  * fix classification warnings
  * fix mmcls version
  * fix logger.warnings

* add version control (#754)

* fix satrn for ORT (#753)
  * fix satrn for ORT
  * move rewrite into pytorch

* Add inference latency test tool (#665)
  * add profile tool
  * remove print envs in profile tool
  * set cudnn_benchmark to True
  * add doc
  * update tests
  * fix typo
  * support test with images from a directory
  * update doc
  * resolve comments

* [Enhancement] Add CSE ONNX pass (#647)
  * Add fuse select assign pass
  * move code to csrc
  * add config flag
  * Add fuse select assign pass
  * Add CSE for ONNX
  * remove useless code
  * Test robot: just test robot
  * Update README.md: revert

* [Fix] fix yolox point_generator (#758)
  * fix yolox point_generator
  * add a UT
  * resolve comments
  * fix comment lines

* limit markdown version (#773)

* [Enhancement] Better index put ONNX export. (#704)
  * Add rewriter for tensor setitem (see the sketch below)
  * add version check
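The tensor-setitem rewriter above targets in-place slice assignment, a pattern ONNX has no direct opcode for. A toy example of the Python-level pattern involved (the function name is made up; the real rewriter lives in mmdeploy/pytorch/functions/tensor_setitem.py):

    import torch

    def overwrite_prefix(x, y):
        # In-place slice assignment is what the setitem rewriter has to
        # turn into an export-friendly ONNX subgraph.
        out = x.clone()
        out[:, :2] = y[:, :2]
        return out

    print(overwrite_prefix(torch.zeros(2, 4), torch.ones(2, 4)))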
* Upgrade Dockerfile to use TensorRT==8.2.4.2 (#706)
  * Upgrade TensorRT to 8.2.4.2
  * upgrade pytorch&mmcv in CPU Dockerfile
  * Delete redundant port example in Docker
  * change 160x160-608x608 to 64x64-608x608 for yolov3

* [Fix] reduce log verbosity & improve error reporting (#755)
  * reduce log verbosity & improve error reporting
  * improve error reporting

* [Enhancement] Support latest ppl.nn & ppl.cv (#564)
  * support latest ppl.nn
  * fix pplnn for model converter
  * fix lint
  * update memory policy
  * import algo from buffer
  * update ppl.cv
  * use `ppl.cv==0.7.0`
  * document supported ppl.nn version
  * skip pplnn dependency when building shared libs

* [Fix][P0] Fix for torch1.12 (#751)
  * fix for torch1.12
  * add comment

* fix check env (#785)

* [Fix] fix cascade mask rcnn (#787)
  * fix cascade mask rcnn
  * fix lint
  * add regression

* [Feature] Support RoITransRoIHead (#713)
  * [Feature] Support RoITransRoIHead
  * Add docs
  * Add mmrotate models regression test
  * Add a draft for test code
  * change the argument name
  * fix test code
  * fix minor change for not class agnostic case
  * fix sample for test code
  * fix sample for test code
  * Add mmrotate in requirements
  * Revert "Add mmrotate in requirements"
    This reverts commit 043490075e6dbe4a8fb98e94b2b583b91fc5038d.

* [Fix] fix triu (#792)
  * fix triu
  * triu -> triu_default

* [Enhancement] Install Optimizer by setuptools (#690)
  * Add fuse select assign pass
  * move code to csrc
  * add config flag
  * Add fuse select assign pass
  * Add CSE for ONNX
  * remove useless code
  * Install optimizer by setup tools
  * fix comment

* [Feature] support MMRotate model with le135 (#788)
  * support MMRotate model with le135
  * cse before fuse select assign
  * remove unused import

* [Fix] Support macOS build (#762)
  * fix macOS build
  * fix missing

* add option to build & install examples (#822)

* [Fix] Fix setup on non-linux-x64 (#811)
  * fix setup
  * replace long with int64_t

* [Feature] support build single sdk library (#806)
  * build single lib for c api
  * update csharp doc & project
  * update test build
  * fix test build
  * fix

* update document for building android sdk (#817)
  Co-authored-by: dwSun

* [Enhancement] support kwargs in SDK python bindings (#794)
  * support-kwargs
  * make '__call__' perform single-image inference and add a 'batch' API to handle batch image inference
  * fix linting error and typo
  * fix lint

* improvement(sdk): add sdk code coverage (#808)
  * feat(doc): add CI
  * CI(sdk): add sdk coverage
  * style(test): code format
  * fix(CI): update coverage.info path
  * improvement(CI): use internal image
  * improvement(CI): push coverage info once

* [Feature] Add C++ API for SDK (#831)
  * add C++ API
  * unify result type & add examples
  * minor fix
  * install cxx API headers
  * fix Mat, add more examples
  * fix monolithic build & fix lint
  * install examples correctly
  * fix lint

* feat(tools/deploy.py): support snpe (#789)
  * fix(tools/deploy.py): support snpe
  * improvement(backend/snpe): review advices
  * docs(backend/snpe): update build
  * docs(backend/snpe): server support specify port
  * docs(backend/snpe): update path
  * fix(backend/snpe): time counter missing argument
  * docs(backend/snpe): add missing argument
  * docs(backend/snpe): update download and using
  * improvement(snpe_net.cpp): load model with modeldata

* Support setup on environment with no PyTorch (#843)

* support test with multi batch (#829)
  * support test with multi batch
  * resolve comment

* import algorithm from buffer (#793)

* [Enhancement] build sdk python api in stand-alone manner (#810)
  * build sdk python api in stand-alone manner
  * enable MMDEPLOY_BUILD_SDK_MONOLITHIC and MMDEPLOY_BUILD_EXAMPLES in prebuild config
  * link mmdeploy to python target when monolithic option is on
  * checkin README to describe precompiled package build procedure
  * use packaging.version.parse(python_version) instead of list(python_version) (see the sketch below)
  * fix according to review results
  * rebase master
  * rollback cmake.in and apis/python/CMakeLists.txt
  * reorganize files in install/example
  * let cmake detect visual studio instead of specifying 2019
  * rename whl name of precompiled package
  * fix according to review results
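The packaging.version.parse change above matters because comparing version strings as plain character sequences mis-orders double-digit components; a quick demonstration:

    from packaging.version import parse

    # Lexicographic comparison gets "3.10" wrong...
    assert list("3.10") < list("3.9")
    # ...while semantic version parsing orders it correctly.
    assert parse("3.10") > parse("3.9")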
* Fix SDK backend (#844)

* fix mmpose python api (#852)

* add prebuilt package usage docs on windows (#816)
  * add prebuilt package usage docs on windows
  * fix lint
  * update
  * try fix lint
  * add en docs
  * update
  * update
  * update faq

* fix typo (#862)

* [Enhancement] Improve get_started documents and bump version to 0.7.0 (#813)
  * simplify commands in get_started
  * add installation commands for Windows
  * fix typo
  * limit markdown and sphinx_markdown_tables version
  * adopt html tag
  * bump mmdeploy version
  * bump mmdeploy version
  * update get_started
  * update get_started
  * use python3.8 instead of python3.7
  * remove duplicate section
  * resolve issue #856
  * update according to review results
  * add reference to prebuilt_package_windows.md
  * fix error when build sdk demos
  * fix mmcls

Co-authored-by: Ryan_Huang <44900829+DrRyanHuang@users.noreply.github.com>
Co-authored-by: Chen Xin
Co-authored-by: q.yao
Co-authored-by: zytx121 <592267829@qq.com>
Co-authored-by: Li Zhang
Co-authored-by: tripleMu
Co-authored-by: tripleMu <865626@163.com>
Co-authored-by: hanrui1sensetime <83800577+hanrui1sensetime@users.noreply.github.com>
Co-authored-by: lvhan028
Co-authored-by: Bryan Glen Suello <11388006+bgsuello@users.noreply.github.com>
Co-authored-by: zambranohally <63218980+zambranohally@users.noreply.github.com>
Co-authored-by: AllentDan <41138331+AllentDan@users.noreply.github.com>
Co-authored-by: tpoisonooo
Co-authored-by: Hakjin Lee
Co-authored-by: 孙德伟 <5899962+dwSun@users.noreply.github.com>
Co-authored-by: dwSun
Co-authored-by: Chen Xin
---
 .circleci/config.yml | 65 +-- .circleci/docker/Dockerfile | 41 ++ .circleci/scripts/linux/build.sh | 16 + .../scripts/linux/convert_onnxruntime.sh | 18 + .../scripts/linux/install_onnxruntime.sh | 30 ++ .circleci/scripts/linux/install_python.sh | 17 + .../scripts/windows/install_onnxruntime.ps1 | 19 + .circleci/scripts/windows/install_opencv.ps1 | 3 + .circleci/test.yml | 313 ++++++++++++++ .github/scripts/test_java_demo.py | 84 ++++ .github/scripts/test_onnx2ncnn.py | 9 +- .github/workflows/backend-ncnn.yml | 7 +- .github/workflows/backend-snpe.yml | 60 +++ .github/workflows/build.yml | 37 +- .github/workflows/java_api.yml | 72 ++++ .gitignore | 8 + .pre-commit-config.yaml | 1 + CMakeLists.txt | 31 +- MANIFEST.in | 3 + README.md | 9 +- README_zh-CN.md | 7 +- cmake/MMDeploy.cmake | 16 +- cmake/MMDeployConfig.cmake.in | 5 + configs/_base_/backends/snpe.py | 1 + configs/_base_/onnx_config.py | 3 +- configs/mmcls/classification_snpe_static.py | 3 + ...sification_tensorrt-fp16_static-384x384.py | 13 + ...on_tensorrt-fp16_dynamic-64x64-608x608.py} | 2 +- ...on_tensorrt-int8_dynamic-64x64-608x608.py} | 2 +- ...tection_tensorrt_dynamic-64x64-608x608.py} | 2 +- .../yolov3_partition_onnxruntime_static.py | 12 + .../super-resolution_snpe_static-256x256.py | 2 + .../text-detection_snpe_static.py | 3 + .../pose-detection_snpe_static-256x256.py | 3 + ...pose-detection_tensorrt_dynamic-256x192.py | 23 + configs/mmrotate/rotated-detection_static.py | 3 +- .../segmentation_snpe_static-512x1024.py | 3 + csrc/mmdeploy/CMakeLists.txt | 3 +- csrc/mmdeploy/apis/c/CMakeLists.txt | 49 --- csrc/mmdeploy/apis/c/common.h | 101 ----- csrc/mmdeploy/apis/c/mmdeploy/CMakeLists.txt | 80 ++++ .../apis/c/{ => mmdeploy}/classifier.cpp | 76 ++-- .../apis/c/{ => mmdeploy}/classifier.h | 51 ++- .../mmdeploy/apis/c/{ => mmdeploy}/common.cpp | 17 +- csrc/mmdeploy/apis/c/mmdeploy/common.h | 92 ++++ .../apis/c/{ => mmdeploy}/common_internal.h | 18 +- .../apis/c/{ => mmdeploy}/detector.cpp | 77 ++-- .../mmdeploy/apis/c/{ => mmdeploy}/detector.h | 58 +-- .../apis/c/{ => mmdeploy}/executor.cpp | 17 +- .../mmdeploy/apis/c/{ => mmdeploy}/executor.h | 0 .../apis/c/{ => mmdeploy}/executor_internal.h | 0 csrc/mmdeploy/apis/c/{ => mmdeploy}/handle.h | 5 +- csrc/mmdeploy/apis/c/{ => mmdeploy}/model.cpp | 22 +- csrc/mmdeploy/apis/c/{ => mmdeploy}/model.h | 8 +- .../apis/c/{ => mmdeploy}/pipeline.cpp | 45 +- .../mmdeploy/apis/c/{ => mmdeploy}/pipeline.h | 20 +- .../apis/c/{ =>
mmdeploy}/pose_detector.cpp | 90 ++-- .../apis/c/{ => mmdeploy}/pose_detector.h | 64 +-- .../apis/c/{ => mmdeploy}/restorer.cpp | 79 ++-- .../mmdeploy/apis/c/{ => mmdeploy}/restorer.h | 39 +- .../c/{ => mmdeploy}/rotated_detector.cpp | 94 ++-- .../apis/c/{ => mmdeploy}/rotated_detector.h | 50 ++- .../apis/c/{ => mmdeploy}/segmentor.cpp | 76 ++-- .../apis/c/{ => mmdeploy}/segmentor.h | 45 +- .../apis/c/{ => mmdeploy}/text_detector.cpp | 93 ++-- .../apis/c/{ => mmdeploy}/text_detector.h | 61 +-- .../apis/c/{ => mmdeploy}/text_recognizer.cpp | 118 +++--- .../apis/c/{ => mmdeploy}/text_recognizer.h | 68 +-- .../apis/csharp/MMDeploy/APIs/Classifier.cs | 3 +- .../apis/csharp/MMDeploy/APIs/Detector.cs | 3 +- .../apis/csharp/MMDeploy/APIs/TextDetector.cs | 3 +- .../apis/csharp/MMDeploy/MMDeploy.csproj | 4 +- .../MMDeploy/NativeAPIs/NativeMethods.cs | 2 +- csrc/mmdeploy/apis/csharp/README.md | 2 +- csrc/mmdeploy/apis/cxx/CMakeLists.txt | 28 ++ .../mmdeploy/apis/cxx/mmdeploy/classifier.hpp | 67 +++ csrc/mmdeploy/apis/cxx/mmdeploy/common.hpp | 145 +++++++ csrc/mmdeploy/apis/cxx/mmdeploy/detector.hpp | 67 +++ .../apis/cxx/mmdeploy/pose_detector.hpp | 78 ++++ csrc/mmdeploy/apis/cxx/mmdeploy/restorer.hpp | 62 +++ .../apis/cxx/mmdeploy/rotated_detector.hpp | 68 +++ csrc/mmdeploy/apis/cxx/mmdeploy/segmentor.hpp | 64 +++ .../apis/cxx/mmdeploy/text_detector.hpp | 68 +++ .../apis/cxx/mmdeploy/text_recognizer.hpp | 79 ++++ csrc/mmdeploy/apis/java/CMakeLists.txt | 23 + csrc/mmdeploy/apis/java/README.md | 48 +++ .../apis/java/mmdeploy/Classifier.java | 54 +++ .../mmdeploy/apis/java/mmdeploy/DataType.java | 13 + .../mmdeploy/apis/java/mmdeploy/Detector.java | 58 +++ .../apis/java/mmdeploy/InstanceMask.java | 12 + csrc/mmdeploy/apis/java/mmdeploy/Mat.java | 17 + .../apis/java/mmdeploy/PixelFormat.java | 15 + csrc/mmdeploy/apis/java/mmdeploy/PointF.java | 12 + .../apis/java/mmdeploy/PoseDetector.java | 50 +++ csrc/mmdeploy/apis/java/mmdeploy/Rect.java | 16 + .../mmdeploy/apis/java/mmdeploy/Restorer.java | 48 +++ .../apis/java/mmdeploy/Segmentor.java | 54 +++ .../apis/java/mmdeploy/TextDetector.java | 54 +++ .../apis/java/mmdeploy/TextRecognizer.java | 57 +++ csrc/mmdeploy/apis/java/native/CMakeLists.txt | 28 ++ csrc/mmdeploy/apis/java/native/common.h | 54 +++ .../apis/java/native/mmdeploy_Classifier.cpp | 58 +++ .../apis/java/native/mmdeploy_Classifier.h | 35 ++ .../apis/java/native/mmdeploy_Detector.cpp | 82 ++++ .../apis/java/native/mmdeploy_Detector.h | 35 ++ .../java/native/mmdeploy_PoseDetector.cpp | 58 +++ .../apis/java/native/mmdeploy_PoseDetector.h | 36 ++ .../apis/java/native/mmdeploy_Restorer.cpp | 71 ++++ .../apis/java/native/mmdeploy_Restorer.h | 34 ++ .../apis/java/native/mmdeploy_Segmentor.cpp | 52 +++ .../apis/java/native/mmdeploy_Segmentor.h | 35 ++ .../java/native/mmdeploy_TextDetector.cpp | 64 +++ .../apis/java/native/mmdeploy_TextDetector.h | 36 ++ .../java/native/mmdeploy_TextRecognizer.cpp | 103 +++++ .../java/native/mmdeploy_TextRecognizer.h | 45 ++ csrc/mmdeploy/apis/python/CMakeLists.txt | 2 + csrc/mmdeploy/apis/python/classifier.cpp | 32 +- csrc/mmdeploy/apis/python/common.cpp | 10 +- csrc/mmdeploy/apis/python/common.h | 4 +- csrc/mmdeploy/apis/python/detector.cpp | 33 +- csrc/mmdeploy/apis/python/pose_detector.cpp | 80 ++-- csrc/mmdeploy/apis/python/restorer.cpp | 31 +- .../mmdeploy/apis/python/rotated_detector.cpp | 33 +- csrc/mmdeploy/apis/python/segmentor.cpp | 32 +- csrc/mmdeploy/apis/python/text_detector.cpp | 31 +- csrc/mmdeploy/apis/python/text_recognizer.cpp | 53 ++- 
.../backend_ops/ncnn/onnx2ncnn/CMakeLists.txt | 11 +- .../backend_ops/onnxruntime/CMakeLists.txt | 2 + .../tensorrt/batched_nms/trt_batched_nms.cpp | 43 +- .../tensorrt/batched_nms/trt_batched_nms.hpp | 9 +- .../trt_batched_rotated_nms.cpp | 17 +- .../trt_batched_rotated_nms.hpp | 3 - .../common/nms/batched_nms_kernel.hpp | 4 +- .../backend_ops/tensorrt/common/nms/kernel.h | 3 +- .../tensorrt/common/trt_plugin_base.hpp | 17 +- .../tensorrt/common_impl/nms/allClassNMS.cu | 1 + .../common_impl/nms/allClassRotatedNMS.cu | 3 +- .../common_impl/nms/batched_nms_kernel.cpp | 6 +- .../common_impl/nms/gatherNMSOutputs.cu | 24 +- .../tensorrt/grid_priors/trt_grid_priors.cpp | 154 +++++++ .../tensorrt/grid_priors/trt_grid_priors.hpp | 66 +++ .../grid_priors/trt_grid_priors_kernel.cu | 43 ++ .../grid_priors/trt_grid_priors_kernel.hpp | 10 + .../trt_multi_level_rotated_roi_align.cpp | 228 ++++++++++ .../trt_multi_level_rotated_roi_align.hpp | 79 ++++ ...rt_multi_level_rotated_roi_align_kernel.cu | 164 +++++++ ...t_multi_level_rotated_roi_align_kernel.hpp | 13 + .../backend_ops/torchscript/CMakeLists.txt | 1 - .../torchscript/optimizer/CMakeLists.txt | 3 +- .../torchscript/optimizer/bind.cpp | 8 + .../optimizer/ir/subgraph_matcher.cpp | 4 +- .../optimizer/ir/subgraph_matcher.h | 4 +- .../onnx/common_subgraph_elimination.cpp | 138 ++++++ .../passes/onnx/common_subgraph_elimination.h | 20 + .../passes/onnx/flatten_cls_head.cpp | 2 +- .../passes/onnx/fuse_select_assign.cpp | 163 +++++++ .../passes/onnx/fuse_select_assign.h | 17 + .../passes/onnx/merge_shape_concate.cpp | 4 +- csrc/mmdeploy/codebase/common.h | 3 +- .../codebase/mmocr/contour_expand.cpp | 2 +- .../codebase/mmocr/cpu/CMakeLists.txt | 3 + csrc/mmdeploy/codebase/mmocr/cpu/dbnet.cpp | 1 - csrc/mmdeploy/codebase/mmocr/cuda/dbnet.cpp | 1 - csrc/mmdeploy/codebase/mmocr/dbnet.cpp | 4 +- csrc/mmdeploy/codebase/mmocr/panet.cpp | 6 +- csrc/mmdeploy/codebase/mmocr/pixel_group.cpp | 2 +- csrc/mmdeploy/codebase/mmocr/psenet.cpp | 6 +- csrc/mmdeploy/codebase/mmocr/warp.cpp | 5 +- .../mmpose/keypoints_from_heatmap.cpp | 1 - .../mmpose/keypoints_from_regression.cpp | 1 - .../mmrotate/oriented_object_detection.cpp | 1 - csrc/mmdeploy/core/CMakeLists.txt | 5 +- csrc/mmdeploy/core/model.cpp | 6 +- csrc/mmdeploy/core/mpl/span.h | 14 +- csrc/mmdeploy/core/registry.cpp | 12 +- csrc/mmdeploy/core/registry.h | 28 +- csrc/mmdeploy/core/status_code.cpp | 5 + csrc/mmdeploy/core/types.h | 9 + csrc/mmdeploy/core/utils/filesystem.h | 3 +- csrc/mmdeploy/device/cuda/cuda_device.cpp | 2 +- csrc/mmdeploy/execution/bulk.h | 2 +- csrc/mmdeploy/execution/expand.h | 71 +--- .../schedulers/dynamic_batch_scheduler.h | 2 +- .../execution/schedulers/schedulers.cpp | 2 +- csrc/mmdeploy/graph/common.h | 7 +- csrc/mmdeploy/graph/pipeline.cpp | 12 +- csrc/mmdeploy/graph/task.cpp | 7 +- csrc/mmdeploy/model/CMakeLists.txt | 3 +- csrc/mmdeploy/model/zip_model_impl.cpp | 6 +- csrc/mmdeploy/net/CMakeLists.txt | 4 + csrc/mmdeploy/net/net_module.cpp | 4 +- csrc/mmdeploy/net/ort/ort_net.cpp | 4 +- csrc/mmdeploy/net/ppl/CMakeLists.txt | 4 +- csrc/mmdeploy/net/ppl/ppl_net.cpp | 68 ++- csrc/mmdeploy/net/snpe/CMakeLists.txt | 25 ++ csrc/mmdeploy/net/snpe/snpe_net.cpp | 262 ++++++++++++ csrc/mmdeploy/net/snpe/snpe_net.h | 61 +++ csrc/mmdeploy/net/trt/trt_net.cpp | 6 +- csrc/mmdeploy/preprocess/cuda/pad_impl.cpp | 10 +- csrc/mmdeploy/preprocess/cuda/resize_impl.cpp | 11 - .../mmdeploy/preprocess/transform/collect.cpp | 3 +- .../mmdeploy/preprocess/transform/compose.cpp | 9 +- 
.../mmdeploy/preprocess/transform/transform.h | 4 +- csrc/mmdeploy/preprocess/transform_module.cpp | 3 +- csrc/mmdeploy/utils/opencv/opencv_utils.cpp | 1 + csrc/mmdeploy/utils/opencv/opencv_utils.h | 2 +- demo/csrc/CMakeLists.txt | 57 ++- demo/csrc/async_ocr.cpp | 33 +- demo/csrc/async_ocr_v2.cpp | 43 +- demo/csrc/classifier.cxx | 32 ++ demo/csrc/detector.cxx | 69 +++ demo/csrc/image_classification.cpp | 19 +- demo/csrc/image_restorer.cpp | 15 +- demo/csrc/image_segmentation.cpp | 15 +- demo/csrc/object_detection.cpp | 21 +- demo/csrc/ocr.cpp | 32 +- demo/csrc/pose_detection.cpp | 19 +- demo/csrc/pose_detector.cxx | 33 ++ demo/csrc/restorer.cxx | 34 ++ demo/csrc/rotated_detector.cxx | 51 +++ demo/csrc/rotated_object_detection.cpp | 13 +- demo/csrc/segmentor.cxx | 58 +++ demo/csrc/text_ocr.cxx | 46 ++ demo/java/ImageClassification.java | 48 +++ demo/java/ImageRestorer.java | 48 +++ demo/java/ImageSegmentation.java | 48 +++ demo/java/ObjectDetection.java | 68 +++ demo/java/Ocr.java | 60 +++ demo/java/PoseDetection.java | 50 +++ demo/java/README.md | 22 + demo/java/Utils.java | 21 + demo/python/image_classification.py | 15 +- demo/python/image_restorer.py | 12 +- demo/python/image_segmentation.py | 13 +- demo/python/object_detection.py | 17 +- demo/python/ocr.py | 51 ++- demo/python/pose_detection.py | 24 +- demo/python/rotated_object_detection.py | 14 +- docker/CPU/Dockerfile | 13 +- docker/GPU/Dockerfile | 19 +- docs/en/01-how-to-build/android.md | 78 ++-- docs/en/01-how-to-build/build_from_docker.md | 10 +- docs/en/01-how-to-build/build_from_source.md | 1 + docs/en/01-how-to-build/jetsons.md | 14 +- docs/en/01-how-to-build/linux-x86_64.md | 36 +- docs/en/01-how-to-build/snpe.md | 194 +++++++++ docs/en/01-how-to-build/windows.md | 34 +- .../02-how-to-run/how_to_evaluate_a_model.md | 2 + .../02-how-to-run/prebuilt_package_windows.md | 384 +++++++++++++++++ docs/en/02-how-to-run/write_config.md | 38 +- docs/en/03-benchmark/benchmark.md | 80 ++++ docs/en/03-benchmark/benchmark_edge.md | 57 +++ docs/en/03-benchmark/supported_models.md | 143 ++++--- docs/en/04-supported-codebases/mmcls.md | 17 +- docs/en/04-supported-codebases/mmdet.md | 1 + docs/en/04-supported-codebases/mmrotate.md | 4 +- docs/en/04-supported-codebases/mmseg.md | 2 +- docs/en/05-supported-backends/pplnn.md | 2 +- .../add_test_units_for_backend_ops.md | 22 +- docs/en/06-developer-guide/partition_model.md | 89 ++++ .../06-developer-guide/support_new_backend.md | 12 +- .../06-developer-guide/support_new_model.md | 8 +- docs/en/appendix/cross_build_snpe_service.md | 166 ++++++++ docs/en/conf.py | 1 + docs/en/faq.md | 34 ++ docs/en/get_started.md | 319 +++++++------- docs/en/index.rst | 7 + docs/en/ops/tensorrt.md | 42 ++ docs/en/useful_tools.md | 70 ++- docs/zh_cn/01-how-to-build/android.md | 85 ++-- .../01-how-to-build/build_from_docker.md | 2 +- .../01-how-to-build/build_from_source.md | 3 +- docs/zh_cn/01-how-to-build/jetsons.md | 16 +- docs/zh_cn/01-how-to-build/linux-x86_64.md | 38 +- docs/zh_cn/01-how-to-build/snpe.md | 198 +++++++++ docs/zh_cn/01-how-to-build/windows.md | 33 +- .../02-how-to-run/prebuilt_package_windows.md | 400 ++++++++++++++++++ docs/zh_cn/02-how-to-run/quantize_model.md | 2 +- docs/zh_cn/03-benchmark/benchmark.md | 80 ++++ docs/zh_cn/03-benchmark/benchmark_edge.md | 58 +++ docs/zh_cn/03-benchmark/supported_models.md | 139 +++--- .../04-developer-guide/do_regression_test.md | 1 + .../04-developer-guide/partition_model.md | 85 ++++ .../04-developer-guide/support_new_backend.md | 14 +- 
.../04-developer-guide/support_new_model.md | 8 +- .../appendix/cross_build_snpe_service.md | 170 ++++++++ docs/zh_cn/conf.py | 1 + docs/zh_cn/faq.md | 34 ++ docs/zh_cn/get_started.md | 354 ++++++++-------- docs/zh_cn/index.rst | 7 + mmdeploy/apis/calibration.py | 8 +- mmdeploy/apis/core/pipeline_manager.py | 4 +- mmdeploy/apis/onnx/export.py | 8 +- mmdeploy/apis/onnx/optimizer.py | 13 +- mmdeploy/apis/onnx/passes/optimize_onnx.py | 15 +- mmdeploy/apis/pytorch2onnx.py | 4 +- mmdeploy/apis/snpe/__init__.py | 16 + mmdeploy/apis/torch_jit/trace.py | 5 +- mmdeploy/apis/visualize.py | 3 +- mmdeploy/backend/ncnn/init_plugins.py | 17 +- mmdeploy/backend/ncnn/onnx2ncnn.py | 13 +- mmdeploy/backend/ncnn/wrapper.py | 2 +- mmdeploy/backend/onnxruntime/wrapper.py | 2 +- mmdeploy/backend/openvino/wrapper.py | 2 +- mmdeploy/backend/pplnn/onnx2pplnn.py | 9 +- mmdeploy/backend/pplnn/utils.py | 86 ++-- mmdeploy/backend/pplnn/wrapper.py | 16 +- mmdeploy/backend/sdk/export_info.py | 2 + mmdeploy/backend/sdk/wrapper.py | 2 +- mmdeploy/backend/snpe/__init__.py | 30 ++ mmdeploy/backend/snpe/init_plugins.py | 11 + mmdeploy/backend/snpe/onnx2dlc.py | 78 ++++ mmdeploy/backend/snpe/wrapper.py | 250 +++++++++++ mmdeploy/backend/tensorrt/init_plugins.py | 4 +- mmdeploy/backend/tensorrt/wrapper.py | 2 +- mmdeploy/backend/torchscript/wrapper.py | 2 +- mmdeploy/codebase/base/backend_model.py | 9 +- .../codebase/mmcls/deploy/classification.py | 12 +- .../mmcls/deploy/classification_model.py | 8 +- .../mmcls/models/backbones/__init__.py | 4 +- .../mmcls/models/backbones/shufflenet_v2.py | 19 +- .../models/backbones/vision_transformer.py | 2 +- .../codebase/mmcls/models/utils/attention.py | 51 +-- mmdeploy/codebase/mmdet/core/__init__.py | 2 + mmdeploy/codebase/mmdet/core/anchor.py | 98 +++++ mmdeploy/codebase/mmdet/core/bbox/__init__.py | 1 + .../core/bbox/distance_point_bbox_coder.py | 44 ++ .../codebase/mmdet/core/bbox/transforms.py | 7 +- .../codebase/mmdet/core/point_generator.py | 63 +++ .../mmdet/deploy/object_detection_model.py | 2 +- mmdeploy/codebase/mmdet/models/__init__.py | 1 + mmdeploy/codebase/mmdet/models/backbones.py | 219 ++++++++++ .../mmdet/models/dense_heads/yolo_head.py | 9 +- .../codebase/mmdet/models/detectors/base.py | 23 +- .../models/roi_heads/cascade_roi_head.py | 7 +- .../roi_heads/single_level_roi_extractor.py | 2 + mmdeploy/codebase/mmdet/models/transformer.py | 50 +++ mmdeploy/codebase/mmdet3d/models/base.py | 2 +- .../mmedit/deploy/super_resolution.py | 6 +- .../mmedit/deploy/super_resolution_model.py | 22 +- .../codebase/mmocr/deploy/text_detection.py | 6 +- .../mmocr/deploy/text_detection_model.py | 13 +- .../mmocr/deploy/text_recognition_model.py | 6 +- .../codebase/mmpose/deploy/pose_detection.py | 8 +- .../mmpose/deploy/pose_detection_model.py | 17 +- .../codebase/mmrotate/core/bbox/__init__.py | 2 + .../core/bbox/gliding_vertex_coder.py | 31 ++ .../codebase/mmrotate/core/bbox/transforms.py | 102 +++++ .../mmrotate/core/post_processing/bbox_nms.py | 106 ++++- .../deploy/rotated_detection_model.py | 30 +- mmdeploy/codebase/mmrotate/models/__init__.py | 19 +- .../mmrotate/models/dense_heads/__init__.py | 9 + .../models/dense_heads/oriented_rpn_head.py | 141 ++++++ .../{ => dense_heads}/rotated_anchor_head.py | 0 .../{ => dense_heads}/rotated_rpn_head.py | 23 +- .../mmrotate/models/roi_heads/__init__.py | 16 + .../mmrotate/models/roi_heads/gv_bbox_head.py | 90 ++++ .../models/roi_heads/gv_ratio_roi_head.py | 73 ++++ .../oriented_standard_roi_head.py | 8 +- 
.../models/roi_heads/roi_extractors.py | 97 +++++ .../models/roi_heads/roi_trans_roi_head.py | 85 ++++ .../{ => roi_heads}/rotated_bbox_head.py | 9 + .../codebase/mmseg/deploy/segmentation.py | 6 +- .../mmseg/deploy/segmentation_model.py | 30 +- .../codebase/mmseg/models/segmentors/base.py | 10 +- mmdeploy/core/rewriters/rewriter_manager.py | 6 +- mmdeploy/core/rewriters/rewriter_utils.py | 8 + mmdeploy/mmcv/__init__.py | 1 + mmdeploy/mmcv/cnn/__init__.py | 5 + mmdeploy/mmcv/cnn/transformer.py | 146 +++++++ mmdeploy/mmcv/ops/nms.py | 20 +- mmdeploy/pytorch/functions/__init__.py | 12 +- mmdeploy/pytorch/functions/chunk.py | 17 + mmdeploy/pytorch/functions/expand.py | 16 + mmdeploy/pytorch/functions/linear.py | 32 +- mmdeploy/pytorch/functions/masked_fill.py | 25 ++ mmdeploy/pytorch/functions/normalize.py | 41 ++ mmdeploy/pytorch/functions/tensor_setitem.py | 57 +++ mmdeploy/pytorch/functions/triu.py | 14 +- mmdeploy/pytorch/ops/__init__.py | 5 +- mmdeploy/pytorch/ops/pad.py | 77 ++++ mmdeploy/pytorch/ops/roll.py | 33 ++ mmdeploy/utils/config_utils.py | 2 +- mmdeploy/utils/constants.py | 1 + mmdeploy/utils/test.py | 8 +- mmdeploy/utils/timer.py | 63 ++- mmdeploy/version.py | 2 +- requirements/codebases.txt | 7 + requirements/docs.txt | 4 +- requirements/optional.txt | 2 +- requirements/readthedocs.txt | 1 + requirements/runtime.txt | 1 + resources/introduction.png | Bin 193631 -> 61843 bytes service/snpe/client/inference_pb2.py | 106 +++++ service/snpe/client/inference_pb2_grpc.py | 209 +++++++++ service/snpe/inference.proto | 70 +++ service/snpe/server/CMakeLists.txt | 81 ++++ service/snpe/server/common.cmake | 123 ++++++ service/snpe/server/inference_server.cc | 109 +++++ service/snpe/server/scope_timer.h | 34 ++ service/snpe/server/service_impl.cpp | 358 ++++++++++++++++ service/snpe/server/service_impl.h | 78 ++++ service/snpe/server/text_table.h | 209 +++++++++ setup.cfg | 1 + setup.py | 80 +++- tests/regression/mmcls.yml | 29 +- tests/regression/mmdet.yml | 24 +- tests/regression/mmrotate.yml | 26 ++ tests/regression/mmseg.yml | 30 ++ tests/test_apis/test_onnx_passes.py | 104 ++++- .../test_mmdet/test_mmdet_core.py | 129 +++++- .../test_mmdet/test_mmdet_models.py | 137 +++++- .../test_mmrotate/test_mmrotate_core.py | 147 ++++++- .../test_mmrotate/test_mmrotate_models.py | 351 +++++++++++++++ tests/test_csrc/CMakeLists.txt | 10 +- tests/test_csrc/capi/test_classifier.cpp | 24 +- tests/test_csrc/capi/test_detector.cpp | 23 +- tests/test_csrc/capi/test_model.cpp | 10 +- tests/test_csrc/capi/test_restorer.cpp | 23 +- tests/test_csrc/capi/test_segmentor.cpp | 23 +- tests/test_csrc/capi/test_text_detector.cpp | 23 +- tests/test_csrc/capi/test_text_recognizer.cpp | 46 +- tests/test_csrc/core/test_execution.cpp | 20 +- .../test_csrc/model/test_directory_model.cpp | 2 +- tests/test_csrc/model/test_zip_model.cpp | 2 + tests/test_csrc/net/test_ncnn_net.cpp | 2 +- tests/test_csrc/net/test_openvino_net.cpp | 2 +- tests/test_csrc/net/test_ort_net.cpp | 2 +- tests/test_csrc/net/test_ppl_net.cpp | 2 +- tests/test_csrc/net/test_trt_net.cpp | 2 +- tests/test_csrc/preprocess/test_compose.cpp | 1 + tests/test_csrc/preprocess/test_crop.cpp | 1 + .../preprocess/test_default_format_bundle.cpp | 15 +- .../preprocess/test_image2tensor.cpp | 1 + tests/test_csrc/preprocess/test_load.cpp | 4 +- tests/test_csrc/preprocess/test_normalize.cpp | 2 + tests/test_csrc/preprocess/test_pad.cpp | 2 + tests/test_csrc/preprocess/test_resize.cpp | 6 +- tests/test_csrc/test_resource.h | 6 +- tests/test_mmcv/test_mmcv_cnn.py | 
32 ++ tests/test_ops/test_ops.py | 165 ++++++++ tests/test_ops/utils.py | 14 +- tests/test_pytorch/test_pytorch_functions.py | 121 +++++- tests/test_pytorch/test_pytorch_ops.py | 8 +- tests/test_utils/test_timer.py | 4 +- tools/check_env.py | 11 +- tools/deploy.py | 46 +- tools/onnx2dlc.py | 37 ++ tools/onnx2ncnn.py | 10 +- tools/onnx2pplnn.py | 10 +- tools/onnx2tensorrt.py | 26 +- tools/package_tools/README.md | 47 ++ tools/package_tools/configs/linux_x64.yaml | 8 +- tools/package_tools/configs/windows_x64.yaml | 8 +- tools/package_tools/mmdeploy_builder.py | 125 ++++-- .../packaging/mmdeploy_python/version.py | 2 +- tools/profile.py | 155 +++++++ tools/scripts/build_linux_nvidia.sh | 4 +- tools/test.py | 11 + tools/torch2onnx.py | 76 ++-- 460 files changed, 15420 insertions(+), 2707 deletions(-) create mode 100644 .circleci/docker/Dockerfile create mode 100644 .circleci/scripts/linux/build.sh create mode 100644 .circleci/scripts/linux/convert_onnxruntime.sh create mode 100644 .circleci/scripts/linux/install_onnxruntime.sh create mode 100644 .circleci/scripts/linux/install_python.sh create mode 100644 .circleci/scripts/windows/install_onnxruntime.ps1 create mode 100644 .circleci/scripts/windows/install_opencv.ps1 create mode 100644 .circleci/test.yml create mode 100644 .github/scripts/test_java_demo.py create mode 100644 .github/workflows/backend-snpe.yml create mode 100644 .github/workflows/java_api.yml create mode 100644 configs/_base_/backends/snpe.py create mode 100644 configs/mmcls/classification_snpe_static.py create mode 100644 configs/mmcls/classification_tensorrt-fp16_static-384x384.py rename configs/mmdet/detection/{detection_tensorrt-fp16_dynamic-160x160-608x608.py => detection_tensorrt-fp16_dynamic-64x64-608x608.py} (88%) rename configs/mmdet/detection/{detection_tensorrt-int8_dynamic-160x160-608x608.py => detection_tensorrt-int8_dynamic-64x64-608x608.py} (88%) rename configs/mmdet/detection/{detection_tensorrt_dynamic-160x160-608x608.py => detection_tensorrt_dynamic-64x64-608x608.py} (87%) create mode 100644 configs/mmdet/detection/yolov3_partition_onnxruntime_static.py create mode 100644 configs/mmedit/super-resolution/super-resolution_snpe_static-256x256.py create mode 100644 configs/mmocr/text-detection/text-detection_snpe_static.py create mode 100644 configs/mmpose/pose-detection_snpe_static-256x256.py create mode 100644 configs/mmpose/pose-detection_tensorrt_dynamic-256x192.py create mode 100644 configs/mmseg/segmentation_snpe_static-512x1024.py delete mode 100644 csrc/mmdeploy/apis/c/CMakeLists.txt delete mode 100644 csrc/mmdeploy/apis/c/common.h create mode 100644 csrc/mmdeploy/apis/c/mmdeploy/CMakeLists.txt rename csrc/mmdeploy/apis/c/{ => mmdeploy}/classifier.cpp (57%) rename csrc/mmdeploy/apis/c/{ => mmdeploy}/classifier.h (64%) rename csrc/mmdeploy/apis/c/{ => mmdeploy}/common.cpp (66%) create mode 100644 csrc/mmdeploy/apis/c/mmdeploy/common.h rename csrc/mmdeploy/apis/c/{ => mmdeploy}/common_internal.h (77%) rename csrc/mmdeploy/apis/c/{ => mmdeploy}/detector.cpp (64%) rename csrc/mmdeploy/apis/c/{ => mmdeploy}/detector.h (65%) rename csrc/mmdeploy/apis/c/{ => mmdeploy}/executor.cpp (93%) rename csrc/mmdeploy/apis/c/{ => mmdeploy}/executor.h (100%) rename csrc/mmdeploy/apis/c/{ => mmdeploy}/executor_internal.h (100%) rename csrc/mmdeploy/apis/c/{ => mmdeploy}/handle.h (85%) rename csrc/mmdeploy/apis/c/{ => mmdeploy}/model.cpp (51%) rename csrc/mmdeploy/apis/c/{ => mmdeploy}/model.h (86%) rename csrc/mmdeploy/apis/c/{ => mmdeploy}/pipeline.cpp (57%) rename 
csrc/mmdeploy/apis/c/{ => mmdeploy}/pipeline.h (69%) rename csrc/mmdeploy/apis/c/{ => mmdeploy}/pose_detector.cpp (63%) rename csrc/mmdeploy/apis/c/{ => mmdeploy}/pose_detector.h (55%) rename csrc/mmdeploy/apis/c/{ => mmdeploy}/restorer.cpp (55%) rename csrc/mmdeploy/apis/c/{ => mmdeploy}/restorer.h (59%) rename csrc/mmdeploy/apis/c/{ => mmdeploy}/rotated_detector.cpp (51%) rename csrc/mmdeploy/apis/c/{ => mmdeploy}/rotated_detector.h (67%) rename csrc/mmdeploy/apis/c/{ => mmdeploy}/segmentor.cpp (58%) rename csrc/mmdeploy/apis/c/{ => mmdeploy}/segmentor.h (61%) rename csrc/mmdeploy/apis/c/{ => mmdeploy}/text_detector.cpp (62%) rename csrc/mmdeploy/apis/c/{ => mmdeploy}/text_detector.h (65%) rename csrc/mmdeploy/apis/c/{ => mmdeploy}/text_recognizer.cpp (61%) rename csrc/mmdeploy/apis/c/{ => mmdeploy}/text_recognizer.h (60%) create mode 100644 csrc/mmdeploy/apis/cxx/CMakeLists.txt create mode 100644 csrc/mmdeploy/apis/cxx/mmdeploy/classifier.hpp create mode 100644 csrc/mmdeploy/apis/cxx/mmdeploy/common.hpp create mode 100644 csrc/mmdeploy/apis/cxx/mmdeploy/detector.hpp create mode 100644 csrc/mmdeploy/apis/cxx/mmdeploy/pose_detector.hpp create mode 100644 csrc/mmdeploy/apis/cxx/mmdeploy/restorer.hpp create mode 100644 csrc/mmdeploy/apis/cxx/mmdeploy/rotated_detector.hpp create mode 100644 csrc/mmdeploy/apis/cxx/mmdeploy/segmentor.hpp create mode 100644 csrc/mmdeploy/apis/cxx/mmdeploy/text_detector.hpp create mode 100644 csrc/mmdeploy/apis/cxx/mmdeploy/text_recognizer.hpp create mode 100644 csrc/mmdeploy/apis/java/CMakeLists.txt create mode 100644 csrc/mmdeploy/apis/java/README.md create mode 100644 csrc/mmdeploy/apis/java/mmdeploy/Classifier.java create mode 100644 csrc/mmdeploy/apis/java/mmdeploy/DataType.java create mode 100644 csrc/mmdeploy/apis/java/mmdeploy/Detector.java create mode 100644 csrc/mmdeploy/apis/java/mmdeploy/InstanceMask.java create mode 100644 csrc/mmdeploy/apis/java/mmdeploy/Mat.java create mode 100644 csrc/mmdeploy/apis/java/mmdeploy/PixelFormat.java create mode 100644 csrc/mmdeploy/apis/java/mmdeploy/PointF.java create mode 100644 csrc/mmdeploy/apis/java/mmdeploy/PoseDetector.java create mode 100644 csrc/mmdeploy/apis/java/mmdeploy/Rect.java create mode 100644 csrc/mmdeploy/apis/java/mmdeploy/Restorer.java create mode 100644 csrc/mmdeploy/apis/java/mmdeploy/Segmentor.java create mode 100644 csrc/mmdeploy/apis/java/mmdeploy/TextDetector.java create mode 100644 csrc/mmdeploy/apis/java/mmdeploy/TextRecognizer.java create mode 100644 csrc/mmdeploy/apis/java/native/CMakeLists.txt create mode 100644 csrc/mmdeploy/apis/java/native/common.h create mode 100644 csrc/mmdeploy/apis/java/native/mmdeploy_Classifier.cpp create mode 100644 csrc/mmdeploy/apis/java/native/mmdeploy_Classifier.h create mode 100644 csrc/mmdeploy/apis/java/native/mmdeploy_Detector.cpp create mode 100644 csrc/mmdeploy/apis/java/native/mmdeploy_Detector.h create mode 100644 csrc/mmdeploy/apis/java/native/mmdeploy_PoseDetector.cpp create mode 100644 csrc/mmdeploy/apis/java/native/mmdeploy_PoseDetector.h create mode 100644 csrc/mmdeploy/apis/java/native/mmdeploy_Restorer.cpp create mode 100644 csrc/mmdeploy/apis/java/native/mmdeploy_Restorer.h create mode 100644 csrc/mmdeploy/apis/java/native/mmdeploy_Segmentor.cpp create mode 100644 csrc/mmdeploy/apis/java/native/mmdeploy_Segmentor.h create mode 100644 csrc/mmdeploy/apis/java/native/mmdeploy_TextDetector.cpp create mode 100644 csrc/mmdeploy/apis/java/native/mmdeploy_TextDetector.h create mode 100644 csrc/mmdeploy/apis/java/native/mmdeploy_TextRecognizer.cpp 
create mode 100644 csrc/mmdeploy/apis/java/native/mmdeploy_TextRecognizer.h create mode 100644 csrc/mmdeploy/backend_ops/tensorrt/grid_priors/trt_grid_priors.cpp create mode 100644 csrc/mmdeploy/backend_ops/tensorrt/grid_priors/trt_grid_priors.hpp create mode 100644 csrc/mmdeploy/backend_ops/tensorrt/grid_priors/trt_grid_priors_kernel.cu create mode 100644 csrc/mmdeploy/backend_ops/tensorrt/grid_priors/trt_grid_priors_kernel.hpp create mode 100644 csrc/mmdeploy/backend_ops/tensorrt/multi_level_rotated_roi_align/trt_multi_level_rotated_roi_align.cpp create mode 100644 csrc/mmdeploy/backend_ops/tensorrt/multi_level_rotated_roi_align/trt_multi_level_rotated_roi_align.hpp create mode 100644 csrc/mmdeploy/backend_ops/tensorrt/multi_level_rotated_roi_align/trt_multi_level_rotated_roi_align_kernel.cu create mode 100644 csrc/mmdeploy/backend_ops/tensorrt/multi_level_rotated_roi_align/trt_multi_level_rotated_roi_align_kernel.hpp create mode 100644 csrc/mmdeploy/backend_ops/torchscript/optimizer/passes/onnx/common_subgraph_elimination.cpp create mode 100644 csrc/mmdeploy/backend_ops/torchscript/optimizer/passes/onnx/common_subgraph_elimination.h create mode 100644 csrc/mmdeploy/backend_ops/torchscript/optimizer/passes/onnx/fuse_select_assign.cpp create mode 100644 csrc/mmdeploy/backend_ops/torchscript/optimizer/passes/onnx/fuse_select_assign.h create mode 100644 csrc/mmdeploy/net/snpe/CMakeLists.txt create mode 100644 csrc/mmdeploy/net/snpe/snpe_net.cpp create mode 100644 csrc/mmdeploy/net/snpe/snpe_net.h create mode 100644 demo/csrc/classifier.cxx create mode 100644 demo/csrc/detector.cxx create mode 100644 demo/csrc/pose_detector.cxx create mode 100644 demo/csrc/restorer.cxx create mode 100644 demo/csrc/rotated_detector.cxx create mode 100644 demo/csrc/segmentor.cxx create mode 100644 demo/csrc/text_ocr.cxx create mode 100644 demo/java/ImageClassification.java create mode 100644 demo/java/ImageRestorer.java create mode 100644 demo/java/ImageSegmentation.java create mode 100644 demo/java/ObjectDetection.java create mode 100644 demo/java/Ocr.java create mode 100644 demo/java/PoseDetection.java create mode 100644 demo/java/README.md create mode 100644 demo/java/Utils.java create mode 100644 docs/en/01-how-to-build/snpe.md create mode 100644 docs/en/02-how-to-run/prebuilt_package_windows.md create mode 100644 docs/en/03-benchmark/benchmark_edge.md create mode 100644 docs/en/06-developer-guide/partition_model.md create mode 100644 docs/en/appendix/cross_build_snpe_service.md create mode 100644 docs/zh_cn/01-how-to-build/snpe.md create mode 100644 docs/zh_cn/02-how-to-run/prebuilt_package_windows.md create mode 100644 docs/zh_cn/03-benchmark/benchmark_edge.md create mode 100644 docs/zh_cn/04-developer-guide/partition_model.md create mode 100644 docs/zh_cn/appendix/cross_build_snpe_service.md create mode 100644 mmdeploy/apis/snpe/__init__.py create mode 100644 mmdeploy/backend/snpe/__init__.py create mode 100644 mmdeploy/backend/snpe/init_plugins.py create mode 100644 mmdeploy/backend/snpe/onnx2dlc.py create mode 100644 mmdeploy/backend/snpe/wrapper.py create mode 100644 mmdeploy/codebase/mmdet/core/anchor.py create mode 100644 mmdeploy/codebase/mmdet/core/bbox/distance_point_bbox_coder.py create mode 100644 mmdeploy/codebase/mmdet/core/point_generator.py create mode 100644 mmdeploy/codebase/mmdet/models/transformer.py create mode 100644 mmdeploy/codebase/mmrotate/core/bbox/gliding_vertex_coder.py create mode 100644 mmdeploy/codebase/mmrotate/core/bbox/transforms.py create mode 100644 
mmdeploy/codebase/mmrotate/models/dense_heads/__init__.py create mode 100644 mmdeploy/codebase/mmrotate/models/dense_heads/oriented_rpn_head.py rename mmdeploy/codebase/mmrotate/models/{ => dense_heads}/rotated_anchor_head.py (100%) rename mmdeploy/codebase/mmrotate/models/{ => dense_heads}/rotated_rpn_head.py (91%) create mode 100644 mmdeploy/codebase/mmrotate/models/roi_heads/__init__.py create mode 100644 mmdeploy/codebase/mmrotate/models/roi_heads/gv_bbox_head.py create mode 100644 mmdeploy/codebase/mmrotate/models/roi_heads/gv_ratio_roi_head.py rename mmdeploy/codebase/mmrotate/models/{ => roi_heads}/oriented_standard_roi_head.py (93%) create mode 100644 mmdeploy/codebase/mmrotate/models/roi_heads/roi_extractors.py create mode 100644 mmdeploy/codebase/mmrotate/models/roi_heads/roi_trans_roi_head.py rename mmdeploy/codebase/mmrotate/models/{ => roi_heads}/rotated_bbox_head.py (87%) create mode 100644 mmdeploy/mmcv/cnn/__init__.py create mode 100644 mmdeploy/mmcv/cnn/transformer.py create mode 100644 mmdeploy/pytorch/functions/expand.py create mode 100644 mmdeploy/pytorch/functions/masked_fill.py create mode 100644 mmdeploy/pytorch/functions/normalize.py create mode 100644 mmdeploy/pytorch/functions/tensor_setitem.py create mode 100644 mmdeploy/pytorch/ops/pad.py create mode 100644 mmdeploy/pytorch/ops/roll.py create mode 100644 requirements/codebases.txt create mode 100644 service/snpe/client/inference_pb2.py create mode 100644 service/snpe/client/inference_pb2_grpc.py create mode 100644 service/snpe/inference.proto create mode 100644 service/snpe/server/CMakeLists.txt create mode 100644 service/snpe/server/common.cmake create mode 100644 service/snpe/server/inference_server.cc create mode 100644 service/snpe/server/scope_timer.h create mode 100644 service/snpe/server/service_impl.cpp create mode 100644 service/snpe/server/service_impl.h create mode 100644 service/snpe/server/text_table.h create mode 100644 tests/test_mmcv/test_mmcv_cnn.py create mode 100644 tools/onnx2dlc.py create mode 100644 tools/package_tools/README.md create mode 100644 tools/profile.py diff --git a/.circleci/config.yml b/.circleci/config.yml index cca73cd0e..a7405f052 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,36 +1,39 @@ -# Use the latest 2.1 version of CircleCI pipeline process engine. -# See: https://circleci.com/docs/2.0/configuration-reference version: 2.1 -# Define a job to be invoked later in a workflow. -# See: https://circleci.com/docs/2.0/configuration-reference/#jobs -jobs: - lint: - # Specify the execution environment. You can specify an image from Dockerhub or use one of our Convenience Images from CircleCI's Developer Hub. 
- # See: https://circleci.com/docs/2.0/configuration-reference/#docker-machine-macos-windows-executor - docker: - image: cimg/python:3.7.4 - # Add steps to the job - # See: https://circleci.com/docs/2.0/configuration-reference/#steps - steps: - - checkout - - run: - name: Install pre-commit hook - command: | - pip install pre-commit - pre-commit install - - run: - name: Linting - command: pre-commit run --all-files - - run: - name: Check docstring coverage - command: | - pip install interrogate - interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --ignore-regex "__repr__" --fail-under 80 mmdeploy +# this allows you to use CircleCI's dynamic configuration feature +setup: true + +# the path-filtering orb is required to continue a pipeline based on +# the path of an updated fileset +orbs: + path-filtering: circleci/path-filtering@0.1.2 -# Invoke jobs via workflows -# See: https://circleci.com/docs/2.0/configuration-reference/#workflows workflows: - pr_stage_test: + # the always-run workflow is always triggered, regardless of the pipeline parameters. + always-run: jobs: - - lint + # the path-filtering/filter job determines which pipeline + # parameters to update. + - path-filtering/filter: + name: check-updated-files + # 3-column, whitespace-delimited mapping. One mapping per + # line: + # <regex path-to-test> <parameter-to-set> <value-of-parameter> + mapping: | + .circleci/.* lint_only false + cmake/.* lint_only false + configs/.* lint_only false + csrc/.* lint_only false + demo/csrc/.* lint_only false + docker/.* lint_only false + mmdeploy/.* lint_only false + requirements/.* lint_only false + tests/.* lint_only false + third_party/.* lint_only false + tools/.* lint_only false + base-revision: master + # this is the path of the configuration we should trigger once + # path filtering and pipeline parameter value updates are + # complete. In this case, we are using the parent dynamic + # configuration itself.
+ config-path: .circleci/test.yml diff --git a/.circleci/docker/Dockerfile b/.circleci/docker/Dockerfile new file mode 100644 index 000000000..979195e9b --- /dev/null +++ b/.circleci/docker/Dockerfile @@ -0,0 +1,41 @@ +FROM nvcr.io/nvidia/tensorrt:21.04-py3 + +ARG CUDA=11.3 +ARG PYTHON_VERSION=3.8 +ARG TORCH_VERSION=1.10.0 +ARG TORCHVISION_VERSION=0.11.0 +ARG MMCV_VERSION=1.5.0 +ARG PPLCV_VERSION=0.7.0 +ENV FORCE_CUDA="1" + +ENV DEBIAN_FRONTEND=noninteractive + +### update apt and install libs +RUN apt-get update &&\ + apt-get install -y libopencv-dev --no-install-recommends &&\ + rm -rf /var/lib/apt/lists/* + +RUN curl -fsSL -v -o ~/miniconda.sh -O https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh && \ + chmod +x ~/miniconda.sh && \ + ~/miniconda.sh -b -p /opt/conda && \ + rm ~/miniconda.sh && \ + /opt/conda/bin/conda install -y python=${PYTHON_VERSION} && \ + /opt/conda/bin/conda clean -ya + +### pytorch +RUN /opt/conda/bin/conda install pytorch==${TORCH_VERSION} torchvision==${TORCHVISION_VERSION} cudatoolkit=${CUDA} -c pytorch -c conda-forge +ENV PATH /opt/conda/bin:$PATH + +### install mmcv-full +RUN /opt/conda/bin/pip install mmcv-full==${MMCV_VERSION} -f https://download.openmmlab.com/mmcv/dist/cu${CUDA//./}/torch${TORCH_VERSION}/index.html + +WORKDIR /workspace + +### build ppl.cv +RUN git clone https://github.com/openppl-public/ppl.cv.git &&\ + cd ppl.cv &&\ + git checkout tags/v${PPLCV_VERSION} -b v${PPLCV_VERSION} &&\ + ./build.sh cuda + +# RUN ln -sf /opt/conda /home/circleci/project/conda +ENV TENSORRT_DIR=/workspace/tensorrt diff --git a/.circleci/scripts/linux/build.sh b/.circleci/scripts/linux/build.sh new file mode 100644 index 000000000..fdd55ca60 --- /dev/null +++ b/.circleci/scripts/linux/build.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +ARGS=("$@") + +cd mmdeploy +MMDEPLOY_DIR=$(pwd) +mkdir -p build && cd build +cmake .. -DMMDEPLOY_BUILD_SDK=ON -DMMDEPLOY_BUILD_TEST=ON -DMMDEPLOY_BUILD_SDK_PYTHON_API=ON \ + -DMMDEPLOY_BUILD_SDK_CXX_API=ON -DMMDEPLOY_BUILD_SDK_CSHARP_API=ON \ + -DMMDEPLOY_TARGET_DEVICES="$1" -DMMDEPLOY_TARGET_BACKENDS="$2" "${ARGS[@]:2}" + +make -j$(nproc) && make install +cd install/example +mkdir -p build +cd build +cmake ../cpp -DMMDeploy_DIR="$MMDEPLOY_DIR"/build/install/lib/cmake/MMDeploy "${ARGS[@]:2}" && make -j$(nproc) diff --git a/.circleci/scripts/linux/convert_onnxruntime.sh b/.circleci/scripts/linux/convert_onnxruntime.sh new file mode 100644 index 000000000..4ac57f453 --- /dev/null +++ b/.circleci/scripts/linux/convert_onnxruntime.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +if [ $# != 2 ]; then + echo "wrong command. usage: bash converter.sh " + exit 1 +fi + +if [ "$1" == 'mmcls' ]; then + python3 -m pip install mmcls + git clone --recursive https://github.com/open-mmlab/mmclassification.git + wget https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_8xb32_in1k_20210831-fbbb1da6.pth + python3 mmdeploy/tools/deploy.py \ + mmdeploy/configs/mmcls/classification_onnxruntime_dynamic.py \ + mmclassification/configs/resnet/resnet18_8xb32_in1k.py \ + resnet18_8xb32_in1k_20210831-fbbb1da6.pth \ + mmclassification/demo/demo.JPEG \ + --work-dir "$2" --dump-info +fi diff --git a/.circleci/scripts/linux/install_onnxruntime.sh b/.circleci/scripts/linux/install_onnxruntime.sh new file mode 100644 index 000000000..797673b6b --- /dev/null +++ b/.circleci/scripts/linux/install_onnxruntime.sh @@ -0,0 +1,30 @@ +#!/bin/bash + +if [ $# != 2 ]; then + echo "wrong command. 
usage: bash install_onnxruntime.sh <cpu|cuda> <version>" + exit 1 +fi + +PLATFORM=$1 +VERSION=$2 + +if [ "$PLATFORM" == 'cpu' ]; then + python -m pip install onnxruntime=="$VERSION" + + wget https://github.com/microsoft/onnxruntime/releases/download/v"$VERSION"/onnxruntime-linux-x64-"$VERSION".tgz + tar -zxvf onnxruntime-linux-x64-"$VERSION".tgz + ln -sf onnxruntime-linux-x64-"$VERSION" onnxruntime +elif [ "$PLATFORM" == 'cuda' ]; then + pip install onnxruntime-gpu=="$VERSION" + + wget https://github.com/microsoft/onnxruntime/releases/download/v"$VERSION"/onnxruntime-linux-x64-gpu-"$VERSION".tgz + tar -zxvf onnxruntime-linux-x64-gpu-"$VERSION".tgz + ln -sf onnxruntime-linux-x64-gpu-"$VERSION" onnxruntime +else + echo "'$PLATFORM' is not supported" + exit 1 +fi + +export ONNXRUNTIME_DIR=$(pwd)/onnxruntime +echo "export ONNXRUNTIME_DIR=${ONNXRUNTIME_DIR}" >> ~/.bashrc +echo "export LD_LIBRARY_PATH=$ONNXRUNTIME_DIR/lib:$LD_LIBRARY_PATH" >> ~/.bashrc diff --git a/.circleci/scripts/linux/install_python.sh b/.circleci/scripts/linux/install_python.sh new file mode 100644 index 000000000..dda31121e --- /dev/null +++ b/.circleci/scripts/linux/install_python.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +if [ $# -lt 1 ]; then + echo 'use python 3.8.5 as default' + PYTHON_VERSION=3.8.5 +else + PYTHON_VERSION=$1 +fi + +sudo apt-get update +# liblzma-dev need to be installed. Refer to https://github.com/pytorch/vision/issues/2921 +# python3-tk tk-dev is for 'import tkinter' +sudo apt-get install -y liblzma-dev python3-tk tk-dev +# python3+ need to be reinstalled due to https://github.com/pytorch/vision/issues/2921 +pyenv uninstall -f "$PYTHON_VERSION" +pyenv install "$PYTHON_VERSION" +pyenv global "$PYTHON_VERSION" diff --git a/.circleci/scripts/windows/install_onnxruntime.ps1 b/.circleci/scripts/windows/install_onnxruntime.ps1 new file mode 100644 index 000000000..f9ded89c5 --- /dev/null +++ b/.circleci/scripts/windows/install_onnxruntime.ps1 @@ -0,0 +1,19 @@ +if ($args.Count -lt 2) { + Write-Host "wrong command. usage: install_onnxruntime.ps1 <cpu|cuda> <version>" + Exit 1 +} + +$platform = $args[0] +$version = $args[1] + +if ($platform -eq "cpu") { + python -m pip install onnxruntime==$version + Invoke-WebRequest -Uri https://github.com/microsoft/onnxruntime/releases/download/v$version/onnxruntime-win-x64-$version.zip -OutFile onnxruntime.zip + Expand-Archive onnxruntime.zip . + Move-Item onnxruntime-win-x64-$version onnxruntime +} elseif ($platform -eq "cuda") { + Write-Host "TODO: install onnxruntime-gpu" + Exit +} else { + Write-Host "'$platform' is not supported" +} diff --git a/.circleci/scripts/windows/install_opencv.ps1 b/.circleci/scripts/windows/install_opencv.ps1 new file mode 100644 index 000000000..2e5cae5f7 --- /dev/null +++ b/.circleci/scripts/windows/install_opencv.ps1 @@ -0,0 +1,3 @@ +Invoke-WebRequest -Uri https://download.openmmlab.com/mmdeploy/library/opencv-4.5.5.zip -OutFile opencv.zip +Expand-Archive opencv.zip . +Move-Item opencv-4.5.5 opencv diff --git a/.circleci/test.yml b/.circleci/test.yml new file mode 100644 index 000000000..d751ae6a6 --- /dev/null +++ b/.circleci/test.yml @@ -0,0 +1,313 @@ +# Use the latest 2.1 version of CircleCI pipeline process engine.
+# See: https://circleci.com/docs/2.0/configuration-reference +version: 2.1 + +orbs: + win: circleci/windows@4.1 + +# the default pipeline parameters, which will be updated according to +# the results of the path-filtering orb +parameters: + lint_only: + type: boolean + default: true + +executors: + ubuntu-2004-cpu: + machine: + image: ubuntu-2004:202010-01 + resource_class: large + working_directory: ~ + ubuntu-2004-cu114: + machine: + image: ubuntu-2004-cuda-11.4:202110-01 + docker_layer_caching: true + resource_class: gpu.nvidia.medium + working_directory: ~ + +# MMDeploy Rules +# - In the command section, each command is requested to be os platform independent. Any command related to OS platform should be put in `scripts` folder +# - Use `python` instead of `python3` since there is no `python3` on Windows platform +# - DO NOT use `\` to break the line, as it is not identified correctly on Windows platform. So just don't break the line :) +commands: + checkout_full: + description: "Checkout mmdeploy" + steps: + - checkout: + path: mmdeploy # relative to `working_directory` + - run: + name: Checkout submodule + command: | + cd mmdeploy + git submodule sync + git submodule update --init + upgrade_pip: + steps: + - run: + name: Upgrade pip + command: python -m pip install --upgrade pip + install_pytorch: + parameters: + platform: + type: string + default: cpu + torch: + type: string + default: 1.8.0 + torchvision: + type: string + default: 0.9.0 + steps: + - run: + name: Install PyTorch + command: | + python -m pip install torch==<< parameters.torch >>+<< parameters.platform >> torchvision==<< parameters.torchvision >>+<< parameters.platform >> -f https://download.pytorch.org/whl/torch_stable.html + install_mmcv_cpu: + parameters: + version: + type: string + default: 1.5.0 + torch: + type: string + default: 1.8.0 + steps: + - run: + name: Install mmcv-full + command: | + python -m pip install opencv-python==4.5.4.60 + python -m pip install mmcv-full==<< parameters.version >> -f https://download.openmmlab.com/mmcv/dist/cpu/torch<< parameters.torch >>/index.html + install_mmcv_cuda: + parameters: + version: + type: string + default: 1.5.0 + cuda: + type: string + default: cu111 + torch: + type: string + default: 1.8.0 + steps: + - run: + name: Install mmcv-full + command: | + python -m pip install opencv-python==4.5.4.60 + python -m pip install mmcv-full==<< parameters.version >> -f https://download.openmmlab.com/mmcv/dist/<< parameters.cuda >>/torch<< parameters.torch >>/index.html + install_mmdeploy: + description: "Install MMDeploy" + steps: + - run: + name: Install MMDeploy + command: | + cd mmdeploy + python -m pip install -v -e . + install_model_converter_req: + steps: + - run: + name: Install requirements + command: | + cd mmdeploy + python -m pip install -r requirements/codebases.txt + python -m pip install -r requirements/tests.txt + python -m pip install -r requirements/runtime.txt + python -m pip install -U numpy + cd .. + perform_model_converter_ut: + steps: + - run: + name: Perform Model Converter unittests + command: | + cd mmdeploy + coverage run --branch --source mmdeploy -m pytest -rsE tests + coverage xml + coverage report -m + cd .. +jobs: + lint: + # Specify the execution environment. You can specify an image from Dockerhub or use one of our Convenience Images from CircleCI's Developer Hub. 
+ # See: https://circleci.com/docs/2.0/configuration-reference/#docker-machine-macos-windows-executor + docker: + - image: cimg/python:3.7.4 + # Add steps to the job + # See: https://circleci.com/docs/2.0/configuration-reference/#steps + steps: + - checkout + - run: + name: Install pre-commit hook + command: | + pip install pre-commit + pre-commit install + - run: + name: Linting + command: pre-commit run --all-files + - run: + name: Check docstring coverage + command: | + pip install interrogate + interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --ignore-regex "__repr__" --fail-under 80 mmdeploy + + test_linux_tensorrt: + executor: ubuntu-2004-cu114 + steps: + - checkout_full + - run: + name: Build docker + command: | + docker build mmdeploy/.circleci/docker/ -t mmdeploy:gpu + - run: + name: Build MMDeploy + command: | + docker run --gpus all -t -d -v /home/circleci/project/:/project -w /project --name mmdeploy mmdeploy:gpu + docker exec mmdeploy bash mmdeploy/.circleci/scripts/linux/build.sh cuda trt -Dpplcv_DIR=/workspace/ppl.cv/cuda-build/install/lib/cmake/ppl + - run: + name: Install MMDeploy + # https://stackoverflow.com/questions/28037802/docker-exec-failed-cd-executable-file-not-found-in-path + command: | + docker exec -i mmdeploy bash -c "cd mmdeploy && pip install -v -e ." + - run: + name: Install requirements + command: | + docker exec mmdeploy pip install onnxruntime==1.8.1 + docker exec mmdeploy pip install -r mmdeploy/requirements/codebases.txt + docker exec mmdeploy pip install -r mmdeploy/requirements/tests.txt + docker exec mmdeploy pip install -r mmdeploy/requirements/runtime.txt + docker exec mmdeploy pip install -U numpy + - run: + name: Perform Model Converter unittests + command: | + docker exec -i mmdeploy bash -c "cd mmdeploy && coverage run --branch --source mmdeploy -m pytest -rsE tests && coverage xml && coverage report -m" + - run: + name: Run SDK unittests + command: | + docker exec mmdeploy mkdir -p mmdeploy_test_resources/transform + docker exec mmdeploy cp mmdeploy/demo/resources/human-pose.jpg mmdeploy_test_resources/transform + docker exec mmdeploy ./mmdeploy/build/bin/mmdeploy_tests + + test_windows_onnxruntime: + parameters: + version: + type: string + default: 1.8.1 + executor: + name: win/default + steps: + - checkout_full + - upgrade_pip + - install_pytorch + - install_mmcv_cpu + - run: + name: Install ONNX Runtime + command: mmdeploy/.circleci/scripts/windows/install_onnxruntime.ps1 cpu << parameters.version >> + - run: + name: Install OpenCV + command: mmdeploy/.circleci/scripts/windows/install_opencv.ps1 + - run: + name: Build MMDeploy + command: | + $env:path = "C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\Common7\IDE\CommonExtensions\Microsoft\CMake\CMake\bin;" + $env:path + $env:ONNXRUNTIME_DIR = "$pwd\onnxruntime" + $env:OPENCV_PACKAGE_DIR = "$(pwd)\opencv" + $env:MMDEPLOY_DIR = "$(pwd)\mmdeploy" + cd mmdeploy + mkdir build -ErrorAction SilentlyContinue + cd build + cmake .. -G "Visual Studio 16 2019" -A x64 -T v142 ` + -DCMAKE_SYSTEM_VERSION="10.0.18362.0" ` + -DMMDEPLOY_BUILD_SDK=ON ` + -DMMDEPLOY_BUILD_TEST=ON ` + -DMMDEPLOY_BUILD_SDK_PYTHON_API=ON ` + -DMMDEPLOY_BUILD_SDK_CXX_API=ON ` + -DMMDEPLOY_BUILD_SDK_CSHARP_API=ON ` + -DMMDEPLOY_TARGET_BACKENDS="ort" ` + -DOpenCV_DIR="$env:OPENCV_PACKAGE_DIR" + cmake --build . --config Release -- /m + cmake --install . 
--config Release + cd install/example + mkdir build -ErrorAction SilentlyContinue + cd build + cmake ../cpp -G "Visual Studio 16 2019" -A x64 -T v142 ` + -DMMDeploy_DIR="$env:MMDEPLOY_DIR/build/install/lib/cmake/MMDeploy" ` + -DOpenCV_DIR="$env:OPENCV_PACKAGE_DIR" + cmake --build . --config Release -- /m + - install_mmdeploy + - install_model_converter_req + - perform_model_converter_ut + - run: + name: Perform SDK Unittests + command: | + $env:path = "$(pwd)\onnxruntime\lib;" + $env:path + $env:path = "$(pwd)\opencv\x64\vc16\bin;" + $env:path + mkdir mmdeploy_test_resources\transform + cp .\mmdeploy\demo\resources\human-pose.jpg mmdeploy_test_resources\transform + .\mmdeploy\build\bin\Release\mmdeploy_tests.exe + + test_linux_onnxruntime: + parameters: + version: + type: string + default: 1.8.1 + executor: ubuntu-2004-cpu + steps: + - checkout_full + - run: + name: Re-install Python + command: bash mmdeploy/.circleci/scripts/linux/install_python.sh + - upgrade_pip + - install_pytorch + - install_mmcv_cpu + - run: + name: Install ONNX Runtime + command: bash mmdeploy/.circleci/scripts/linux/install_onnxruntime.sh cpu << parameters.version >> + - run: + name: Build MMDeploy + command: | + sudo apt-get update + sudo apt-get install libopencv-dev libpython3.8 python3.8-dev + bash mmdeploy/.circleci/scripts/linux/build.sh cpu ort + - install_mmdeploy + - install_model_converter_req + - perform_model_converter_ut + - run: + name: Perform SDK unittests + command: | + mkdir -p mmdeploy_test_resources/transform + cp -rf ./mmdeploy/demo/resources/human-pose.jpg mmdeploy_test_resources/transform + ./mmdeploy/build/bin/mmdeploy_tests + - run: + name: Convert model + command: | + bash mmdeploy/.circleci/scripts/linux/convert_onnxruntime.sh mmcls mmdeploy-models/mmcls/onnxruntime + - run: + name: Inference model by SDK + command: | + mmdeploy/build/install/example/build/image_classification cpu mmdeploy-models/mmcls/onnxruntime mmclassification/demo/demo.JPEG + + +# See: https://circleci.com/docs/2.0/configuration-reference/#workflows +workflows: + pr_stage_lint: + when: << pipeline.parameters.lint_only >> + jobs: + - lint + pr_stage_test: + when: + not: + << pipeline.parameters.lint_only >> + jobs: + - lint + - test_linux_onnxruntime: + version: 1.8.1 + requires: + - lint + - test_windows_onnxruntime: + version: 1.8.1 + requires: + - lint + - hold: + type: approval + requires: + - test_linux_onnxruntime + - test_windows_onnxruntime + - test_linux_tensorrt: + requires: + - hold diff --git a/.github/scripts/test_java_demo.py b/.github/scripts/test_java_demo.py new file mode 100644 index 000000000..432cfd4f7 --- /dev/null +++ b/.github/scripts/test_java_demo.py @@ -0,0 +1,84 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +import os + +# list of dict: task name and deploy configs. 
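+# Each entry below pairs a Java demo task with the SDK model archive(s) it downloads.
+# Ocr lists two archives because the demo chains a text detector (dbnet) and a recognizer (crnn).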
+ +PARAMS = [ + { + 'task': + 'ImageClassification', + 'configs': [ + 'https://media.githubusercontent.com/media/hanrui1sensetime/mmdeploy-javaapi-testdata/master/resnet.tar' # noqa: E501 + ] + }, + { + 'task': + 'ObjectDetection', + 'configs': [ + 'https://media.githubusercontent.com/media/hanrui1sensetime/mmdeploy-javaapi-testdata/master/mobilessd.tar' # noqa: E501 + ] + }, + { + 'task': + 'ImageSegmentation', + 'configs': [ + 'https://media.githubusercontent.com/media/hanrui1sensetime/mmdeploy-javaapi-testdata/master/fcn.tar' # noqa: E501 + ] + }, + { + 'task': + 'ImageRestorer', + 'configs': [ + 'https://media.githubusercontent.com/media/hanrui1sensetime/mmdeploy-javaapi-testdata/master/srcnn.tar' # noqa: E501 + ] + }, + { + 'task': + 'Ocr', + 'configs': [ + 'https://media.githubusercontent.com/media/hanrui1sensetime/mmdeploy-javaapi-testdata/master/dbnet.tar', # noqa: E501 + 'https://media.githubusercontent.com/media/hanrui1sensetime/mmdeploy-javaapi-testdata/master/crnn.tar' # noqa: E501 + ] + }, + { + 'task': + 'PoseDetection', + 'configs': [ + 'https://media.githubusercontent.com/media/hanrui1sensetime/mmdeploy-javaapi-testdata/master/litehrnet.tar' # noqa: E501 + ] + } +] + + +def main(): + """test java apis and demos. + + Run all java demos for test. + """ + + for params in PARAMS: + task = params['task'] + configs = params['configs'] + java_demo_cmd = [ + 'java', '-cp', 'csrc/mmdeploy/apis/java:demo/java', + 'demo/java/' + task + '.java', 'cpu' + ] + for config in configs: + model_url = config + os.system('wget {} && tar xvf {}'.format(model_url, + model_url.split('/')[-1])) + model_dir = model_url.split('/')[-1].split('.')[0] + java_demo_cmd.append(model_dir) + java_demo_cmd.append('/home/runner/work/mmdeploy/mmdeploy/demo' + + '/resources/human-pose.jpg') + java_demo_cmd_str = ' '.join(java_demo_cmd) + os.system('export JAVA_HOME=/home/runner/work/mmdeploy/mmdeploy/' + + 'jdk-18 && export PATH=${JAVA_HOME}/bin:${PATH} && java' + + ' --version && export LD_LIBRARY_PATH=/home/runner/work/' + + 'mmdeploy/mmdeploy/build/lib:${LD_LIBRARY_PATH} && ' + + java_demo_cmd_str) + + +if __name__ == '__main__': + main() diff --git a/.github/scripts/test_onnx2ncnn.py b/.github/scripts/test_onnx2ncnn.py index 539304534..bdc50f315 100644 --- a/.github/scripts/test_onnx2ncnn.py +++ b/.github/scripts/test_onnx2ncnn.py @@ -33,7 +33,8 @@ CONFIGS = [ def parse_args(): parser = argparse.ArgumentParser( description='MMDeploy onnx2ncnn test tool.') - parser.add_argument('--run', type=bool, help='Execute onnx2ncnn bin.') + parser.add_argument( + '--run', type=bool, help='Execute mmdeploy_onnx2ncnn bin.') parser.add_argument( '--repo-dir', type=str, default='~/', help='mmcls directory.') parser.add_argument( @@ -77,14 +78,16 @@ def run(args): # show processbar os.system(' '.join(download_cmd)) - convert_cmd = ['./onnx2ncnn', filename, 'onnx.param', 'onnx.bin'] + convert_cmd = [ + './mmdeploy_onnx2ncnn', filename, 'onnx.param', 'onnx.bin' + ] subprocess.run(convert_cmd, capture_output=True, check=True) def main(): """test `onnx2ncnn.cpp` - First generate onnx model then convert it with `onnx2ncnn`. + First generate onnx model then convert it with `mmdeploy_onnx2ncnn`. 
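+    The conversion is executed via subprocess.run with check=True, so any
+    converter failure fails this test.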
""" args = parse_args() if args.generate_onnx: diff --git a/.github/workflows/backend-ncnn.yml b/.github/workflows/backend-ncnn.yml index d00596358..1a387e3fe 100644 --- a/.github/workflows/backend-ncnn.yml +++ b/.github/workflows/backend-ncnn.yml @@ -1,4 +1,4 @@ -name: backend +name: backend-ncnn on: push: @@ -23,7 +23,6 @@ jobs: matrix: python-version: [3.7] torch: [1.9.0] - mmcv: [1.4.2] include: - torch: 1.9.0 torch_version: torch1.9 @@ -59,10 +58,10 @@ jobs: mkdir -p build && pushd build export LD_LIBRARY_PATH=/home/runner/work/mmdeploy/mmdeploy/ncnn-20220420/install/lib/:$LD_LIBRARY_PATH cmake -DMMDEPLOY_TARGET_BACKENDS=ncnn -Dncnn_DIR=/home/runner/work/mmdeploy/mmdeploy/ncnn-20220420/install/lib/cmake/ncnn/ .. - make onnx2ncnn -j2 + make mmdeploy_onnx2ncnn -j2 popd - name: Test onnx2ncnn run: | echo $(pwd) - ln -s build/bin/onnx2ncnn ./ + ln -s build/bin/mmdeploy_onnx2ncnn ./ python3 .github/scripts/test_onnx2ncnn.py --run 1 diff --git a/.github/workflows/backend-snpe.yml b/.github/workflows/backend-snpe.yml new file mode 100644 index 000000000..1391bf498 --- /dev/null +++ b/.github/workflows/backend-snpe.yml @@ -0,0 +1,60 @@ +name: backend-snpe + +on: + push: + paths-ignore: + - "demo/**" + - "tools/**" + + pull_request: + paths-ignore: + - "demo/**" + - "tools/**" + - "docs/**" + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + build_sdk_demo: + runs-on: ubuntu-18.04 + steps: + - name: Checkout repository + uses: actions/checkout@v3 + with: + submodules: 'recursive' + - name: update + run: sudo apt update + - name: Install dependencies + run: | + sudo apt install wget libprotobuf-dev protobuf-compiler + sudo apt update + sudo apt install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libxrender-dev libc++1-9 libc++abi1-9 + sudo add-apt-repository ppa:ignaciovizzo/opencv3-nonfree + sudo apt install libopencv-dev + pkg-config --libs opencv + - name: Install snpe + run: | + wget https://media.githubusercontent.com/media/tpoisonooo/mmdeploy_snpe_testdata/main/snpe-1.59.tar.gz + tar xf snpe-1.59.tar.gz + pushd snpe-1.59.0.3230 + pwd + popd + - name: Build SDK Demo with SNPE backend + run: | + mkdir -p build && pushd build + export SNPE_ROOT=/home/runner/work/mmdeploy/mmdeploy/snpe-1.59.0.3230 + export LD_LIBRARY_PATH=${SNPE_ROOT}/lib/x86_64-linux-clang:${LD_LIBRARY_PATH} + export MMDEPLOY_SNPE_X86_CI=1 + cmake .. -DCMAKE_CXX_COMPILER=g++-7 -DMMDEPLOY_SHARED_LIBS=ON -DMMDEPLOY_BUILD_SDK=ON -DMMDEPLOY_BUILD_SDK_PYTHON_API=OFF -DMMDEPLOY_TARGET_DEVICES=cpu -DMMDEPLOY_TARGET_BACKENDS=snpe -DMMDEPLOY_CODEBASES=all + make -j2 + make install + pushd install/example + mkdir build && pushd build + cmake ../cpp -DMMDeploy_DIR=${PWD}/../../lib/cmake/MMDeploy + make -j2 + ls ./* + popd + popd + popd diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 3bb0fa915..216befb86 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -21,7 +21,7 @@ concurrency: cancel-in-progress: true jobs: - build_cpu: + build_cpu_model_convert: runs-on: ubuntu-18.04 strategy: matrix: @@ -53,12 +53,41 @@ jobs: pip install -U numpy - name: Build and install run: rm -rf .eggs && pip install -e . 
- - name: Run unittests and generate coverage report + - name: Run python unittests and generate coverage report run: | coverage run --branch --source mmdeploy -m pytest -rsE tests coverage xml coverage report -m + build_cpu_sdk: + runs-on: ubuntu-18.04 + steps: + - name: Checkout repository + uses: actions/checkout@v3 + with: + submodules: 'recursive' + - name: update + run: sudo apt update + - name: gcc-multilib + run: | + sudo apt install gcc-multilib g++-multilib wget libprotobuf-dev protobuf-compiler + sudo apt update + sudo apt install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libxrender-dev libc++1-9 libc++abi1-9 + sudo add-apt-repository ppa:ignaciovizzo/opencv3-nonfree + sudo apt install libopencv-dev lcov wget + pkg-config --libs opencv + - name: Build and run SDK unit test without backend + run: | + mkdir -p build && pushd build + cmake .. -DCMAKE_CXX_COMPILER=g++-7 -DMMDEPLOY_CODEBASES=all -DMMDEPLOY_BUILD_SDK=ON -DMMDEPLOY_BUILD_SDK_PYTHON_API=OFF -DMMDEPLOY_TARGET_DEVICES=cpu -DMMDEPLOY_COVERAGE=ON -DMMDEPLOY_BUILD_TEST=ON + make -j2 + mkdir -p mmdeploy_test_resources/transform + cp ../tests/data/tiger.jpeg mmdeploy_test_resources/transform/ + ./bin/mmdeploy_tests + lcov --capture --directory . --output-file coverage.info + ls -lah coverage.info + cp coverage.info ../ + build_cuda102: runs-on: ubuntu-18.04 container: @@ -153,8 +182,8 @@ jobs: - name: Upload coverage to Codecov uses: codecov/codecov-action@v2 with: - file: ./coverage.xml + file: ./coverage.xml,./coverage.info flags: unittests - env_vars: OS,PYTHON + env_vars: OS,PYTHON,CPLUS name: codecov-umbrella fail_ci_if_error: false diff --git a/.github/workflows/java_api.yml b/.github/workflows/java_api.yml new file mode 100644 index 000000000..c2e5b9317 --- /dev/null +++ b/.github/workflows/java_api.yml @@ -0,0 +1,72 @@ +name: java_api + +on: + push: + paths-ignore: + - "tools/**" + + pull_request: + paths-ignore: + - "tools/**" + - "docs/**" + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + test_java_api: + runs-on: ubuntu-18.04 + steps: + - name: Checkout repository + uses: actions/checkout@v3 + with: + submodules: 'recursive' + - name: Set up Python 3.7 + uses: actions/setup-python@v2 + with: + python-version: 3.7 + - name: Install unittest dependencies + run: | + pip install cmake onnx + - name: update + run: sudo apt update + - name: Install OpenJDK + run: | + wget https://download.java.net/java/GA/jdk18/43f95e8614114aeaa8e8a5fcf20a682d/36/GPL/openjdk-18_linux-x64_bin.tar.gz + tar xvf openjdk-18_linux-x64_bin.tar.gz + - name: gcc-multilib + run: sudo apt install gcc-multilib g++-multilib wget libprotobuf-dev protobuf-compiler + - name: Install onnxruntime + run: | + wget https://github.com/microsoft/onnxruntime/releases/download/v1.8.1/onnxruntime-linux-x64-1.8.1.tgz + tar -zxvf onnxruntime-linux-x64-1.8.1.tgz + pushd onnxruntime-linux-x64-1.8.1 + export ONNXRUNTIME_DIR=${PWD} + export LD_LIBRARY_PATH=$ONNXRUNTIME_DIR/lib:$LD_LIBRARY_PATH + popd + - name: Install opencv + run: | + sudo apt-get install libopencv-dev + - name: Build java class + run: | + pushd csrc/mmdeploy/apis/java + javac mmdeploy/*.java + popd + pushd demo/java + javac -classpath ../../csrc/mmdeploy/apis/java/ Utils.java + popd + - name: Install mmdeploy with onnxruntime backend and java api + run: | + mkdir -p build && pushd build + export LD_LIBRARY_PATH=/home/runner/work/mmdeploy/mmdeploy/ncnn/install/lib/:$LD_LIBRARY_PATH + cmake -DMMDEPLOY_BUILD_SDK=ON 
-DMMDEPLOY_BUILD_SDK_JAVA_API=ON -DMMDEPLOY_TARGET_BACKENDS=ort -DMMDEPLOY_CODEBASES=all -DONNXRUNTIME_DIR=~/work/mmdeploy/mmdeploy/onnxruntime-linux-x64-1.8.1 .. + make install + popd + - name: Test javademo + run: | + export JAVA_HOME=${PWD}/jdk-18 + export PATH=${JAVA_HOME}/bin:${PATH} + export LD_LIBRARY_PATH=/build/lib:${LD_LIBRARY_PATH} + java --version + python3 .github/scripts/test_java_demo.py diff --git a/.gitignore b/.gitignore index 324f44bd2..7dea9d9ea 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,10 @@ __pycache__/ # C extensions *.so onnx2ncnn +mmdeploy_onnx2ncnn + +# Java classes +*.class # Distribution / packaging .Python @@ -148,3 +152,7 @@ bin/ mmdeploy/backend/ncnn/onnx2ncnn /mmdeploy-* + +# snpe +grpc-cpp-plugin +service/snpe/grpc_cpp_plugin diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0bb388f28..236e01a0c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -3,6 +3,7 @@ repos: rev: 4.0.1 hooks: - id: flake8 + args: ["--exclude=*/client/inference_pb2.py,*/client/inference_pb2_grpc.py"] - repo: https://github.com/PyCQA/isort rev: 5.10.1 hooks: diff --git a/CMakeLists.txt b/CMakeLists.txt index 1a79e7941..cfde11ec0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -5,10 +5,13 @@ endif () message(STATUS "CMAKE_INSTALL_PREFIX: ${CMAKE_INSTALL_PREFIX}") cmake_minimum_required(VERSION 3.14) -project(MMDeploy VERSION 0.5.0) +project(MMDeploy VERSION 0.7.0) set(CMAKE_CXX_STANDARD 17) +set(MMDEPLOY_VERSION_MAJOR ${PROJECT_VERSION_MAJOR}) +set(MMDEPLOY_VERSION_MINOR ${PROJECT_VERSION_MINOR}) +set(MMDEPLOY_VERSION_PATCH ${PROJECT_VERSION_PATCH}) set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) if (MSVC) @@ -21,12 +24,17 @@ set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) # options option(MMDEPLOY_SHARED_LIBS "build shared libs" ON) option(MMDEPLOY_BUILD_SDK "build MMDeploy SDK" OFF) +option(MMDEPLOY_BUILD_SDK_MONOLITHIC "build single lib for SDK API" OFF) option(MMDEPLOY_BUILD_TEST "build unittests" OFF) option(MMDEPLOY_BUILD_SDK_PYTHON_API "build SDK Python API" OFF) option(MMDEPLOY_BUILD_SDK_CXX_API "build SDK C++ API" OFF) option(MMDEPLOY_BUILD_SDK_CSHARP_API "build SDK C# API support" OFF) +option(MMDEPLOY_BUILD_SDK_JAVA_API "build SDK JAVA API" OFF) +option(MMDEPLOY_BUILD_EXAMPLES "build examples" OFF) option(MMDEPLOY_SPDLOG_EXTERNAL "use external spdlog" OFF) option(MMDEPLOY_ZIP_MODEL "support SDK model in zip format" OFF) +option(MMDEPLOY_COVERAGE "build SDK for coverage" OFF) + set(MMDEPLOY_TARGET_DEVICES "cpu" CACHE STRING "target devices to support") set(MMDEPLOY_TARGET_BACKENDS "" CACHE STRING "target inference engines to support") set(MMDEPLOY_CODEBASES "all" CACHE STRING "select OpenMMLab codebases") @@ -43,6 +51,11 @@ endif () set(MMDEPLOY_TASKS "" CACHE INTERNAL "") +if (MMDEPLOY_COVERAGE) + add_compile_options(-coverage -fprofile-arcs -ftest-coverage) + add_link_options(-coverage -lgcov) +endif () + # when CUDA devices are enabled, the environment variable ASAN_OPTIONS=protect_shadow_gap=0 # must be set at runtime if (MMDEPLOY_ASAN_ENABLE) @@ -92,6 +105,15 @@ if (MMDEPLOY_BUILD_SDK) add_subdirectory(csrc/mmdeploy/apis/python) endif () + if (MMDEPLOY_BUILD_SDK_JAVA_API) + add_subdirectory(csrc/mmdeploy/apis/java) + endif () + + if (MMDEPLOY_BUILD_EXAMPLES) + include(${CMAKE_SOURCE_DIR}/cmake/opencv.cmake) + add_subdirectory(demo/csrc) + endif () + # export MMDeploy package install(EXPORT MMDeployTargets FILE MMDeployTargets.cmake @@ -105,7 +127,10 @@ if (MMDEPLOY_BUILD_SDK) mmdeploy_add_deps(ort 
BACKENDS ${MMDEPLOY_TARGET_BACKENDS} DEPS ONNXRUNTIME) mmdeploy_add_deps(ncnn BACKENDS ${MMDEPLOY_TARGET_BACKENDS} DEPS ncnn) mmdeploy_add_deps(openvino BACKENDS ${MMDEPLOY_TARGET_BACKENDS} DEPS InferenceEngine) - mmdeploy_add_deps(pplnn BACKENDS ${MMDEPLOY_TARGET_BACKENDS} DEPS pplnn) + if (NOT MMDEPLOY_SHARED_LIBS) + mmdeploy_add_deps(pplnn BACKENDS ${MMDEPLOY_TARGET_BACKENDS} DEPS pplnn) + endif () + mmdeploy_add_deps(snpe BACKENDS ${MMDEPLOY_TARGET_BACKENDS} DEPS snpe) include(CMakePackageConfigHelpers) # generate the config file that is includes the exports @@ -141,8 +166,6 @@ if (MMDEPLOY_BUILD_SDK) DESTINATION lib/cmake/MMDeploy ) - install(DIRECTORY ${CMAKE_SOURCE_DIR}/demo/csrc/ DESTINATION example) - if (${CMAKE_VERSION} VERSION_LESS "3.17.0") install(SCRIPT cmake/post-install.cmake) endif () diff --git a/MANIFEST.in b/MANIFEST.in index 7c85a3240..f3427de2f 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -5,3 +5,6 @@ include mmdeploy/backend/ncnn/*.pyd include mmdeploy/lib/*.so include mmdeploy/lib/*.dll include mmdeploy/lib/*.pyd +include mmdeploy/backend/torchscript/*.so +include mmdeploy/backend/torchscript/*.dll +include mmdeploy/backend/torchscript/*.pyd diff --git a/README.md b/README.md index fcb00fb0a..ab966e8be 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@
OpenMMLab website - + HOT @@ -55,9 +55,9 @@ The currently supported codebases and models are as follows, and more will be in Models can be exported and run in the following backends, and more will be compatible -| ONNX Runtime | TensorRT | ppl.nn | ncnn | OpenVINO | LibTorch | more | -| ------------ | -------- | ------ | ---- | -------- | -------- | ---------------------------------------------- | -| ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | [benchmark](docs/en/03-benchmark/benchmark.md) | +| ONNX Runtime | TensorRT | ppl.nn | ncnn | OpenVINO | LibTorch | snpe | more | +| ------------ | -------- | ------ | ---- | -------- | -------- | ---- | ---------------------------------------------- | +| ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | [benchmark](docs/en/03-benchmark/benchmark.md) | ### Efficient and scalable C/C++ SDK Framework @@ -73,6 +73,7 @@ Please read [getting_started.md](docs/en/get_started.md) for the basic usage of - [Build for Win10](docs/en/01-how-to-build/windows.md) - [Build for Android](docs/en/01-how-to-build/android.md) - [Build for Jetson](docs/en/01-how-to-build/jetsons.md) + - [Build for SNPE](docs/en/01-how-to-build/snpe.md) - User Guide - [How to convert model](docs/en/02-how-to-run/convert_model.md) - [How to write config](docs/en/02-how-to-run/write_config.md) diff --git a/README_zh-CN.md b/README_zh-CN.md index 5718b4763..e77b4da14 100644 --- a/README_zh-CN.md +++ b/README_zh-CN.md @@ -53,9 +53,9 @@ MMDeploy 是 [OpenMMLab](https://openmmlab.com/) 模型部署工具箱,**为 ### 支持多种推理后端 -| ONNX Runtime | TensorRT | ppl.nn | ncnn | OpenVINO | more | -| ------------ | -------- | ------ | ---- | -------- | ------------------------------------------------- | -| ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | [benchmark](docs/zh_cn/03-benchmark/benchmark.md) | +| ONNX Runtime | TensorRT | ppl.nn | ncnn | OpenVINO | LibTorch | snpe | more | +| ------------ | -------- | ------ | ---- | -------- | -------- | ---- | ------------------------------------------------- | +| ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | ✔️ | [benchmark](docs/zh_cn/03-benchmark/benchmark.md) | ### SDK 可高度定制化 @@ -71,6 +71,7 @@ MMDeploy 是 [OpenMMLab](https://openmmlab.com/) 模型部署工具箱,**为 - [Build for Win10](docs/zh_cn/01-how-to-build/windows.md) - [Build for Android](docs/zh_cn/01-how-to-build/android.md) - [Build for Jetson](docs/en/01-how-to-build/jetsons.md) + - [Build for SNPE](docs/zh_cn/01-how-to-build/snpe.md) - 使用 - [把模型转换到推理 Backend](docs/zh_cn/02-how-to-run/convert_model.md) - [配置转换参数](docs/zh_cn/02-how-to-run/write_config.md) diff --git a/cmake/MMDeploy.cmake b/cmake/MMDeploy.cmake index 0983dc1f0..bd982e183 100644 --- a/cmake/MMDeploy.cmake +++ b/cmake/MMDeploy.cmake @@ -126,10 +126,16 @@ function (mmdeploy_load_static NAME) target_link_libraries(${NAME} PRIVATE ${ARGN}) else () _mmdeploy_flatten_modules(_MODULE_LIST ${ARGN}) - target_link_libraries(${NAME} PRIVATE - -Wl,--whole-archive - ${_MODULE_LIST} - -Wl,--no-whole-archive) + if (APPLE) + foreach (module IN LISTS _MODULE_LIST) + target_link_libraries(${NAME} PRIVATE -force_load ${module}) + endforeach () + else () + target_link_libraries(${NAME} PRIVATE + -Wl,--whole-archive + ${_MODULE_LIST} + -Wl,--no-whole-archive) + endif () endif () endfunction () @@ -158,6 +164,8 @@ function (mmdeploy_load_dynamic NAME) mmdeploy_add_module(${_LOADER_NAME} STATIC EXCLUDE ${_LOADER_PATH}) mmdeploy_load_static(${NAME} ${_LOADER_NAME}) + elseif (APPLE) + target_link_libraries(${NAME} PRIVATE ${_MODULE_LIST}) else () target_link_libraries(${NAME} PRIVATE -Wl,--no-as-needed diff --git a/cmake/MMDeployConfig.cmake.in 
b/cmake/MMDeployConfig.cmake.in index 23e8e87cc..23e728f7a 100644 --- a/cmake/MMDeployConfig.cmake.in +++ b/cmake/MMDeployConfig.cmake.in @@ -10,6 +10,11 @@ set(MMDEPLOY_TARGET_DEVICES @MMDEPLOY_TARGET_DEVICES@) set(MMDEPLOY_TARGET_BACKENDS @MMDEPLOY_TARGET_BACKENDS@) set(MMDEPLOY_BUILD_TYPE @CMAKE_BUILD_TYPE@) set(MMDEPLOY_BUILD_SHARED @MMDEPLOY_SHARED_LIBS@) +set(MMDEPLOY_BUILD_SDK_CXX_API @MMDEPLOY_BUILD_SDK_CXX_API@) +set(MMDEPLOY_BUILD_SDK_MONOLITHIC @MMDEPLOY_BUILD_SDK_MONOLITHIC@) +set(MMDEPLOY_VERSION_MAJOR @MMDEPLOY_VERSION_MAJOR@) +set(MMDEPLOY_VERSION_MINOR @MMDEPLOY_VERSION_MINOR@) +set(MMDEPLOY_VERSION_PATCH @MMDEPLOY_VERSION_PATCH@) if (NOT MMDEPLOY_BUILD_SHARED) if ("cuda" IN_LIST MMDEPLOY_TARGET_DEVICES) diff --git a/configs/_base_/backends/snpe.py b/configs/_base_/backends/snpe.py new file mode 100644 index 000000000..a96bee993 --- /dev/null +++ b/configs/_base_/backends/snpe.py @@ -0,0 +1 @@ +backend_config = dict(type='snpe') diff --git a/configs/_base_/onnx_config.py b/configs/_base_/onnx_config.py index bf48e7ab7..43621b12b 100644 --- a/configs/_base_/onnx_config.py +++ b/configs/_base_/onnx_config.py @@ -6,4 +6,5 @@ onnx_config = dict( save_file='end2end.onnx', input_names=['input'], output_names=['output'], - input_shape=None) + input_shape=None, + optimize=True) diff --git a/configs/mmcls/classification_snpe_static.py b/configs/mmcls/classification_snpe_static.py new file mode 100644 index 000000000..f80140a3a --- /dev/null +++ b/configs/mmcls/classification_snpe_static.py @@ -0,0 +1,3 @@ +_base_ = ['./classification_static.py', '../_base_/backends/snpe.py'] + +onnx_config = dict(input_shape=None) diff --git a/configs/mmcls/classification_tensorrt-fp16_static-384x384.py b/configs/mmcls/classification_tensorrt-fp16_static-384x384.py new file mode 100644 index 000000000..307aede9c --- /dev/null +++ b/configs/mmcls/classification_tensorrt-fp16_static-384x384.py @@ -0,0 +1,13 @@ +_base_ = ['./classification_static.py', '../_base_/backends/tensorrt-fp16.py'] + +onnx_config = dict(input_shape=[384, 384]) +backend_config = dict( + common_config=dict(max_workspace_size=1 << 30), + model_inputs=[ + dict( + input_shapes=dict( + input=dict( + min_shape=[1, 3, 384, 384], + opt_shape=[1, 3, 384, 384], + max_shape=[1, 3, 384, 384]))) + ]) diff --git a/configs/mmdet/detection/detection_tensorrt-fp16_dynamic-160x160-608x608.py b/configs/mmdet/detection/detection_tensorrt-fp16_dynamic-64x64-608x608.py similarity index 88% rename from configs/mmdet/detection/detection_tensorrt-fp16_dynamic-160x160-608x608.py rename to configs/mmdet/detection/detection_tensorrt-fp16_dynamic-64x64-608x608.py index 59c5e125b..7e48c6695 100644 --- a/configs/mmdet/detection/detection_tensorrt-fp16_dynamic-160x160-608x608.py +++ b/configs/mmdet/detection/detection_tensorrt-fp16_dynamic-64x64-608x608.py @@ -8,7 +8,7 @@ backend_config = dict( dict( input_shapes=dict( input=dict( - min_shape=[1, 3, 160, 160], + min_shape=[1, 3, 64, 64], opt_shape=[1, 3, 608, 608], max_shape=[1, 3, 608, 608]))) ]) diff --git a/configs/mmdet/detection/detection_tensorrt-int8_dynamic-160x160-608x608.py b/configs/mmdet/detection/detection_tensorrt-int8_dynamic-64x64-608x608.py similarity index 88% rename from configs/mmdet/detection/detection_tensorrt-int8_dynamic-160x160-608x608.py rename to configs/mmdet/detection/detection_tensorrt-int8_dynamic-64x64-608x608.py index 5a2fa9469..2a2e633df 100644 --- a/configs/mmdet/detection/detection_tensorrt-int8_dynamic-160x160-608x608.py +++ 
b/configs/mmdet/detection/detection_tensorrt-int8_dynamic-64x64-608x608.py @@ -8,7 +8,7 @@ backend_config = dict( dict( input_shapes=dict( input=dict( - min_shape=[1, 3, 160, 160], + min_shape=[1, 3, 64, 64], opt_shape=[1, 3, 608, 608], max_shape=[1, 3, 608, 608]))) ]) diff --git a/configs/mmdet/detection/detection_tensorrt_dynamic-160x160-608x608.py b/configs/mmdet/detection/detection_tensorrt_dynamic-64x64-608x608.py similarity index 87% rename from configs/mmdet/detection/detection_tensorrt_dynamic-160x160-608x608.py rename to configs/mmdet/detection/detection_tensorrt_dynamic-64x64-608x608.py index b23547e0b..d2655314f 100644 --- a/configs/mmdet/detection/detection_tensorrt_dynamic-160x160-608x608.py +++ b/configs/mmdet/detection/detection_tensorrt_dynamic-64x64-608x608.py @@ -6,7 +6,7 @@ backend_config = dict( dict( input_shapes=dict( input=dict( - min_shape=[1, 3, 160, 160], + min_shape=[1, 3, 64, 64], opt_shape=[1, 3, 608, 608], max_shape=[1, 3, 608, 608]))) ]) diff --git a/configs/mmdet/detection/yolov3_partition_onnxruntime_static.py b/configs/mmdet/detection/yolov3_partition_onnxruntime_static.py new file mode 100644 index 000000000..20e10a256 --- /dev/null +++ b/configs/mmdet/detection/yolov3_partition_onnxruntime_static.py @@ -0,0 +1,12 @@ +_base_ = ['./detection_onnxruntime_static.py'] + +onnx_config = dict(input_shape=[608, 608]) +partition_config = dict( + type='yolov3_partition', + apply_marks=True, + partition_cfg=[ + dict( + save_file='yolov3.onnx', + start=['detector_forward:input'], + end=['yolo_head:input']) + ]) diff --git a/configs/mmedit/super-resolution/super-resolution_snpe_static-256x256.py b/configs/mmedit/super-resolution/super-resolution_snpe_static-256x256.py new file mode 100644 index 000000000..2d1291646 --- /dev/null +++ b/configs/mmedit/super-resolution/super-resolution_snpe_static-256x256.py @@ -0,0 +1,2 @@ +_base_ = ['./super-resolution_static.py', '../../_base_/backends/snpe.py'] +onnx_config = dict(input_shape=[256, 256]) diff --git a/configs/mmocr/text-detection/text-detection_snpe_static.py b/configs/mmocr/text-detection/text-detection_snpe_static.py new file mode 100644 index 000000000..a47ef9464 --- /dev/null +++ b/configs/mmocr/text-detection/text-detection_snpe_static.py @@ -0,0 +1,3 @@ +_base_ = ['./text-detection_static.py', '../../_base_/backends/snpe.py'] + +onnx_config = dict(input_shape=None) diff --git a/configs/mmpose/pose-detection_snpe_static-256x256.py b/configs/mmpose/pose-detection_snpe_static-256x256.py new file mode 100644 index 000000000..4b2e6791d --- /dev/null +++ b/configs/mmpose/pose-detection_snpe_static-256x256.py @@ -0,0 +1,3 @@ +_base_ = ['./pose-detection_static.py', '../_base_/backends/snpe.py'] + +onnx_config = dict(input_shape=[256, 256]) diff --git a/configs/mmpose/pose-detection_tensorrt_dynamic-256x192.py b/configs/mmpose/pose-detection_tensorrt_dynamic-256x192.py new file mode 100644 index 000000000..c230992d9 --- /dev/null +++ b/configs/mmpose/pose-detection_tensorrt_dynamic-256x192.py @@ -0,0 +1,23 @@ +_base_ = ['./pose-detection_static.py', '../_base_/backends/tensorrt.py'] + +onnx_config = dict( + input_shape=[192, 256], + dynamic_axes={ + 'input': { + 0: 'batch', + }, + 'output': { + 0: 'batch' + } + }) + +backend_config = dict( + common_config=dict(max_workspace_size=1 << 30), + model_inputs=[ + dict( + input_shapes=dict( + input=dict( + min_shape=[1, 3, 256, 192], + opt_shape=[2, 3, 256, 192], + max_shape=[4, 3, 256, 192]))) + ]) diff --git a/configs/mmrotate/rotated-detection_static.py 
b/configs/mmrotate/rotated-detection_static.py index 324de6f7f..b696260e2 100644 --- a/configs/mmrotate/rotated-detection_static.py +++ b/configs/mmrotate/rotated-detection_static.py @@ -6,4 +6,5 @@ codebase_config = dict( score_threshold=0.05, iou_threshold=0.1, pre_top_k=3000, - keep_top_k=2000)) + keep_top_k=2000, + max_output_boxes_per_class=2000)) diff --git a/configs/mmseg/segmentation_snpe_static-512x1024.py b/configs/mmseg/segmentation_snpe_static-512x1024.py new file mode 100644 index 000000000..7def73ce7 --- /dev/null +++ b/configs/mmseg/segmentation_snpe_static-512x1024.py @@ -0,0 +1,3 @@ +_base_ = ['./segmentation_static.py', '../_base_/backends/snpe.py'] + +onnx_config = dict(input_shape=[1024, 512]) diff --git a/csrc/mmdeploy/CMakeLists.txt b/csrc/mmdeploy/CMakeLists.txt index 3bbc50d99..6bf3524f7 100644 --- a/csrc/mmdeploy/CMakeLists.txt +++ b/csrc/mmdeploy/CMakeLists.txt @@ -16,5 +16,6 @@ if (MMDEPLOY_BUILD_SDK) add_subdirectory(preprocess) add_subdirectory(net) add_subdirectory(codebase) - add_subdirectory(apis/c) + add_subdirectory(apis/c/mmdeploy) + add_subdirectory(apis/cxx) endif () diff --git a/csrc/mmdeploy/apis/c/CMakeLists.txt b/csrc/mmdeploy/apis/c/CMakeLists.txt deleted file mode 100644 index 0b3b8e3c9..000000000 --- a/csrc/mmdeploy/apis/c/CMakeLists.txt +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. - -project(capis) - -set(COMMON_LIST - common - model - executor - pipeline) - -set(TASK_LIST ${MMDEPLOY_TASKS}) - -foreach (TASK ${COMMON_LIST}) - set(TARGET_NAME mmdeploy_${TASK}) - mmdeploy_add_library(${TARGET_NAME} ${CMAKE_CURRENT_SOURCE_DIR}/${TASK}.cpp) - target_link_libraries(${TARGET_NAME} PRIVATE mmdeploy::core) - target_include_directories(${TARGET_NAME} PUBLIC - $) - install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/${TASK}.h - DESTINATION include/mmdeploy/apis/c) -endforeach () - -target_link_libraries(mmdeploy_executor PUBLIC - mmdeploy_common) -target_link_libraries(mmdeploy_pipeline PUBLIC - mmdeploy_executor mmdeploy_model mmdeploy_common) - -foreach (TASK ${TASK_LIST}) - set(TARGET_NAME mmdeploy_${TASK}) - mmdeploy_add_library(${TARGET_NAME} ${CMAKE_CURRENT_SOURCE_DIR}/${TASK}.cpp) - - target_link_libraries(${TARGET_NAME} PRIVATE - mmdeploy_pipeline mmdeploy::core) - target_include_directories(${TARGET_NAME} PUBLIC - $) - install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/${TASK}.h - DESTINATION include/mmdeploy/apis/c) -endforeach () - -if (MMDEPLOY_BUILD_SDK_CSHARP_API) - # build MMDeployExtern.dll just for csharp nuget package. - # no Installation for c/c++ package. - file(GLOB SRCS "*.c" "*.cpp") - add_library(MMDeployExtern SHARED ${SRCS}) - target_compile_definitions(MMDeployExtern PRIVATE -DMMDEPLOY_API_EXPORTS=1) - mmdeploy_load_static(MMDeployExtern MMDeployStaticModules) - mmdeploy_load_dynamic(MMDeployExtern MMDeployDynamicModules) - target_link_libraries(MMDeployExtern PRIVATE MMDeployLibs) -endif () diff --git a/csrc/mmdeploy/apis/c/common.h b/csrc/mmdeploy/apis/c/common.h deleted file mode 100644 index 7e8bc95cb..000000000 --- a/csrc/mmdeploy/apis/c/common.h +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (c) OpenMMLab. All rights reserved. 
- -#ifndef MMDEPLOY_COMMON_H -#define MMDEPLOY_COMMON_H - -#include - -#ifndef MMDEPLOY_EXPORT -#ifdef _MSC_VER -#define MMDEPLOY_EXPORT __declspec(dllexport) -#else -#define MMDEPLOY_EXPORT __attribute__((visibility("default"))) -#endif -#endif - -#ifndef MMDEPLOY_API -#ifdef MMDEPLOY_API_EXPORTS -#define MMDEPLOY_API MMDEPLOY_EXPORT -#else -#define MMDEPLOY_API -#endif -#endif - -// clang-format off - -typedef enum { - MM_BGR, - MM_RGB, - MM_GRAYSCALE, - MM_NV12, - MM_NV21, - MM_BGRA, - MM_UNKNOWN_PIXEL_FORMAT -} mm_pixel_format_t; - -typedef enum { - MM_FLOAT, - MM_HALF, - MM_INT8, - MM_INT32, - MM_UNKNOWN_DATA_TYPE -} mm_data_type_t; - -enum mm_status_t { - MM_SUCCESS = 0, - MM_E_INVALID_ARG = 1, - MM_E_NOT_SUPPORTED = 2, - MM_E_OUT_OF_RANGE = 3, - MM_E_OUT_OF_MEMORY = 4, - MM_E_FILE_NOT_EXIST = 5, - MM_E_FAIL = 6, - MM_E_UNKNOWN = -1, -}; - -// clang-format on - -typedef void* mm_handle_t; - -typedef void* mm_model_t; - -typedef struct mm_mat_t { - uint8_t* data; - int height; - int width; - int channel; - mm_pixel_format_t format; - mm_data_type_t type; -} mm_mat_t; - -typedef struct mm_rect_t { - float left; - float top; - float right; - float bottom; -} mm_rect_t; - -typedef struct mm_pointi_t { - int x; - int y; -} mm_pointi_t; - -typedef struct mm_pointf_t { - float x; - float y; -} mm_pointf_t; - -typedef struct mmdeploy_value* mmdeploy_value_t; - -#if __cplusplus -extern "C" { -#endif - -MMDEPLOY_API mmdeploy_value_t mmdeploy_value_copy(mmdeploy_value_t input); - -MMDEPLOY_API int mmdeploy_value_destroy(mmdeploy_value_t value); - -#if __cplusplus -} -#endif - -#endif // MMDEPLOY_COMMON_H diff --git a/csrc/mmdeploy/apis/c/mmdeploy/CMakeLists.txt b/csrc/mmdeploy/apis/c/mmdeploy/CMakeLists.txt new file mode 100644 index 000000000..9863976b7 --- /dev/null +++ b/csrc/mmdeploy/apis/c/mmdeploy/CMakeLists.txt @@ -0,0 +1,80 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
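+# Each API source is compiled once into an object library (see add_object below); the objects
+# feed both the per-task mmdeploy_* libraries and the optional monolithic "mmdeploy" shared library.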
+
+project(capis)
+
+set(CAPI_OBJS)
+
+macro(add_object name)
+  add_library(${name} OBJECT ${ARGN})
+  set_target_properties(${name} PROPERTIES POSITION_INDEPENDENT_CODE 1)
+  target_compile_definitions(${name} PRIVATE -DMMDEPLOY_API_EXPORTS=1)
+  if (NOT MSVC)
+    target_compile_options(${name} PRIVATE $<$<COMPILE_LANGUAGE:CXX>:-fvisibility=hidden>)
+  endif ()
+  target_link_libraries(${name} PRIVATE mmdeploy::core)
+  set(CAPI_OBJS ${CAPI_OBJS} ${name})
+  mmdeploy_export(${name})
+endmacro()
+
+set(COMMON_LIST
+    common
+    model
+    executor
+    pipeline)
+
+set(TASK_LIST ${MMDEPLOY_TASKS})
+
+foreach (TASK ${COMMON_LIST})
+  set(TARGET_NAME mmdeploy_${TASK})
+  set(OBJECT_NAME mmdeploy_${TASK}_obj)
+  add_object(${OBJECT_NAME} ${CMAKE_CURRENT_SOURCE_DIR}/${TASK}.cpp)
+  mmdeploy_add_library(${TARGET_NAME})
+  target_link_libraries(${TARGET_NAME} PRIVATE ${OBJECT_NAME})
+  target_include_directories(${TARGET_NAME} PUBLIC
+      $)
+  install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/${TASK}.h
+      DESTINATION include/mmdeploy)
+endforeach ()
+
+target_link_libraries(mmdeploy_executor PUBLIC
+    mmdeploy_common)
+target_link_libraries(mmdeploy_pipeline PUBLIC
+    mmdeploy_executor mmdeploy_model mmdeploy_common)
+
+foreach (TASK ${TASK_LIST})
+  set(TARGET_NAME mmdeploy_${TASK})
+  set(OBJECT_NAME mmdeploy_${TASK}_obj)
+  add_object(${OBJECT_NAME} ${CMAKE_CURRENT_SOURCE_DIR}/${TASK}.cpp)
+  mmdeploy_add_library(${TARGET_NAME})
+  target_link_libraries(${TARGET_NAME} PRIVATE ${OBJECT_NAME}
+      mmdeploy_pipeline)
+  target_include_directories(${TARGET_NAME} PUBLIC
+      $
+      $)
+  install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/${TASK}.h
+      DESTINATION include/mmdeploy)
+endforeach ()
+
+install(DIRECTORY ${CMAKE_SOURCE_DIR}/demo/csrc/ DESTINATION example/cpp
+    FILES_MATCHING
+    PATTERN "*.cpp"
+    PATTERN "CMakeLists.txt"
+    )
+
+if (MMDEPLOY_BUILD_SDK_CSHARP_API OR MMDEPLOY_BUILD_SDK_MONOLITHIC)
+  add_library(mmdeploy SHARED)
+  mmdeploy_load_static(mmdeploy MMDeployStaticModules)
+  mmdeploy_load_dynamic(mmdeploy MMDeployDynamicModules)
+  target_link_libraries(mmdeploy PRIVATE ${CAPI_OBJS})
+  target_include_directories(mmdeploy PUBLIC
+      $
+      $)
+  set(MMDEPLOY_VERSION ${MMDEPLOY_VERSION_MAJOR}
+      .${MMDEPLOY_VERSION_MINOR}
+      .${MMDEPLOY_VERSION_PATCH})
+  string(REPLACE ";" "" MMDEPLOY_VERSION ${MMDEPLOY_VERSION})
+  set_target_properties(mmdeploy PROPERTIES
+      VERSION ${MMDEPLOY_VERSION}
+      SOVERSION ${MMDEPLOY_VERSION_MAJOR})
+  mmdeploy_export(mmdeploy)
+endif ()
diff --git a/csrc/mmdeploy/apis/c/classifier.cpp b/csrc/mmdeploy/apis/c/mmdeploy/classifier.cpp
similarity index 57%
rename from csrc/mmdeploy/apis/c/classifier.cpp
rename to csrc/mmdeploy/apis/c/mmdeploy/classifier.cpp
index 7ad9df229..6b89de5bc 100644
--- a/csrc/mmdeploy/apis/c/classifier.cpp
+++ b/csrc/mmdeploy/apis/c/mmdeploy/classifier.cpp
@@ -4,14 +4,14 @@
 #include <numeric>
 
-#include "mmdeploy/apis/c/common_internal.h"
-#include "mmdeploy/apis/c/handle.h"
-#include "mmdeploy/apis/c/pipeline.h"
+#include "common_internal.h"
+#include "handle.h"
 #include "mmdeploy/archive/value_archive.h"
 #include "mmdeploy/codebase/mmcls/mmcls.h"
 #include "mmdeploy/core/device.h"
 #include "mmdeploy/core/graph.h"
 #include "mmdeploy/core/utils/formatter.h"
+#include "pipeline.h"
 
 using namespace mmdeploy;
 using namespace std;
 
@@ -43,72 +43,77 @@ Value& config_template() {
   return v;
 }
 
-int mmdeploy_classifier_create_impl(mm_model_t model, const char* device_name, int device_id,
-                                    mmdeploy_exec_info_t exec_info, mm_handle_t* handle) {
+int mmdeploy_classifier_create_impl(mmdeploy_model_t model, const char* device_name, int device_id,
+                                    mmdeploy_exec_info_t exec_info,
+                                    mmdeploy_classifier_t* classifier) {
   auto config = config_template();
-  config["pipeline"]["tasks"][0]["params"]["model"] = *static_cast<Model*>(model);
+  config["pipeline"]["tasks"][0]["params"]["model"] = *Cast(model);
 
-  return mmdeploy_pipeline_create(Cast(&config), device_name, device_id, exec_info, handle);
+  return mmdeploy_pipeline_create(Cast(&config), device_name, device_id, exec_info,
+                                  (mmdeploy_pipeline_t*)classifier);
 }
 
 }  // namespace
 
-int mmdeploy_classifier_create(mm_model_t model, const char* device_name, int device_id,
-                               mm_handle_t* handle) {
-  return mmdeploy_classifier_create_impl(model, device_name, device_id, nullptr, handle);
+int mmdeploy_classifier_create(mmdeploy_model_t model, const char* device_name, int device_id,
+                               mmdeploy_classifier_t* classifier) {
+  return mmdeploy_classifier_create_impl(model, device_name, device_id, nullptr, classifier);
 }
 
-int mmdeploy_classifier_create_v2(mm_model_t model, const char* device_name, int device_id,
-                                  mmdeploy_exec_info_t exec_info, mm_handle_t* handle) {
-  return mmdeploy_classifier_create_impl(model, device_name, device_id, exec_info, handle);
+int mmdeploy_classifier_create_v2(mmdeploy_model_t model, const char* device_name, int device_id,
+                                  mmdeploy_exec_info_t exec_info,
+                                  mmdeploy_classifier_t* classifier) {
+  return mmdeploy_classifier_create_impl(model, device_name, device_id, exec_info, classifier);
 }
 
 int mmdeploy_classifier_create_by_path(const char* model_path, const char* device_name,
-                                       int device_id, mm_handle_t* handle) {
-  mm_model_t model{};
+                                       int device_id, mmdeploy_classifier_t* classifier) {
+  mmdeploy_model_t model{};
   if (auto ec = mmdeploy_model_create_by_path(model_path, &model)) {
     return ec;
   }
-  auto ec = mmdeploy_classifier_create_impl(model, device_name, device_id, nullptr, handle);
+  auto ec = mmdeploy_classifier_create_impl(model, device_name, device_id, nullptr, classifier);
   mmdeploy_model_destroy(model);
   return ec;
 }
 
-int mmdeploy_classifier_create_input(const mm_mat_t* mats, int mat_count, mmdeploy_value_t* value) {
+int mmdeploy_classifier_create_input(const mmdeploy_mat_t* mats, int mat_count,
+                                     mmdeploy_value_t* value) {
   return mmdeploy_common_create_input(mats, mat_count, value);
 }
 
-int mmdeploy_classifier_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count,
-                              mm_class_t** results, int** result_count) {
+int mmdeploy_classifier_apply(mmdeploy_classifier_t classifier, const mmdeploy_mat_t* mats,
+                              int mat_count, mmdeploy_classification_t** results,
+                              int** result_count) {
   wrapped<mmdeploy_value_t> input;
   if (auto ec = mmdeploy_classifier_create_input(mats, mat_count, input.ptr())) {
     return ec;
   }
   wrapped<mmdeploy_value_t> output;
-  if (auto ec = mmdeploy_classifier_apply_v2(handle, input, output.ptr())) {
+  if (auto ec = mmdeploy_classifier_apply_v2(classifier, input, output.ptr())) {
    return ec;
   }
   if (auto ec = mmdeploy_classifier_get_result(output, results, result_count)) {
     return ec;
   }
-  return MM_SUCCESS;
+  return MMDEPLOY_SUCCESS;
 }
 
-int mmdeploy_classifier_apply_v2(mm_handle_t handle, mmdeploy_value_t input,
+int mmdeploy_classifier_apply_v2(mmdeploy_classifier_t classifier, mmdeploy_value_t input,
                                  mmdeploy_value_t* output) {
-  return mmdeploy_pipeline_apply(handle, input, output);
+  return mmdeploy_pipeline_apply((mmdeploy_pipeline_t)classifier, input, output);
 }
 
-int mmdeploy_classifier_apply_async(mm_handle_t handle, mmdeploy_sender_t input,
+int mmdeploy_classifier_apply_async(mmdeploy_classifier_t classifier, mmdeploy_sender_t input,
                                     mmdeploy_sender_t* output) {
-  return mmdeploy_pipeline_apply_async(handle, input, output);
+  return mmdeploy_pipeline_apply_async((mmdeploy_pipeline_t)classifier, input, output);
 }
 
-int mmdeploy_classifier_get_result(mmdeploy_value_t output, mm_class_t** results,
+int mmdeploy_classifier_get_result(mmdeploy_value_t output, mmdeploy_classification_t** results,
                                    int** result_count) {
   if (!output || !results || !result_count) {
-    return MM_E_INVALID_ARG;
+    return MMDEPLOY_E_INVALID_ARG;
   }
   try {
     Value& value = Cast(output)->front();
@@ -127,7 +132,8 @@ int mmdeploy_classifier_get_result(mmdeploy_value_t output, mm_class_t** results
     std::unique_ptr<int[]> result_count_data(new int[_result_count.size()]{});
     std::copy(_result_count.begin(), _result_count.end(), result_count_data.get());
 
-    std::unique_ptr<mm_class_t[]> result_data(new mm_class_t[total]{});
+    std::unique_ptr<mmdeploy_classification_t[]> result_data(
+        new mmdeploy_classification_t[total]{});
     auto result_ptr = result_data.get();
     for (const auto& cls_output : classify_outputs) {
       for (const auto& label : cls_output.labels) {
@@ -140,23 +146,21 @@ int mmdeploy_classifier_get_result(mmdeploy_value_t output, mm_class_t** results
     *result_count = result_count_data.release();
     *results = result_data.release();
 
-    return MM_SUCCESS;
+    return MMDEPLOY_SUCCESS;
   } catch (const std::exception& e) {
     MMDEPLOY_ERROR("unhandled exception: {}", e.what());
   } catch (...) {
     MMDEPLOY_ERROR("unknown exception caught");
   }
-  return MM_E_FAIL;
+  return MMDEPLOY_E_FAIL;
 }
 
-void mmdeploy_classifier_release_result(mm_class_t* results, const int* result_count, int count) {
+void mmdeploy_classifier_release_result(mmdeploy_classification_t* results, const int* result_count,
+                                        int count) {
   delete[] results;
   delete[] result_count;
 }
 
-void mmdeploy_classifier_destroy(mm_handle_t handle) {
-  if (handle != nullptr) {
-    auto classifier = static_cast<AsyncHandle*>(handle);
-    delete classifier;
-  }
+void mmdeploy_classifier_destroy(mmdeploy_classifier_t classifier) {
+  mmdeploy_pipeline_destroy((mmdeploy_pipeline_t)classifier);
 }
diff --git a/csrc/mmdeploy/apis/c/classifier.h b/csrc/mmdeploy/apis/c/mmdeploy/classifier.h
similarity index 64%
rename from csrc/mmdeploy/apis/c/classifier.h
rename to csrc/mmdeploy/apis/c/mmdeploy/classifier.h
index ec8bebb39..23804528d 100644
--- a/csrc/mmdeploy/apis/c/classifier.h
+++ b/csrc/mmdeploy/apis/c/mmdeploy/classifier.h
@@ -10,15 +10,18 @@
 #include "common.h"
 #include "executor.h"
+#include "model.h"
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-typedef struct mm_class_t {
+typedef struct mmdeploy_classification_t {
   int label_id;
   float score;
-} mm_class_t;
+} mmdeploy_classification_t;
+
+typedef struct mmdeploy_classifier* mmdeploy_classifier_t;
 
 /**
  * @brief Create classifier's handle
 * @param[in] model an instance of mmclassification sdk model created by
 * \ref mmdeploy_model_create_by_path or \ref mmdeploy_model_create in \ref model.h
 * @param[in] device_name name of device, such as "cpu", "cuda", etc.
 * @param[in] device_id id of device.
- * @param[out] handle instance of a classifier, which must be destroyed + * @param[out] classifier instance of a classifier, which must be destroyed * by \ref mmdeploy_classifier_destroy * @return status of creating classifier's handle */ -MMDEPLOY_API int mmdeploy_classifier_create(mm_model_t model, const char* device_name, - int device_id, mm_handle_t* handle); +MMDEPLOY_API int mmdeploy_classifier_create(mmdeploy_model_t model, const char* device_name, + int device_id, mmdeploy_classifier_t* classifier); /** * @brief Create classifier's handle * @param[in] model_path path of mmclassification sdk model exported by mmdeploy model converter * @param[in] device_name name of device, such as "cpu", "cuda", etc. * @param[in] device_id id of device. - * @param[out] handle instance of a classifier, which must be destroyed + * @param[out] classifier instance of a classifier, which must be destroyed * by \ref mmdeploy_classifier_destroy * @return status of creating classifier's handle */ MMDEPLOY_API int mmdeploy_classifier_create_by_path(const char* model_path, const char* device_name, - int device_id, mm_handle_t* handle); + int device_id, + mmdeploy_classifier_t* classifier); /** * @brief Use classifier created by \ref mmdeploy_classifier_create_by_path to get label * information of each image in a batch - * @param[in] handle classifier's handle created by \ref mmdeploy_classifier_create_by_path + * @param[in] classifier classifier's handle created by \ref mmdeploy_classifier_create_by_path * @param[in] mats a batch of images * @param[in] mat_count number of images in the batch * @param[out] results a linear buffer to save classification results of each @@ -58,8 +62,9 @@ MMDEPLOY_API int mmdeploy_classifier_create_by_path(const char* model_path, cons * mmdeploy_classifier_release_result * @return status of inference */ -MMDEPLOY_API int mmdeploy_classifier_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, - mm_class_t** results, int** result_count); +MMDEPLOY_API int mmdeploy_classifier_apply(mmdeploy_classifier_t classifier, + const mmdeploy_mat_t* mats, int mat_count, + mmdeploy_classification_t** results, int** result_count); /** * @brief Release the inference result buffer created \ref mmdeploy_classifier_apply @@ -67,14 +72,14 @@ MMDEPLOY_API int mmdeploy_classifier_apply(mm_handle_t handle, const mm_mat_t* m * @param[in] result_count \p results size buffer * @param[in] count length of \p result_count */ -MMDEPLOY_API void mmdeploy_classifier_release_result(mm_class_t* results, const int* result_count, - int count); +MMDEPLOY_API void mmdeploy_classifier_release_result(mmdeploy_classification_t* results, + const int* result_count, int count); /** * @brief Destroy classifier's handle - * @param[in] handle classifier's handle created by \ref mmdeploy_classifier_create_by_path + * @param[in] classifier classifier's handle created by \ref mmdeploy_classifier_create_by_path */ -MMDEPLOY_API void mmdeploy_classifier_destroy(mm_handle_t handle); +MMDEPLOY_API void mmdeploy_classifier_destroy(mmdeploy_classifier_t classifier); /****************************************************************************** * Experimental asynchronous APIs */ @@ -83,9 +88,9 @@ MMDEPLOY_API void mmdeploy_classifier_destroy(mm_handle_t handle); * @brief Same as \ref mmdeploy_classifier_create, but allows to control execution context of tasks * via exec_info */ -MMDEPLOY_API int mmdeploy_classifier_create_v2(mm_model_t model, const char* device_name, +MMDEPLOY_API int 
mmdeploy_classifier_create_v2(mmdeploy_model_t model, const char* device_name, int device_id, mmdeploy_exec_info_t exec_info, - mm_handle_t* handle); + mmdeploy_classifier_t* classifier); /** * @brief Pack classifier inputs into mmdeploy_value_t @@ -94,24 +99,25 @@ MMDEPLOY_API int mmdeploy_classifier_create_v2(mm_model_t model, const char* dev * @param[out] value the packed value * @return status of the operation */ -MMDEPLOY_API int mmdeploy_classifier_create_input(const mm_mat_t* mats, int mat_count, +MMDEPLOY_API int mmdeploy_classifier_create_input(const mmdeploy_mat_t* mats, int mat_count, mmdeploy_value_t* value); /** * @brief Same as \ref mmdeploy_classifier_apply, but input and output are packed in \ref * mmdeploy_value_t. */ -MMDEPLOY_API int mmdeploy_classifier_apply_v2(mm_handle_t handle, mmdeploy_value_t input, - mmdeploy_value_t* output); +MMDEPLOY_API int mmdeploy_classifier_apply_v2(mmdeploy_classifier_t classifier, + mmdeploy_value_t input, mmdeploy_value_t* output); /** * @brief Apply classifier asynchronously - * @param[in] handle handle of the classifier + * @param[in] classifier handle of the classifier * @param[in] input input sender that will be consumed by the operation * @param[out] output output sender * @return status of the operation */ -MMDEPLOY_API int mmdeploy_classifier_apply_async(mm_handle_t handle, mmdeploy_sender_t input, +MMDEPLOY_API int mmdeploy_classifier_apply_async(mmdeploy_classifier_t classifier, + mmdeploy_sender_t input, mmdeploy_sender_t* output); /** @@ -123,7 +129,8 @@ MMDEPLOY_API int mmdeploy_classifier_apply_async(mm_handle_t handle, mmdeploy_se * released by \ref mmdeploy_classifier_release_result * @return status of the operation */ -MMDEPLOY_API int mmdeploy_classifier_get_result(mmdeploy_value_t output, mm_class_t** results, +MMDEPLOY_API int mmdeploy_classifier_get_result(mmdeploy_value_t output, + mmdeploy_classification_t** results, int** result_count); #ifdef __cplusplus diff --git a/csrc/mmdeploy/apis/c/common.cpp b/csrc/mmdeploy/apis/c/mmdeploy/common.cpp similarity index 66% rename from csrc/mmdeploy/apis/c/common.cpp rename to csrc/mmdeploy/apis/c/mmdeploy/common.cpp index 5e7d6e224..3673960ff 100644 --- a/csrc/mmdeploy/apis/c/common.cpp +++ b/csrc/mmdeploy/apis/c/mmdeploy/common.cpp @@ -1,14 +1,14 @@ #include "common.h" -#include "mmdeploy/apis/c/common_internal.h" -#include "mmdeploy/apis/c/handle.h" +#include "common_internal.h" +#include "handle.h" #include "mmdeploy/core/mat.h" -mmdeploy_value_t mmdeploy_value_copy(mmdeploy_value_t input) { - if (!input) { +mmdeploy_value_t mmdeploy_value_copy(mmdeploy_value_t value) { + if (!value) { return nullptr; } - return Guard([&] { return Take(Value(*Cast(input))); }); + return Guard([&] { return Take(Value(*Cast(value))); }); } int mmdeploy_value_destroy(mmdeploy_value_t value) { @@ -16,9 +16,10 @@ int mmdeploy_value_destroy(mmdeploy_value_t value) { return 0; } -int mmdeploy_common_create_input(const mm_mat_t* mats, int mat_count, mmdeploy_value_t* value) { +int mmdeploy_common_create_input(const mmdeploy_mat_t* mats, int mat_count, + mmdeploy_value_t* value) { if (mat_count && mats == nullptr) { - return MM_E_INVALID_ARG; + return MMDEPLOY_E_INVALID_ARG; } try { auto input = std::make_unique(Value{Value::kArray}); @@ -33,5 +34,5 @@ int mmdeploy_common_create_input(const mm_mat_t* mats, int mat_count, mmdeploy_v } catch (...) 
{
     MMDEPLOY_ERROR("unknown exception caught");
   }
-  return MM_SUCCESS;
+  return MMDEPLOY_SUCCESS;
 }
diff --git a/csrc/mmdeploy/apis/c/mmdeploy/common.h b/csrc/mmdeploy/apis/c/mmdeploy/common.h
new file mode 100644
index 000000000..f58be8b08
--- /dev/null
+++ b/csrc/mmdeploy/apis/c/mmdeploy/common.h
@@ -0,0 +1,92 @@
+// Copyright (c) OpenMMLab. All rights reserved.
+
+#ifndef MMDEPLOY_COMMON_H
+#define MMDEPLOY_COMMON_H
+
+#include <stdint.h>  // NOLINT
+
+#ifndef MMDEPLOY_EXPORT
+#ifdef _MSC_VER
+#define MMDEPLOY_EXPORT __declspec(dllexport)
+#else
+#define MMDEPLOY_EXPORT __attribute__((visibility("default")))
+#endif
+#endif
+
+#ifndef MMDEPLOY_API
+#ifdef MMDEPLOY_API_EXPORTS
+#define MMDEPLOY_API MMDEPLOY_EXPORT
+#else
+#define MMDEPLOY_API
+#endif
+#endif
+
+// clang-format off
+
+typedef enum mmdeploy_pixel_format_t{
+  MMDEPLOY_PIXEL_FORMAT_BGR,
+  MMDEPLOY_PIXEL_FORMAT_RGB,
+  MMDEPLOY_PIXEL_FORMAT_GRAYSCALE,
+  MMDEPLOY_PIXEL_FORMAT_NV12,
+  MMDEPLOY_PIXEL_FORMAT_NV21,
+  MMDEPLOY_PIXEL_FORMAT_BGRA,
+  MMDEPLOY_PIXEL_FORMAT_COUNT
+} mmdeploy_pixel_format_t;
+
+typedef enum mmdeploy_data_type_t{
+  MMDEPLOY_DATA_TYPE_FLOAT,
+  MMDEPLOY_DATA_TYPE_HALF,
+  MMDEPLOY_DATA_TYPE_UINT8,
+  MMDEPLOY_DATA_TYPE_INT32,
+  MMDEPLOY_DATA_TYPE_COUNT
+} mmdeploy_data_type_t;
+
+typedef enum mmdeploy_status_t {
+  MMDEPLOY_SUCCESS = 0,
+  MMDEPLOY_E_INVALID_ARG = 1,
+  MMDEPLOY_E_NOT_SUPPORTED = 2,
+  MMDEPLOY_E_OUT_OF_RANGE = 3,
+  MMDEPLOY_E_OUT_OF_MEMORY = 4,
+  MMDEPLOY_E_FILE_NOT_EXIST = 5,
+  MMDEPLOY_E_FAIL = 6,
+  MMDEPLOY_STATUS_COUNT = 7
+} mmdeploy_status_t;
+
+// clang-format on
+
+typedef struct mmdeploy_mat_t {
+  uint8_t* data;
+  int height;
+  int width;
+  int channel;
+  mmdeploy_pixel_format_t format;
+  mmdeploy_data_type_t type;
+} mmdeploy_mat_t;
+
+typedef struct mmdeploy_rect_t {
+  float left;
+  float top;
+  float right;
+  float bottom;
+} mmdeploy_rect_t;
+
+typedef struct mmdeploy_point_t {
+  float x;
+  float y;
+} mmdeploy_point_t;
+
+typedef struct mmdeploy_value* mmdeploy_value_t;
+
+#if __cplusplus
+extern "C" {
+#endif
+
+MMDEPLOY_API mmdeploy_value_t mmdeploy_value_copy(mmdeploy_value_t value);
+
+MMDEPLOY_API int mmdeploy_value_destroy(mmdeploy_value_t value);
+
+#if __cplusplus
+}
+#endif
+
+#endif  // MMDEPLOY_COMMON_H
diff --git a/csrc/mmdeploy/apis/c/common_internal.h b/csrc/mmdeploy/apis/c/mmdeploy/common_internal.h
similarity index 77%
rename from csrc/mmdeploy/apis/c/common_internal.h
rename to csrc/mmdeploy/apis/c/mmdeploy/common_internal.h
index 2ef9eb00e..8c2d8b123 100644
--- a/csrc/mmdeploy/apis/c/common_internal.h
+++ b/csrc/mmdeploy/apis/c/mmdeploy/common_internal.h
@@ -3,9 +3,11 @@
 #ifndef MMDEPLOY_CSRC_APIS_C_COMMON_INTERNAL_H_
 #define MMDEPLOY_CSRC_APIS_C_COMMON_INTERNAL_H_
 
-#include "mmdeploy/apis/c/common.h"
-#include "mmdeploy/apis/c/model.h"
+#include "common.h"
+#include "handle.h"
 #include "mmdeploy/core/value.h"
+#include "model.h"
+#include "pipeline.h"
 
 using namespace mmdeploy;
 
@@ -25,6 +27,16 @@ mmdeploy_value_t Take(Value v) {
   return Cast(new Value(std::move(v)));  // NOLINT
 }
 
+mmdeploy_pipeline_t Cast(AsyncHandle* pipeline) {
+  return reinterpret_cast<mmdeploy_pipeline_t>(pipeline);
+}
+
+AsyncHandle* Cast(mmdeploy_pipeline_t pipeline) { return reinterpret_cast<AsyncHandle*>(pipeline); }
+
+mmdeploy_model_t Cast(Model* model) { return reinterpret_cast<mmdeploy_model_t>(model); }
+
+Model* Cast(mmdeploy_model_t model) { return reinterpret_cast<Model*>(model); }
+
 template <typename F>
 std::invoke_result_t<F> Guard(F f) {
   try {
@@ -80,7 +92,7 @@ class wrapped > {
 
 }  // namespace
 
-MMDEPLOY_API int mmdeploy_common_create_input(const mm_mat_t* mats, int mat_count,
mat_count, +MMDEPLOY_API int mmdeploy_common_create_input(const mmdeploy_mat_t* mats, int mat_count, mmdeploy_value_t* value); #endif // MMDEPLOY_CSRC_APIS_C_COMMON_INTERNAL_H_ diff --git a/csrc/mmdeploy/apis/c/detector.cpp b/csrc/mmdeploy/apis/c/mmdeploy/detector.cpp similarity index 64% rename from csrc/mmdeploy/apis/c/detector.cpp rename to csrc/mmdeploy/apis/c/mmdeploy/detector.cpp index 7a7a5bc37..f336965b2 100644 --- a/csrc/mmdeploy/apis/c/detector.cpp +++ b/csrc/mmdeploy/apis/c/mmdeploy/detector.cpp @@ -4,16 +4,16 @@ #include -#include "mmdeploy/apis/c/common_internal.h" -#include "mmdeploy/apis/c/executor_internal.h" -#include "mmdeploy/apis/c/model.h" -#include "mmdeploy/apis/c/pipeline.h" +#include "common_internal.h" +#include "executor_internal.h" #include "mmdeploy/archive/value_archive.h" #include "mmdeploy/codebase/mmdet/mmdet.h" #include "mmdeploy/core/device.h" #include "mmdeploy/core/model.h" #include "mmdeploy/core/utils/formatter.h" #include "mmdeploy/core/value.h" +#include "model.h" +#include "pipeline.h" using namespace std; using namespace mmdeploy; @@ -45,72 +45,74 @@ Value& config_template() { return v; } -int mmdeploy_detector_create_impl(mm_model_t model, const char* device_name, int device_id, - mmdeploy_exec_info_t exec_info, mm_handle_t* handle) { +int mmdeploy_detector_create_impl(mmdeploy_model_t model, const char* device_name, int device_id, + mmdeploy_exec_info_t exec_info, mmdeploy_detector_t* detector) { auto config = config_template(); - config["pipeline"]["tasks"][0]["params"]["model"] = *static_cast<Model*>(model); + config["pipeline"]["tasks"][0]["params"]["model"] = *Cast(model); - return mmdeploy_pipeline_create(Cast(&config), device_name, device_id, exec_info, handle); + return mmdeploy_pipeline_create(Cast(&config), device_name, device_id, exec_info, + (mmdeploy_pipeline_t*)detector); } } // namespace -int mmdeploy_detector_create(mm_model_t model, const char* device_name, int device_id, - mm_handle_t* handle) { + return mmdeploy_detector_create_impl(model, device_name, device_id, nullptr, handle); +int mmdeploy_detector_create(mmdeploy_model_t model, const char* device_name, int device_id, + mmdeploy_detector_t* detector) { + return mmdeploy_detector_create_impl(model, device_name, device_id, nullptr, detector); } -int mmdeploy_detector_create_v2(mm_model_t model, const char* device_name, int device_id, - mmdeploy_exec_info_t exec_info, mm_handle_t* handle) { - return mmdeploy_detector_create_impl(model, device_name, device_id, exec_info, handle); +int mmdeploy_detector_create_v2(mmdeploy_model_t model, const char* device_name, int device_id, + mmdeploy_exec_info_t exec_info, mmdeploy_detector_t* detector) { + return mmdeploy_detector_create_impl(model, device_name, device_id, exec_info, detector); } int mmdeploy_detector_create_by_path(const char* model_path, const char* device_name, int device_id, - mm_handle_t* handle) { - mm_model_t model{}; + mmdeploy_detector_t* detector) { + mmdeploy_model_t model{}; if (auto ec = mmdeploy_model_create_by_path(model_path, &model)) { return ec; } - auto ec = mmdeploy_detector_create_impl(model, device_name, device_id, nullptr, handle); + auto ec = mmdeploy_detector_create_impl(model, device_name, device_id, nullptr, detector); mmdeploy_model_destroy(model); return ec; } -int mmdeploy_detector_create_input(const mm_mat_t* mats, int mat_count, mmdeploy_value_t* input) { +int mmdeploy_detector_create_input(const mmdeploy_mat_t* mats, int mat_count, + mmdeploy_value_t* input) { return
mmdeploy_common_create_input(mats, mat_count, input); } -int mmdeploy_detector_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, - mm_detect_t** results, int** result_count) { +int mmdeploy_detector_apply(mmdeploy_detector_t detector, const mmdeploy_mat_t* mats, int mat_count, + mmdeploy_detection_t** results, int** result_count) { wrapped input; if (auto ec = mmdeploy_detector_create_input(mats, mat_count, input.ptr())) { return ec; } wrapped output; - if (auto ec = mmdeploy_detector_apply_v2(handle, input, output.ptr())) { + if (auto ec = mmdeploy_detector_apply_v2(detector, input, output.ptr())) { return ec; } if (auto ec = mmdeploy_detector_get_result(output, results, result_count)) { return ec; } - return MM_SUCCESS; + return MMDEPLOY_SUCCESS; } -int mmdeploy_detector_apply_v2(mm_handle_t handle, mmdeploy_value_t input, +int mmdeploy_detector_apply_v2(mmdeploy_detector_t detector, mmdeploy_value_t input, mmdeploy_value_t* output) { - return mmdeploy_pipeline_apply(handle, input, output); + return mmdeploy_pipeline_apply((mmdeploy_pipeline_t)detector, input, output); } -int mmdeploy_detector_apply_async(mm_handle_t handle, mmdeploy_sender_t input, +int mmdeploy_detector_apply_async(mmdeploy_detector_t detector, mmdeploy_sender_t input, mmdeploy_sender_t* output) { - return mmdeploy_pipeline_apply_async(handle, input, output); + return mmdeploy_pipeline_apply_async((mmdeploy_pipeline_t)detector, input, output); } -int mmdeploy_detector_get_result(mmdeploy_value_t output, mm_detect_t** results, +int mmdeploy_detector_get_result(mmdeploy_value_t output, mmdeploy_detection_t** results, int** result_count) { if (!output || !results || !result_count) { - return MM_E_INVALID_ARG; + return MMDEPLOY_E_INVALID_ARG; } try { Value& value = Cast(output)->front(); @@ -127,11 +129,11 @@ int mmdeploy_detector_get_result(mmdeploy_value_t output, mm_detect_t** results, auto result_count_ptr = result_count_data.get(); std::copy(_result_count.begin(), _result_count.end(), result_count_data.get()); - auto deleter = [&](mm_detect_t* p) { + auto deleter = [&](mmdeploy_detection_t* p) { mmdeploy_detector_release_result(p, result_count_ptr, (int)detector_outputs.size()); }; - std::unique_ptr result_data(new mm_detect_t[total]{}, - deleter); + std::unique_ptr result_data( + new mmdeploy_detection_t[total]{}, deleter); // ownership transferred to result_data result_count_data.release(); @@ -146,7 +148,7 @@ int mmdeploy_detector_get_result(mmdeploy_value_t output, mm_detect_t** results, auto mask_byte_size = detection.mask.byte_size(); if (mask_byte_size) { auto& mask = detection.mask; - result_ptr->mask = new mm_instance_mask_t{}; + result_ptr->mask = new mmdeploy_instance_mask_t{}; result_ptr->mask->data = new char[mask_byte_size]; result_ptr->mask->width = mask.width(); result_ptr->mask->height = mask.height(); @@ -159,16 +161,17 @@ int mmdeploy_detector_get_result(mmdeploy_value_t output, mm_detect_t** results, *result_count = result_count_ptr; *results = result_data.release(); - return MM_SUCCESS; + return MMDEPLOY_SUCCESS; } catch (const std::exception& e) { MMDEPLOY_ERROR("unhandled exception: {}", e.what()); } catch (...) 
{ MMDEPLOY_ERROR("unknown exception caught"); } - return MM_E_FAIL; + return MMDEPLOY_E_FAIL; } -void mmdeploy_detector_release_result(mm_detect_t* results, const int* result_count, int count) { +void mmdeploy_detector_release_result(mmdeploy_detection_t* results, const int* result_count, + int count) { auto result_ptr = results; for (int i = 0; i < count; ++i) { for (int j = 0; j < result_count[i]; ++j, ++result_ptr) { @@ -182,4 +185,6 @@ void mmdeploy_detector_release_result(mm_detect_t* results, const int* result_co delete[] result_count; } -void mmdeploy_detector_destroy(mm_handle_t handle) { mmdeploy_pipeline_destroy(handle); } +void mmdeploy_detector_destroy(mmdeploy_detector_t detector) { + mmdeploy_pipeline_destroy((mmdeploy_pipeline_t)detector); +} diff --git a/csrc/mmdeploy/apis/c/detector.h b/csrc/mmdeploy/apis/c/mmdeploy/detector.h similarity index 65% rename from csrc/mmdeploy/apis/c/detector.h rename to csrc/mmdeploy/apis/c/mmdeploy/detector.h index dbb7e4e33..5b09695ca 100644 --- a/csrc/mmdeploy/apis/c/detector.h +++ b/csrc/mmdeploy/apis/c/mmdeploy/detector.h @@ -10,23 +10,26 @@ #include "common.h" #include "executor.h" +#include "model.h" #ifdef __cplusplus extern "C" { #endif -typedef struct mm_instance_mask_t { +typedef struct mmdeploy_instance_mask_t { char* data; int height; int width; -} mm_instance_mask_t; +} mmdeploy_instance_mask_t; -typedef struct mm_detect_t { +typedef struct mmdeploy_detection_t { int label_id; float score; - mm_rect_t bbox; - mm_instance_mask_t* mask; -} mm_detect_t; + mmdeploy_rect_t bbox; + mmdeploy_instance_mask_t* mask; +} mmdeploy_detection_t; + +typedef struct mmdeploy_detector* mmdeploy_detector_t; /** * @brief Create detector's handle @@ -34,26 +37,26 @@ typedef struct mm_detect_t { * \ref mmdeploy_model_create_by_path or \ref mmdeploy_model_create in \ref model.h * @param[in] device_name name of device, such as "cpu", "cuda", etc. * @param[in] device_id id of device. - * @param[out] handle instance of a detector + * @param[out] detector instance of a detector * @return status of creating detector's handle */ -MMDEPLOY_API int mmdeploy_detector_create(mm_model_t model, const char* device_name, int device_id, - mm_handle_t* handle); +MMDEPLOY_API int mmdeploy_detector_create(mmdeploy_model_t model, const char* device_name, + int device_id, mmdeploy_detector_t* detector); /** * @brief Create detector's handle * @param[in] model_path path of mmdetection sdk model exported by mmdeploy model converter * @param[in] device_name name of device, such as "cpu", "cuda", etc. * @param[in] device_id id of device. - * @param[out] handle instance of a detector + * @param[out] detector instance of a detector * @return status of creating detector's handle */ MMDEPLOY_API int mmdeploy_detector_create_by_path(const char* model_path, const char* device_name, - int device_id, mm_handle_t* handle); + int device_id, mmdeploy_detector_t* detector); /** * @brief Apply detector to batch images and get their inference results - * @param[in] handle detector's handle created by \ref mmdeploy_detector_create_by_path + * @param[in] detector detector's handle created by \ref mmdeploy_detector_create_by_path * @param[in] mats a batch of images * @param[in] mat_count number of images in the batch * @param[out] results a linear buffer to save detection results of each image. 
It must be released @@ -63,22 +66,23 @@ MMDEPLOY_API int mmdeploy_detector_create_by_path(const char* model_path, const * mmdeploy_detector_release_result * @return status of inference */ -MMDEPLOY_API int mmdeploy_detector_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, - mm_detect_t** results, int** result_count); +MMDEPLOY_API int mmdeploy_detector_apply(mmdeploy_detector_t detector, const mmdeploy_mat_t* mats, + int mat_count, mmdeploy_detection_t** results, + int** result_count); /** @brief Release the inference result buffer created by \ref mmdeploy_detector_apply * @param[in] results detection results buffer * @param[in] result_count \p results size buffer * @param[in] count length of \p result_count */ -MMDEPLOY_API void mmdeploy_detector_release_result(mm_detect_t* results, const int* result_count, - int count); +MMDEPLOY_API void mmdeploy_detector_release_result(mmdeploy_detection_t* results, + const int* result_count, int count); /** * @brief Destroy detector's handle - * @param[in] handle detector's handle created by \ref mmdeploy_detector_create_by_path + * @param[in] detector detector's handle created by \ref mmdeploy_detector_create_by_path */ -MMDEPLOY_API void mmdeploy_detector_destroy(mm_handle_t handle); +MMDEPLOY_API void mmdeploy_detector_destroy(mmdeploy_detector_t detector); /****************************************************************************** * Experimental asynchronous APIs */ @@ -87,9 +91,9 @@ MMDEPLOY_API void mmdeploy_detector_destroy(mm_handle_t handle); * @brief Same as \ref mmdeploy_detector_create, but allows to control execution context of tasks * via exec_info */ -MMDEPLOY_API int mmdeploy_detector_create_v2(mm_model_t model, const char* device_name, +MMDEPLOY_API int mmdeploy_detector_create_v2(mmdeploy_model_t model, const char* device_name, int device_id, mmdeploy_exec_info_t exec_info, - mm_handle_t* handle); + mmdeploy_detector_t* detector); /** * @brief Pack detector inputs into mmdeploy_value_t @@ -97,24 +101,24 @@ MMDEPLOY_API int mmdeploy_detector_create_v2(mm_model_t model, const char* devic * @param[in] mat_count number of images in the batch * @return the created value */ -MMDEPLOY_API int mmdeploy_detector_create_input(const mm_mat_t* mats, int mat_count, +MMDEPLOY_API int mmdeploy_detector_create_input(const mmdeploy_mat_t* mats, int mat_count, mmdeploy_value_t* input); /** * @brief Same as \ref mmdeploy_detector_apply, but input and output are packed in \ref * mmdeploy_value_t. 
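 *
 * A minimal sketch of the packed-value workflow (`detector`, `imgs` and `img_count`
 * are placeholders for a detector handle and an image batch created elsewhere;
 * error handling is omitted for brevity):
 * @code
 *   mmdeploy_value_t input = NULL;
 *   mmdeploy_detector_create_input(imgs, img_count, &input);
 *   mmdeploy_value_t output = NULL;
 *   mmdeploy_detector_apply_v2(detector, input, &output);
 *   mmdeploy_detection_t* dets = NULL;
 *   int* det_count = NULL;
 *   mmdeploy_detector_get_result(output, &dets, &det_count);
 *   // ... consume dets ...
 *   mmdeploy_detector_release_result(dets, det_count, img_count);
 *   mmdeploy_value_destroy(output);
 *   mmdeploy_value_destroy(input);
 * @endcode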
*/ -MMDEPLOY_API int mmdeploy_detector_apply_v2(mm_handle_t handle, mmdeploy_value_t input, +MMDEPLOY_API int mmdeploy_detector_apply_v2(mmdeploy_detector_t detector, mmdeploy_value_t input, mmdeploy_value_t* output); /** * @brief Apply detector asynchronously - * @param[in] handle handle to the detector + * @param[in] detector handle to the detector * @param[in] input input sender * @return output sender */ -MMDEPLOY_API int mmdeploy_detector_apply_async(mm_handle_t handle, mmdeploy_sender_t input, - mmdeploy_sender_t* output); +MMDEPLOY_API int mmdeploy_detector_apply_async(mmdeploy_detector_t detector, + mmdeploy_sender_t input, mmdeploy_sender_t* output); /** * @brief Unpack detector output from a mmdeploy_value_t @@ -126,8 +130,8 @@ MMDEPLOY_API int mmdeploy_detector_apply_async(mm_handle_t handle, mmdeploy_send * mmdeploy_detector_release_result * @return status of the operation */ -MMDEPLOY_API int mmdeploy_detector_get_result(mmdeploy_value_t output, mm_detect_t** results, - int** result_count); +MMDEPLOY_API int mmdeploy_detector_get_result(mmdeploy_value_t output, + mmdeploy_detection_t** results, int** result_count); #ifdef __cplusplus } diff --git a/csrc/mmdeploy/apis/c/executor.cpp b/csrc/mmdeploy/apis/c/mmdeploy/executor.cpp similarity index 93% rename from csrc/mmdeploy/apis/c/executor.cpp rename to csrc/mmdeploy/apis/c/mmdeploy/executor.cpp index 2ff240391..998df35cd 100644 --- a/csrc/mmdeploy/apis/c/executor.cpp +++ b/csrc/mmdeploy/apis/c/mmdeploy/executor.cpp @@ -1,6 +1,6 @@ // Copyright (c) OpenMMLab. All rights reserved. -#include "mmdeploy/apis/c/executor.h" +#include "executor.h" #include "common.h" #include "common_internal.h" @@ -15,12 +15,13 @@ mmdeploy_scheduler_t CreateScheduler(const char* type, const Value& config = Val try { auto creator = Registry::Get().GetCreator(type); if (!creator) { - MMDEPLOY_ERROR("creator for {} not found.", type); + MMDEPLOY_ERROR("Creator for {} not found. Available schedulers: {}", type, + Registry::Get().List()); return nullptr; } return Cast(new SchedulerType(creator->Create(config))); } catch (const std::exception& e) { - MMDEPLOY_ERROR("failed to create {}, error: {}", type, e.what()); + MMDEPLOY_ERROR("failed to create Scheduler: {} ({}), config: {}", type, e.what(), config); return nullptr; } } @@ -168,14 +169,14 @@ mmdeploy_sender_t mmdeploy_executor_ensure_started(mmdeploy_sender_t input) { int mmdeploy_executor_start_detached(mmdeploy_sender_t input) { if (!input) { - return MM_E_INVALID_ARG; + return MMDEPLOY_E_INVALID_ARG; } try { StartDetached(Take(input)); return 0; } catch (...) 
{ } - return MM_E_FAIL; + return MMDEPLOY_E_FAIL; } mmdeploy_value_t mmdeploy_executor_sync_wait(mmdeploy_sender_t input) { @@ -187,18 +188,18 @@ mmdeploy_value_t mmdeploy_executor_sync_wait(mmdeploy_sender_t input) { int mmdeploy_executor_sync_wait_v2(mmdeploy_sender_t sender, mmdeploy_value_t* value) { if (!sender) { - return MM_E_INVALID_ARG; + return MMDEPLOY_E_INVALID_ARG; } auto result = mmdeploy_executor_sync_wait(sender); if (!result) { - return MM_E_FAIL; + return MMDEPLOY_E_FAIL; } if (value) { *value = result; } else { mmdeploy_value_destroy(result); } - return MM_SUCCESS; + return MMDEPLOY_SUCCESS; } void mmdeploy_executor_execute(mmdeploy_scheduler_t scheduler, void (*fn)(void*), void* context) { diff --git a/csrc/mmdeploy/apis/c/executor.h b/csrc/mmdeploy/apis/c/mmdeploy/executor.h similarity index 100% rename from csrc/mmdeploy/apis/c/executor.h rename to csrc/mmdeploy/apis/c/mmdeploy/executor.h diff --git a/csrc/mmdeploy/apis/c/executor_internal.h b/csrc/mmdeploy/apis/c/mmdeploy/executor_internal.h similarity index 100% rename from csrc/mmdeploy/apis/c/executor_internal.h rename to csrc/mmdeploy/apis/c/mmdeploy/executor_internal.h diff --git a/csrc/mmdeploy/apis/c/handle.h b/csrc/mmdeploy/apis/c/mmdeploy/handle.h similarity index 85% rename from csrc/mmdeploy/apis/c/handle.h rename to csrc/mmdeploy/apis/c/mmdeploy/handle.h index 379bc615f..48e596af5 100644 --- a/csrc/mmdeploy/apis/c/handle.h +++ b/csrc/mmdeploy/apis/c/mmdeploy/handle.h @@ -22,12 +22,13 @@ class AsyncHandle { config["context"].update({{"device", device_}, {"stream", stream_}}); auto creator = Registry::Get().GetCreator("Pipeline"); if (!creator) { - MMDEPLOY_ERROR("failed to find Pipeline creator"); + MMDEPLOY_ERROR("Failed to find Pipeline creator. Available nodes: {}", + Registry::Get().List()); throw_exception(eEntryNotFound); } pipeline_ = creator->Create(config); if (!pipeline_) { - MMDEPLOY_ERROR("create pipeline failed"); + MMDEPLOY_ERROR("Failed to create pipeline, config: {}", config); throw_exception(eFail); } } diff --git a/csrc/mmdeploy/apis/c/model.cpp b/csrc/mmdeploy/apis/c/mmdeploy/model.cpp similarity index 51% rename from csrc/mmdeploy/apis/c/model.cpp rename to csrc/mmdeploy/apis/c/mmdeploy/model.cpp index 75c1eca1f..2f22e9ffb 100644 --- a/csrc/mmdeploy/apis/c/model.cpp +++ b/csrc/mmdeploy/apis/c/mmdeploy/model.cpp @@ -11,30 +11,30 @@ using namespace mmdeploy; -int mmdeploy_model_create_by_path(const char *path, mm_model_t *model) { +int mmdeploy_model_create_by_path(const char* path, mmdeploy_model_t* model) { try { auto ptr = std::make_unique<Model>(path); - *model = ptr.release(); - return MM_SUCCESS; - } catch (const std::exception &e) { + *model = reinterpret_cast<mmdeploy_model_t>(ptr.release()); + return MMDEPLOY_SUCCESS; + } catch (const std::exception& e) { MMDEPLOY_ERROR("failed to create model: {}", e.what()); } catch (...) { MMDEPLOY_ERROR("unknown exception caught"); } - return MM_E_FAIL; + return MMDEPLOY_E_FAIL; } -int mmdeploy_model_create(const void *buffer, int size, mm_model_t *model) { +int mmdeploy_model_create(const void* buffer, int size, mmdeploy_model_t* model) { try { auto ptr = std::make_unique<Model>(buffer, size); - *model = ptr.release(); - return MM_SUCCESS; - } catch (const std::exception &e) { + *model = reinterpret_cast<mmdeploy_model_t>(ptr.release()); + return MMDEPLOY_SUCCESS; + } catch (const std::exception& e) { MMDEPLOY_ERROR("failed to create model: {}", e.what()); } catch (...)
{ MMDEPLOY_ERROR("unknown exception caught"); } - return MM_E_FAIL; + return MMDEPLOY_E_FAIL; } -void mmdeploy_model_destroy(mm_model_t model) { delete static_cast(model); } +void mmdeploy_model_destroy(mmdeploy_model_t model) { delete reinterpret_cast(model); } diff --git a/csrc/mmdeploy/apis/c/model.h b/csrc/mmdeploy/apis/c/mmdeploy/model.h similarity index 86% rename from csrc/mmdeploy/apis/c/model.h rename to csrc/mmdeploy/apis/c/mmdeploy/model.h index 6151ba43a..32d3ef3b9 100644 --- a/csrc/mmdeploy/apis/c/model.h +++ b/csrc/mmdeploy/apis/c/mmdeploy/model.h @@ -14,13 +14,15 @@ extern "C" { #endif +typedef struct mmdeploy_model* mmdeploy_model_t; + /** * @brief Create SDK Model instance from given model path * @param[in] path model path * @param[out] model sdk model instance that must be destroyed by \ref mmdeploy_model_destroy * @return status code of the operation */ -MMDEPLOY_API int mmdeploy_model_create_by_path(const char* path, mm_model_t* model); +MMDEPLOY_API int mmdeploy_model_create_by_path(const char* path, mmdeploy_model_t* model); /** * @brief Create SDK Model instance from memory @@ -29,14 +31,14 @@ MMDEPLOY_API int mmdeploy_model_create_by_path(const char* path, mm_model_t* mod * @param[out] model sdk model instance that must be destroyed by \ref mmdeploy_model_destroy * @return status code of the operation */ -MMDEPLOY_API int mmdeploy_model_create(const void* buffer, int size, mm_model_t* model); +MMDEPLOY_API int mmdeploy_model_create(const void* buffer, int size, mmdeploy_model_t* model); /** * @brief Destroy model instance * @param[in] model sdk model instance created by \ref mmdeploy_model_create_by_path or \ref * mmdeploy_model_create */ -MMDEPLOY_API void mmdeploy_model_destroy(mm_model_t model); +MMDEPLOY_API void mmdeploy_model_destroy(mmdeploy_model_t model); #ifdef __cplusplus } diff --git a/csrc/mmdeploy/apis/c/pipeline.cpp b/csrc/mmdeploy/apis/c/mmdeploy/pipeline.cpp similarity index 57% rename from csrc/mmdeploy/apis/c/pipeline.cpp rename to csrc/mmdeploy/apis/c/mmdeploy/pipeline.cpp index b7ba22ea6..e3c1f7fae 100644 --- a/csrc/mmdeploy/apis/c/pipeline.cpp +++ b/csrc/mmdeploy/apis/c/mmdeploy/pipeline.cpp @@ -2,12 +2,12 @@ #include "pipeline.h" -#include "mmdeploy/apis/c/common_internal.h" -#include "mmdeploy/apis/c/executor_internal.h" -#include "mmdeploy/apis/c/handle.h" +#include "common_internal.h" +#include "executor_internal.h" +#include "handle.h" int mmdeploy_pipeline_create(mmdeploy_value_t config, const char* device_name, int device_id, - mmdeploy_exec_info_t exec_info, mm_handle_t* handle) { + mmdeploy_exec_info_t exec_info, mmdeploy_pipeline_t* pipeline) { try { auto _config = *Cast(config); if (exec_info) { @@ -16,57 +16,58 @@ int mmdeploy_pipeline_create(mmdeploy_value_t config, const char* device_name, i info[p->task_name] = *Cast(p->scheduler); if (p->next == exec_info) { MMDEPLOY_ERROR("circle detected in exec_info list."); - return MM_E_INVALID_ARG; + return MMDEPLOY_E_INVALID_ARG; } } } auto _handle = std::make_unique(device_name, device_id, std::move(_config)); - *handle = _handle.release(); - return MM_SUCCESS; + *pipeline = Cast(_handle.release()); + return MMDEPLOY_SUCCESS; } catch (const std::exception& e) { MMDEPLOY_ERROR("exception caught: {}", e.what()); } catch (...) 
{ MMDEPLOY_ERROR("unknown exception caught"); } - return MM_E_FAIL; + return MMDEPLOY_E_FAIL; } -int mmdeploy_pipeline_apply_async(mm_handle_t handle, mmdeploy_sender_t input, +int mmdeploy_pipeline_apply_async(mmdeploy_pipeline_t pipeline, mmdeploy_sender_t input, mmdeploy_sender_t* output) { - if (!handle || !input || !output) { - return MM_E_INVALID_ARG; + if (!pipeline || !input || !output) { + return MMDEPLOY_E_INVALID_ARG; } try { - auto h = static_cast(handle); + auto h = Cast(pipeline); *output = Take(h->Process(Take(input))); - return MM_SUCCESS; + return MMDEPLOY_SUCCESS; } catch (const std::exception& e) { MMDEPLOY_ERROR("exception caught: {}", e.what()); } catch (...) { MMDEPLOY_ERROR("unknown exception caught"); } - return MM_E_FAIL; + return MMDEPLOY_E_FAIL; } -void mmdeploy_pipeline_destroy(mm_handle_t handle) { - if (handle != nullptr) { - delete static_cast(handle); +void mmdeploy_pipeline_destroy(mmdeploy_pipeline_t pipeline) { + if (pipeline != nullptr) { + delete Cast(pipeline); } } -int mmdeploy_pipeline_apply(mm_handle_t handle, mmdeploy_value_t input, mmdeploy_value_t* output) { +int mmdeploy_pipeline_apply(mmdeploy_pipeline_t pipeline, mmdeploy_value_t input, + mmdeploy_value_t* output) { auto input_sender = mmdeploy_executor_just(input); if (!input_sender) { - return MM_E_FAIL; + return MMDEPLOY_E_FAIL; } mmdeploy_sender_t output_sender{}; - if (auto ec = mmdeploy_pipeline_apply_async(handle, input_sender, &output_sender)) { + if (auto ec = mmdeploy_pipeline_apply_async(pipeline, input_sender, &output_sender)) { return ec; } auto _output = mmdeploy_executor_sync_wait(output_sender); if (!_output) { - return MM_E_FAIL; + return MMDEPLOY_E_FAIL; } *output = _output; - return MM_SUCCESS; + return MMDEPLOY_SUCCESS; } diff --git a/csrc/mmdeploy/apis/c/pipeline.h b/csrc/mmdeploy/apis/c/mmdeploy/pipeline.h similarity index 69% rename from csrc/mmdeploy/apis/c/pipeline.h rename to csrc/mmdeploy/apis/c/mmdeploy/pipeline.h index 450b3c6da..c262753b8 100644 --- a/csrc/mmdeploy/apis/c/pipeline.h +++ b/csrc/mmdeploy/apis/c/mmdeploy/pipeline.h @@ -13,44 +13,46 @@ extern "C" { /****************************************************************************** * Experimental pipeline APIs */ +typedef struct mmdeploy_pipeline* mmdeploy_pipeline_t; + /** * @brief Create pipeline * @param[in] config config of the pipeline * @param[in] device_name name of device, such as "cpu", "cuda", etc. * @param[in] device_id id of device. 
* @param[in] exec_info execution options - * @param[out] handle handle of the pipeline + * @param[out] pipeline handle of the pipeline * @return status of the operation */ MMDEPLOY_API int mmdeploy_pipeline_create(mmdeploy_value_t config, const char* device_name, int device_id, mmdeploy_exec_info_t exec_info, - mm_handle_t* handle); + mmdeploy_pipeline_t* pipeline); /** * @brief Apply pipeline - * @param[in] handle handle of the pipeline + * @param[in] pipeline handle of the pipeline * @param[in] input input value * @param[out] output output value * @return status of the operation */ -MMDEPLOY_API int mmdeploy_pipeline_apply(mm_handle_t handle, mmdeploy_value_t input, +MMDEPLOY_API int mmdeploy_pipeline_apply(mmdeploy_pipeline_t pipeline, mmdeploy_value_t input, mmdeploy_value_t* output); /** * Apply pipeline asynchronously - * @param handle handle of the pipeline + * @param pipeline handle of the pipeline * @param input input sender that will be consumed by the operation * @param output output sender * @return status of the operation */ -MMDEPLOY_API int mmdeploy_pipeline_apply_async(mm_handle_t handle, mmdeploy_sender_t input, - mmdeploy_sender_t* output); +MMDEPLOY_API int mmdeploy_pipeline_apply_async(mmdeploy_pipeline_t pipeline, + mmdeploy_sender_t input, mmdeploy_sender_t* output); /** * @brief destroy pipeline - * @param[in] handle + * @param[in] pipeline */ -MMDEPLOY_API void mmdeploy_pipeline_destroy(mm_handle_t handle); +MMDEPLOY_API void mmdeploy_pipeline_destroy(mmdeploy_pipeline_t pipeline); #ifdef __cplusplus } diff --git a/csrc/mmdeploy/apis/c/pose_detector.cpp b/csrc/mmdeploy/apis/c/mmdeploy/pose_detector.cpp similarity index 63% rename from csrc/mmdeploy/apis/c/pose_detector.cpp rename to csrc/mmdeploy/apis/c/mmdeploy/pose_detector.cpp index 0725e7c66..0a86c52b3 100644 --- a/csrc/mmdeploy/apis/c/pose_detector.cpp +++ b/csrc/mmdeploy/apis/c/mmdeploy/pose_detector.cpp @@ -4,14 +4,14 @@ #include -#include "mmdeploy/apis/c/common_internal.h" -#include "mmdeploy/apis/c/handle.h" -#include "mmdeploy/apis/c/pipeline.h" +#include "common_internal.h" +#include "handle.h" #include "mmdeploy/codebase/mmpose/mmpose.h" #include "mmdeploy/core/device.h" #include "mmdeploy/core/graph.h" #include "mmdeploy/core/mat.h" #include "mmdeploy/core/utils/formatter.h" +#include "pipeline.h" using namespace std; using namespace mmdeploy; @@ -55,56 +55,58 @@ const Value& config_template() { return v; } -int mmdeploy_pose_detector_create_impl(mm_model_t model, const char* device_name, int device_id, - mmdeploy_exec_info_t exec_info, mm_handle_t* handle) { +int mmdeploy_pose_detector_create_impl(mmdeploy_model_t model, const char* device_name, + int device_id, mmdeploy_exec_info_t exec_info, + mmdeploy_pose_detector_t* detector) { auto config = config_template(); - config["pipeline"]["tasks"][1]["params"]["model"] = *static_cast(model); + config["pipeline"]["tasks"][1]["params"]["model"] = *Cast(model); - return mmdeploy_pipeline_create(Cast(&config), device_name, device_id, exec_info, handle); + return mmdeploy_pipeline_create(Cast(&config), device_name, device_id, exec_info, + (mmdeploy_pipeline_t*)detector); } } // namespace -int mmdeploy_pose_detector_create(mm_model_t model, const char* device_name, int device_id, - mm_handle_t* handle) { - return mmdeploy_pose_detector_create_impl(model, device_name, device_id, nullptr, handle); +int mmdeploy_pose_detector_create(mmdeploy_model_t model, const char* device_name, int device_id, + mmdeploy_pose_detector_t* detector) { + return 
mmdeploy_pose_detector_create_impl(model, device_name, device_id, nullptr, detector); } int mmdeploy_pose_detector_create_by_path(const char* model_path, const char* device_name, - int device_id, mm_handle_t* handle) { - mm_model_t model{}; + int device_id, mmdeploy_pose_detector_t* detector) { + mmdeploy_model_t model{}; if (auto ec = mmdeploy_model_create_by_path(model_path, &model)) { return ec; } - auto ec = mmdeploy_pose_detector_create_impl(model, device_name, device_id, nullptr, handle); + auto ec = mmdeploy_pose_detector_create_impl(model, device_name, device_id, nullptr, detector); mmdeploy_model_destroy(model); return ec; } -int mmdeploy_pose_detector_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, - mm_pose_detect_t** results) { - return mmdeploy_pose_detector_apply_bbox(handle, mats, mat_count, nullptr, nullptr, results); +int mmdeploy_pose_detector_apply(mmdeploy_pose_detector_t detector, const mmdeploy_mat_t* mats, + int mat_count, mmdeploy_pose_detection_t** results) { + return mmdeploy_pose_detector_apply_bbox(detector, mats, mat_count, nullptr, nullptr, results); } -int mmdeploy_pose_detector_apply_bbox(mm_handle_t handle, const mm_mat_t* mats, int mat_count, - const mm_rect_t* bboxes, const int* bbox_count, - mm_pose_detect_t** results) { +int mmdeploy_pose_detector_apply_bbox(mmdeploy_pose_detector_t detector, const mmdeploy_mat_t* mats, + int mat_count, const mmdeploy_rect_t* bboxes, + const int* bbox_count, mmdeploy_pose_detection_t** results) { wrapped input; if (auto ec = mmdeploy_pose_detector_create_input(mats, mat_count, bboxes, bbox_count, input.ptr())) { return ec; } wrapped output; - if (auto ec = mmdeploy_pose_detector_apply_v2(handle, input, output.ptr())) { + if (auto ec = mmdeploy_pose_detector_apply_v2(detector, input, output.ptr())) { return ec; } if (auto ec = mmdeploy_pose_detector_get_result(output, results)) { return ec; } - return MM_SUCCESS; + return MMDEPLOY_SUCCESS; } -void mmdeploy_pose_detector_release_result(mm_pose_detect_t* results, int count) { +void mmdeploy_pose_detector_release_result(mmdeploy_pose_detection_t* results, int count) { if (results == nullptr) { return; } @@ -115,17 +117,18 @@ void mmdeploy_pose_detector_release_result(mm_pose_detect_t* results, int count) delete[] results; } -void mmdeploy_pose_detector_destroy(mm_handle_t handle) { - delete static_cast(handle); +void mmdeploy_pose_detector_destroy(mmdeploy_pose_detector_t detector) { + mmdeploy_pipeline_destroy((mmdeploy_pipeline_t)detector); } -int mmdeploy_pose_detector_create_v2(mm_model_t model, const char* device_name, int device_id, - mmdeploy_exec_info_t exec_info, mm_handle_t* handle) { - return mmdeploy_pose_detector_create_impl(model, device_name, device_id, exec_info, handle); +int mmdeploy_pose_detector_create_v2(mmdeploy_model_t model, const char* device_name, int device_id, + mmdeploy_exec_info_t exec_info, + mmdeploy_pose_detector_t* detector) { + return mmdeploy_pose_detector_create_impl(model, device_name, device_id, exec_info, detector); } -int mmdeploy_pose_detector_create_input(const mm_mat_t* mats, int mat_count, - const mm_rect_t* bboxes, const int* bbox_count, +int mmdeploy_pose_detector_create_input(const mmdeploy_mat_t* mats, int mat_count, + const mmdeploy_rect_t* bboxes, const int* bbox_count, mmdeploy_value_t* value) { try { Value input{Value::kArray}; @@ -161,28 +164,29 @@ int mmdeploy_pose_detector_create_input(const mm_mat_t* mats, int mat_count, input.front().push_back(img_with_boxes); } *value = Take(std::move(input)); - 
return MM_SUCCESS; + return MMDEPLOY_SUCCESS; } catch (const std::exception& e) { MMDEPLOY_ERROR("unhandled exception: {}", e.what()); } catch (...) { MMDEPLOY_ERROR("unknown exception caught"); } - return MM_E_FAIL; + return MMDEPLOY_E_FAIL; } -int mmdeploy_pose_detector_apply_v2(mm_handle_t handle, mmdeploy_value_t input, +int mmdeploy_pose_detector_apply_v2(mmdeploy_pose_detector_t detector, mmdeploy_value_t input, mmdeploy_value_t* output) { - return mmdeploy_pipeline_apply(handle, input, output); + return mmdeploy_pipeline_apply((mmdeploy_pipeline_t)detector, input, output); } -int mmdeploy_pose_detector_apply_async(mm_handle_t handle, mmdeploy_sender_t input, +int mmdeploy_pose_detector_apply_async(mmdeploy_pose_detector_t detector, mmdeploy_sender_t input, mmdeploy_sender_t* output) { - return mmdeploy_pipeline_apply_async(handle, input, output); + return mmdeploy_pipeline_apply_async((mmdeploy_pipeline_t)detector, input, output); } -int mmdeploy_pose_detector_get_result(mmdeploy_value_t output, mm_pose_detect_t** results) { +int mmdeploy_pose_detector_get_result(mmdeploy_value_t output, + mmdeploy_pose_detection_t** results) { if (!output || !results) { - return MM_E_INVALID_ARG; + return MMDEPLOY_E_INVALID_ARG; } try { Value& value = Cast(output)->front(); @@ -195,12 +199,12 @@ int mmdeploy_pose_detector_get_result(mmdeploy_value_t output, mm_pose_detect_t* result_count += v.size(); } - auto deleter = [&](mm_pose_detect_t* p) { + auto deleter = [&](mmdeploy_pose_detection_t* p) { mmdeploy_pose_detector_release_result(p, static_cast(result_count)); }; - std::unique_ptr _results( - new mm_pose_detect_t[result_count]{}, deleter); + std::unique_ptr _results( + new mmdeploy_pose_detection_t[result_count]{}, deleter); size_t result_idx = 0; for (const auto& img_result : pose_outputs) { @@ -208,7 +212,7 @@ int mmdeploy_pose_detector_get_result(mmdeploy_value_t output, mm_pose_detect_t* auto& res = _results[result_idx++]; auto size = box_result.key_points.size(); - res.point = new mm_pointf_t[size]; + res.point = new mmdeploy_point_t[size]; res.score = new float[size]; res.length = static_cast(size); @@ -220,11 +224,11 @@ int mmdeploy_pose_detector_get_result(mmdeploy_value_t output, mm_pose_detect_t* } } *results = _results.release(); - return MM_SUCCESS; + return MMDEPLOY_SUCCESS; } catch (const std::exception& e) { MMDEPLOY_ERROR("unhandled exception: {}", e.what()); } catch (...) 
{ MMDEPLOY_ERROR("unknown exception caught"); } - return MM_E_FAIL; + return MMDEPLOY_E_FAIL; } diff --git a/csrc/mmdeploy/apis/c/pose_detector.h b/csrc/mmdeploy/apis/c/mmdeploy/pose_detector.h similarity index 55% rename from csrc/mmdeploy/apis/c/pose_detector.h rename to csrc/mmdeploy/apis/c/mmdeploy/pose_detector.h index 7cafbc718..90cb54b73 100644 --- a/csrc/mmdeploy/apis/c/pose_detector.h +++ b/csrc/mmdeploy/apis/c/mmdeploy/pose_detector.h @@ -10,16 +10,19 @@ #include "common.h" #include "executor.h" +#include "model.h" #ifdef __cplusplus extern "C" { #endif -typedef struct mm_pose_detect_t { - mm_pointf_t* point; ///< keypoint - float* score; ///< keypoint score - int length; ///< number of keypoint -} mm_pose_detect_t; +typedef struct mmdeploy_pose_detection_t { + mmdeploy_point_t* point; ///< keypoint + float* score; ///< keypoint score + int length; ///< number of keypoint +} mmdeploy_pose_detection_t; + +typedef struct mmdeploy_pose_detector* mmdeploy_pose_detector_t; /** * @brief Create a pose detector instance @@ -27,29 +30,29 @@ typedef struct mm_pose_detect_t { * \ref mmdeploy_model_create_by_path or \ref mmdeploy_model_create in \ref model.h * @param[in] device_name name of device, such as "cpu", "cuda", etc. * @param[in] device_id id of device. - * @param[out] handle handle of the created pose detector, which must be destroyed + * @param[out] detector handle of the created pose detector, which must be destroyed * by \ref mmdeploy_pose_detector_destroy * @return status code of the operation */ -MMDEPLOY_API int mmdeploy_pose_detector_create(mm_model_t model, const char* device_name, - int device_id, mm_handle_t* handle); +MMDEPLOY_API int mmdeploy_pose_detector_create(mmdeploy_model_t model, const char* device_name, + int device_id, mmdeploy_pose_detector_t* detector); /** * @brief Create a pose detector instance * @param[in] model_path path to pose detection model * @param[in] device_name name of device, such as "cpu", "cuda", etc. * @param[in] device_id id of device. 
- * @param[out] handle handle of the created pose detector, which must be destroyed + * @param[out] detector handle of the created pose detector, which must be destroyed * by \ref mmdeploy_pose_detector_destroy * @return status code of the operation */ MMDEPLOY_API int mmdeploy_pose_detector_create_by_path(const char* model_path, const char* device_name, int device_id, - mm_handle_t* handle); + mmdeploy_pose_detector_t* detector); /** * @brief Apply pose detector to a batch of images with full image roi - * @param[in] handle pose detector's handle created by \ref + * @param[in] detector pose detector's handle created by \ref * mmdeploy_pose_detector_create_by_path * @param[in] images a batch of images * @param[in] count number of images in the batch @@ -57,12 +60,13 @@ MMDEPLOY_API int mmdeploy_pose_detector_create_by_path(const char* model_path, * by \ref mmdeploy_pose_detector_release_result * @return status code of the operation */ -MMDEPLOY_API int mmdeploy_pose_detector_apply(mm_handle_t handle, const mm_mat_t* mats, - int mat_count, mm_pose_detect_t** results); +MMDEPLOY_API int mmdeploy_pose_detector_apply(mmdeploy_pose_detector_t detector, + const mmdeploy_mat_t* mats, int mat_count, + mmdeploy_pose_detection_t** results); /** * @brief Apply pose detector to a batch of images supplied with bboxes(roi) - * @param[in] handle pose detector's handle created by \ref + * @param[in] detector pose detector's handle created by \ref * mmdeploy_pose_detector_create_by_path * @param[in] images a batch of images * @param[in] image_count number of images in the batch @@ -72,44 +76,48 @@ MMDEPLOY_API int mmdeploy_pose_detector_apply(mm_handle_t handle, const mm_mat_t * bboxes, must be release by \ref mmdeploy_pose_detector_release_result * @return status code of the operation */ -MMDEPLOY_API int mmdeploy_pose_detector_apply_bbox(mm_handle_t handle, const mm_mat_t* mats, - int mat_count, const mm_rect_t* bboxes, +MMDEPLOY_API int mmdeploy_pose_detector_apply_bbox(mmdeploy_pose_detector_t detector, + const mmdeploy_mat_t* mats, int mat_count, + const mmdeploy_rect_t* bboxes, const int* bbox_count, - mm_pose_detect_t** results); + mmdeploy_pose_detection_t** results); /** @brief Release result buffer returned by \ref mmdeploy_pose_detector_apply or \ref * mmdeploy_pose_detector_apply_bbox * @param[in] results result buffer by pose detector * @param[in] count length of \p result */ -MMDEPLOY_API void mmdeploy_pose_detector_release_result(mm_pose_detect_t* results, int count); +MMDEPLOY_API void mmdeploy_pose_detector_release_result(mmdeploy_pose_detection_t* results, + int count); /** * @brief destroy pose_detector - * @param[in] handle handle of pose_detector created by \ref + * @param[in] detector handle of pose_detector created by \ref * mmdeploy_pose_detector_create_by_path or \ref mmdeploy_pose_detector_create */ -MMDEPLOY_API void mmdeploy_pose_detector_destroy(mm_handle_t handle); +MMDEPLOY_API void mmdeploy_pose_detector_destroy(mmdeploy_pose_detector_t detector); /****************************************************************************** * Experimental asynchronous APIs */ -MMDEPLOY_API int mmdeploy_pose_detector_create_v2(mm_model_t model, const char* device_name, +MMDEPLOY_API int mmdeploy_pose_detector_create_v2(mmdeploy_model_t model, const char* device_name, int device_id, mmdeploy_exec_info_t exec_info, - mm_handle_t* handle); + mmdeploy_pose_detector_t* detector); -MMDEPLOY_API int mmdeploy_pose_detector_create_input(const mm_mat_t* mats, int mat_count, - const mm_rect_t* bboxes, 
const int* bbox_count, +MMDEPLOY_API int mmdeploy_pose_detector_create_input(const mmdeploy_mat_t* mats, int mat_count, + const mmdeploy_rect_t* bboxes, + const int* bbox_count, mmdeploy_value_t* value); -MMDEPLOY_API int mmdeploy_pose_detector_apply_v2(mm_handle_t handle, mmdeploy_value_t input, - mmdeploy_value_t* output); +MMDEPLOY_API int mmdeploy_pose_detector_apply_v2(mmdeploy_pose_detector_t detector, + mmdeploy_value_t input, mmdeploy_value_t* output); -MMDEPLOY_API int mmdeploy_pose_detector_apply_async(mm_handle_t handle, mmdeploy_sender_t input, +MMDEPLOY_API int mmdeploy_pose_detector_apply_async(mmdeploy_pose_detector_t detector, + mmdeploy_sender_t input, mmdeploy_sender_t* output); MMDEPLOY_API int mmdeploy_pose_detector_get_result(mmdeploy_value_t output, - mm_pose_detect_t** results); + mmdeploy_pose_detection_t** results); #ifdef __cplusplus } diff --git a/csrc/mmdeploy/apis/c/restorer.cpp b/csrc/mmdeploy/apis/c/mmdeploy/restorer.cpp similarity index 55% rename from csrc/mmdeploy/apis/c/restorer.cpp rename to csrc/mmdeploy/apis/c/mmdeploy/restorer.cpp index 77b21e300..7248d2058 100644 --- a/csrc/mmdeploy/apis/c/restorer.cpp +++ b/csrc/mmdeploy/apis/c/mmdeploy/restorer.cpp @@ -2,14 +2,14 @@ #include "restorer.h" -#include "mmdeploy/apis/c/common_internal.h" -#include "mmdeploy/apis/c/executor_internal.h" -#include "mmdeploy/apis/c/handle.h" -#include "mmdeploy/apis/c/pipeline.h" +#include "common_internal.h" +#include "executor_internal.h" +#include "handle.h" #include "mmdeploy/codebase/mmedit/mmedit.h" #include "mmdeploy/core/device.h" #include "mmdeploy/core/graph.h" #include "mmdeploy/core/utils/formatter.h" +#include "pipeline.h" using namespace mmdeploy; @@ -40,79 +40,83 @@ const Value& config_template() { return v; } -int mmdeploy_restorer_create_impl(mm_model_t model, const char* device_name, int device_id, - mmdeploy_exec_info_t exec_info, mm_handle_t* handle) { +int mmdeploy_restorer_create_impl(mmdeploy_model_t model, const char* device_name, int device_id, + mmdeploy_exec_info_t exec_info, mmdeploy_restorer_t* restorer) { auto config = config_template(); - config["pipeline"]["tasks"][0]["params"]["model"] = *static_cast(model); + config["pipeline"]["tasks"][0]["params"]["model"] = *Cast(model); - return mmdeploy_pipeline_create(Cast(&config), device_name, device_id, exec_info, handle); + return mmdeploy_pipeline_create(Cast(&config), device_name, device_id, exec_info, + (mmdeploy_pipeline_t*)restorer); } } // namespace -int mmdeploy_restorer_create(mm_model_t model, const char* device_name, int device_id, - mm_handle_t* handle) { - return mmdeploy_restorer_create_impl(model, device_name, device_id, nullptr, handle); +int mmdeploy_restorer_create(mmdeploy_model_t model, const char* device_name, int device_id, + mmdeploy_restorer_t* restorer) { + return mmdeploy_restorer_create_impl(model, device_name, device_id, nullptr, restorer); } int mmdeploy_restorer_create_by_path(const char* model_path, const char* device_name, int device_id, - mm_handle_t* handle) { - mm_model_t model{}; + mmdeploy_restorer_t* restorer) { + mmdeploy_model_t model{}; if (auto ec = mmdeploy_model_create_by_path(model_path, &model)) { return ec; } - auto ec = mmdeploy_restorer_create_impl(model, device_name, device_id, nullptr, handle); + auto ec = mmdeploy_restorer_create_impl(model, device_name, device_id, nullptr, restorer); mmdeploy_model_destroy(model); return ec; } -int mmdeploy_restorer_apply(mm_handle_t handle, const mm_mat_t* images, int count, - mm_mat_t** results) { +int 
mmdeploy_restorer_apply(mmdeploy_restorer_t restorer, const mmdeploy_mat_t* images, int count, + mmdeploy_mat_t** results) { wrapped input; if (auto ec = mmdeploy_restorer_create_input(images, count, input.ptr())) { return ec; } wrapped output; - if (auto ec = mmdeploy_restorer_apply_v2(handle, input, output.ptr())) { + if (auto ec = mmdeploy_restorer_apply_v2(restorer, input, output.ptr())) { return ec; } if (auto ec = mmdeploy_restorer_get_result(output, results)) { return ec; } - return MM_SUCCESS; + return MMDEPLOY_SUCCESS; } -void mmdeploy_restorer_release_result(mm_mat_t* results, int count) { +void mmdeploy_restorer_release_result(mmdeploy_mat_t* results, int count) { for (int i = 0; i < count; ++i) { delete[] results[i].data; } delete[] results; } -void mmdeploy_restorer_destroy(mm_handle_t handle) { delete static_cast(handle); } - -int mmdeploy_restorer_create_v2(mm_model_t model, const char* device_name, int device_id, - mmdeploy_exec_info_t exec_info, mm_handle_t* handle) { - return mmdeploy_restorer_create_impl(model, device_name, device_id, exec_info, handle); +void mmdeploy_restorer_destroy(mmdeploy_restorer_t restorer) { + mmdeploy_pipeline_destroy((mmdeploy_pipeline_t)restorer); } -int mmdeploy_restorer_create_input(const mm_mat_t* mats, int mat_count, mmdeploy_value_t* value) { +int mmdeploy_restorer_create_v2(mmdeploy_model_t model, const char* device_name, int device_id, + mmdeploy_exec_info_t exec_info, mmdeploy_restorer_t* restorer) { + return mmdeploy_restorer_create_impl(model, device_name, device_id, exec_info, restorer); +} + +int mmdeploy_restorer_create_input(const mmdeploy_mat_t* mats, int mat_count, + mmdeploy_value_t* value) { return mmdeploy_common_create_input(mats, mat_count, value); } -int mmdeploy_restorer_apply_v2(mm_handle_t handle, mmdeploy_value_t input, +int mmdeploy_restorer_apply_v2(mmdeploy_restorer_t restorer, mmdeploy_value_t input, mmdeploy_value_t* output) { - return mmdeploy_pipeline_apply(handle, input, output); + return mmdeploy_pipeline_apply((mmdeploy_pipeline_t)restorer, input, output); } -int mmdeploy_restorer_apply_async(mm_handle_t handle, mmdeploy_sender_t input, +int mmdeploy_restorer_apply_async(mmdeploy_restorer_t restorer, mmdeploy_sender_t input, mmdeploy_sender_t* output) { - return mmdeploy_pipeline_apply_async(handle, input, output); + return mmdeploy_pipeline_apply_async((mmdeploy_pipeline_t)restorer, input, output); } -int mmdeploy_restorer_get_result(mmdeploy_value_t output, mm_mat_t** results) { +int mmdeploy_restorer_get_result(mmdeploy_value_t output, mmdeploy_mat_t** results) { if (!output || !results) { - return MM_E_INVALID_ARG; + return MMDEPLOY_E_INVALID_ARG; } try { const Value& value = Cast(output)->front(); @@ -121,29 +125,30 @@ int mmdeploy_restorer_get_result(mmdeploy_value_t output, mm_mat_t** results) { auto count = restorer_output.size(); - auto deleter = [&](mm_mat_t* p) { + auto deleter = [&](mmdeploy_mat_t* p) { mmdeploy_restorer_release_result(p, static_cast(count)); }; - std::unique_ptr _results(new mm_mat_t[count]{}, deleter); + std::unique_ptr _results(new mmdeploy_mat_t[count]{}, + deleter); for (int i = 0; i < count; ++i) { auto upscale = restorer_output[i]; auto& res = _results[i]; res.data = new uint8_t[upscale.byte_size()]; memcpy(res.data, upscale.data(), upscale.byte_size()); - res.format = (mm_pixel_format_t)upscale.pixel_format(); + res.format = (mmdeploy_pixel_format_t)upscale.pixel_format(); res.height = upscale.height(); res.width = upscale.width(); res.channel = upscale.channel(); - 
res.type = (mm_data_type_t)upscale.type(); + res.type = (mmdeploy_data_type_t)upscale.type(); } *results = _results.release(); - return MM_SUCCESS; + return MMDEPLOY_SUCCESS; } catch (const std::exception& e) { MMDEPLOY_ERROR("unhandled exception: {}", e.what()); } catch (...) { MMDEPLOY_ERROR("unknown exception caught"); } - return MM_E_FAIL; + return MMDEPLOY_E_FAIL; } diff --git a/csrc/mmdeploy/apis/c/restorer.h b/csrc/mmdeploy/apis/c/mmdeploy/restorer.h similarity index 59% rename from csrc/mmdeploy/apis/c/restorer.h rename to csrc/mmdeploy/apis/c/mmdeploy/restorer.h index e5db54d8e..28476d283 100644 --- a/csrc/mmdeploy/apis/c/restorer.h +++ b/csrc/mmdeploy/apis/c/mmdeploy/restorer.h @@ -10,77 +10,80 @@ #include "common.h" #include "executor.h" +#include "model.h" #ifdef __cplusplus extern "C" { #endif +typedef struct mmdeploy_restorer* mmdeploy_restorer_t; + /** * @brief Create a restorer instance * @param[in] model an instance of image restoration model created by * \ref mmdeploy_model_create_by_path or \ref mmdeploy_model_create in \ref model.h * @param[in] device_name name of device, such as "cpu", "cuda", etc. * @param[in] device_id id of device. - * @param[out] handle handle of the created restorer, which must be destroyed + * @param[out] restorer handle of the created restorer, which must be destroyed * by \ref mmdeploy_restorer_destroy * @return status code of the operation */ -MMDEPLOY_API int mmdeploy_restorer_create(mm_model_t model, const char* device_name, int device_id, - mm_handle_t* handle); +MMDEPLOY_API int mmdeploy_restorer_create(mmdeploy_model_t model, const char* device_name, + int device_id, mmdeploy_restorer_t* restorer); /** * @brief Create a restorer instance * @param[in] model_path path to image restoration model * @param[in] device_name name of device, such as "cpu", "cuda", etc. * @param[in] device_id id of device. 
- * @param[out] handle handle of the created restorer, which must be destroyed + * @param[out] restorer handle of the created restorer, which must be destroyed * by \ref mmdeploy_restorer_destroy * @return status code of the operation */ MMDEPLOY_API int mmdeploy_restorer_create_by_path(const char* model_path, const char* device_name, - int device_id, mm_handle_t* handle); + int device_id, mmdeploy_restorer_t* restorer); /** * @brief Apply restorer to a batch of images - * @param[in] handle restorer's handle created by \ref mmdeploy_restorer_create_by_path + * @param[in] restorer restorer's handle created by \ref mmdeploy_restorer_create_by_path * @param[in] images a batch of images * @param[in] count number of images in the batch * @param[out] results a linear buffer contains the restored images, must be release * by \ref mmdeploy_restorer_release_result * @return status code of the operation */ -MMDEPLOY_API int mmdeploy_restorer_apply(mm_handle_t handle, const mm_mat_t* images, int count, - mm_mat_t** results); +MMDEPLOY_API int mmdeploy_restorer_apply(mmdeploy_restorer_t restorer, const mmdeploy_mat_t* images, + int count, mmdeploy_mat_t** results); /** @brief Release result buffer returned by \ref mmdeploy_restorer_apply * @param[in] results result buffer by restorer * @param[in] count length of \p result */ -MMDEPLOY_API void mmdeploy_restorer_release_result(mm_mat_t* results, int count); +MMDEPLOY_API void mmdeploy_restorer_release_result(mmdeploy_mat_t* results, int count); /** * @brief destroy restorer - * @param[in] handle handle of restorer created by \ref mmdeploy_restorer_create_by_path + * @param[in] restorer handle of restorer created by \ref mmdeploy_restorer_create_by_path */ -MMDEPLOY_API void mmdeploy_restorer_destroy(mm_handle_t handle); +MMDEPLOY_API void mmdeploy_restorer_destroy(mmdeploy_restorer_t restorer); /****************************************************************************** * Experimental asynchronous APIs */ -MMDEPLOY_API int mmdeploy_restorer_create_v2(mm_model_t model, const char* device_name, +MMDEPLOY_API int mmdeploy_restorer_create_v2(mmdeploy_model_t model, const char* device_name, int device_id, mmdeploy_exec_info_t exec_info, - mm_handle_t* handle); + mmdeploy_restorer_t* restorer); -MMDEPLOY_API int mmdeploy_restorer_create_input(const mm_mat_t* mats, int mat_count, +MMDEPLOY_API int mmdeploy_restorer_create_input(const mmdeploy_mat_t* mats, int mat_count, mmdeploy_value_t* value); -MMDEPLOY_API int mmdeploy_restorer_apply_v2(mm_handle_t handle, mmdeploy_value_t input, +MMDEPLOY_API int mmdeploy_restorer_apply_v2(mmdeploy_restorer_t restorer, mmdeploy_value_t input, mmdeploy_value_t* output); -MMDEPLOY_API int mmdeploy_restorer_apply_async(mm_handle_t handle, mmdeploy_sender_t input, - mmdeploy_sender_t* output); +MMDEPLOY_API int mmdeploy_restorer_apply_async(mmdeploy_restorer_t restorer, + mmdeploy_sender_t input, mmdeploy_sender_t* output); -MMDEPLOY_API int mmdeploy_restorer_get_result(mmdeploy_value_t output, mm_mat_t** results); +MMDEPLOY_API int mmdeploy_restorer_get_result(mmdeploy_value_t output, mmdeploy_mat_t** results); #ifdef __cplusplus } diff --git a/csrc/mmdeploy/apis/c/rotated_detector.cpp b/csrc/mmdeploy/apis/c/mmdeploy/rotated_detector.cpp similarity index 51% rename from csrc/mmdeploy/apis/c/rotated_detector.cpp rename to csrc/mmdeploy/apis/c/mmdeploy/rotated_detector.cpp index fd5f4c9e4..a559a0e02 100644 --- a/csrc/mmdeploy/apis/c/rotated_detector.cpp +++ b/csrc/mmdeploy/apis/c/mmdeploy/rotated_detector.cpp @@ -4,13 +4,13 
@@ #include -#include "mmdeploy/apis/c/common_internal.h" -#include "mmdeploy/apis/c/handle.h" -#include "mmdeploy/apis/c/pipeline.h" +#include "common_internal.h" +#include "handle.h" #include "mmdeploy/codebase/mmrotate/mmrotate.h" #include "mmdeploy/core/graph.h" #include "mmdeploy/core/mat.h" #include "mmdeploy/core/utils/formatter.h" +#include "pipeline.h" using namespace std; using namespace mmdeploy; @@ -42,89 +42,88 @@ Value& config_template() { return v; } -template <typename ModelType> -int mmdeploy_rotated_detector_create_impl(ModelType&& m, const char* device_name, int device_id, - mm_handle_t* handle) { - try { - auto value = config_template(); - value["pipeline"]["tasks"][0]["params"]["model"] = std::forward<ModelType>(m); +int mmdeploy_rotated_detector_create_impl(mmdeploy_model_t model, const char* device_name, + int device_id, mmdeploy_exec_info_t exec_info, + mmdeploy_rotated_detector_t* detector) { + auto config = config_template(); + config["pipeline"]["tasks"][0]["params"]["model"] = *Cast(model); - auto pose_estimator = std::make_unique<AsyncHandle>(device_name, device_id, std::move(value)); - - *handle = pose_estimator.release(); - return MM_SUCCESS; - - } catch (const std::exception& e) { - MMDEPLOY_ERROR("exception caught: {}", e.what()); - } catch (...) { - MMDEPLOY_ERROR("unknown exception caught"); - } - return MM_E_FAIL; + return mmdeploy_pipeline_create(Cast(&config), device_name, device_id, exec_info, + (mmdeploy_pipeline_t*)detector); } } // namespace -int mmdeploy_rotated_detector_create(mm_model_t model, const char* device_name, int device_id, - mm_handle_t* handle) { - return mmdeploy_rotated_detector_create_impl(*static_cast<Model*>(model), device_name, device_id, - handle); +int mmdeploy_rotated_detector_create(mmdeploy_model_t model, const char* device_name, int device_id, + mmdeploy_rotated_detector_t* detector) { + return mmdeploy_rotated_detector_create_impl(model, device_name, device_id, nullptr, detector); } int mmdeploy_rotated_detector_create_by_path(const char* model_path, const char* device_name, - int device_id, mm_handle_t* handle) { - return mmdeploy_rotated_detector_create_impl(model_path, device_name, device_id, handle); + int device_id, mmdeploy_rotated_detector_t* detector) { + mmdeploy_model_t model{}; + + if (auto ec = mmdeploy_model_create_by_path(model_path, &model)) { + return ec; + } + auto ec = mmdeploy_rotated_detector_create_impl(model, device_name, device_id, nullptr, detector); + mmdeploy_model_destroy(model); + return ec; } -int mmdeploy_rotated_detector_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, - mm_rotated_detect_t** results, int** result_count) { +int mmdeploy_rotated_detector_apply(mmdeploy_rotated_detector_t detector, + const mmdeploy_mat_t* mats, int mat_count, + mmdeploy_rotated_detection_t** results, int** result_count) { wrapped<mmdeploy_value_t> input; if (auto ec = mmdeploy_rotated_detector_create_input(mats, mat_count, input.ptr())) { return ec; } wrapped<mmdeploy_value_t> output; - if (auto ec = mmdeploy_rotated_detector_apply_v2(handle, input, output.ptr())) { + if (auto ec = mmdeploy_rotated_detector_apply_v2(detector, input, output.ptr())) { return ec; } if (auto ec = mmdeploy_rotated_detector_get_result(output, results, result_count)) { return ec; } - return MM_SUCCESS; + return MMDEPLOY_SUCCESS; } -void mmdeploy_rotated_detector_release_result(mm_rotated_detect_t* results, +void mmdeploy_rotated_detector_release_result(mmdeploy_rotated_detection_t* results, const int* result_count) { delete[] results; delete[] result_count; } -void
mmdeploy_rotated_detector_destroy(mm_handle_t handle) { - delete static_cast(handle); +void mmdeploy_rotated_detector_destroy(mmdeploy_rotated_detector_t detector) { + mmdeploy_pipeline_destroy((mmdeploy_pipeline_t)detector); } -int mmdeploy_rotated_detector_create_v2(mm_model_t model, const char* device_name, int device_id, - mmdeploy_exec_info_t exec_info, mm_handle_t* handle) { - return 0; +int mmdeploy_rotated_detector_create_v2(mmdeploy_model_t model, const char* device_name, + int device_id, mmdeploy_exec_info_t exec_info, + mmdeploy_rotated_detector_t* detector) { + return mmdeploy_rotated_detector_create_impl(model, device_name, device_id, exec_info, detector); } -int mmdeploy_rotated_detector_create_input(const mm_mat_t* mats, int mat_count, +int mmdeploy_rotated_detector_create_input(const mmdeploy_mat_t* mats, int mat_count, mmdeploy_value_t* input) { return mmdeploy_common_create_input(mats, mat_count, input); } -int mmdeploy_rotated_detector_apply_v2(mm_handle_t handle, mmdeploy_value_t input, +int mmdeploy_rotated_detector_apply_v2(mmdeploy_rotated_detector_t detector, mmdeploy_value_t input, mmdeploy_value_t* output) { - return mmdeploy_pipeline_apply(handle, input, output); + return mmdeploy_pipeline_apply((mmdeploy_pipeline_t)detector, input, output); } -int mmdeploy_rotated_detector_apply_async(mm_handle_t handle, mmdeploy_sender_t input, - mmdeploy_sender_t* output) { - return mmdeploy_pipeline_apply_async(handle, input, output); +int mmdeploy_rotated_detector_apply_async(mmdeploy_rotated_detector_t detector, + mmdeploy_sender_t input, mmdeploy_sender_t* output) { + return mmdeploy_pipeline_apply_async((mmdeploy_pipeline_t)detector, input, output); } -int mmdeploy_rotated_detector_get_result(mmdeploy_value_t output, mm_rotated_detect_t** results, +int mmdeploy_rotated_detector_get_result(mmdeploy_value_t output, + mmdeploy_rotated_detection_t** results, int** result_count) { if (!output || !results || !result_count) { - return MM_E_INVALID_ARG; + return MMDEPLOY_E_INVALID_ARG; } try { @@ -142,7 +141,8 @@ int mmdeploy_rotated_detector_get_result(mmdeploy_value_t output, mm_rotated_det std::unique_ptr result_count_data(new int[_result_count.size()]{}); std::copy(_result_count.begin(), _result_count.end(), result_count_data.get()); - std::unique_ptr result_data(new mm_rotated_detect_t[total]{}); + std::unique_ptr result_data( + new mmdeploy_rotated_detection_t[total]{}); auto result_ptr = result_data.get(); for (const auto& det_output : detector_outputs) { @@ -160,12 +160,12 @@ int mmdeploy_rotated_detector_get_result(mmdeploy_value_t output, mm_rotated_det *result_count = result_count_data.release(); *results = result_data.release(); - return MM_SUCCESS; + return MMDEPLOY_SUCCESS; } catch (const std::exception& e) { MMDEPLOY_ERROR("unhandled exception: {}", e.what()); } catch (...) 
{ MMDEPLOY_ERROR("unknown exception caught"); } - return MM_E_FAIL; + return MMDEPLOY_E_FAIL; } diff --git a/csrc/mmdeploy/apis/c/rotated_detector.h b/csrc/mmdeploy/apis/c/mmdeploy/rotated_detector.h similarity index 67% rename from csrc/mmdeploy/apis/c/rotated_detector.h rename to csrc/mmdeploy/apis/c/mmdeploy/rotated_detector.h index 12e6de4a0..b7424e95f 100644 --- a/csrc/mmdeploy/apis/c/rotated_detector.h +++ b/csrc/mmdeploy/apis/c/mmdeploy/rotated_detector.h @@ -10,16 +10,19 @@ #include "common.h" #include "executor.h" +#include "model.h" #ifdef __cplusplus extern "C" { #endif -typedef struct mm_rotated_detect_t { +typedef struct mmdeploy_rotated_detection_t { int label_id; float score; float rbbox[5]; // cx, cy, w, h, angle -} mm_rotated_detect_t; +} mmdeploy_rotated_detection_t; + +typedef struct mmdeploy_rotated_detector* mmdeploy_rotated_detector_t; /** * @brief Create rotated detector's handle @@ -27,27 +30,28 @@ typedef struct mm_rotated_detect_t { * \ref mmdeploy_model_create_by_path or \ref mmdeploy_model_create in \ref model.h * @param[in] device_name name of device, such as "cpu", "cuda", etc. * @param[in] device_id id of device. - * @param[out] handle instance of a rotated detector + * @param[out] detector instance of a rotated detector * @return status of creating rotated detector's handle */ -MMDEPLOY_API int mmdeploy_rotated_detector_create(mm_model_t model, const char* device_name, - int device_id, mm_handle_t* handle); +MMDEPLOY_API int mmdeploy_rotated_detector_create(mmdeploy_model_t model, const char* device_name, + int device_id, + mmdeploy_rotated_detector_t* detector); /** * @brief Create rotated detector's handle * @param[in] model_path path of mmrotate sdk model exported by mmdeploy model converter * @param[in] device_name name of device, such as "cpu", "cuda", etc. * @param[in] device_id id of device. 
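
As a concrete reference for the rename from `mm_*`/`mm_handle_t` to the typed `mmdeploy_rotated_detector_t` API above, here is a minimal usage sketch of the new entry points. The model directory, device string, demo image, and OpenCV-based loading are illustrative assumptions, not part of the patch; the `mmdeploy_mat_t` field order follows the `Mat` wrapper introduced later in this diff.

```cpp
#include <cstdio>
#include <opencv2/imgcodecs.hpp>
#include "mmdeploy/rotated_detector.h"  // installed header path assumed

int main() {
  mmdeploy_rotated_detector_t detector{};
  if (mmdeploy_rotated_detector_create_by_path("../rotated_model", "cuda", 0, &detector)) {
    return 1;  // the API reports failure through nonzero status codes
  }
  cv::Mat img = cv::imread("demo.jpg");
  mmdeploy_mat_t mat{img.data, img.rows, img.cols, 3,
                     MMDEPLOY_PIXEL_FORMAT_BGR, MMDEPLOY_DATA_TYPE_UINT8};
  mmdeploy_rotated_detection_t* results{};
  int* result_count{};
  if (mmdeploy_rotated_detector_apply(detector, &mat, 1, &results, &result_count)) {
    mmdeploy_rotated_detector_destroy(detector);
    return 1;
  }
  for (int i = 0; i < result_count[0]; ++i) {
    // rbbox is (cx, cy, w, h, angle), as documented in rotated_detector.h
    std::printf("label=%d score=%.2f cx=%.1f cy=%.1f\n", results[i].label_id,
                results[i].score, results[i].rbbox[0], results[i].rbbox[1]);
  }
  mmdeploy_rotated_detector_release_result(results, result_count);
  mmdeploy_rotated_detector_destroy(detector);
  return 0;
}
```
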
- * @param[out] handle instance of a rotated detector + * @param[out] detector instance of a rotated detector * @return status of creating rotated detector's handle */ MMDEPLOY_API int mmdeploy_rotated_detector_create_by_path(const char* model_path, const char* device_name, int device_id, - mm_handle_t* handle); + mmdeploy_rotated_detector_t* detector); /** * @brief Apply rotated detector to batch images and get their inference results - * @param[in] handle rotated detector's handle created by \ref + * @param[in] detector rotated detector's handle created by \ref * mmdeploy_rotated_detector_create_by_path * @param[in] mats a batch of images * @param[in] mat_count number of images in the batch @@ -58,23 +62,24 @@ MMDEPLOY_API int mmdeploy_rotated_detector_create_by_path(const char* model_path * mmdeploy_rotated_detector_release_result * @return status of inference */ -MMDEPLOY_API int mmdeploy_rotated_detector_apply(mm_handle_t handle, const mm_mat_t* mats, - int mat_count, mm_rotated_detect_t** results, +MMDEPLOY_API int mmdeploy_rotated_detector_apply(mmdeploy_rotated_detector_t detector, + const mmdeploy_mat_t* mats, int mat_count, + mmdeploy_rotated_detection_t** results, int** result_count); /** @brief Release the inference result buffer created by \ref mmdeploy_rotated_detector_apply * @param[in] results rotated detection results buffer * @param[in] result_count \p results size buffer */ -MMDEPLOY_API void mmdeploy_rotated_detector_release_result(mm_rotated_detect_t* results, +MMDEPLOY_API void mmdeploy_rotated_detector_release_result(mmdeploy_rotated_detection_t* results, const int* result_count); /** * @brief Destroy rotated detector's handle - * @param[in] handle rotated detector's handle created by \ref + * @param[in] detector rotated detector's handle created by \ref * mmdeploy_rotated_detector_create_by_path or by \ref mmdeploy_rotated_detector_create */ -MMDEPLOY_API void mmdeploy_rotated_detector_destroy(mm_handle_t handle); +MMDEPLOY_API void mmdeploy_rotated_detector_destroy(mmdeploy_rotated_detector_t detector); /****************************************************************************** * Experimental asynchronous APIs */ @@ -83,9 +88,10 @@ MMDEPLOY_API void mmdeploy_rotated_detector_destroy(mm_handle_t handle); * @brief Same as \ref mmdeploy_detector_create, but allows to control execution context of tasks * via exec_info */ -MMDEPLOY_API int mmdeploy_rotated_detector_create_v2(mm_model_t model, const char* device_name, - int device_id, mmdeploy_exec_info_t exec_info, - mm_handle_t* handle); +MMDEPLOY_API int mmdeploy_rotated_detector_create_v2(mmdeploy_model_t model, + const char* device_name, int device_id, + mmdeploy_exec_info_t exec_info, + mmdeploy_rotated_detector_t* detector); /** * @brief Pack rotated detector inputs into mmdeploy_value_t @@ -93,23 +99,25 @@ MMDEPLOY_API int mmdeploy_rotated_detector_create_v2(mm_model_t model, const cha * @param[in] mat_count number of images in the batch * @return the created value */ -MMDEPLOY_API int mmdeploy_rotated_detector_create_input(const mm_mat_t* mats, int mat_count, +MMDEPLOY_API int mmdeploy_rotated_detector_create_input(const mmdeploy_mat_t* mats, int mat_count, mmdeploy_value_t* input); /** * @brief Same as \ref mmdeploy_rotated_detector_apply, but input and output are packed in \ref * mmdeploy_value_t. 
*/ -MMDEPLOY_API int mmdeploy_rotated_detector_apply_v2(mm_handle_t handle, mmdeploy_value_t input, +MMDEPLOY_API int mmdeploy_rotated_detector_apply_v2(mmdeploy_rotated_detector_t detector, + mmdeploy_value_t input, mmdeploy_value_t* output); /** * @brief Apply rotated detector asynchronously - * @param[in] handle handle to the detector + * @param[in] detector handle to the detector * @param[in] input input sender * @return output sender */ -MMDEPLOY_API int mmdeploy_rotated_detector_apply_async(mm_handle_t handle, mmdeploy_sender_t input, +MMDEPLOY_API int mmdeploy_rotated_detector_apply_async(mmdeploy_rotated_detector_t detector, + mmdeploy_sender_t input, mmdeploy_sender_t* output); /** @@ -123,7 +131,7 @@ MMDEPLOY_API int mmdeploy_rotated_detector_apply_async(mm_handle_t handle, mmdep * @return status of the operation */ MMDEPLOY_API int mmdeploy_rotated_detector_get_result(mmdeploy_value_t output, - mm_rotated_detect_t** results, + mmdeploy_rotated_detection_t** results, int** result_count); #ifdef __cplusplus diff --git a/csrc/mmdeploy/apis/c/segmentor.cpp b/csrc/mmdeploy/apis/c/mmdeploy/segmentor.cpp similarity index 58% rename from csrc/mmdeploy/apis/c/segmentor.cpp rename to csrc/mmdeploy/apis/c/mmdeploy/segmentor.cpp index 9ee4cdc9a..47536e32a 100644 --- a/csrc/mmdeploy/apis/c/segmentor.cpp +++ b/csrc/mmdeploy/apis/c/mmdeploy/segmentor.cpp @@ -1,16 +1,16 @@ // Copyright (c) OpenMMLab. All rights reserved. -#include "mmdeploy/apis/c/segmentor.h" +#include "segmentor.h" -#include "mmdeploy/apis/c/common_internal.h" -#include "mmdeploy/apis/c/handle.h" -#include "mmdeploy/apis/c/pipeline.h" +#include "common_internal.h" +#include "handle.h" #include "mmdeploy/codebase/mmseg/mmseg.h" #include "mmdeploy/core/device.h" #include "mmdeploy/core/graph.h" #include "mmdeploy/core/mat.h" #include "mmdeploy/core/tensor.h" #include "mmdeploy/core/utils/formatter.h" +#include "pipeline.h" using namespace std; using namespace mmdeploy; @@ -42,49 +42,51 @@ Value& config_template() { return v; } -int mmdeploy_segmentor_create_impl(mm_model_t model, const char* device_name, int device_id, - mmdeploy_exec_info_t exec_info, mm_handle_t* handle) { +int mmdeploy_segmentor_create_impl(mmdeploy_model_t model, const char* device_name, int device_id, + mmdeploy_exec_info_t exec_info, + mmdeploy_segmentor_t* segmentor) { auto config = config_template(); - config["pipeline"]["tasks"][0]["params"]["model"] = *static_cast(model); + config["pipeline"]["tasks"][0]["params"]["model"] = *Cast(model); - return mmdeploy_pipeline_create(Cast(&config), device_name, device_id, exec_info, handle); + return mmdeploy_pipeline_create(Cast(&config), device_name, device_id, exec_info, + (mmdeploy_pipeline_t*)segmentor); } } // namespace -int mmdeploy_segmentor_create(mm_model_t model, const char* device_name, int device_id, - mm_handle_t* handle) { - return mmdeploy_segmentor_create_impl(model, device_name, device_id, nullptr, handle); +int mmdeploy_segmentor_create(mmdeploy_model_t model, const char* device_name, int device_id, + mmdeploy_segmentor_t* segmentor) { + return mmdeploy_segmentor_create_impl(model, device_name, device_id, nullptr, segmentor); } int mmdeploy_segmentor_create_by_path(const char* model_path, const char* device_name, - int device_id, mm_handle_t* handle) { - mm_model_t model{}; + int device_id, mmdeploy_segmentor_t* segmentor) { + mmdeploy_model_t model{}; if (auto ec = mmdeploy_model_create_by_path(model_path, &model)) { return ec; } - auto ec = mmdeploy_segmentor_create_impl(model, device_name, 
device_id, nullptr, handle); + auto ec = mmdeploy_segmentor_create_impl(model, device_name, device_id, nullptr, segmentor); mmdeploy_model_destroy(model); return ec; } -int mmdeploy_segmentor_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, - mm_segment_t** results) { +int mmdeploy_segmentor_apply(mmdeploy_segmentor_t segmentor, const mmdeploy_mat_t* mats, + int mat_count, mmdeploy_segmentation_t** results) { wrapped input; if (auto ec = mmdeploy_segmentor_create_input(mats, mat_count, input.ptr())) { return ec; } wrapped output; - if (auto ec = mmdeploy_segmentor_apply_v2(handle, input, output.ptr())) { + if (auto ec = mmdeploy_segmentor_apply_v2(segmentor, input, output.ptr())) { return ec; } if (auto ec = mmdeploy_segmentor_get_result(output, results)) { return ec; } - return MM_SUCCESS; + return MMDEPLOY_SUCCESS; } -void mmdeploy_segmentor_release_result(mm_segment_t* results, int count) { +void mmdeploy_segmentor_release_result(mmdeploy_segmentation_t* results, int count) { if (results == nullptr) { return; } @@ -95,43 +97,41 @@ void mmdeploy_segmentor_release_result(mm_segment_t* results, int count) { delete[] results; } -void mmdeploy_segmentor_destroy(mm_handle_t handle) { - if (handle != nullptr) { - auto segmentor = static_cast(handle); - delete segmentor; - } +void mmdeploy_segmentor_destroy(mmdeploy_segmentor_t segmentor) { + mmdeploy_pipeline_destroy((mmdeploy_pipeline_t)segmentor); } -int mmdeploy_segmentor_create_v2(mm_model_t model, const char* device_name, int device_id, - mmdeploy_exec_info_t exec_info, mm_handle_t* handle) { - return mmdeploy_segmentor_create_impl(model, device_name, device_id, exec_info, handle); +int mmdeploy_segmentor_create_v2(mmdeploy_model_t model, const char* device_name, int device_id, + mmdeploy_exec_info_t exec_info, mmdeploy_segmentor_t* segmentor) { + return mmdeploy_segmentor_create_impl(model, device_name, device_id, exec_info, segmentor); } -int mmdeploy_segmentor_create_input(const mm_mat_t* mats, int mat_count, mmdeploy_value_t* value) { +int mmdeploy_segmentor_create_input(const mmdeploy_mat_t* mats, int mat_count, + mmdeploy_value_t* value) { return mmdeploy_common_create_input(mats, mat_count, value); } -int mmdeploy_segmentor_apply_v2(mm_handle_t handle, mmdeploy_value_t input, +int mmdeploy_segmentor_apply_v2(mmdeploy_segmentor_t segmentor, mmdeploy_value_t input, mmdeploy_value_t* output) { - return mmdeploy_pipeline_apply(handle, input, output); + return mmdeploy_pipeline_apply((mmdeploy_pipeline_t)segmentor, input, output); } -int mmdeploy_segmentor_apply_async(mm_handle_t handle, mmdeploy_sender_t input, +int mmdeploy_segmentor_apply_async(mmdeploy_segmentor_t segmentor, mmdeploy_sender_t input, mmdeploy_sender_t* output) { - return mmdeploy_pipeline_apply_async(handle, input, output); + return mmdeploy_pipeline_apply_async((mmdeploy_pipeline_t)segmentor, input, output); } -int mmdeploy_segmentor_get_result(mmdeploy_value_t output, mm_segment_t** results) { +int mmdeploy_segmentor_get_result(mmdeploy_value_t output, mmdeploy_segmentation_t** results) { try { const auto& value = Cast(output)->front(); size_t image_count = value.size(); - auto deleter = [&](mm_segment_t* p) { + auto deleter = [&](mmdeploy_segmentation_t* p) { mmdeploy_segmentor_release_result(p, static_cast(image_count)); }; - unique_ptr _results(new mm_segment_t[image_count]{}, - deleter); + unique_ptr _results( + new mmdeploy_segmentation_t[image_count]{}, deleter); auto results_ptr = _results.get(); for (auto i = 0; i < image_count; ++i, 
++results_ptr) { auto& output_item = value[i]; @@ -146,12 +146,12 @@ int mmdeploy_segmentor_get_result(mmdeploy_value_t output, mm_segment_t** result std::copy_n(mask.data(), mask_size, results_ptr->mask); } *results = _results.release(); - return MM_SUCCESS; + return MMDEPLOY_SUCCESS; } catch (const std::exception& e) { MMDEPLOY_ERROR("exception caught: {}", e.what()); } catch (...) { MMDEPLOY_ERROR("unknown exception caught"); } - return MM_E_FAIL; + return MMDEPLOY_E_FAIL; } diff --git a/csrc/mmdeploy/apis/c/segmentor.h b/csrc/mmdeploy/apis/c/mmdeploy/segmentor.h similarity index 61% rename from csrc/mmdeploy/apis/c/segmentor.h rename to csrc/mmdeploy/apis/c/mmdeploy/segmentor.h index 21580c86d..79e236e66 100644 --- a/csrc/mmdeploy/apis/c/segmentor.h +++ b/csrc/mmdeploy/apis/c/mmdeploy/segmentor.h @@ -10,18 +10,21 @@ #include "common.h" #include "executor.h" +#include "model.h" #ifdef __cplusplus extern "C" { #endif -typedef struct mm_segment_t { +typedef struct mmdeploy_segmentation_t { int height; ///< height of \p mask that equals to the input image's height int width; ///< width of \p mask that equals to the input image's width int classes; ///< the number of labels in \p mask int* mask; ///< segmentation mask of the input image, in which mask[i * width + j] indicates ///< the label id of pixel at (i, j) -} mm_segment_t; +} mmdeploy_segmentation_t; + +typedef struct mmdeploy_segmentor* mmdeploy_segmentor_t; /** * @brief Create segmentor's handle @@ -29,28 +32,28 @@ typedef struct mm_segment_t { * \ref mmdeploy_model_create_by_path or \ref mmdeploy_model_create in \ref model.h * @param[in] device_name name of device, such as "cpu", "cuda", etc. * @param[in] device_id id of device. - * @param[out] handle instance of a segmentor, which must be destroyed + * @param[out] segmentor instance of a segmentor, which must be destroyed * by \ref mmdeploy_segmentor_destroy * @return status of creating segmentor's handle */ -MMDEPLOY_API int mmdeploy_segmentor_create(mm_model_t model, const char* device_name, int device_id, - mm_handle_t* handle); +MMDEPLOY_API int mmdeploy_segmentor_create(mmdeploy_model_t model, const char* device_name, + int device_id, mmdeploy_segmentor_t* segmentor); /** * @brief Create segmentor's handle * @param[in] model_path path of mmsegmentation sdk model exported by mmdeploy model converter * @param[in] device_name name of device, such as "cpu", "cuda", etc. * @param[in] device_id id of device. - * @param[out] handle instance of a segmentor, which must be destroyed + * @param[out] segmentor instance of a segmentor, which must be destroyed * by \ref mmdeploy_segmentor_destroy * @return status of creating segmentor's handle */ MMDEPLOY_API int mmdeploy_segmentor_create_by_path(const char* model_path, const char* device_name, - int device_id, mm_handle_t* handle); + int device_id, mmdeploy_segmentor_t* segmentor); /** * @brief Apply segmentor to batch images and get their inference results - * @param[in] handle segmentor's handle created by \ref mmdeploy_segmentor_create_by_path or \ref + * @param[in] segmentor segmentor's handle created by \ref mmdeploy_segmentor_create_by_path or \ref * mmdeploy_segmentor_create * @param[in] mats a batch of images * @param[in] mat_count number of images in the batch @@ -58,39 +61,41 @@ MMDEPLOY_API int mmdeploy_segmentor_create_by_path(const char* model_path, const * image. 
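
To make the renamed segmentor types concrete, a short sketch follows. The model path is a placeholder, and the `mmdeploy_mat_t` descriptor is assumed to be prepared the same way as in the rotated-detector sketch above; error handling mirrors the `if (auto ec = ...)` idiom used throughout this patch.

```cpp
#include "mmdeploy/segmentor.h"  // installed header path assumed

int run_segmentor(const mmdeploy_mat_t* mat) {
  mmdeploy_segmentor_t segmentor{};
  if (auto ec = mmdeploy_segmentor_create_by_path("../seg_model", "cpu", 0, &segmentor)) {
    return ec;
  }
  mmdeploy_segmentation_t* results{};
  if (auto ec = mmdeploy_segmentor_apply(segmentor, mat, 1, &results)) {
    mmdeploy_segmentor_destroy(segmentor);
    return ec;
  }
  // mask[i * width + j] holds the label id of pixel (i, j), per the struct docs above
  int label_at_origin = results[0].mask[0];
  (void)label_at_origin;
  mmdeploy_segmentor_release_result(results, /* count = number of input images */ 1);
  mmdeploy_segmentor_destroy(segmentor);
  return MMDEPLOY_SUCCESS;
}
```
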
It must be released by \ref mmdeploy_segmentor_release_result * @return status of inference */ -MMDEPLOY_API int mmdeploy_segmentor_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, - mm_segment_t** results); +MMDEPLOY_API int mmdeploy_segmentor_apply(mmdeploy_segmentor_t segmentor, + const mmdeploy_mat_t* mats, int mat_count, + mmdeploy_segmentation_t** results); /** * @brief Release result buffer returned by \ref mmdeploy_segmentor_apply * @param[in] results result buffer * @param[in] count length of \p results */ -MMDEPLOY_API void mmdeploy_segmentor_release_result(mm_segment_t* results, int count); +MMDEPLOY_API void mmdeploy_segmentor_release_result(mmdeploy_segmentation_t* results, int count); /** * @brief Destroy segmentor's handle - * @param[in] handle segmentor's handle created by \ref mmdeploy_segmentor_create_by_path + * @param[in] segmentor segmentor's handle created by \ref mmdeploy_segmentor_create_by_path */ -MMDEPLOY_API void mmdeploy_segmentor_destroy(mm_handle_t handle); +MMDEPLOY_API void mmdeploy_segmentor_destroy(mmdeploy_segmentor_t segmentor); /****************************************************************************** * Experimental asynchronous APIs */ -MMDEPLOY_API int mmdeploy_segmentor_create_v2(mm_model_t model, const char* device_name, +MMDEPLOY_API int mmdeploy_segmentor_create_v2(mmdeploy_model_t model, const char* device_name, int device_id, mmdeploy_exec_info_t exec_info, - mm_handle_t* handle); + mmdeploy_segmentor_t* segmentor); -MMDEPLOY_API int mmdeploy_segmentor_create_input(const mm_mat_t* mats, int mat_count, +MMDEPLOY_API int mmdeploy_segmentor_create_input(const mmdeploy_mat_t* mats, int mat_count, mmdeploy_value_t* value); -MMDEPLOY_API int mmdeploy_segmentor_apply_v2(mm_handle_t handle, mmdeploy_value_t input, +MMDEPLOY_API int mmdeploy_segmentor_apply_v2(mmdeploy_segmentor_t segmentor, mmdeploy_value_t input, mmdeploy_value_t* output); -MMDEPLOY_API int mmdeploy_segmentor_apply_async(mm_handle_t handle, mmdeploy_sender_t input, - mmdeploy_sender_t* output); +MMDEPLOY_API int mmdeploy_segmentor_apply_async(mmdeploy_segmentor_t segmentor, + mmdeploy_sender_t input, mmdeploy_sender_t* output); -MMDEPLOY_API int mmdeploy_segmentor_get_result(mmdeploy_value_t output, mm_segment_t** results); +MMDEPLOY_API int mmdeploy_segmentor_get_result(mmdeploy_value_t output, + mmdeploy_segmentation_t** results); #ifdef __cplusplus } diff --git a/csrc/mmdeploy/apis/c/text_detector.cpp b/csrc/mmdeploy/apis/c/mmdeploy/text_detector.cpp similarity index 62% rename from csrc/mmdeploy/apis/c/text_detector.cpp rename to csrc/mmdeploy/apis/c/mmdeploy/text_detector.cpp index cc567b918..eb10bbab7 100644 --- a/csrc/mmdeploy/apis/c/text_detector.cpp +++ b/csrc/mmdeploy/apis/c/mmdeploy/text_detector.cpp @@ -4,14 +4,14 @@ #include -#include "mmdeploy/apis/c/common_internal.h" -#include "mmdeploy/apis/c/executor_internal.h" -#include "mmdeploy/apis/c/model.h" -#include "mmdeploy/apis/c/pipeline.h" +#include "common_internal.h" +#include "executor_internal.h" #include "mmdeploy/codebase/mmocr/mmocr.h" #include "mmdeploy/core/model.h" #include "mmdeploy/core/status_code.h" #include "mmdeploy/core/utils/formatter.h" +#include "model.h" +#include "pipeline.h" using namespace std; using namespace mmdeploy; @@ -43,72 +43,76 @@ const Value& config_template() { // clang-format on } -int mmdeploy_text_detector_create_impl(mm_model_t model, const char* device_name, int device_id, - mmdeploy_exec_info_t exec_info, mm_handle_t* handle) { +int 
mmdeploy_text_detector_create_impl(mmdeploy_model_t model, const char* device_name, + int device_id, mmdeploy_exec_info_t exec_info, + mmdeploy_text_detector_t* detector) { auto config = config_template(); - config["pipeline"]["tasks"][0]["params"]["model"] = *static_cast(model); + config["pipeline"]["tasks"][0]["params"]["model"] = *Cast(model); - return mmdeploy_pipeline_create(Cast(&config), device_name, device_id, exec_info, handle); + return mmdeploy_pipeline_create(Cast(&config), device_name, device_id, exec_info, + (mmdeploy_pipeline_t*)detector); } } // namespace -int mmdeploy_text_detector_create(mm_model_t model, const char* device_name, int device_id, - mm_handle_t* handle) { - return mmdeploy_text_detector_create_impl(model, device_name, device_id, nullptr, handle); +int mmdeploy_text_detector_create(mmdeploy_model_t model, const char* device_name, int device_id, + mmdeploy_text_detector_t* detector) { + return mmdeploy_text_detector_create_impl(model, device_name, device_id, nullptr, detector); } -int mmdeploy_text_detector_create_v2(mm_model_t model, const char* device_name, int device_id, - mmdeploy_exec_info_t exec_info, mm_handle_t* handle) { - return mmdeploy_text_detector_create_impl(model, device_name, device_id, exec_info, handle); +int mmdeploy_text_detector_create_v2(mmdeploy_model_t model, const char* device_name, int device_id, + mmdeploy_exec_info_t exec_info, + mmdeploy_text_detector_t* detector) { + return mmdeploy_text_detector_create_impl(model, device_name, device_id, exec_info, detector); } int mmdeploy_text_detector_create_by_path(const char* model_path, const char* device_name, - int device_id, mm_handle_t* handle) { - mm_model_t model{}; + int device_id, mmdeploy_text_detector_t* detector) { + mmdeploy_model_t model{}; if (auto ec = mmdeploy_model_create_by_path(model_path, &model)) { return ec; } - auto ec = mmdeploy_text_detector_create_impl(model, device_name, device_id, nullptr, handle); + auto ec = mmdeploy_text_detector_create_impl(model, device_name, device_id, nullptr, detector); mmdeploy_model_destroy(model); return ec; } -int mmdeploy_text_detector_create_input(const mm_mat_t* mats, int mat_count, +int mmdeploy_text_detector_create_input(const mmdeploy_mat_t* mats, int mat_count, mmdeploy_value_t* input) { return mmdeploy_common_create_input(mats, mat_count, input); } -int mmdeploy_text_detector_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, - mm_text_detect_t** results, int** result_count) { +int mmdeploy_text_detector_apply(mmdeploy_text_detector_t detector, const mmdeploy_mat_t* mats, + int mat_count, mmdeploy_text_detection_t** results, + int** result_count) { wrapped input; if (auto ec = mmdeploy_text_detector_create_input(mats, mat_count, input.ptr())) { return ec; } wrapped output; - if (auto ec = mmdeploy_text_detector_apply_v2(handle, input, output.ptr())) { + if (auto ec = mmdeploy_text_detector_apply_v2(detector, input, output.ptr())) { return ec; } if (auto ec = mmdeploy_text_detector_get_result(output, results, result_count)) { return ec; } - return MM_SUCCESS; + return MMDEPLOY_SUCCESS; } -int mmdeploy_text_detector_apply_v2(mm_handle_t handle, mmdeploy_value_t input, +int mmdeploy_text_detector_apply_v2(mmdeploy_text_detector_t detector, mmdeploy_value_t input, mmdeploy_value_t* output) { - return mmdeploy_pipeline_apply(handle, input, output); + return mmdeploy_pipeline_apply((mmdeploy_pipeline_t)detector, input, output); } -int mmdeploy_text_detector_apply_async(mm_handle_t handle, mmdeploy_sender_t input, +int 
mmdeploy_text_detector_apply_async(mmdeploy_text_detector_t detector, mmdeploy_sender_t input, mmdeploy_sender_t* output) { - return mmdeploy_pipeline_apply_async(handle, input, output); + return mmdeploy_pipeline_apply_async((mmdeploy_pipeline_t)detector, input, output); } -int mmdeploy_text_detector_get_result(mmdeploy_value_t output, mm_text_detect_t** results, +int mmdeploy_text_detector_get_result(mmdeploy_value_t output, mmdeploy_text_detection_t** results, int** result_count) { if (!output || !results || !result_count) { - return MM_E_INVALID_ARG; + return MMDEPLOY_E_INVALID_ARG; } try { Value& value = reinterpret_cast(output)->front(); @@ -125,7 +129,8 @@ int mmdeploy_text_detector_get_result(mmdeploy_value_t output, mm_text_detect_t* std::unique_ptr result_count_data(new int[_result_count.size()]{}); std::copy(_result_count.begin(), _result_count.end(), result_count_data.get()); - std::unique_ptr result_data(new mm_text_detect_t[total]{}); + std::unique_ptr result_data( + new mmdeploy_text_detection_t[total]{}); auto result_ptr = result_data.get(); for (const auto& det_output : detector_outputs) { @@ -142,7 +147,7 @@ int mmdeploy_text_detector_get_result(mmdeploy_value_t output, mm_text_detect_t* *result_count = result_count_data.release(); *results = result_data.release(); - return MM_SUCCESS; + return MMDEPLOY_SUCCESS; } catch (const std::exception& e) { MMDEPLOY_ERROR("unhandled exception: {}", e.what()); @@ -152,38 +157,42 @@ int mmdeploy_text_detector_get_result(mmdeploy_value_t output, mm_text_detect_t* return 0; } -void mmdeploy_text_detector_release_result(mm_text_detect_t* results, const int* result_count, - int count) { +void mmdeploy_text_detector_release_result(mmdeploy_text_detection_t* results, + const int* result_count, int count) { delete[] results; delete[] result_count; } -void mmdeploy_text_detector_destroy(mm_handle_t handle) { mmdeploy_pipeline_destroy(handle); } +void mmdeploy_text_detector_destroy(mmdeploy_text_detector_t detector) { + mmdeploy_pipeline_destroy((mmdeploy_pipeline_t)detector); +} -int mmdeploy_text_detector_apply_async_v2(mm_handle_t handle, const mm_mat_t* imgs, int img_count, +int mmdeploy_text_detector_apply_async_v2(mmdeploy_text_detector_t detector, + const mmdeploy_mat_t* imgs, int img_count, mmdeploy_text_detector_continue_t cont, void* context, mmdeploy_sender_t* output) { mmdeploy_sender_t result_sender{}; - if (auto ec = mmdeploy_text_detector_apply_async_v3(handle, imgs, img_count, &result_sender)) { + if (auto ec = mmdeploy_text_detector_apply_async_v3(detector, imgs, img_count, &result_sender)) { return ec; } if (auto ec = mmdeploy_text_detector_continue_async(result_sender, cont, context, output)) { return ec; } - return MM_SUCCESS; + return MMDEPLOY_SUCCESS; } -int mmdeploy_text_detector_apply_async_v3(mm_handle_t handle, const mm_mat_t* imgs, int img_count, +int mmdeploy_text_detector_apply_async_v3(mmdeploy_text_detector_t detector, + const mmdeploy_mat_t* imgs, int img_count, mmdeploy_sender_t* output) { wrapped input_val; if (auto ec = mmdeploy_text_detector_create_input(imgs, img_count, input_val.ptr())) { return ec; } mmdeploy_sender_t input_sndr = mmdeploy_executor_just(input_val); - if (auto ec = mmdeploy_text_detector_apply_async(handle, input_sndr, output)) { + if (auto ec = mmdeploy_text_detector_apply_async(detector, input_sndr, output)) { return ec; } - return MM_SUCCESS; + return MMDEPLOY_SUCCESS; } int mmdeploy_text_detector_continue_async(mmdeploy_sender_t input, @@ -192,7 +201,7 @@ int 
mmdeploy_text_detector_continue_async(mmdeploy_sender_t input, auto sender = Guard([&] { return Take( LetValue(Take(input), [fn = cont, context](Value& value) -> TypeErasedSender { - mm_text_detect_t* results{}; + mmdeploy_text_detection_t* results{}; int* result_count{}; if (auto ec = mmdeploy_text_detector_get_result(Cast(&value), &results, &result_count)) { return Just(Value()); @@ -207,7 +216,7 @@ int mmdeploy_text_detector_continue_async(mmdeploy_sender_t input, }); if (sender) { *output = sender; - return MM_SUCCESS; + return MMDEPLOY_SUCCESS; } - return MM_E_FAIL; + return MMDEPLOY_E_FAIL; } diff --git a/csrc/mmdeploy/apis/c/text_detector.h b/csrc/mmdeploy/apis/c/mmdeploy/text_detector.h similarity index 65% rename from csrc/mmdeploy/apis/c/text_detector.h rename to csrc/mmdeploy/apis/c/mmdeploy/text_detector.h index 489146852..bd17d819f 100644 --- a/csrc/mmdeploy/apis/c/text_detector.h +++ b/csrc/mmdeploy/apis/c/mmdeploy/text_detector.h @@ -10,15 +10,18 @@ #include "common.h" #include "executor.h" +#include "model.h" #ifdef __cplusplus extern "C" { #endif -typedef struct mm_text_detect_t { - mm_pointf_t bbox[4]; ///< a text bounding box of which the vertex are in clock-wise +typedef struct mmdeploy_text_detection_t { + mmdeploy_point_t bbox[4]; ///< a text bounding box of which the vertex are in clock-wise float score; -} mm_text_detect_t; +} mmdeploy_text_detection_t; + +typedef struct mmdeploy_text_detector* mmdeploy_text_detector_t; /** * @brief Create text-detector's handle @@ -26,29 +29,29 @@ typedef struct mm_text_detect_t { * \ref mmdeploy_model_create_by_path or \ref mmdeploy_model_create in \ref model.h * @param[in] device_name name of device, such as "cpu", "cuda", etc. * @param[in] device_id id of device. - * @param[out] handle instance of a text-detector, which must be destroyed + * @param[out] detector instance of a text-detector, which must be destroyed * by \ref mmdeploy_text_detector_destroy * @return status of creating text-detector's handle */ -MMDEPLOY_API int mmdeploy_text_detector_create(mm_model_t model, const char* device_name, - int device_id, mm_handle_t* handle); +MMDEPLOY_API int mmdeploy_text_detector_create(mmdeploy_model_t model, const char* device_name, + int device_id, mmdeploy_text_detector_t* detector); /** * @brief Create text-detector's handle * @param[in] model_path path to text detection model * @param[in] device_name name of device, such as "cpu", "cuda", etc. * @param[in] device_id id of device - * @param[out] handle instance of a text-detector, which must be destroyed + * @param[out] detector instance of a text-detector, which must be destroyed * by \ref mmdeploy_text_detector_destroy * @return status of creating text-detector's handle */ MMDEPLOY_API int mmdeploy_text_detector_create_by_path(const char* model_path, const char* device_name, int device_id, - mm_handle_t* handle); + mmdeploy_text_detector_t* detector); /** * @brief Apply text-detector to batch images and get their inference results - * @param[in] handle text-detector's handle created by \ref mmdeploy_text_detector_create_by_path + * @param[in] detector text-detector's handle created by \ref mmdeploy_text_detector_create_by_path * @param[in] mats a batch of images * @param[in] mat_count number of images in the batch * @param[out] results a linear buffer to save text detection results of each @@ -57,8 +60,9 @@ MMDEPLOY_API int mmdeploy_text_detector_create_by_path(const char* model_path, * results of each image. 
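
The text-detector result layout deserves a worked illustration: `results` is one linear buffer across the whole batch, and `result_count[i]` says how many boxes belong to image i. A minimal sketch under that reading (the detector handle and input mats are assumed to exist):

```cpp
#include "mmdeploy/text_detector.h"  // installed header path assumed

int run_text_detector(mmdeploy_text_detector_t detector,
                      const mmdeploy_mat_t* mats, int mat_count) {
  mmdeploy_text_detection_t* dets{};
  int* det_count{};
  if (auto ec = mmdeploy_text_detector_apply(detector, mats, mat_count, &dets, &det_count)) {
    return ec;
  }
  const mmdeploy_text_detection_t* p = dets;
  for (int i = 0; i < mat_count; ++i) {
    for (int j = 0; j < det_count[i]; ++j, ++p) {
      // p->bbox[0..3] are the four clockwise vertices, p->score the confidence
    }
  }
  // release takes the results buffer, the per-image count buffer, and its length
  mmdeploy_text_detector_release_result(dets, det_count, mat_count);
  return MMDEPLOY_SUCCESS;
}
```
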
It must be released by \ref mmdeploy_detector_release_result * @return status of inference */ -MMDEPLOY_API int mmdeploy_text_detector_apply(mm_handle_t handle, const mm_mat_t* mats, - int mat_count, mm_text_detect_t** results, +MMDEPLOY_API int mmdeploy_text_detector_apply(mmdeploy_text_detector_t detector, + const mmdeploy_mat_t* mats, int mat_count, + mmdeploy_text_detection_t** results, int** result_count); /** @brief Release the inference result buffer returned by \ref mmdeploy_text_detector_apply @@ -66,15 +70,15 @@ MMDEPLOY_API int mmdeploy_text_detector_apply(mm_handle_t handle, const mm_mat_t * @param[in] result_count \p results size buffer * @param[in] count the length of buffer \p result_count */ -MMDEPLOY_API void mmdeploy_text_detector_release_result(mm_text_detect_t* results, +MMDEPLOY_API void mmdeploy_text_detector_release_result(mmdeploy_text_detection_t* results, const int* result_count, int count); /** * @brief Destroy text-detector's handle - * @param[in] handle text-detector's handle created by \ref mmdeploy_text_detector_create_by_path or - * \ref mmdeploy_text_detector_create + * @param[in] detector text-detector's handle created by \ref mmdeploy_text_detector_create_by_path + * or \ref mmdeploy_text_detector_create */ -MMDEPLOY_API void mmdeploy_text_detector_destroy(mm_handle_t handle); +MMDEPLOY_API void mmdeploy_text_detector_destroy(mmdeploy_text_detector_t detector); /****************************************************************************** * Experimental asynchronous APIs */ @@ -83,9 +87,9 @@ MMDEPLOY_API void mmdeploy_text_detector_destroy(mm_handle_t handle); * @brief Same as \ref mmdeploy_text_detector_create, but allows to control execution context of * tasks via exec_info */ -MMDEPLOY_API int mmdeploy_text_detector_create_v2(mm_model_t model, const char* device_name, +MMDEPLOY_API int mmdeploy_text_detector_create_v2(mmdeploy_model_t model, const char* device_name, int device_id, mmdeploy_exec_info_t exec_info, - mm_handle_t* handle); + mmdeploy_text_detector_t* detector); /** * @brief Pack text-detector inputs into mmdeploy_value_t @@ -93,23 +97,24 @@ MMDEPLOY_API int mmdeploy_text_detector_create_v2(mm_model_t model, const char* * @param[in] mat_count number of images in the batch * @return the created value */ -MMDEPLOY_API int mmdeploy_text_detector_create_input(const mm_mat_t* mats, int mat_count, +MMDEPLOY_API int mmdeploy_text_detector_create_input(const mmdeploy_mat_t* mats, int mat_count, mmdeploy_value_t* input); /** * @brief Same as \ref mmdeploy_text_detector_apply, but input and output are packed in \ref * mmdeploy_value_t. 
*/ -MMDEPLOY_API int mmdeploy_text_detector_apply_v2(mm_handle_t handle, mmdeploy_value_t input, - mmdeploy_value_t* output); +MMDEPLOY_API int mmdeploy_text_detector_apply_v2(mmdeploy_text_detector_t detector, + mmdeploy_value_t input, mmdeploy_value_t* output); /** * @brief Apply text-detector asynchronously - * @param[in] handle handle to the detector + * @param[in] detector handle to the detector * @param[in] input input sender that will be consumed by the operation * @return output sender */ -MMDEPLOY_API int mmdeploy_text_detector_apply_async(mm_handle_t handle, mmdeploy_sender_t input, +MMDEPLOY_API int mmdeploy_text_detector_apply_async(mmdeploy_text_detector_t detector, + mmdeploy_sender_t input, mmdeploy_sender_t* output); /** @@ -123,11 +128,12 @@ MMDEPLOY_API int mmdeploy_text_detector_apply_async(mm_handle_t handle, mmdeploy * @return status of the operation */ MMDEPLOY_API -int mmdeploy_text_detector_get_result(mmdeploy_value_t output, mm_text_detect_t** results, +int mmdeploy_text_detector_get_result(mmdeploy_value_t output, mmdeploy_text_detection_t** results, int** result_count); -typedef int (*mmdeploy_text_detector_continue_t)(mm_text_detect_t* results, int* result_count, - void* context, mmdeploy_sender_t* output); +typedef int (*mmdeploy_text_detector_continue_t)(mmdeploy_text_detection_t* results, + int* result_count, void* context, + mmdeploy_sender_t* output); // MMDEPLOY_API int mmdeploy_text_detector_apply_async_v2(mm_handle_t handle, const mm_mat_t* imgs, // int img_count, @@ -135,8 +141,9 @@ typedef int (*mmdeploy_text_detector_continue_t)(mm_text_detect_t* results, int* // cont, void* context, mmdeploy_sender_t* // output); -MMDEPLOY_API int mmdeploy_text_detector_apply_async_v3(mm_handle_t handle, const mm_mat_t* imgs, - int img_count, mmdeploy_sender_t* output); +MMDEPLOY_API int mmdeploy_text_detector_apply_async_v3(mmdeploy_text_detector_t detector, + const mmdeploy_mat_t* imgs, int img_count, + mmdeploy_sender_t* output); MMDEPLOY_API int mmdeploy_text_detector_continue_async(mmdeploy_sender_t input, mmdeploy_text_detector_continue_t cont, diff --git a/csrc/mmdeploy/apis/c/text_recognizer.cpp b/csrc/mmdeploy/apis/c/mmdeploy/text_recognizer.cpp similarity index 61% rename from csrc/mmdeploy/apis/c/text_recognizer.cpp rename to csrc/mmdeploy/apis/c/mmdeploy/text_recognizer.cpp index 71b83c25f..4f2dc9f3f 100644 --- a/csrc/mmdeploy/apis/c/text_recognizer.cpp +++ b/csrc/mmdeploy/apis/c/mmdeploy/text_recognizer.cpp @@ -4,10 +4,8 @@ #include -#include "mmdeploy/apis/c/common_internal.h" -#include "mmdeploy/apis/c/executor_internal.h" -#include "mmdeploy/apis/c/model.h" -#include "mmdeploy/apis/c/pipeline.h" +#include "common_internal.h" +#include "executor_internal.h" #include "mmdeploy/archive/value_archive.h" #include "mmdeploy/codebase/mmocr/mmocr.h" #include "mmdeploy/core/device.h" @@ -16,6 +14,8 @@ #include "mmdeploy/core/status_code.h" #include "mmdeploy/core/utils/formatter.h" #include "mmdeploy/core/value.h" +#include "model.h" +#include "pipeline.h" using namespace mmdeploy; @@ -65,47 +65,52 @@ const Value& config_template() { return v; } -int mmdeploy_text_recognizer_create_impl(mm_model_t model, const char* device_name, int device_id, - mmdeploy_exec_info_t exec_info, mm_handle_t* handle) { +int mmdeploy_text_recognizer_create_impl(mmdeploy_model_t model, const char* device_name, + int device_id, mmdeploy_exec_info_t exec_info, + mmdeploy_text_recognizer_t* recognizer) { auto config = config_template(); - 
config["pipeline"]["tasks"][2]["params"]["model"] = *static_cast(model); + config["pipeline"]["tasks"][2]["params"]["model"] = *Cast(model); - return mmdeploy_pipeline_create(Cast(&config), device_name, device_id, exec_info, handle); + return mmdeploy_pipeline_create(Cast(&config), device_name, device_id, exec_info, + (mmdeploy_pipeline_t*)recognizer); } } // namespace -int mmdeploy_text_recognizer_create(mm_model_t model, const char* device_name, int device_id, - mm_handle_t* handle) { - return mmdeploy_text_recognizer_create_impl(model, device_name, device_id, nullptr, handle); +int mmdeploy_text_recognizer_create(mmdeploy_model_t model, const char* device_name, int device_id, + mmdeploy_text_recognizer_t* recognizer) { + return mmdeploy_text_recognizer_create_impl(model, device_name, device_id, nullptr, recognizer); } -int mmdeploy_text_recognizer_create_v2(mm_model_t model, const char* device_name, int device_id, - mmdeploy_exec_info_t exec_info, mm_handle_t* handle) { - return mmdeploy_text_recognizer_create_impl(model, device_name, device_id, exec_info, handle); +int mmdeploy_text_recognizer_create_v2(mmdeploy_model_t model, const char* device_name, + int device_id, mmdeploy_exec_info_t exec_info, + mmdeploy_text_recognizer_t* recognizer) { + return mmdeploy_text_recognizer_create_impl(model, device_name, device_id, exec_info, recognizer); } int mmdeploy_text_recognizer_create_by_path(const char* model_path, const char* device_name, - int device_id, mm_handle_t* handle) { - mm_model_t model{}; + int device_id, mmdeploy_text_recognizer_t* recognizer) { + mmdeploy_model_t model{}; if (auto ec = mmdeploy_model_create_by_path(model_path, &model)) { return ec; } - auto ec = mmdeploy_text_recognizer_create_impl(model, device_name, device_id, nullptr, handle); + auto ec = + mmdeploy_text_recognizer_create_impl(model, device_name, device_id, nullptr, recognizer); mmdeploy_model_destroy(model); return ec; } -int mmdeploy_text_recognizer_apply(mm_handle_t handle, const mm_mat_t* images, int count, - mm_text_recognize_t** results) { - return mmdeploy_text_recognizer_apply_bbox(handle, images, count, nullptr, nullptr, results); +int mmdeploy_text_recognizer_apply(mmdeploy_text_recognizer_t recognizer, + const mmdeploy_mat_t* images, int count, + mmdeploy_text_recognition_t** results) { + return mmdeploy_text_recognizer_apply_bbox(recognizer, images, count, nullptr, nullptr, results); } -int mmdeploy_text_recognizer_create_input(const mm_mat_t* images, int image_count, - const mm_text_detect_t* bboxes, const int* bbox_count, - mmdeploy_value_t* output) { +int mmdeploy_text_recognizer_create_input(const mmdeploy_mat_t* images, int image_count, + const mmdeploy_text_detection_t* bboxes, + const int* bbox_count, mmdeploy_value_t* output) { if (image_count && images == nullptr) { - return MM_E_INVALID_ARG; + return MMDEPLOY_E_INVALID_ARG; } try { Value::Array input_images; @@ -151,47 +156,49 @@ int mmdeploy_text_recognizer_create_input(const mm_mat_t* images, int image_coun Value input{std::move(input_images), std::move(input_bboxes)}; *output = Take(std::move(input)); - return MM_SUCCESS; + return MMDEPLOY_SUCCESS; } catch (const std::exception& e) { MMDEPLOY_ERROR("exception caught: {}", e.what()); } catch (...) 
{ MMDEPLOY_ERROR("unknown exception caught"); } - return MM_E_FAIL; + return MMDEPLOY_E_FAIL; } -int mmdeploy_text_recognizer_apply_bbox(mm_handle_t handle, const mm_mat_t* mats, int mat_count, - const mm_text_detect_t* bboxes, const int* bbox_count, - mm_text_recognize_t** results) { +int mmdeploy_text_recognizer_apply_bbox(mmdeploy_text_recognizer_t recognizer, + const mmdeploy_mat_t* images, int image_count, + const mmdeploy_text_detection_t* bboxes, + const int* bbox_count, + mmdeploy_text_recognition_t** results) { wrapped input; - if (auto ec = - mmdeploy_text_recognizer_create_input(mats, mat_count, bboxes, bbox_count, input.ptr())) { + if (auto ec = mmdeploy_text_recognizer_create_input(images, image_count, bboxes, bbox_count, + input.ptr())) { return ec; } wrapped output; - if (auto ec = mmdeploy_text_recognizer_apply_v2(handle, input, output.ptr())) { + if (auto ec = mmdeploy_text_recognizer_apply_v2(recognizer, input, output.ptr())) { return ec; } if (auto ec = mmdeploy_text_recognizer_get_result(output, results)) { return ec; } - return MM_SUCCESS; + return MMDEPLOY_SUCCESS; } -int mmdeploy_text_recognizer_apply_v2(mm_handle_t handle, mmdeploy_value_t input, +int mmdeploy_text_recognizer_apply_v2(mmdeploy_text_recognizer_t recognizer, mmdeploy_value_t input, mmdeploy_value_t* output) { - return mmdeploy_pipeline_apply(handle, input, output); + return mmdeploy_pipeline_apply((mmdeploy_pipeline_t)recognizer, input, output); } -int mmdeploy_text_recognizer_apply_async(mm_handle_t handle, mmdeploy_sender_t input, - mmdeploy_sender_t* output) { - return mmdeploy_pipeline_apply_async(handle, input, output); +int mmdeploy_text_recognizer_apply_async(mmdeploy_text_recognizer_t recognizer, + mmdeploy_sender_t input, mmdeploy_sender_t* output) { + return mmdeploy_pipeline_apply_async((mmdeploy_pipeline_t)recognizer, input, output); } MMDEPLOY_API int mmdeploy_text_recognizer_get_result(mmdeploy_value_t output, - mm_text_recognize_t** results) { + mmdeploy_text_recognition_t** results) { if (!output || !results) { - return MM_E_INVALID_ARG; + return MMDEPLOY_E_INVALID_ARG; } try { std::vector> recognizer_outputs; @@ -203,12 +210,12 @@ MMDEPLOY_API int mmdeploy_text_recognizer_get_result(mmdeploy_value_t output, result_count += img_outputs.size(); } - auto deleter = [&](mm_text_recognize_t* p) { + auto deleter = [&](mmdeploy_text_recognition_t* p) { mmdeploy_text_recognizer_release_result(p, static_cast(result_count)); }; - std::unique_ptr _results( - new mm_text_recognize_t[result_count]{}, deleter); + std::unique_ptr _results( + new mmdeploy_text_recognition_t[result_count]{}, deleter); size_t result_idx = 0; for (const auto& img_result : recognizer_outputs) { @@ -233,10 +240,10 @@ MMDEPLOY_API int mmdeploy_text_recognizer_get_result(mmdeploy_value_t output, } catch (...) 
{ MMDEPLOY_ERROR("unknown exception caught"); } - return MM_SUCCESS; + return MMDEPLOY_SUCCESS; } -void mmdeploy_text_recognizer_release_result(mm_text_recognize_t* results, int count) { +void mmdeploy_text_recognizer_release_result(mmdeploy_text_recognition_t* results, int count) { for (int i = 0; i < count; ++i) { delete[] results[i].score; delete[] results[i].text; @@ -244,21 +251,24 @@ void mmdeploy_text_recognizer_release_result(mm_text_recognize_t* results, int c delete[] results; } -void mmdeploy_text_recognizer_destroy(mm_handle_t handle) { mmdeploy_pipeline_destroy(handle); } +void mmdeploy_text_recognizer_destroy(mmdeploy_text_recognizer_t recognizer) { + mmdeploy_pipeline_destroy((mmdeploy_pipeline_t)recognizer); +} -int mmdeploy_text_recognizer_apply_async_v3(mm_handle_t handle, const mm_mat_t* imgs, int img_count, - const mm_text_detect_t* bboxes, const int* bbox_count, - mmdeploy_sender_t* output) { +int mmdeploy_text_recognizer_apply_async_v3(mmdeploy_text_recognizer_t recognizer, + const mmdeploy_mat_t* imgs, int img_count, + const mmdeploy_text_detection_t* bboxes, + const int* bbox_count, mmdeploy_sender_t* output) { wrapped input_val; if (auto ec = mmdeploy_text_recognizer_create_input(imgs, img_count, bboxes, bbox_count, input_val.ptr())) { return ec; } mmdeploy_sender_t input_sndr = mmdeploy_executor_just(input_val); - if (auto ec = mmdeploy_text_recognizer_apply_async(handle, input_sndr, output)) { + if (auto ec = mmdeploy_text_recognizer_apply_async(recognizer, input_sndr, output)) { return ec; } - return MM_SUCCESS; + return MMDEPLOY_SUCCESS; } int mmdeploy_text_recognizer_continue_async(mmdeploy_sender_t input, @@ -267,7 +277,7 @@ int mmdeploy_text_recognizer_continue_async(mmdeploy_sender_t input, auto sender = Guard([&] { return Take( LetValue(Take(input), [fn = cont, context](Value& value) -> TypeErasedSender { - mm_text_recognize_t* results{}; + mmdeploy_text_recognition_t* results{}; if (auto ec = mmdeploy_text_recognizer_get_result(Cast(&value), &results)) { return Just(Value()); } @@ -281,7 +291,7 @@ int mmdeploy_text_recognizer_continue_async(mmdeploy_sender_t input, }); if (sender) { *output = sender; - return MM_SUCCESS; + return MMDEPLOY_SUCCESS; } - return MM_E_FAIL; + return MMDEPLOY_E_FAIL; } diff --git a/csrc/mmdeploy/apis/c/text_recognizer.h b/csrc/mmdeploy/apis/c/mmdeploy/text_recognizer.h similarity index 60% rename from csrc/mmdeploy/apis/c/text_recognizer.h rename to csrc/mmdeploy/apis/c/mmdeploy/text_recognizer.h index 2e3ad2d78..3bf79c267 100644 --- a/csrc/mmdeploy/apis/c/text_recognizer.h +++ b/csrc/mmdeploy/apis/c/mmdeploy/text_recognizer.h @@ -16,11 +16,13 @@ extern "C" { #endif -typedef struct mm_text_recognize_t { +typedef struct mmdeploy_text_recognition_t { char* text; float* score; int length; -} mm_text_recognize_t; +} mmdeploy_text_recognition_t; + +typedef struct mmdeploy_text_recognizer* mmdeploy_text_recognizer_t; /** * @brief Create a text recognizer instance @@ -28,29 +30,30 @@ typedef struct mm_text_recognize_t { * \ref mmdeploy_model_create_by_path or \ref mmdeploy_model_create in \ref model.h * @param[in] device_name name of device, such as "cpu", "cuda", etc. * @param[in] device_id id of device. 
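
For the recognizer, a short sketch of the renamed API applied to pre-cropped text images: without bboxes, `apply` forwards through `apply_bbox` with null boxes, so one `mmdeploy_text_recognition_t` comes back per input image. The model path and inputs are assumptions; the per-character `score` array of `length` entries is taken from the struct definition above.

```cpp
#include <cstdio>
#include "mmdeploy/text_recognizer.h"  // installed header path assumed

int run_text_recognizer(const mmdeploy_mat_t* images, int count) {
  mmdeploy_text_recognizer_t recognizer{};
  if (auto ec =
          mmdeploy_text_recognizer_create_by_path("../ocr_rec_model", "cpu", 0, &recognizer)) {
    return ec;
  }
  mmdeploy_text_recognition_t* texts{};
  if (auto ec = mmdeploy_text_recognizer_apply(recognizer, images, count, &texts)) {
    mmdeploy_text_recognizer_destroy(recognizer);
    return ec;
  }
  for (int i = 0; i < count; ++i) {
    // text is the decoded string; score holds one confidence per character
    std::printf("img %d: %s (first-char score %.2f)\n", i, texts[i].text,
                texts[i].length > 0 ? texts[i].score[0] : 0.f);
  }
  mmdeploy_text_recognizer_release_result(texts, count);
  mmdeploy_text_recognizer_destroy(recognizer);
  return MMDEPLOY_SUCCESS;
}
```
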
- * @param[out] handle handle of the created text recognizer, which must be destroyed + * @param[out] recognizer handle of the created text recognizer, which must be destroyed * by \ref mmdeploy_text_recognizer_destroy * @return status code of the operation */ -MMDEPLOY_API int mmdeploy_text_recognizer_create(mm_model_t model, const char* device_name, - int device_id, mm_handle_t* handle); +MMDEPLOY_API int mmdeploy_text_recognizer_create(mmdeploy_model_t model, const char* device_name, + int device_id, + mmdeploy_text_recognizer_t* recognizer); /** * @brief Create a text recognizer instance * @param[in] model_path path to text recognition model * @param[in] device_name name of device, such as "cpu", "cuda", etc. * @param[in] device_id id of device. - * @param[out] handle handle of the created text recognizer, which must be destroyed + * @param[out] recognizer handle of the created text recognizer, which must be destroyed * by \ref mmdeploy_text_recognizer_destroy * @return status code of the operation */ MMDEPLOY_API int mmdeploy_text_recognizer_create_by_path(const char* model_path, const char* device_name, int device_id, - mm_handle_t* handle); + mmdeploy_text_recognizer_t* recognizer); /** * @brief Apply text recognizer to a batch of text images - * @param[in] handle text recognizer's handle created by \ref + * @param[in] recognizer text recognizer's handle created by \ref * mmdeploy_text_recognizer_create_by_path * @param[in] images a batch of text images * @param[in] count number of images in the batch @@ -58,12 +61,13 @@ MMDEPLOY_API int mmdeploy_text_recognizer_create_by_path(const char* model_path, * by \ref mmdeploy_text_recognizer_release_result * @return status code of the operation */ -MMDEPLOY_API int mmdeploy_text_recognizer_apply(mm_handle_t handle, const mm_mat_t* images, - int count, mm_text_recognize_t** results); +MMDEPLOY_API int mmdeploy_text_recognizer_apply(mmdeploy_text_recognizer_t recognizer, + const mmdeploy_mat_t* images, int count, + mmdeploy_text_recognition_t** results); /** * @brief Apply text recognizer to a batch of images supplied with text bboxes - * @param[in] handle text recognizer's handle created by \ref + * @param[in] recognizer text recognizer's handle created by \ref * mmdeploy_text_recognizer_create_by_path * @param[in] images a batch of text images * @param[in] image_count number of images in the batch @@ -73,25 +77,26 @@ MMDEPLOY_API int mmdeploy_text_recognizer_apply(mm_handle_t handle, const mm_mat * bboxes, must be release by \ref mmdeploy_text_recognizer_release_result * @return status code of the operation */ -MMDEPLOY_API int mmdeploy_text_recognizer_apply_bbox(mm_handle_t handle, const mm_mat_t* images, - int image_count, - const mm_text_detect_t* bboxes, +MMDEPLOY_API int mmdeploy_text_recognizer_apply_bbox(mmdeploy_text_recognizer_t recognizer, + const mmdeploy_mat_t* images, int image_count, + const mmdeploy_text_detection_t* bboxes, const int* bbox_count, - mm_text_recognize_t** results); + mmdeploy_text_recognition_t** results); /** @brief Release result buffer returned by \ref mmdeploy_text_recognizer_apply or \ref * mmdeploy_text_recognizer_apply_bbox * @param[in] results result buffer by text recognizer * @param[in] count length of \p result */ -MMDEPLOY_API void mmdeploy_text_recognizer_release_result(mm_text_recognize_t* results, int count); +MMDEPLOY_API void mmdeploy_text_recognizer_release_result(mmdeploy_text_recognition_t* results, + int count); /** * @brief destroy text recognizer - * @param[in] handle handle of text 
recognizer created by \ref + * @param[in] recognizer handle of text recognizer created by \ref * mmdeploy_text_recognizer_create_by_path or \ref mmdeploy_text_recognizer_create */ -MMDEPLOY_API void mmdeploy_text_recognizer_destroy(mm_handle_t handle); +MMDEPLOY_API void mmdeploy_text_recognizer_destroy(mmdeploy_text_recognizer_t recognizer); /****************************************************************************** * Experimental asynchronous APIs */ @@ -100,9 +105,9 @@ MMDEPLOY_API void mmdeploy_text_recognizer_destroy(mm_handle_t handle); * @brief Same as \ref mmdeploy_text_recognizer_create, but allows to control execution context of * tasks via exec_info */ -MMDEPLOY_API int mmdeploy_text_recognizer_create_v2(mm_model_t model, const char* device_name, +MMDEPLOY_API int mmdeploy_text_recognizer_create_v2(mmdeploy_model_t model, const char* device_name, int device_id, mmdeploy_exec_info_t exec_info, - mm_handle_t* handle); + mmdeploy_text_recognizer_t* recognizer); /** * @brief Pack text-recognizer inputs into mmdeploy_value_t @@ -112,27 +117,30 @@ MMDEPLOY_API int mmdeploy_text_recognizer_create_v2(mm_model_t model, const char * @param[in] bbox_count number of bboxes of each \p images, must be same length as \p images * @return value created */ -MMDEPLOY_API int mmdeploy_text_recognizer_create_input(const mm_mat_t* images, int image_count, - const mm_text_detect_t* bboxes, +MMDEPLOY_API int mmdeploy_text_recognizer_create_input(const mmdeploy_mat_t* images, + int image_count, + const mmdeploy_text_detection_t* bboxes, const int* bbox_count, mmdeploy_value_t* output); -MMDEPLOY_API int mmdeploy_text_recognizer_apply_v2(mm_handle_t handle, mmdeploy_value_t input, +MMDEPLOY_API int mmdeploy_text_recognizer_apply_v2(mmdeploy_text_recognizer_t recognizer, + mmdeploy_value_t input, mmdeploy_value_t* output); /** * @brief Same as \ref mmdeploy_text_recognizer_apply_bbox, but input and output are packed in \ref * mmdeploy_value_t. 
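
Chaining the two OCR stages through `apply_bbox` is the intended end-to-end flow; here is a sketch for a single image, assuming both handles were created as shown earlier. One recognition result is produced per detected box, so the release count is the bbox total.

```cpp
#include "mmdeploy/text_detector.h"
#include "mmdeploy/text_recognizer.h"

int run_ocr(mmdeploy_text_detector_t detector, mmdeploy_text_recognizer_t recognizer,
            const mmdeploy_mat_t* image) {
  mmdeploy_text_detection_t* bboxes{};
  int* bbox_count{};
  if (auto ec = mmdeploy_text_detector_apply(detector, image, 1, &bboxes, &bbox_count)) {
    return ec;
  }
  mmdeploy_text_recognition_t* texts{};
  if (auto ec = mmdeploy_text_recognizer_apply_bbox(recognizer, image, 1, bboxes,
                                                    bbox_count, &texts)) {
    mmdeploy_text_detector_release_result(bboxes, bbox_count, 1);
    return ec;
  }
  int total = bbox_count[0];  // one recognition result per detected box
  mmdeploy_text_recognizer_release_result(texts, total);
  mmdeploy_text_detector_release_result(bboxes, bbox_count, 1);
  return MMDEPLOY_SUCCESS;
}
```
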
*/ -MMDEPLOY_API int mmdeploy_text_recognizer_apply_async(mm_handle_t handle, mmdeploy_sender_t input, +MMDEPLOY_API int mmdeploy_text_recognizer_apply_async(mmdeploy_text_recognizer_t recognizer, + mmdeploy_sender_t input, mmdeploy_sender_t* output); -typedef int (*mmdeploy_text_recognizer_continue_t)(mm_text_recognize_t* results, void* context, - mmdeploy_sender_t* output); +typedef int (*mmdeploy_text_recognizer_continue_t)(mmdeploy_text_recognition_t* results, + void* context, mmdeploy_sender_t* output); -MMDEPLOY_API int mmdeploy_text_recognizer_apply_async_v3(mm_handle_t handle, const mm_mat_t* imgs, - int img_count, - const mm_text_detect_t* bboxes, +MMDEPLOY_API int mmdeploy_text_recognizer_apply_async_v3(mmdeploy_text_recognizer_t recognizer, + const mmdeploy_mat_t* imgs, int img_count, + const mmdeploy_text_detection_t* bboxes, const int* bbox_count, mmdeploy_sender_t* output); @@ -147,7 +155,7 @@ MMDEPLOY_API int mmdeploy_text_recognizer_continue_async(mmdeploy_sender_t input * @return status of the operation */ MMDEPLOY_API int mmdeploy_text_recognizer_get_result(mmdeploy_value_t output, - mm_text_recognize_t** results); + mmdeploy_text_recognition_t** results); #ifdef __cplusplus } diff --git a/csrc/mmdeploy/apis/csharp/MMDeploy/APIs/Classifier.cs b/csrc/mmdeploy/apis/csharp/MMDeploy/APIs/Classifier.cs index 5bdb15544..9a2e4d1c0 100644 --- a/csrc/mmdeploy/apis/csharp/MMDeploy/APIs/Classifier.cs +++ b/csrc/mmdeploy/apis/csharp/MMDeploy/APIs/Classifier.cs @@ -106,7 +106,7 @@ namespace MMDeploy private unsafe void FormatResult(int matCount, int* resultCount, Label* results, ref List output, out int total) { - total = 0; + total = matCount; for (int i = 0; i < matCount; i++) { ClassifierOutput outi = default; @@ -114,7 +114,6 @@ namespace MMDeploy { outi.Add(results->Id, results->Score); results++; - total++; } output.Add(outi); diff --git a/csrc/mmdeploy/apis/csharp/MMDeploy/APIs/Detector.cs b/csrc/mmdeploy/apis/csharp/MMDeploy/APIs/Detector.cs index 584c63aa8..852012aff 100644 --- a/csrc/mmdeploy/apis/csharp/MMDeploy/APIs/Detector.cs +++ b/csrc/mmdeploy/apis/csharp/MMDeploy/APIs/Detector.cs @@ -205,7 +205,7 @@ namespace MMDeploy private unsafe void FormatResult(int matCount, int* resultCount, CDetect* results, ref List output, out int total) { - total = 0; + total = matCount; for (int i = 0; i < matCount; i++) { DetectorOutput outi = default; @@ -213,7 +213,6 @@ namespace MMDeploy { outi.Add(results); results++; - total++; } output.Add(outi); diff --git a/csrc/mmdeploy/apis/csharp/MMDeploy/APIs/TextDetector.cs b/csrc/mmdeploy/apis/csharp/MMDeploy/APIs/TextDetector.cs index 958e62902..87026f9cd 100644 --- a/csrc/mmdeploy/apis/csharp/MMDeploy/APIs/TextDetector.cs +++ b/csrc/mmdeploy/apis/csharp/MMDeploy/APIs/TextDetector.cs @@ -184,7 +184,7 @@ namespace MMDeploy private unsafe void FormatResult(int matCount, int* resultCount, TextDetect* results, ref List output, out int total) { - total = 0; + total = matCount; for (int i = 0; i < matCount; i++) { TextDetectorOutput outi = default; @@ -192,7 +192,6 @@ namespace MMDeploy { outi.Add(results); results++; - total++; } output.Add(outi); diff --git a/csrc/mmdeploy/apis/csharp/MMDeploy/MMDeploy.csproj b/csrc/mmdeploy/apis/csharp/MMDeploy/MMDeploy.csproj index 9d1ac81ee..8b723f683 100644 --- a/csrc/mmdeploy/apis/csharp/MMDeploy/MMDeploy.csproj +++ b/csrc/mmdeploy/apis/csharp/MMDeploy/MMDeploy.csproj @@ -14,10 +14,10 @@ - $(MSBuildThisFileDirectory)\..\..\..\.. + $(MSBuildThisFileDirectory)\..\..\..\..\.. 
- + diff --git a/csrc/mmdeploy/apis/csharp/MMDeploy/NativeAPIs/NativeMethods.cs b/csrc/mmdeploy/apis/csharp/MMDeploy/NativeAPIs/NativeMethods.cs index a5b62dede..e287232c7 100644 --- a/csrc/mmdeploy/apis/csharp/MMDeploy/NativeAPIs/NativeMethods.cs +++ b/csrc/mmdeploy/apis/csharp/MMDeploy/NativeAPIs/NativeMethods.cs @@ -5,6 +5,6 @@ /// internal static partial class NativeMethods { - public const string DllExtern = "MMDeployExtern"; + public const string DllExtern = "mmdeploy"; } } diff --git a/csrc/mmdeploy/apis/csharp/README.md b/csrc/mmdeploy/apis/csharp/README.md index 0d1a7c8e3..0a0239443 100644 --- a/csrc/mmdeploy/apis/csharp/README.md +++ b/csrc/mmdeploy/apis/csharp/README.md @@ -25,7 +25,7 @@ To use the nuget package, you also need to download the backend dependencies. Fo Before building the c# api, you need to build sdk first. Please follow this [tutorial](../../../docs/en/build/windows.md)/[教程](../../../docs/zh_cn/build/windows.md) to build sdk. Remember to set the MMDEPLOY_BUILD_SDK_CSHARP_API option to ON. We recommend setting `MMDEPLOY_SHARED_LIBS` to OFF and use the static third party libraries(pplcv, opencv, etc.). If so, you only need add the backend dependencies to your system path, or you need to add all dependencies. -If you follow the tutorial, the MMDeployExtern.dll will be built in `build\bin\release`. Make sure the expected dll is in that path or the next step will throw a file-not-exist error. +If you follow the tutorial, the mmdeploy.dll will be built in `build\bin\release`. Make sure the expected dll is in that path or the next step will throw a file-not-exist error. **Step 1.** Build MMDeploy nuget package. diff --git a/csrc/mmdeploy/apis/cxx/CMakeLists.txt b/csrc/mmdeploy/apis/cxx/CMakeLists.txt new file mode 100644 index 000000000..953e5a61e --- /dev/null +++ b/csrc/mmdeploy/apis/cxx/CMakeLists.txt @@ -0,0 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +cmake_minimum_required(VERSION 3.14) +project(mmdeploy_cxx_api) + +if (MMDEPLOY_BUILD_SDK_CXX_API) + add_library(${PROJECT_NAME} INTERFACE) + target_include_directories(${PROJECT_NAME} INTERFACE + $ + $) + target_compile_features(${PROJECT_NAME} INTERFACE cxx_std_17) + target_link_libraries(${PROJECT_NAME} INTERFACE mmdeploy::core) + foreach (task ${MMDEPLOY_TASKS}) + target_link_libraries(mmdeploy_${task} INTERFACE ${PROJECT_NAME}) + install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/mmdeploy/${task}.hpp + DESTINATION include/mmdeploy) + endforeach () + if (TARGET mmdeploy) + target_link_libraries(mmdeploy INTERFACE ${PROJECT_NAME}) + endif () + mmdeploy_export(${PROJECT_NAME}) + install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/mmdeploy/common.hpp + DESTINATION include/mmdeploy) + install(DIRECTORY ${CMAKE_SOURCE_DIR}/demo/csrc/ DESTINATION example/cpp + FILES_MATCHING + PATTERN "*.cxx" + ) +endif () diff --git a/csrc/mmdeploy/apis/cxx/mmdeploy/classifier.hpp b/csrc/mmdeploy/apis/cxx/mmdeploy/classifier.hpp new file mode 100644 index 000000000..4c83be227 --- /dev/null +++ b/csrc/mmdeploy/apis/cxx/mmdeploy/classifier.hpp @@ -0,0 +1,67 @@ +// Copyright (c) OpenMMLab. All rights reserved. 
+ +#ifndef MMDEPLOY_CSRC_MMDEPLOY_APIS_CXX_CLASSIFIER_HPP_ +#define MMDEPLOY_CSRC_MMDEPLOY_APIS_CXX_CLASSIFIER_HPP_ + +#include "mmdeploy/classifier.h" +#include "mmdeploy/common.hpp" + +namespace mmdeploy { + +using Classification = mmdeploy_classification_t; + +class Classifier : public NonMovable { + public: + Classifier(const Model& model, const Device& device) { + auto ec = mmdeploy_classifier_create(model, device.name(), device.index(), &classifier_); + if (ec != MMDEPLOY_SUCCESS) { + throw_exception(static_cast(ec)); + } + } + + ~Classifier() { + if (classifier_) { + mmdeploy_classifier_destroy(classifier_); + classifier_ = {}; + } + } + + using Result = Result_; + + std::vector Apply(Span images) { + if (images.empty()) { + return {}; + } + + Classification* results{}; + int* result_count{}; + auto ec = mmdeploy_classifier_apply(classifier_, reinterpret(images.data()), + static_cast(images.size()), &results, &result_count); + if (ec != MMDEPLOY_SUCCESS) { + throw_exception(static_cast(ec)); + } + + std::vector rets; + rets.reserve(images.size()); + + std::shared_ptr data(results, [result_count, count = images.size()](auto p) { + mmdeploy_classifier_release_result(p, result_count, count); + }); + + size_t offset = 0; + for (size_t i = 0; i < images.size(); ++i) { + offset += rets.emplace_back(offset, result_count[i], data).size(); + } + + return rets; + } + + Result Apply(const Mat& img) { return Apply(Span{img})[0]; } + + private: + mmdeploy_classifier_t classifier_{}; +}; + +} // namespace mmdeploy + +#endif // MMDEPLOY_CSRC_MMDEPLOY_APIS_CXX_CLASSIFIER_HPP_ diff --git a/csrc/mmdeploy/apis/cxx/mmdeploy/common.hpp b/csrc/mmdeploy/apis/cxx/mmdeploy/common.hpp new file mode 100644 index 000000000..baf81e7c5 --- /dev/null +++ b/csrc/mmdeploy/apis/cxx/mmdeploy/common.hpp @@ -0,0 +1,145 @@ +// Copyright (c) OpenMMLab. All rights reserved. 
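+//
+// Shared building blocks for the C++ API wrappers:
+//   - Model:      ref-counted wrapper over mmdeploy_model_t, loaded from a
+//                 path or from an in-memory buffer
+//   - Device:     a device name (e.g. "cpu", "cuda") plus an index
+//   - Mat:        a non-owning descriptor of image data; converts from cv::Mat
+//                 when MMDEPLOY_CXX_USE_OPENCV is enabled
+//   - Result_<T>: an (offset, size) window into a shared array of C results,
+//                 so per-image results can share a single release callback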
+ +#ifndef MMDEPLOY_CSRC_MMDEPLOY_APIS_CXX_COMMON_H_ +#define MMDEPLOY_CSRC_MMDEPLOY_APIS_CXX_COMMON_H_ + +#include +#include +#include + +#include "mmdeploy/common.h" +#include "mmdeploy/core/mpl/span.h" +#include "mmdeploy/core/status_code.h" +#include "mmdeploy/core/types.h" +#include "mmdeploy/model.h" + +#ifndef MMDEPLOY_CXX_USE_OPENCV +#define MMDEPLOY_CXX_USE_OPENCV 1 +#endif + +#if MMDEPLOY_CXX_USE_OPENCV +#include "opencv2/core/core.hpp" +#endif + +namespace mmdeploy { + +using Rect = mmdeploy_rect_t; + +namespace { // avoid conflict with internal classes, for now + +class Model { + public: + explicit Model(const char* path) { + mmdeploy_model_t model{}; + auto ec = mmdeploy_model_create_by_path(path, &model); + if (ec != MMDEPLOY_SUCCESS) { + throw_exception(static_cast(ec)); + } + model_.reset(model, [](auto p) { mmdeploy_model_destroy(p); }); + } + + Model(const void* buffer, size_t size) { + mmdeploy_model_t model{}; + auto ec = mmdeploy_model_create(buffer, static_cast(size), &model); + if (ec != MMDEPLOY_SUCCESS) { + throw_exception(static_cast(ec)); + } + model_.reset(model, [](auto p) { mmdeploy_model_destroy(p); }); + } + + operator mmdeploy_model_t() const noexcept { return model_.get(); } + + private: + std::shared_ptr model_{}; +}; + +class Device { + public: + explicit Device(std::string name, int index = 0) : name_(std::move(name)), index_(index) {} + + const char* name() const noexcept { return name_.c_str(); } + int index() const noexcept { return index_; } + + private: + std::string name_; + int index_; +}; + +class Mat { + public: + Mat() : desc_{} {} + + Mat(int height, int width, int channels, mmdeploy_pixel_format_t format, + mmdeploy_data_type_t type, uint8_t* data) + : desc_{data, height, width, channels, format, type} {} + + const mmdeploy_mat_t& desc() const noexcept { return desc_; } + +#if MMDEPLOY_CXX_USE_OPENCV + Mat(const cv::Mat& mat, mmdeploy_pixel_format_t pixel_format) + : desc_{mat.data, mat.rows, mat.cols, mat.channels(), pixel_format, GetCvType(mat.depth())} { + if (pixel_format == MMDEPLOY_PIXEL_FORMAT_COUNT) { + throw_exception(eNotSupported); + } + if (desc_.type == MMDEPLOY_DATA_TYPE_COUNT) { + throw_exception(eNotSupported); + } + } + Mat(const cv::Mat& mat) : Mat(mat, GetCvFormat(mat.channels())) {} + + static mmdeploy_data_type_t GetCvType(int depth) { + switch (depth) { + case CV_8U: + return MMDEPLOY_DATA_TYPE_UINT8; + case CV_32F: + return MMDEPLOY_DATA_TYPE_FLOAT; + default: + return MMDEPLOY_DATA_TYPE_COUNT; + } + } + static mmdeploy_pixel_format_t GetCvFormat(int channels) { + switch (channels) { + case 1: + return MMDEPLOY_PIXEL_FORMAT_GRAYSCALE; + case 3: + return MMDEPLOY_PIXEL_FORMAT_BGR; + case 4: + return MMDEPLOY_PIXEL_FORMAT_BGRA; + default: + return MMDEPLOY_PIXEL_FORMAT_COUNT; + } + } +#endif + private: + mmdeploy_mat_t desc_; +}; + +template +class Result_ { + public: + Result_(size_t offset, size_t size, std::shared_ptr data) + : offset_(offset), size_(size), data_(std::move(data)) {} + + T& operator[](size_t index) const noexcept { return *(data_.get() + offset_ + index); } + size_t size() const noexcept { return size_; } + T* begin() const noexcept { return data_.get() + offset_; } + T* end() const noexcept { return begin() + size_; } + + T* operator->() const noexcept { return data_.get(); } + T& operator*() const noexcept { return *data_; } + + private: + size_t offset_; + size_t size_; + std::shared_ptr data_; +}; + +inline const mmdeploy_mat_t* reinterpret(const Mat* p) { + return reinterpret_cast(p); +} + +} // 
namespace + +} // namespace mmdeploy + +#endif // MMDEPLOY_CSRC_MMDEPLOY_APIS_CXX_COMMON_H_ diff --git a/csrc/mmdeploy/apis/cxx/mmdeploy/detector.hpp b/csrc/mmdeploy/apis/cxx/mmdeploy/detector.hpp new file mode 100644 index 000000000..e98d2aefc --- /dev/null +++ b/csrc/mmdeploy/apis/cxx/mmdeploy/detector.hpp @@ -0,0 +1,67 @@ +// Copyright (c) OpenMMLab. All rights reserved. + +#ifndef MMDEPLOY_CSRC_MMDEPLOY_APIS_CXX_DETECTOR_HPP_ +#define MMDEPLOY_CSRC_MMDEPLOY_APIS_CXX_DETECTOR_HPP_ + +#include "mmdeploy/common.hpp" +#include "mmdeploy/detector.h" + +namespace mmdeploy { + +using Detection = mmdeploy_detection_t; + +class Detector : public NonMovable { + public: + Detector(const Model& model, const Device& device) { + auto ec = mmdeploy_detector_create(model, device.name(), device.index(), &detector_); + if (ec != MMDEPLOY_SUCCESS) { + throw_exception(static_cast(ec)); + } + } + + ~Detector() { + if (detector_) { + mmdeploy_detector_destroy(detector_); + detector_ = {}; + } + } + + using Result = Result_; + + std::vector Apply(Span images) { + if (images.empty()) { + return {}; + } + + Detection* results{}; + int* result_count{}; + auto ec = mmdeploy_detector_apply(detector_, reinterpret(images.data()), + static_cast(images.size()), &results, &result_count); + if (ec != MMDEPLOY_SUCCESS) { + throw_exception(static_cast(ec)); + } + + std::shared_ptr data(results, [result_count, count = images.size()](auto p) { + mmdeploy_detector_release_result(p, result_count, count); + }); + + std::vector rets; + rets.reserve(images.size()); + + size_t offset = 0; + for (size_t i = 0; i < images.size(); ++i) { + offset += rets.emplace_back(offset, result_count[i], data).size(); + } + + return rets; + } + + Result Apply(const Mat& image) { return Apply(Span{image})[0]; } + + private: + mmdeploy_detector_t detector_{}; +}; + +} // namespace mmdeploy + +#endif // MMDEPLOY_CSRC_MMDEPLOY_APIS_CXX_DETECTOR_HPP_ diff --git a/csrc/mmdeploy/apis/cxx/mmdeploy/pose_detector.hpp b/csrc/mmdeploy/apis/cxx/mmdeploy/pose_detector.hpp new file mode 100644 index 000000000..a2c7d5380 --- /dev/null +++ b/csrc/mmdeploy/apis/cxx/mmdeploy/pose_detector.hpp @@ -0,0 +1,78 @@ +// Copyright (c) OpenMMLab. All rights reserved. 
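+//
+// C++ wrapper over the C pose detector API. Apply() takes a batch of images
+// and, optionally, flattened per-image regions of interest (bboxes plus a
+// per-image bbox_count); when no boxes are given, each image yields exactly
+// one result.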
+ +#ifndef MMDEPLOY_CSRC_MMDEPLOY_APIS_CXX_MMDEPLOY_POSE_DETECTOR_HPP_ +#define MMDEPLOY_CSRC_MMDEPLOY_APIS_CXX_MMDEPLOY_POSE_DETECTOR_HPP_ + +#include "mmdeploy/common.hpp" +#include "mmdeploy/pose_detector.h" + +namespace mmdeploy { + +using PoseDetection = mmdeploy_pose_detection_t; + +class PoseDetector : public NonMovable { + public: + PoseDetector(const Model& model, const Device& device) { + auto ec = mmdeploy_pose_detector_create(model, device.name(), device.index(), &detector_); + if (ec != MMDEPLOY_SUCCESS) { + throw_exception(static_cast(ec)); + } + } + + ~PoseDetector() { + if (detector_) { + mmdeploy_pose_detector_destroy(detector_); + detector_ = {}; + } + } + + using Result = Result_; + + std::vector Apply(Span images, Span bboxes, + Span bbox_count) { + if (images.empty()) { + return {}; + } + + const mmdeploy_rect_t* p_bboxes{}; + const int* p_bbox_count{}; + + if (!bboxes.empty()) { + p_bboxes = bboxes.data(); + p_bbox_count = bbox_count.data(); + } + + PoseDetection* results{}; + auto ec = mmdeploy_pose_detector_apply_bbox(detector_, reinterpret(images.data()), + static_cast(images.size()), p_bboxes, + p_bbox_count, &results); + if (ec != MMDEPLOY_SUCCESS) { + throw_exception(static_cast(ec)); + } + + std::shared_ptr data(results, [count = images.size()](auto p) { + mmdeploy_pose_detector_release_result(p, count); + }); + + std::vector rets; + rets.reserve(images.size()); + + size_t offset = 0; + for (size_t i = 0; i < images.size(); ++i) { + offset += rets.emplace_back(offset, bboxes.empty() ? 1 : bbox_count[i], data).size(); + } + + return rets; + } + + Result Apply(const Mat& image, Span bboxes = {}) { + return Apply(Span{image}, bboxes, {static_cast(bboxes.size())})[0]; + } + + private: + mmdeploy_pose_detector_t detector_{}; +}; + +} // namespace mmdeploy + +#endif // MMDEPLOY_CSRC_MMDEPLOY_APIS_CXX_MMDEPLOY_POSE_DETECTOR_HPP_ diff --git a/csrc/mmdeploy/apis/cxx/mmdeploy/restorer.hpp b/csrc/mmdeploy/apis/cxx/mmdeploy/restorer.hpp new file mode 100644 index 000000000..5cbd28a6d --- /dev/null +++ b/csrc/mmdeploy/apis/cxx/mmdeploy/restorer.hpp @@ -0,0 +1,62 @@ +// Copyright (c) OpenMMLab. All rights reserved. 
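+//
+// C++ wrapper over the C restorer API. Apply() returns one restored image
+// (an mmdeploy_mat_t) per input; the result buffers remain owned by the SDK
+// and are released through the shared_ptr deleter once every Result is
+// dropped.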
+ +#ifndef MMDEPLOY_CSRC_MMDEPLOY_APIS_CXX_RESTORER_HPP_ +#define MMDEPLOY_CSRC_MMDEPLOY_APIS_CXX_RESTORER_HPP_ + +#include "mmdeploy/common.hpp" +#include "mmdeploy/restorer.h" + +namespace mmdeploy { + +class Restorer : public NonMovable { + public: + Restorer(const Model& model, const Device& device) { + auto ec = mmdeploy_restorer_create(model, device.name(), device.index(), &restorer_); + if (ec != MMDEPLOY_SUCCESS) { + throw_exception(static_cast(ec)); + } + } + + ~Restorer() { + if (restorer_) { + mmdeploy_restorer_destroy(restorer_); + restorer_ = {}; + } + } + + using Result = Result_; + + std::vector Apply(Span images) { + if (images.empty()) { + return {}; + } + + mmdeploy_mat_t* results{}; + auto ec = mmdeploy_restorer_apply(restorer_, reinterpret(images.data()), + static_cast(images.size()), &results); + if (ec != MMDEPLOY_SUCCESS) { + throw_exception(static_cast(ec)); + } + + std::vector rets; + rets.reserve(images.size()); + + std::shared_ptr data( + results, [count = images.size()](auto p) { mmdeploy_restorer_release_result(p, count); }); + + for (size_t i = 0; i < images.size(); ++i) { + rets.emplace_back(i, 1, data); + } + + return rets; + } + + Result Apply(const Mat& image) { return Apply(Span{image})[0]; } + + private: + mmdeploy_restorer_t restorer_{}; +}; + +} // namespace mmdeploy + +#endif // MMDEPLOY_CSRC_MMDEPLOY_APIS_CXX_RESTORER_HPP_ diff --git a/csrc/mmdeploy/apis/cxx/mmdeploy/rotated_detector.hpp b/csrc/mmdeploy/apis/cxx/mmdeploy/rotated_detector.hpp new file mode 100644 index 000000000..53b4365bd --- /dev/null +++ b/csrc/mmdeploy/apis/cxx/mmdeploy/rotated_detector.hpp @@ -0,0 +1,68 @@ +// Copyright (c) OpenMMLab. All rights reserved. + +#ifndef MMDEPLOY_CSRC_MMDEPLOY_APIS_CXX_MMDEPLOY_ROTATED_DETECTOR_HPP_ +#define MMDEPLOY_CSRC_MMDEPLOY_APIS_CXX_MMDEPLOY_ROTATED_DETECTOR_HPP_ + +#include "mmdeploy/common.hpp" +#include "mmdeploy/rotated_detector.h" + +namespace mmdeploy { + +using RotatedDetection = mmdeploy_rotated_detection_t; + +class RotatedDetector : public NonMovable { + public: + RotatedDetector(const Model& model, const Device& device) { + auto ec = mmdeploy_rotated_detector_create(model, device.name(), device.index(), &detector_); + if (ec != MMDEPLOY_SUCCESS) { + throw_exception(static_cast(ec)); + } + } + + ~RotatedDetector() { + if (detector_) { + mmdeploy_rotated_detector_destroy(detector_); + detector_ = {}; + } + } + + using Result = Result_; + + std::vector Apply(Span images) { + if (images.empty()) { + return {}; + } + + RotatedDetection* results{}; + int* result_count{}; + auto ec = + mmdeploy_rotated_detector_apply(detector_, reinterpret(images.data()), + static_cast(images.size()), &results, &result_count); + if (ec != MMDEPLOY_SUCCESS) { + throw_exception(static_cast(ec)); + } + + std::shared_ptr data(results, [result_count](auto p) { + mmdeploy_rotated_detector_release_result(p, result_count); + }); + + std::vector rets; + rets.reserve(images.size()); + + size_t offset = 0; + for (size_t i = 0; i < images.size(); ++i) { + offset += rets.emplace_back(offset, result_count[i], data).size(); + } + + return rets; + } + + Result Apply(const Mat& image) { return Apply(Span{image})[0]; } + + private: + mmdeploy_rotated_detector_t detector_{}; +}; + +} // namespace mmdeploy + +#endif // MMDEPLOY_CSRC_MMDEPLOY_APIS_CXX_MMDEPLOY_ROTATED_DETECTOR_HPP_ diff --git a/csrc/mmdeploy/apis/cxx/mmdeploy/segmentor.hpp b/csrc/mmdeploy/apis/cxx/mmdeploy/segmentor.hpp new file mode 100644 index 000000000..5b58280f7 --- /dev/null +++ 
b/csrc/mmdeploy/apis/cxx/mmdeploy/segmentor.hpp @@ -0,0 +1,64 @@ +// Copyright (c) OpenMMLab. All rights reserved. + +#ifndef MMDEPLOY_CSRC_MMDEPLOY_APIS_CXX_SEGMENTOR_HPP_ +#define MMDEPLOY_CSRC_MMDEPLOY_APIS_CXX_SEGMENTOR_HPP_ + +#include "mmdeploy/common.hpp" +#include "mmdeploy/segmentor.h" + +namespace mmdeploy { + +using Segmentation = mmdeploy_segmentation_t; + +class Segmentor : public NonMovable { + public: + Segmentor(const Model& model, const Device& device) { + auto ec = mmdeploy_segmentor_create(model, device.name(), device.index(), &segmentor_); + if (ec != MMDEPLOY_SUCCESS) { + throw_exception(static_cast(ec)); + } + } + + ~Segmentor() { + if (segmentor_) { + mmdeploy_segmentor_destroy(segmentor_); + segmentor_ = {}; + } + } + + using Result = Result_; + + std::vector Apply(Span images) { + if (images.empty()) { + return {}; + } + + Segmentation* results{}; + auto ec = mmdeploy_segmentor_apply(segmentor_, reinterpret(images.data()), + static_cast(images.size()), &results); + if (ec != MMDEPLOY_SUCCESS) { + throw_exception(static_cast(ec)); + } + + std::vector rets; + rets.reserve(images.size()); + + std::shared_ptr data( + results, [count = images.size()](auto p) { mmdeploy_segmentor_release_result(p, count); }); + + for (size_t i = 0; i < images.size(); ++i) { + rets.emplace_back(i, 1, data); + } + + return rets; + } + + Result Apply(const Mat& image) { return Apply(Span{image})[0]; } + + private: + mmdeploy_segmentor_t segmentor_{}; +}; + +} // namespace mmdeploy + +#endif // MMDEPLOY_CSRC_MMDEPLOY_APIS_CXX_SEGMENTOR_HPP_ diff --git a/csrc/mmdeploy/apis/cxx/mmdeploy/text_detector.hpp b/csrc/mmdeploy/apis/cxx/mmdeploy/text_detector.hpp new file mode 100644 index 000000000..a275b3371 --- /dev/null +++ b/csrc/mmdeploy/apis/cxx/mmdeploy/text_detector.hpp @@ -0,0 +1,68 @@ +// Copyright (c) OpenMMLab. All rights reserved. 
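+//
+// C++ wrapper over the C text detector API. Apply() returns a variable number
+// of detections per image: result_count[i] is the number of boxes found in
+// image i, exposed through Result_ as a contiguous span of TextDetection.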
+ +#ifndef MMDEPLOY_CSRC_MMDEPLOY_APIS_CXX_TEXT_DETECTOR_HPP_ +#define MMDEPLOY_CSRC_MMDEPLOY_APIS_CXX_TEXT_DETECTOR_HPP_ + +#include "mmdeploy/common.hpp" +#include "mmdeploy/text_detector.h" + +namespace mmdeploy { + +using TextDetection = mmdeploy_text_detection_t; + +class TextDetector : public NonMovable { + public: + TextDetector(const Model& model, const Device& device) { + auto ec = mmdeploy_text_detector_create(model, device.name(), device.index(), &detector_); + if (ec != MMDEPLOY_SUCCESS) { + throw_exception(static_cast(ec)); + } + } + + ~TextDetector() { + if (detector_) { + mmdeploy_text_detector_destroy(detector_); + detector_ = {}; + } + } + + using Result = Result_; + + std::vector Apply(Span images) { + if (images.empty()) { + return {}; + } + + TextDetection* results{}; + int* result_count{}; + auto ec = + mmdeploy_text_detector_apply(detector_, reinterpret(images.data()), + static_cast(images.size()), &results, &result_count); + if (ec != MMDEPLOY_SUCCESS) { + throw_exception(static_cast(ec)); + } + + std::shared_ptr data(results, [result_count, count = images.size()](auto p) { + mmdeploy_text_detector_release_result(p, result_count, count); + }); + + std::vector rets; + rets.reserve(images.size()); + + size_t offset = 0; + for (size_t i = 0; i < images.size(); ++i) { + offset += rets.emplace_back(offset, result_count[i], data).size(); + } + + return rets; + } + + Result Apply(const Mat& image) { return Apply(Span{image})[0]; } + + private: + mmdeploy_text_detector_t detector_{}; +}; + +} // namespace mmdeploy + +#endif // MMDEPLOY_CSRC_MMDEPLOY_APIS_CXX_TEXT_DETECTOR_HPP_ diff --git a/csrc/mmdeploy/apis/cxx/mmdeploy/text_recognizer.hpp b/csrc/mmdeploy/apis/cxx/mmdeploy/text_recognizer.hpp new file mode 100644 index 000000000..98b2fae98 --- /dev/null +++ b/csrc/mmdeploy/apis/cxx/mmdeploy/text_recognizer.hpp @@ -0,0 +1,79 @@ +// Copyright (c) OpenMMLab. All rights reserved. + +#ifndef MMDEPLOY_CSRC_MMDEPLOY_APIS_CXX_TEXT_RECOGNIZER_HPP_ +#define MMDEPLOY_CSRC_MMDEPLOY_APIS_CXX_TEXT_RECOGNIZER_HPP_ + +#include "mmdeploy/common.hpp" +#include "mmdeploy/text_detector.hpp" +#include "mmdeploy/text_recognizer.h" + +namespace mmdeploy { + +using TextRecognition = mmdeploy_text_recognition_t; + +class TextRecognizer : public NonMovable { + public: + TextRecognizer(const Model& model, const Device& device) { + auto ec = mmdeploy_text_recognizer_create(model, device.name(), device.index(), &recognizer_); + if (ec != MMDEPLOY_SUCCESS) { + throw_exception(static_cast(ec)); + } + } + + ~TextRecognizer() { + if (recognizer_) { + mmdeploy_text_recognizer_destroy(recognizer_); + recognizer_ = {}; + } + } + + using Result = Result_; + + std::vector Apply(Span images, Span bboxes, + Span bbox_count) { + if (images.empty()) { + return {}; + } + + const TextDetection* p_bboxes{}; + const int* p_bbox_count{}; + + if (!bboxes.empty()) { + p_bboxes = bboxes.data(); + p_bbox_count = bbox_count.data(); + } + + TextRecognition* results{}; + auto ec = mmdeploy_text_recognizer_apply_bbox(recognizer_, reinterpret(images.data()), + static_cast(images.size()), p_bboxes, + p_bbox_count, &results); + if (ec != MMDEPLOY_SUCCESS) { + throw_exception(static_cast(ec)); + } + + std::shared_ptr data(results, [count = images.size()](auto p) { + mmdeploy_text_recognizer_release_result(p, count); + }); + + std::vector rets; + rets.reserve(images.size()); + + size_t offset = 0; + for (size_t i = 0; i < images.size(); ++i) { + offset += rets.emplace_back(offset, bboxes.empty() ? 
1 : bbox_count[i], data).size(); + } + + return rets; + } + + Result Apply(const Mat& image, Span<const TextDetection> bboxes = {}) { + return Apply(Span{image}, bboxes, {static_cast<int>(bboxes.size())})[0]; + } + + private: + mmdeploy_text_recognizer_t recognizer_{}; +}; + +} // namespace mmdeploy + +#endif // MMDEPLOY_CSRC_MMDEPLOY_APIS_CXX_TEXT_RECOGNIZER_HPP_
diff --git a/csrc/mmdeploy/apis/java/CMakeLists.txt b/csrc/mmdeploy/apis/java/CMakeLists.txt new file mode 100644 index 000000000..850a3134a --- /dev/null +++ b/csrc/mmdeploy/apis/java/CMakeLists.txt @@ -0,0 +1,23 @@ +project(mmdeploy_java_package) + +find_package(Java REQUIRED) +include(UseJava) + +add_subdirectory(native) + +add_jar(${PROJECT_NAME} SOURCES + mmdeploy/DataType.java + mmdeploy/Mat.java + mmdeploy/InstanceMask.java + mmdeploy/PixelFormat.java + mmdeploy/PointF.java + mmdeploy/Rect.java + mmdeploy/Classifier.java + mmdeploy/Detector.java + mmdeploy/Segmentor.java + mmdeploy/TextDetector.java + mmdeploy/TextRecognizer.java + mmdeploy/Restorer.java + mmdeploy/PoseDetector.java + OUTPUT_NAME mmdeploy + OUTPUT_DIR ${CMAKE_LIBRARY_OUTPUT_DIRECTORY})
diff --git a/csrc/mmdeploy/apis/java/README.md b/csrc/mmdeploy/apis/java/README.md new file mode 100644 index 000000000..d1fbdee17 --- /dev/null +++ b/csrc/mmdeploy/apis/java/README.md @@ -0,0 +1,48 @@ +# Build Java API + +## From Source + +### Requirements + +- OpenJDK >= 10 + +**Step 1.** Download OpenJDK, using OpenJDK 18 as an example: + +```bash +wget https://download.java.net/java/GA/jdk18/43f95e8614114aeaa8e8a5fcf20a682d/36/GPL/openjdk-18_linux-x64_bin.tar.gz +tar xvf openjdk-18_linux-x64_bin.tar.gz +``` + +**Step 2.** Set the environment variables: + +```bash +export JAVA_HOME=${PWD}/jdk-18 +export PATH=${JAVA_HOME}/bin:${PATH} +``` + +**Step 3.** Switch the default Java version: + +```bash +sudo update-alternatives --config java +sudo update-alternatives --config javac +``` + +Select the version you intend to use. + +### Installation + +To use the Java APIs, you need to build both the Java classes and the C++ SDK. + +**Step 1.** Build the Java `.class` files. + +```bash +cd csrc/mmdeploy/apis/java +javac mmdeploy/*.java +cd ../../../.. +``` + +**Step 2.** Build the MMDeploy SDK. + +Please follow this [tutorial](../../../../docs/en/01-how-to-build/linux-x86_64.md)/[教程](../../../../docs/zh_cn/01-how-to-build/linux-x86_64.md) to build the SDK. Remember to set the MMDEPLOY_BUILD_SDK_JAVA_API option to ON.
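+
+### Usage
+
+A minimal usage sketch, assuming the SDK and the `mmdeploy_java` native library have been built and are on `java.library.path`; `model_dir` below is a placeholder for a converted model directory:
+
+```java
+import mmdeploy.Classifier;
+import mmdeploy.DataType;
+import mmdeploy.Mat;
+import mmdeploy.PixelFormat;
+
+public class ClassifierDemo {
+    public static void main(String[] args) {
+        // "model_dir" and "cpu" are placeholders; point them at your model and device.
+        Classifier classifier = new Classifier("model_dir", "cpu", 0);
+        // A dummy 2x2 BGR image; in practice, fill `data` with real HWC pixel bytes.
+        byte[] data = new byte[2 * 2 * 3];
+        Mat img = new Mat(2, 2, 3, PixelFormat.BGR, DataType.INT8, data);
+        Classifier.Result[] results = classifier.apply(img);
+        for (Classifier.Result r : results) {
+            System.out.printf("label_id=%d score=%.4f%n", r.label_id, r.score);
+        }
+        classifier.release();
+    }
+}
+```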
diff --git a/csrc/mmdeploy/apis/java/mmdeploy/Classifier.java b/csrc/mmdeploy/apis/java/mmdeploy/Classifier.java new file mode 100644 index 000000000..3919881fc --- /dev/null +++ b/csrc/mmdeploy/apis/java/mmdeploy/Classifier.java @@ -0,0 +1,54 @@ +package mmdeploy; + +public class Classifier { + static { + System.loadLibrary("mmdeploy_java"); + } + + private final long handle; + + public static class Result { + public int label_id; + public float score; + public Result(int label_id, float score) { + this.label_id = label_id; + this.score = score; + } + } + + public Classifier(String modelPath, String deviceName, int deviceId) { + handle = create(modelPath, deviceName, deviceId); + } + + public Result[][] apply(Mat[] images) { + int[] counts = new int[images.length]; + Result[] results = apply(handle, images, counts); + Result[][] rets = new Result[images.length][]; + int offset = 0; + for (int i = 0; i < images.length; ++i) { + Result[] row = new Result[counts[i]]; + if (counts[i] >= 0) { + System.arraycopy(results, offset, row, 0, counts[i]); + } + offset += counts[i]; + rets[i] = row; + } + return rets; + } + + public Result[] apply(Mat image) { + int[] counts = new int[1]; + Mat[] images = new Mat[]{image}; + return apply(handle, images, counts); + } + + public void release() { + destroy(handle); + } + + private native long create(String modelPath, String deviceName, int deviceId); + + private native void destroy(long handle); + + private native Result[] apply(long handle, Mat[] images, int[] count); +} diff --git a/csrc/mmdeploy/apis/java/mmdeploy/DataType.java b/csrc/mmdeploy/apis/java/mmdeploy/DataType.java new file mode 100644 index 000000000..ea52d0134 --- /dev/null +++ b/csrc/mmdeploy/apis/java/mmdeploy/DataType.java @@ -0,0 +1,13 @@ +package mmdeploy; + +public enum DataType { + FLOAT(0), + HALF(1), + INT8(2), + INT32(3); + final int value; + + DataType(int value) { + this.value = value; + } +} diff --git a/csrc/mmdeploy/apis/java/mmdeploy/Detector.java b/csrc/mmdeploy/apis/java/mmdeploy/Detector.java new file mode 100644 index 000000000..0692d3cf4 --- /dev/null +++ b/csrc/mmdeploy/apis/java/mmdeploy/Detector.java @@ -0,0 +1,58 @@ +package mmdeploy; + +public class Detector { + static { + System.loadLibrary("mmdeploy_java"); + } + + private final long handle; + + public static class Result { + public int label_id; + public float score; + public Rect bbox; + public InstanceMask mask; + public Result(int label_id, float score, Rect bbox, InstanceMask mask) { + this.label_id = label_id; + this.score = score; + this.bbox = bbox; + this.mask = mask; + } + } + + public Detector(String modelPath, String deviceName, int deviceId) { + handle = create(modelPath, deviceName, deviceId); + } + + public Result[][] apply(Mat[] images) { + int[] counts = new int[images.length]; + Result[] results = apply(handle, images, counts); + Result[][] rets = new Result[images.length][]; + int offset = 0; + for (int i = 0; i < images.length; ++i) { + Result[] row = new Result[counts[i]]; + if (counts[i] >= 0) { + System.arraycopy(results, offset, row, 0, counts[i]); + } + offset += counts[i]; + rets[i] = row; + } + return rets; + } + + public Result[] apply(Mat image) { + int[] counts = new int[1]; + Mat[] images = new Mat[]{image}; + return apply(handle, images, counts); + } + + public void release() { + destroy(handle); + } + + private native long create(String modelPath, String deviceName, int deviceId); + + private native void destroy(long handle); + + private native Result[] apply(long handle, Mat[] 
images, int[] count); +} diff --git a/csrc/mmdeploy/apis/java/mmdeploy/InstanceMask.java b/csrc/mmdeploy/apis/java/mmdeploy/InstanceMask.java new file mode 100644 index 000000000..9958da6e8 --- /dev/null +++ b/csrc/mmdeploy/apis/java/mmdeploy/InstanceMask.java @@ -0,0 +1,12 @@ +package mmdeploy; + +public class InstanceMask { + public int[] shape; + public char[] data; + + + public InstanceMask(int height, int width, char[] data) { + shape = new int[]{height, width}; + this.data = data; + } +} diff --git a/csrc/mmdeploy/apis/java/mmdeploy/Mat.java b/csrc/mmdeploy/apis/java/mmdeploy/Mat.java new file mode 100644 index 000000000..1bfe6c5d3 --- /dev/null +++ b/csrc/mmdeploy/apis/java/mmdeploy/Mat.java @@ -0,0 +1,17 @@ +package mmdeploy; + +public class Mat { + public int[] shape; + public int format; + public int type; + public byte[] data; + + + public Mat(int height, int width, int channel, + PixelFormat format, DataType type, byte[] data) { + shape = new int[]{height, width, channel}; + this.format = format.value; + this.type = type.value; + this.data = data; + } +} diff --git a/csrc/mmdeploy/apis/java/mmdeploy/PixelFormat.java b/csrc/mmdeploy/apis/java/mmdeploy/PixelFormat.java new file mode 100644 index 000000000..bb1167ada --- /dev/null +++ b/csrc/mmdeploy/apis/java/mmdeploy/PixelFormat.java @@ -0,0 +1,15 @@ +package mmdeploy; + +public enum PixelFormat { + BGR(0), + RGB(1), + GRAYSCALE(2), + NV12(3), + NV21(4), + BGRA(5); + final int value; + + PixelFormat(int value) { + this.value = value; + } +} diff --git a/csrc/mmdeploy/apis/java/mmdeploy/PointF.java b/csrc/mmdeploy/apis/java/mmdeploy/PointF.java new file mode 100644 index 000000000..564e5e9b9 --- /dev/null +++ b/csrc/mmdeploy/apis/java/mmdeploy/PointF.java @@ -0,0 +1,12 @@ +package mmdeploy; + +public class PointF { + public float x; + public float y; + + + public PointF(float x, float y) { + this.x = x; + this.y = y; + } +} diff --git a/csrc/mmdeploy/apis/java/mmdeploy/PoseDetector.java b/csrc/mmdeploy/apis/java/mmdeploy/PoseDetector.java new file mode 100644 index 000000000..c8f8f3e09 --- /dev/null +++ b/csrc/mmdeploy/apis/java/mmdeploy/PoseDetector.java @@ -0,0 +1,50 @@ +package mmdeploy; + +public class PoseDetector { + static { + System.loadLibrary("mmdeploy_java"); + } + + private final long handle; + + public static class Result { + public PointF[] point; + public float[] score; + public Result(PointF[] point, float [] score) { + this.point = point; + this.score = score; + } + } + + public PoseDetector(String modelPath, String deviceName, int deviceId) { + handle = create(modelPath, deviceName, deviceId); + } + + public Result[][] apply(Mat[] images) { + Result[] results = apply(handle, images); + Result[][] rets = new Result[images.length][]; + int offset = 0; + for (int i = 0; i < images.length; ++i) { + Result[] row = new Result[1]; + System.arraycopy(results, offset, row, 0, 1); + offset += 1; + rets[i] = row; + } + return rets; + } + + public Result[] apply(Mat image) { + Mat[] images = new Mat[]{image}; + return apply(handle, images); + } + + public void release() { + destroy(handle); + } + + private native long create(String modelPath, String deviceName, int deviceId); + + private native void destroy(long handle); + + private native Result[] apply(long handle, Mat[] images); +} diff --git a/csrc/mmdeploy/apis/java/mmdeploy/Rect.java b/csrc/mmdeploy/apis/java/mmdeploy/Rect.java new file mode 100644 index 000000000..28ce70760 --- /dev/null +++ b/csrc/mmdeploy/apis/java/mmdeploy/Rect.java @@ -0,0 +1,16 @@ +package 
mmdeploy; + +public class Rect { + public float left; + public float top; + public float right; + public float bottom; + + + public Rect(float left, float top, float right, float bottom) { + this.left = left; + this.top = top; + this.right = right; + this.bottom = bottom; + } +} diff --git a/csrc/mmdeploy/apis/java/mmdeploy/Restorer.java b/csrc/mmdeploy/apis/java/mmdeploy/Restorer.java new file mode 100644 index 000000000..0485a5642 --- /dev/null +++ b/csrc/mmdeploy/apis/java/mmdeploy/Restorer.java @@ -0,0 +1,48 @@ +package mmdeploy; + +public class Restorer { + static { + System.loadLibrary("mmdeploy_java"); + } + + private final long handle; + + public static class Result { + public Mat res; + public Result(Mat res) { + this.res = res; + } + } + + public Restorer(String modelPath, String deviceName, int deviceId) { + handle = create(modelPath, deviceName, deviceId); + } + + public Result[][] apply(Mat[] images) { + Result[] results = apply(handle, images); + Result[][] rets = new Result[images.length][]; + int offset = 0; + for (int i = 0; i < images.length; ++i) { + Result[] row = new Result[1]; + System.arraycopy(results, offset, row, 0, 1); + offset += 1; + rets[i] = row; + } + return rets; + } + + public Result[] apply(Mat image) { + Mat[] images = new Mat[]{image}; + return apply(handle, images); + } + + public void release() { + destroy(handle); + } + + private native long create(String modelPath, String deviceName, int deviceId); + + private native void destroy(long handle); + + private native Result[] apply(long handle, Mat[] images); +} diff --git a/csrc/mmdeploy/apis/java/mmdeploy/Segmentor.java b/csrc/mmdeploy/apis/java/mmdeploy/Segmentor.java new file mode 100644 index 000000000..e5f593efd --- /dev/null +++ b/csrc/mmdeploy/apis/java/mmdeploy/Segmentor.java @@ -0,0 +1,54 @@ +package mmdeploy; + +public class Segmentor { + static { + System.loadLibrary("mmdeploy_java"); + } + + private final long handle; + + public static class Result { + public int height; + public int width; + public int classes; + public int[] mask; + public Result(int height, int width, int classes, int [] mask) { + this.height = height; + this.width = width; + this.classes = classes; + this.mask = mask; + } + } + + public Segmentor(String modelPath, String deviceName, int deviceId) { + handle = create(modelPath, deviceName, deviceId); + } + + public Result[][] apply(Mat[] images) { + Result[] results = apply(handle, images); + Result[][] rets = new Result[images.length][]; + int offset = 0; + for (int i = 0; i < images.length; ++i) { + Result[] row = new Result[1]; + System.arraycopy(results, offset, row, 0, 1); + offset += 1; + rets[i] = row; + } + return rets; + } + + public Result[] apply(Mat image) { + Mat[] images = new Mat[]{image}; + return apply(handle, images); + } + + public void release() { + destroy(handle); + } + + private native long create(String modelPath, String deviceName, int deviceId); + + private native void destroy(long handle); + + private native Result[] apply(long handle, Mat[] images); +} diff --git a/csrc/mmdeploy/apis/java/mmdeploy/TextDetector.java b/csrc/mmdeploy/apis/java/mmdeploy/TextDetector.java new file mode 100644 index 000000000..3a1862a3f --- /dev/null +++ b/csrc/mmdeploy/apis/java/mmdeploy/TextDetector.java @@ -0,0 +1,54 @@ +package mmdeploy; + +public class TextDetector { + static { + System.loadLibrary("mmdeploy_java"); + } + + private final long handle; + + public static class Result { + public PointF[] bbox; + public float score; + public Result(PointF[] bbox, 
float score) { + this.bbox = bbox; + this.score = score; + } + } + + public TextDetector(String modelPath, String deviceName, int deviceId) { + handle = create(modelPath, deviceName, deviceId); + } + + public Result[][] apply(Mat[] images) { + int[] counts = new int[images.length]; + Result[] results = apply(handle, images, counts); + Result[][] rets = new Result[images.length][]; + int offset = 0; + for (int i = 0; i < images.length; ++i) { + Result[] row = new Result[counts[i]]; + if (counts[i] >= 0) { + System.arraycopy(results, offset, row, 0, counts[i]); + } + offset += counts[i]; + rets[i] = row; + } + return rets; + } + + public Result[] apply(Mat image) { + int[] counts = new int[1]; + Mat[] images = new Mat[]{image}; + return apply(handle, images, counts); + } + + public void release() { + destroy(handle); + } + + private native long create(String modelPath, String deviceName, int deviceId); + + private native void destroy(long handle); + + private native Result[] apply(long handle, Mat[] images, int[] count); +} diff --git a/csrc/mmdeploy/apis/java/mmdeploy/TextRecognizer.java b/csrc/mmdeploy/apis/java/mmdeploy/TextRecognizer.java new file mode 100644 index 000000000..dd5f662ff --- /dev/null +++ b/csrc/mmdeploy/apis/java/mmdeploy/TextRecognizer.java @@ -0,0 +1,57 @@ +package mmdeploy; + +public class TextRecognizer { + static { + System.loadLibrary("mmdeploy_java"); + } + + private final long handle; + + public static class Result { + public byte [] text; + public float [] score; + public Result(byte [] text, float [] score) { + this.text = text; + this.score = score; + } + } + + public TextRecognizer(String modelPath, String deviceName, int deviceId) { + handle = create(modelPath, deviceName, deviceId); + } + + public Result[][] apply(Mat[] images) { + Result[] results = apply(handle, images); + Result[][] rets = new Result[images.length][]; + int offset = 0; + for (int i = 0; i < images.length; ++i) { + Result[] row = new Result[1]; + System.arraycopy(results, offset, row, 0, 1); + offset += 1; + rets[i] = row; + } + return rets; + } + + public Result[] apply(Mat image) { + Mat[] images = new Mat[]{image}; + return apply(handle, images); + } + + public Result[] applyBbox(Mat image, TextDetector.Result[] bbox, int[] bbox_count) { + Mat[] images = new Mat[]{image}; + return applyBbox(handle, images, bbox, bbox_count); + } + + public void release() { + destroy(handle); + } + + private native long create(String modelPath, String deviceName, int deviceId); + + private native void destroy(long handle); + + private native Result[] apply(long handle, Mat[] images); + + private native Result[] applyBbox(long handle, Mat[] images, TextDetector.Result[] bbox, int[] bbox_count); +} diff --git a/csrc/mmdeploy/apis/java/native/CMakeLists.txt b/csrc/mmdeploy/apis/java/native/CMakeLists.txt new file mode 100644 index 000000000..aad606d74 --- /dev/null +++ b/csrc/mmdeploy/apis/java/native/CMakeLists.txt @@ -0,0 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
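+#
+# JNI bindings backing the Java API. On non-Android builds, the JDK's JNI
+# headers and libraries are located via find_package(JNI); on Android, JNI is
+# part of the NDK, so JNI_LIBRARIES is left empty.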
+project(mmdeploy_java) + +if (NOT ANDROID) + find_package(JNI REQUIRED) +else () + set(JNI_LIBRARIES) +endif() + +mmdeploy_add_library(${PROJECT_NAME} SHARED EXCLUDE + mmdeploy_Classifier.cpp + mmdeploy_Detector.cpp + mmdeploy_Segmentor.cpp + mmdeploy_Restorer.cpp + mmdeploy_PoseDetector.cpp + mmdeploy_TextDetector.cpp + mmdeploy_TextRecognizer.cpp) + +target_include_directories(${PROJECT_NAME} PRIVATE + ${JNI_INCLUDE_DIRS}) + +mmdeploy_load_static(${PROJECT_NAME} MMDeployStaticModules) +mmdeploy_load_dynamic(${PROJECT_NAME} MMDeployDynamicModules) + +target_link_libraries(${PROJECT_NAME} PRIVATE + ${JNI_LIBRARIES} MMDeployLibs) +install(TARGETS ${PROJECT_NAME} + DESTINATION lib) diff --git a/csrc/mmdeploy/apis/java/native/common.h b/csrc/mmdeploy/apis/java/native/common.h new file mode 100644 index 000000000..ba2601e5f --- /dev/null +++ b/csrc/mmdeploy/apis/java/native/common.h @@ -0,0 +1,54 @@ + +#ifndef MMDEPLOY_CSRC_APIS_JAVA_NATIVE_COMMON_H_ +#define MMDEPLOY_CSRC_APIS_JAVA_NATIVE_COMMON_H_ + +#include + +#include + +#include "mmdeploy/apis/c/mmdeploy/common.h" +#include "mmdeploy/core/logger.h" +#include "mmdeploy/core/utils/formatter.h" + +template +static auto With(JNIEnv *env, jobjectArray imgs, F f) noexcept { + auto mat_clazz = env->FindClass("mmdeploy/Mat"); + auto shape_field = env->GetFieldID(mat_clazz, "shape", "[I"); + auto format_field = env->GetFieldID(mat_clazz, "format", "I"); + auto type_field = env->GetFieldID(mat_clazz, "type", "I"); + auto data_field = env->GetFieldID(mat_clazz, "data", "[B"); + auto num = env->GetArrayLength(imgs); + std::vector mats; + std::vector datum; + + mats.reserve(num); + datum.reserve(num); + + for (int i = 0; i < num; ++i) { + auto obj = env->GetObjectArrayElement(imgs, i); + auto shape_obj = env->GetObjectField(obj, shape_field); + auto shape = env->GetIntArrayElements((jintArray)shape_obj, nullptr); + auto format = env->GetIntField(obj, format_field); + auto type = env->GetIntField(obj, type_field); + auto &mat = mats.emplace_back(); + mat.height = shape[0]; + mat.width = shape[1]; + mat.channel = shape[2]; + env->ReleaseIntArrayElements((jintArray)shape_obj, shape, JNI_ABORT); + mat.format = (mmdeploy_pixel_format_t)format; + mat.type = (mmdeploy_data_type_t)type; + auto data_obj = env->GetObjectField(obj, data_field); + mat.data = (uint8_t *)env->GetByteArrayElements((jbyteArray)data_obj, nullptr); + datum.push_back((jbyteArray)data_obj); + } + + auto ret = f(mats.data(), mats.size()); // ! 
f must not throw + + for (int i = 0; i < num; ++i) { + env->ReleaseByteArrayElements(datum[i], (jbyte *)mats[i].data, JNI_ABORT); + } + + return ret; +} + +#endif // MMDEPLOY_CSRC_APIS_JAVA_NATIVE_COMMON_H_ diff --git a/csrc/mmdeploy/apis/java/native/mmdeploy_Classifier.cpp b/csrc/mmdeploy/apis/java/native/mmdeploy_Classifier.cpp new file mode 100644 index 000000000..55e1acdf3 --- /dev/null +++ b/csrc/mmdeploy/apis/java/native/mmdeploy_Classifier.cpp @@ -0,0 +1,58 @@ +#include "mmdeploy_Classifier.h" + +#include + +#include "mmdeploy/apis/c/mmdeploy/classifier.h" +#include "mmdeploy/apis/java/native/common.h" +#include "mmdeploy/core/logger.h" + +jlong Java_mmdeploy_Classifier_create(JNIEnv *env, jobject, jstring modelPath, jstring deviceName, + jint device_id) { + auto model_path = env->GetStringUTFChars(modelPath, nullptr); + auto device_name = env->GetStringUTFChars(deviceName, nullptr); + mmdeploy_classifier_t classifier{}; + auto ec = + mmdeploy_classifier_create_by_path(model_path, device_name, (int)device_id, &classifier); + env->ReleaseStringUTFChars(modelPath, model_path); + env->ReleaseStringUTFChars(deviceName, device_name); + if (ec) { + MMDEPLOY_ERROR("failed to create classifier, code = {}", ec); + } + return (jlong)classifier; +} + +void Java_mmdeploy_Classifier_destroy(JNIEnv *, jobject, jlong handle) { + MMDEPLOY_DEBUG("Java_mmdeploy_Classifier_destroy"); + mmdeploy_classifier_destroy((mmdeploy_classifier_t)handle); +} + +jobjectArray Java_mmdeploy_Classifier_apply(JNIEnv *env, jobject thiz, jlong handle, + jobjectArray images, jintArray counts) { + return With(env, images, [&](const mmdeploy_mat_t imgs[], int size) { + mmdeploy_classification_t *results{}; + int *result_count{}; + auto ec = mmdeploy_classifier_apply((mmdeploy_classifier_t)handle, imgs, size, &results, + &result_count); + if (ec) { + MMDEPLOY_ERROR("failed to apply classifier, code = {}", ec); + } + + auto result_cls = env->FindClass("mmdeploy/Classifier$Result"); + auto result_ctor = env->GetMethodID(result_cls, "", "(IF)V"); + auto total = std::accumulate(result_count, result_count + size, 0); + auto array = env->NewObjectArray(total, result_cls, nullptr); + + for (int i = 0; i < total; ++i) { + auto res = env->NewObject(result_cls, result_ctor, (jint)results[i].label_id, + (jfloat)results[i].score); + env->SetObjectArrayElement(array, i, res); + } + auto counts_array = env->GetIntArrayElements(counts, nullptr); + for (int i = 0; i < size; ++i) { + counts_array[i] = result_count[i]; + } + env->ReleaseIntArrayElements(counts, counts_array, 0); + mmdeploy_classifier_release_result(results, result_count, size); + return array; + }); +} diff --git a/csrc/mmdeploy/apis/java/native/mmdeploy_Classifier.h b/csrc/mmdeploy/apis/java/native/mmdeploy_Classifier.h new file mode 100644 index 000000000..16a06b5fb --- /dev/null +++ b/csrc/mmdeploy/apis/java/native/mmdeploy_Classifier.h @@ -0,0 +1,35 @@ +/* DO NOT EDIT THIS FILE - it is machine generated */ +#include +/* Header for class mmdeploy_Classifier */ + +#ifndef _Included_mmdeploy_Classifier +#define _Included_mmdeploy_Classifier +#ifdef __cplusplus +extern "C" { +#endif +/* + * Class: mmdeploy_Classifier + * Method: create + * Signature: (Ljava/lang/String;Ljava/lang/String;I)J + */ +JNIEXPORT jlong JNICALL Java_mmdeploy_Classifier_create(JNIEnv *, jobject, jstring, jstring, jint); + +/* + * Class: mmdeploy_Classifier + * Method: destroy + * Signature: (J)V + */ +JNIEXPORT void JNICALL Java_mmdeploy_Classifier_destroy(JNIEnv *, jobject, jlong); + +/* + * Class: 
mmdeploy_Classifier + * Method: apply + * Signature: (J[Lmmdeploy/Mat;[I)[Lmmdeploy/Classifier/Result; + */ +JNIEXPORT jobjectArray JNICALL Java_mmdeploy_Classifier_apply(JNIEnv *, jobject, jlong, + jobjectArray, jintArray); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/csrc/mmdeploy/apis/java/native/mmdeploy_Detector.cpp b/csrc/mmdeploy/apis/java/native/mmdeploy_Detector.cpp new file mode 100644 index 000000000..f61f0aed4 --- /dev/null +++ b/csrc/mmdeploy/apis/java/native/mmdeploy_Detector.cpp @@ -0,0 +1,82 @@ +#include "mmdeploy_Detector.h" + +#include + +#include "mmdeploy/apis/c/mmdeploy/detector.h" +#include "mmdeploy/apis/java/native/common.h" +#include "mmdeploy/core/logger.h" + +jlong Java_mmdeploy_Detector_create(JNIEnv *env, jobject, jstring modelPath, jstring deviceName, + jint device_id) { + auto model_path = env->GetStringUTFChars(modelPath, nullptr); + auto device_name = env->GetStringUTFChars(deviceName, nullptr); + mmdeploy_detector_t detector{}; + auto ec = mmdeploy_detector_create_by_path(model_path, device_name, (int)device_id, &detector); + env->ReleaseStringUTFChars(modelPath, model_path); + env->ReleaseStringUTFChars(deviceName, device_name); + if (ec) { + MMDEPLOY_ERROR("failed to create detector, code = {}", ec); + } + return (jlong)detector; +} + +void Java_mmdeploy_Detector_destroy(JNIEnv *, jobject, jlong handle) { + MMDEPLOY_DEBUG("Java_mmdeploy_Detector_destroy"); // maybe use info? + mmdeploy_detector_destroy((mmdeploy_detector_t)handle); +} + +jobjectArray Java_mmdeploy_Detector_apply(JNIEnv *env, jobject thiz, jlong handle, + jobjectArray images, jintArray counts) { + return With(env, images, [&](const mmdeploy_mat_t imgs[], int size) { + mmdeploy_detection_t *results{}; + int *result_count{}; + auto ec = + mmdeploy_detector_apply((mmdeploy_detector_t)handle, imgs, size, &results, &result_count); + if (ec) { + MMDEPLOY_ERROR("failed to apply detector, code = {}", ec); + } + auto result_cls = env->FindClass("mmdeploy/Detector$Result"); + auto result_ctor = + env->GetMethodID(result_cls, "", "(IFLmmdeploy/Rect;Lmmdeploy/InstanceMask;)V"); + auto total = std::accumulate(result_count, result_count + size, 0); + auto array = env->NewObjectArray(total, result_cls, nullptr); + auto rect_cls = env->FindClass("mmdeploy/Rect"); + auto rect_ctor = env->GetMethodID(rect_cls, "", "(FFFF)V"); + auto instance_mask_cls = env->FindClass("mmdeploy/InstanceMask"); + auto instance_mask_ctor = env->GetMethodID(instance_mask_cls, "", "(II[C)V"); + + for (int i = 0; i < total; ++i) { + auto rect = env->NewObject(rect_cls, rect_ctor, (jfloat)results[i].bbox.left, + (jfloat)results[i].bbox.top, (jfloat)results[i].bbox.right, + (jfloat)results[i].bbox.bottom); + int width, height; + char *data; + jcharArray jmask; + if (results[i].mask == nullptr) { + width = 0; + height = 0; + data = nullptr; + jmask = env->NewCharArray(0); + } else { + width = results[i].mask->width; + height = results[i].mask->height; + data = results[i].mask->data; + jmask = env->NewCharArray(width * height); + env->SetCharArrayRegion(jmask, 0, width * height, (const jchar *)data); + } + + auto instance_mask = + env->NewObject(instance_mask_cls, instance_mask_ctor, (jint)height, (jint)width, jmask); + auto res = env->NewObject(result_cls, result_ctor, (jint)results[i].label_id, + (jfloat)results[i].score, rect, instance_mask); + env->SetObjectArrayElement(array, i, res); + } + auto counts_array = env->GetIntArrayElements(counts, nullptr); + for (int i = 0; i < size; ++i) { + counts_array[i] = 
result_count[i]; + } + env->ReleaseIntArrayElements(counts, counts_array, 0); + mmdeploy_detector_release_result(results, result_count, size); + return array; + }); +} diff --git a/csrc/mmdeploy/apis/java/native/mmdeploy_Detector.h b/csrc/mmdeploy/apis/java/native/mmdeploy_Detector.h new file mode 100644 index 000000000..41e711d15 --- /dev/null +++ b/csrc/mmdeploy/apis/java/native/mmdeploy_Detector.h @@ -0,0 +1,35 @@ +/* DO NOT EDIT THIS FILE - it is machine generated */ +#include +/* Header for class mmdeploy_Detector */ + +#ifndef _Included_mmdeploy_Detector +#define _Included_mmdeploy_Detector +#ifdef __cplusplus +extern "C" { +#endif +/* + * Class: mmdeploy_Detector + * Method: create + * Signature: (Ljava/lang/String;Ljava/lang/String;I)J + */ +JNIEXPORT jlong JNICALL Java_mmdeploy_Detector_create(JNIEnv *, jobject, jstring, jstring, jint); + +/* + * Class: mmdeploy_Detector + * Method: destroy + * Signature: (J)V + */ +JNIEXPORT void JNICALL Java_mmdeploy_Detector_destroy(JNIEnv *, jobject, jlong); + +/* + * Class: mmdeploy_Detector + * Method: apply + * Signature: (J[Lmmdeploy/Mat;[I)[Lmmdeploy/Detector/Result; + */ +JNIEXPORT jobjectArray JNICALL Java_mmdeploy_Detector_apply(JNIEnv *, jobject, jlong, jobjectArray, + jintArray); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/csrc/mmdeploy/apis/java/native/mmdeploy_PoseDetector.cpp b/csrc/mmdeploy/apis/java/native/mmdeploy_PoseDetector.cpp new file mode 100644 index 000000000..fcbbb863d --- /dev/null +++ b/csrc/mmdeploy/apis/java/native/mmdeploy_PoseDetector.cpp @@ -0,0 +1,58 @@ +#include "mmdeploy_PoseDetector.h" + +#include + +#include "mmdeploy/apis/c/mmdeploy/pose_detector.h" +#include "mmdeploy/apis/java/native/common.h" +#include "mmdeploy/core/logger.h" + +jlong Java_mmdeploy_PoseDetector_create(JNIEnv *env, jobject, jstring modelPath, jstring deviceName, + jint device_id) { + auto model_path = env->GetStringUTFChars(modelPath, nullptr); + auto device_name = env->GetStringUTFChars(deviceName, nullptr); + mmdeploy_pose_detector_t pose_estimator{}; + auto ec = mmdeploy_pose_detector_create_by_path(model_path, device_name, (int)device_id, + &pose_estimator); + env->ReleaseStringUTFChars(modelPath, model_path); + env->ReleaseStringUTFChars(deviceName, device_name); + if (ec) { + MMDEPLOY_ERROR("failed to create pose estimator, code = {}", ec); + } + return (jlong)pose_estimator; +} + +void Java_mmdeploy_PoseDetector_destroy(JNIEnv *, jobject, jlong handle) { + MMDEPLOY_DEBUG("Java_mmdeploy_PoseDetector_destroy"); + mmdeploy_pose_detector_destroy((mmdeploy_pose_detector_t)handle); +} + +jobjectArray Java_mmdeploy_PoseDetector_apply(JNIEnv *env, jobject thiz, jlong handle, + jobjectArray images) { + return With(env, images, [&](const mmdeploy_mat_t imgs[], int size) { + mmdeploy_pose_detection_t *results{}; + auto ec = mmdeploy_pose_detector_apply((mmdeploy_pose_detector_t)handle, imgs, size, &results); + if (ec) { + MMDEPLOY_ERROR("failed to apply pose estimator, code = {}", ec); + } + auto result_cls = env->FindClass("mmdeploy/PoseDetector$Result"); + auto result_ctor = env->GetMethodID(result_cls, "", "([Lmmdeploy/PointF;[F)V"); + auto array = env->NewObjectArray(size, result_cls, nullptr); + auto pointf_cls = env->FindClass("mmdeploy/PointF"); + auto pointf_ctor = env->GetMethodID(pointf_cls, "", "(FF)V"); + + for (int i = 0; i < size; ++i) { + auto keypoint_array = env->NewObjectArray(results[i].length, pointf_cls, nullptr); + for (int j = 0; j < results[i].length; ++j) { + auto keypointj = env->NewObject(pointf_cls, 
pointf_ctor, (jfloat)results[i].point[j].x, + (jfloat)results[i].point[j].y); + env->SetObjectArrayElement(keypoint_array, j, keypointj); + } + auto score_array = env->NewFloatArray(results[i].length); + env->SetFloatArrayRegion(score_array, 0, results[i].length, (jfloat *)results[i].score); + auto res = env->NewObject(result_cls, result_ctor, keypoint_array, score_array); + env->SetObjectArrayElement(array, i, res); + } + mmdeploy_pose_detector_release_result(results, size); + return array; + }); +} diff --git a/csrc/mmdeploy/apis/java/native/mmdeploy_PoseDetector.h b/csrc/mmdeploy/apis/java/native/mmdeploy_PoseDetector.h new file mode 100644 index 000000000..a50b7fd82 --- /dev/null +++ b/csrc/mmdeploy/apis/java/native/mmdeploy_PoseDetector.h @@ -0,0 +1,36 @@ +/* DO NOT EDIT THIS FILE - it is machine generated */ +#include +/* Header for class mmdeploy_PoseDetector */ + +#ifndef _Included_mmdeploy_PoseDetector +#define _Included_mmdeploy_PoseDetector +#ifdef __cplusplus +extern "C" { +#endif +/* + * Class: mmdeploy_PoseDetector + * Method: create + * Signature: (Ljava/lang/String;Ljava/lang/String;I)J + */ +JNIEXPORT jlong JNICALL Java_mmdeploy_PoseDetector_create(JNIEnv *, jobject, jstring, jstring, + jint); + +/* + * Class: mmdeploy_PoseDetector + * Method: destroy + * Signature: (J)V + */ +JNIEXPORT void JNICALL Java_mmdeploy_PoseDetector_destroy(JNIEnv *, jobject, jlong); + +/* + * Class: mmdeploy_PoseDetector + * Method: apply + * Signature: (J[Lmmdeploy/Mat;)[Lmmdeploy/PoseDetector/Result; + */ +JNIEXPORT jobjectArray JNICALL Java_mmdeploy_PoseDetector_apply(JNIEnv *, jobject, jlong, + jobjectArray); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/csrc/mmdeploy/apis/java/native/mmdeploy_Restorer.cpp b/csrc/mmdeploy/apis/java/native/mmdeploy_Restorer.cpp new file mode 100644 index 000000000..37bf954ba --- /dev/null +++ b/csrc/mmdeploy/apis/java/native/mmdeploy_Restorer.cpp @@ -0,0 +1,71 @@ +#include "mmdeploy_Restorer.h" + +#include + +#include "mmdeploy/apis/c/mmdeploy/restorer.h" +#include "mmdeploy/apis/java/native/common.h" +#include "mmdeploy/core/logger.h" + +jlong Java_mmdeploy_Restorer_create(JNIEnv *env, jobject, jstring modelPath, jstring deviceName, + jint device_id) { + auto model_path = env->GetStringUTFChars(modelPath, nullptr); + auto device_name = env->GetStringUTFChars(deviceName, nullptr); + mmdeploy_restorer_t restorer{}; + auto ec = mmdeploy_restorer_create_by_path(model_path, device_name, (int)device_id, &restorer); + env->ReleaseStringUTFChars(modelPath, model_path); + env->ReleaseStringUTFChars(deviceName, device_name); + if (ec) { + MMDEPLOY_ERROR("failed to create restorer, code = {}", ec); + } + return (jlong)restorer; +} + +void Java_mmdeploy_Restorer_destroy(JNIEnv *, jobject, jlong handle) { + MMDEPLOY_DEBUG("Java_mmdeploy_Restorer_destroy"); + mmdeploy_restorer_destroy((mmdeploy_restorer_t)handle); +} + +jobjectArray Java_mmdeploy_Restorer_apply(JNIEnv *env, jobject thiz, jlong handle, + jobjectArray images) { + return With(env, images, [&](const mmdeploy_mat_t imgs[], int size) { + mmdeploy_mat_t *results{}; + auto ec = mmdeploy_restorer_apply((mmdeploy_restorer_t)handle, imgs, size, &results); + if (ec) { + MMDEPLOY_ERROR("failed to apply restorer, code = {}", ec); + } + const char *java_enum_format[] = {"BGR", "RGB", "GRAYSCALE", "NV12", "NV21", "BGRA"}; + const char *java_enum_type[] = {"FLOAT", "HALF", "INT8", "INT32"}; + auto result_cls = env->FindClass("mmdeploy/Restorer$Result"); + auto result_ctor = env->GetMethodID(result_cls, "", 
"(Lmmdeploy/Mat;)V"); + auto array = env->NewObjectArray(size, result_cls, nullptr); + auto mat_cls = env->FindClass("mmdeploy/Mat"); + auto mat_ctor = + env->GetMethodID(mat_cls, "", "(IIILmmdeploy/PixelFormat;Lmmdeploy/DataType;[B)V"); + auto format_cls = env->FindClass("mmdeploy/PixelFormat"); + auto type_cls = env->FindClass("mmdeploy/DataType"); + + mmdeploy_mat_t *current_result = results; + for (int i = 0; i < size; ++i) { + auto test_format = current_result->format; + auto jdata = env->NewByteArray(current_result->width * current_result->height * + current_result->channel); + env->SetByteArrayRegion( + jdata, 0, current_result->width * current_result->height * current_result->channel, + (const jbyte *)(current_result->data)); + jfieldID format_id = env->GetStaticFieldID( + format_cls, java_enum_format[current_result->format], "Lmmdeploy/PixelFormat;"); + jobject format = env->GetStaticObjectField(format_cls, format_id); + jfieldID type_id = env->GetStaticFieldID(type_cls, java_enum_type[current_result->type], + "Lmmdeploy/DataType;"); + jobject type = env->GetStaticObjectField(type_cls, type_id); + auto result_mat = env->NewObject(mat_cls, mat_ctor, (jint)(current_result->height), + (jint)(current_result->width), + (jint)(current_result->channel), format, type, jdata); + auto res = env->NewObject(result_cls, result_ctor, result_mat); + env->SetObjectArrayElement(array, i, res); + current_result++; + } + mmdeploy_restorer_release_result(results, size); + return array; + }); +} diff --git a/csrc/mmdeploy/apis/java/native/mmdeploy_Restorer.h b/csrc/mmdeploy/apis/java/native/mmdeploy_Restorer.h new file mode 100644 index 000000000..78b09787f --- /dev/null +++ b/csrc/mmdeploy/apis/java/native/mmdeploy_Restorer.h @@ -0,0 +1,34 @@ +/* DO NOT EDIT THIS FILE - it is machine generated */ +#include +/* Header for class mmdeploy_Restorer */ + +#ifndef _Included_mmdeploy_Restorer +#define _Included_mmdeploy_Restorer +#ifdef __cplusplus +extern "C" { +#endif +/* + * Class: mmdeploy_Restorer + * Method: create + * Signature: (Ljava/lang/String;Ljava/lang/String;I)J + */ +JNIEXPORT jlong JNICALL Java_mmdeploy_Restorer_create(JNIEnv *, jobject, jstring, jstring, jint); + +/* + * Class: mmdeploy_Restorer + * Method: destroy + * Signature: (J)V + */ +JNIEXPORT void JNICALL Java_mmdeploy_Restorer_destroy(JNIEnv *, jobject, jlong); + +/* + * Class: mmdeploy_Restorer + * Method: apply + * Signature: (J[Lmmdeploy/Mat;)[Lmmdeploy/Restorer/Result; + */ +JNIEXPORT jobjectArray JNICALL Java_mmdeploy_Restorer_apply(JNIEnv *, jobject, jlong, jobjectArray); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/csrc/mmdeploy/apis/java/native/mmdeploy_Segmentor.cpp b/csrc/mmdeploy/apis/java/native/mmdeploy_Segmentor.cpp new file mode 100644 index 000000000..f3c466cd0 --- /dev/null +++ b/csrc/mmdeploy/apis/java/native/mmdeploy_Segmentor.cpp @@ -0,0 +1,52 @@ +#include "mmdeploy_Segmentor.h" + +#include + +#include "mmdeploy/apis/c/mmdeploy/segmentor.h" +#include "mmdeploy/apis/java/native/common.h" +#include "mmdeploy/core/logger.h" + +jlong Java_mmdeploy_Segmentor_create(JNIEnv *env, jobject, jstring modelPath, jstring deviceName, + jint device_id) { + auto model_path = env->GetStringUTFChars(modelPath, nullptr); + auto device_name = env->GetStringUTFChars(deviceName, nullptr); + mmdeploy_segmentor_t segmentor{}; + auto ec = mmdeploy_segmentor_create_by_path(model_path, device_name, (int)device_id, &segmentor); + env->ReleaseStringUTFChars(modelPath, model_path); + env->ReleaseStringUTFChars(deviceName, 
device_name); + if (ec) { + MMDEPLOY_ERROR("failed to create segmentor, code = {}", ec); + } + return (jlong)segmentor; +} + +void Java_mmdeploy_Segmentor_destroy(JNIEnv *, jobject, jlong handle) { + MMDEPLOY_DEBUG("Java_mmdeploy_Segmentor_destroy"); + mmdeploy_segmentor_destroy((mmdeploy_segmentor_t)handle); +} + +jobjectArray Java_mmdeploy_Segmentor_apply(JNIEnv *env, jobject thiz, jlong handle, + jobjectArray images) { + return With(env, images, [&](const mmdeploy_mat_t imgs[], int size) { + mmdeploy_segmentation_t *results{}; + auto ec = mmdeploy_segmentor_apply((mmdeploy_segmentor_t)handle, imgs, size, &results); + if (ec) { + MMDEPLOY_ERROR("failed to apply segmentor, code = {}", ec); + } + + auto result_cls = env->FindClass("mmdeploy/Segmentor$Result"); + auto result_ctor = env->GetMethodID(result_cls, "", "(III[I)V"); + auto array = env->NewObjectArray(size, result_cls, nullptr); + + for (int i = 0; i < size; ++i) { + int *mask = results[i].mask; + jintArray jmask = env->NewIntArray(results[i].height * results[i].width); + env->SetIntArrayRegion(jmask, 0, results[i].width * results[i].height, (const jint *)mask); + auto res = env->NewObject(result_cls, result_ctor, (jint)results[i].height, + (jint)results[i].width, (jint)results[i].classes, jmask); + env->SetObjectArrayElement(array, i, res); + } + mmdeploy_segmentor_release_result(results, size); + return array; + }); +} diff --git a/csrc/mmdeploy/apis/java/native/mmdeploy_Segmentor.h b/csrc/mmdeploy/apis/java/native/mmdeploy_Segmentor.h new file mode 100644 index 000000000..afdf157be --- /dev/null +++ b/csrc/mmdeploy/apis/java/native/mmdeploy_Segmentor.h @@ -0,0 +1,35 @@ +/* DO NOT EDIT THIS FILE - it is machine generated */ +#include +/* Header for class mmdeploy_Segmentor */ + +#ifndef _Included_mmdeploy_Segmentor +#define _Included_mmdeploy_Segmentor +#ifdef __cplusplus +extern "C" { +#endif +/* + * Class: mmdeploy_Segmentor + * Method: create + * Signature: (Ljava/lang/String;Ljava/lang/String;I)J + */ +JNIEXPORT jlong JNICALL Java_mmdeploy_Segmentor_create(JNIEnv *, jobject, jstring, jstring, jint); + +/* + * Class: mmdeploy_Segmentor + * Method: destroy + * Signature: (J)V + */ +JNIEXPORT void JNICALL Java_mmdeploy_Segmentor_destroy(JNIEnv *, jobject, jlong); + +/* + * Class: mmdeploy_Segmentor + * Method: apply + * Signature: (J[Lmmdeploy/Mat;)[Lmmdeploy/Segmentor/Result; + */ +JNIEXPORT jobjectArray JNICALL Java_mmdeploy_Segmentor_apply(JNIEnv *, jobject, jlong, + jobjectArray); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/csrc/mmdeploy/apis/java/native/mmdeploy_TextDetector.cpp b/csrc/mmdeploy/apis/java/native/mmdeploy_TextDetector.cpp new file mode 100644 index 000000000..90c08b21b --- /dev/null +++ b/csrc/mmdeploy/apis/java/native/mmdeploy_TextDetector.cpp @@ -0,0 +1,64 @@ +#include "mmdeploy_TextDetector.h" + +#include + +#include "mmdeploy/apis/c/mmdeploy/text_detector.h" +#include "mmdeploy/apis/java/native/common.h" +#include "mmdeploy/core/logger.h" + +jlong Java_mmdeploy_TextDetector_create(JNIEnv *env, jobject, jstring modelPath, jstring deviceName, + jint device_id) { + auto model_path = env->GetStringUTFChars(modelPath, nullptr); + auto device_name = env->GetStringUTFChars(deviceName, nullptr); + mmdeploy_text_detector_t text_detector{}; + auto ec = mmdeploy_text_detector_create_by_path(model_path, device_name, (int)device_id, + &text_detector); + env->ReleaseStringUTFChars(modelPath, model_path); + env->ReleaseStringUTFChars(deviceName, device_name); + if (ec) { + MMDEPLOY_ERROR("failed to create 
text_detector, code = {}", ec); + } + return (jlong)text_detector; +} + +void Java_mmdeploy_TextDetector_destroy(JNIEnv *, jobject, jlong handle) { + MMDEPLOY_DEBUG("Java_mmdeploy_TextDetector_destroy"); + mmdeploy_text_detector_destroy((mmdeploy_text_detector_t)handle); +} + +jobjectArray Java_mmdeploy_TextDetector_apply(JNIEnv *env, jobject thiz, jlong handle, + jobjectArray images, jintArray counts) { + return With(env, images, [&](const mmdeploy_mat_t imgs[], int size) { + mmdeploy_text_detection_t *results{}; + int *result_count{}; + auto ec = mmdeploy_text_detector_apply((mmdeploy_text_detector_t)handle, imgs, size, &results, + &result_count); + if (ec) { + MMDEPLOY_ERROR("failed to apply detector, code = {}", ec); + } + auto result_cls = env->FindClass("mmdeploy/TextDetector$Result"); + auto result_ctor = env->GetMethodID(result_cls, "", "([Lmmdeploy/PointF;F)V"); + auto total = std::accumulate(result_count, result_count + size, 0); + auto array = env->NewObjectArray(total, result_cls, nullptr); + auto point_cls = env->FindClass("mmdeploy/PointF"); + auto point_ctor = env->GetMethodID(point_cls, "", "(FF)V"); + + for (int i = 0; i < total; ++i) { + jobjectArray bbox = env->NewObjectArray(4, point_cls, nullptr); + for (int j = 0; j < 4; ++j) { + auto point = env->NewObject(point_cls, point_ctor, (jfloat)results[i].bbox[j].x, + (jfloat)results[i].bbox[j].y); + env->SetObjectArrayElement(bbox, j, point); + } + auto res = env->NewObject(result_cls, result_ctor, bbox, (jfloat)results[i].score); + env->SetObjectArrayElement(array, i, res); + } + auto counts_array = env->GetIntArrayElements(counts, nullptr); + for (int i = 0; i < size; ++i) { + counts_array[i] = result_count[i]; + } + env->ReleaseIntArrayElements(counts, counts_array, 0); + mmdeploy_text_detector_release_result(results, result_count, size); + return array; + }); +} diff --git a/csrc/mmdeploy/apis/java/native/mmdeploy_TextDetector.h b/csrc/mmdeploy/apis/java/native/mmdeploy_TextDetector.h new file mode 100644 index 000000000..dc5574f77 --- /dev/null +++ b/csrc/mmdeploy/apis/java/native/mmdeploy_TextDetector.h @@ -0,0 +1,36 @@ +/* DO NOT EDIT THIS FILE - it is machine generated */ +#include +/* Header for class mmdeploy_TextDetector */ + +#ifndef _Included_mmdeploy_TextDetector +#define _Included_mmdeploy_TextDetector +#ifdef __cplusplus +extern "C" { +#endif +/* + * Class: mmdeploy_TextDetector + * Method: create + * Signature: (Ljava/lang/String;Ljava/lang/String;I)J + */ +JNIEXPORT jlong JNICALL Java_mmdeploy_TextDetector_create(JNIEnv *, jobject, jstring, jstring, + jint); + +/* + * Class: mmdeploy_TextDetector + * Method: destroy + * Signature: (J)V + */ +JNIEXPORT void JNICALL Java_mmdeploy_TextDetector_destroy(JNIEnv *, jobject, jlong); + +/* + * Class: mmdeploy_TextDetector + * Method: apply + * Signature: (J[Lmmdeploy/Mat;[I)[Lmmdeploy/TextDetector/Result; + */ +JNIEXPORT jobjectArray JNICALL Java_mmdeploy_TextDetector_apply(JNIEnv *, jobject, jlong, + jobjectArray, jintArray); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/csrc/mmdeploy/apis/java/native/mmdeploy_TextRecognizer.cpp b/csrc/mmdeploy/apis/java/native/mmdeploy_TextRecognizer.cpp new file mode 100644 index 000000000..54da958cf --- /dev/null +++ b/csrc/mmdeploy/apis/java/native/mmdeploy_TextRecognizer.cpp @@ -0,0 +1,103 @@ +#include "mmdeploy_TextRecognizer.h" + +#include + +#include "mmdeploy/apis/c/mmdeploy/text_recognizer.h" +#include "mmdeploy/apis/java/native/common.h" +#include "mmdeploy/core/logger.h" + +jlong 
Java_mmdeploy_TextRecognizer_create(JNIEnv *env, jobject, jstring modelPath, + jstring deviceName, jint device_id) { + auto model_path = env->GetStringUTFChars(modelPath, nullptr); + auto device_name = env->GetStringUTFChars(deviceName, nullptr); + mmdeploy_text_recognizer_t text_recognizer{}; + auto ec = mmdeploy_text_recognizer_create_by_path(model_path, device_name, (int)device_id, + &text_recognizer); + env->ReleaseStringUTFChars(modelPath, model_path); + env->ReleaseStringUTFChars(deviceName, device_name); + if (ec) { + MMDEPLOY_ERROR("failed to create text recognizer, code = {}", ec); + } + return (jlong)text_recognizer; +} + +void Java_mmdeploy_TextRecognizer_destroy(JNIEnv *, jobject, jlong handle) { + MMDEPLOY_DEBUG("Java_mmdeploy_TextRecognizer_destroy"); // maybe use info? + mmdeploy_text_recognizer_destroy((mmdeploy_text_recognizer_t)handle); +} + +jobjectArray Java_mmdeploy_TextRecognizer_apply(JNIEnv *env, jobject thiz, jlong handle, + jobjectArray images) { + return With(env, images, [&](const mmdeploy_mat_t imgs[], int size) { + mmdeploy_text_recognition_t *results{}; + auto ec = + mmdeploy_text_recognizer_apply((mmdeploy_text_recognizer_t)handle, imgs, size, &results); + if (ec) { + MMDEPLOY_ERROR("failed to apply text recognizer, code = {}", ec); + } + auto result_cls = env->FindClass("mmdeploy/TextRecognizer$Result"); + auto result_ctor = env->GetMethodID(result_cls, "<init>", "([C[F)V"); + auto array = env->NewObjectArray(size, result_cls, nullptr); + + for (int i = 0; i < size; ++i) { + auto text = env->NewCharArray(results[i].length); + auto score = env->NewFloatArray(results[i].length); + env->SetCharArrayRegion(text, 0, results[i].length, (jchar *)results[i].text); + env->SetFloatArrayRegion(score, 0, results[i].length, (jfloat *)results[i].score); + + auto res = env->NewObject(result_cls, result_ctor, text, score); + env->SetObjectArrayElement(array, i, res); + } + mmdeploy_text_recognizer_release_result(results, size); + return array; + }); +} +jobjectArray Java_mmdeploy_TextRecognizer_applyBbox(JNIEnv *env, jobject thiz, jlong handle, + jobjectArray images, jobjectArray bboxes, + jintArray bbox_count) { + return With(env, images, [&](const mmdeploy_mat_t imgs[], int size) { + mmdeploy_text_recognition_t *recog_results{}; + auto *det_results = new mmdeploy_text_detection_t[env->GetArrayLength(bboxes)]; + int *det_result_count = new int[env->GetArrayLength(bbox_count)]; + auto bbox_cls = env->FindClass("mmdeploy/TextDetector$Result"); + auto pointf_cls = env->FindClass("mmdeploy/PointF"); + auto bbox_id = env->GetFieldID(bbox_cls, "bbox", "[Lmmdeploy/PointF;"); + auto score_id = env->GetFieldID(bbox_cls, "score", "F"); + auto x_id = env->GetFieldID(pointf_cls, "x", "F"); + auto y_id = env->GetFieldID(pointf_cls, "y", "F"); + env->GetIntArrayRegion(bbox_count, 0, env->GetArrayLength(bbox_count), det_result_count); + int total_bboxes = env->GetArrayLength(bboxes); + for (int i = 0; i < total_bboxes; ++i) { + auto bboxi = env->GetObjectArrayElement(bboxes, i); + auto point_array = (jobjectArray)env->GetObjectField(bboxi, bbox_id); + for (int j = 0; j < 4; ++j) { + auto pointj = env->GetObjectArrayElement(point_array, j); + det_results[i].bbox[j].x = (float)env->GetFloatField(pointj, x_id); + det_results[i].bbox[j].y = (float)env->GetFloatField(pointj, y_id); + det_results[i].score = (float)env->GetFloatField(bboxi, score_id); + } + } + auto ec = mmdeploy_text_recognizer_apply_bbox((mmdeploy_text_recognizer_t)handle, imgs, size, + (const mmdeploy_text_detection_t 
*)det_results, + det_result_count, &recog_results); + if (ec) { + MMDEPLOY_ERROR("failed to apply bbox for text recognizer, code = {}", ec); + } + auto result_cls = env->FindClass("mmdeploy/TextRecognizer$Result"); + auto result_ctor = env->GetMethodID(result_cls, "<init>", "([B[F)V"); + auto array = env->NewObjectArray(total_bboxes, result_cls, nullptr); + + for (int i = 0; i < total_bboxes; ++i) { + auto text = env->NewByteArray(recog_results[i].length); + auto score = env->NewFloatArray(recog_results[i].length); + env->SetByteArrayRegion(text, 0, recog_results[i].length, (jbyte *)recog_results[i].text); + env->SetFloatArrayRegion(score, 0, recog_results[i].length, (jfloat *)recog_results[i].score); + + auto res = env->NewObject(result_cls, result_ctor, text, score); + env->SetObjectArrayElement(array, i, res); + } + mmdeploy_text_recognizer_release_result(recog_results, size); + mmdeploy_text_detector_release_result(det_results, det_result_count, 1); + return array; + }); +} diff --git a/csrc/mmdeploy/apis/java/native/mmdeploy_TextRecognizer.h b/csrc/mmdeploy/apis/java/native/mmdeploy_TextRecognizer.h new file mode 100644 index 000000000..721c17f2b --- /dev/null +++ b/csrc/mmdeploy/apis/java/native/mmdeploy_TextRecognizer.h @@ -0,0 +1,45 @@ +/* DO NOT EDIT THIS FILE - it is machine generated */ +#include <jni.h> +/* Header for class mmdeploy_TextRecognizer */ + +#ifndef _Included_mmdeploy_TextRecognizer +#define _Included_mmdeploy_TextRecognizer +#ifdef __cplusplus +extern "C" { +#endif +/* + * Class: mmdeploy_TextRecognizer + * Method: create + * Signature: (Ljava/lang/String;Ljava/lang/String;I)J + */ +JNIEXPORT jlong JNICALL Java_mmdeploy_TextRecognizer_create(JNIEnv *, jobject, jstring, jstring, + jint); + +/* + * Class: mmdeploy_TextRecognizer + * Method: destroy + * Signature: (J)V + */ +JNIEXPORT void JNICALL Java_mmdeploy_TextRecognizer_destroy(JNIEnv *, jobject, jlong); + +/* + * Class: mmdeploy_TextRecognizer + * Method: apply + * Signature: (J[Lmmdeploy/Mat;)[Lmmdeploy/TextRecognizer/Result; + */ +JNIEXPORT jobjectArray JNICALL Java_mmdeploy_TextRecognizer_apply(JNIEnv *, jobject, jlong, + jobjectArray); + +/* + * Class: mmdeploy_TextRecognizer + * Method: applyBbox + * Signature: (J[Lmmdeploy/Mat;[Lmmdeploy/TextDetector/Result;[I)[Lmmdeploy/TextRecognizer/Result; + */ +JNIEXPORT jobjectArray JNICALL Java_mmdeploy_TextRecognizer_applyBbox(JNIEnv *, jobject, jlong, + jobjectArray, jobjectArray, + jintArray); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/csrc/mmdeploy/apis/python/CMakeLists.txt b/csrc/mmdeploy/apis/python/CMakeLists.txt index 5a6f38ab2..656b20fd1 100644 --- a/csrc/mmdeploy/apis/python/CMakeLists.txt +++ b/csrc/mmdeploy/apis/python/CMakeLists.txt @@ -21,6 +21,7 @@ endforeach () pybind11_add_module(${PROJECT_NAME} ${MMDEPLOY_PYTHON_SRCS}) + mmdeploy_load_static(${PROJECT_NAME} MMDeployStaticModules) mmdeploy_load_dynamic(${PROJECT_NAME} MMDeployDynamicModules) target_link_libraries(${PROJECT_NAME} PRIVATE MMDeployLibs) @@ -28,3 +29,4 @@ target_link_libraries(${PROJECT_NAME} PRIVATE MMDeployLibs) target_include_directories(${PROJECT_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/.. ${CMAKE_CURRENT_SOURCE_DIR}) +install(DIRECTORY ${CMAKE_SOURCE_DIR}/demo/python/ DESTINATION example/python) diff --git a/csrc/mmdeploy/apis/python/classifier.cpp b/csrc/mmdeploy/apis/python/classifier.cpp index 129080774..7467a5706 100644 --- a/csrc/mmdeploy/apis/python/classifier.cpp +++ b/csrc/mmdeploy/apis/python/classifier.cpp @@ -1,6 +1,6 @@ // Copyright (c) OpenMMLab. All rights reserved. 
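The classifier hunk below moves the binding onto the renamed mmdeploy_* C API and splits the Python surface into a single-image __call__ plus a list-in/list-out batch(). A minimal usage sketch of the resulting API; the module name mmdeploy_python, the model path, and the input image are assumptions for illustration, not something this patch pins down:

    import cv2
    from mmdeploy_python import Classifier  # module name is an assumption

    classifier = Classifier(model_path='mmdeploy_models/resnet18', device_name='cpu', device_id=0)
    img = cv2.imread('demo.jpg')  # HWC uint8 BGR array, matching GetMat's expectations
    # __call__ now takes one image and returns that image's (label_id, score) list
    for label_id, score in classifier(img):
        print(label_id, score)
    # batch() keeps the previous list-of-images behavior
    results = classifier.batch([img, img])
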
-#include "mmdeploy/apis/c/classifier.h" +#include "mmdeploy/classifier.h" #include "common.h" @@ -9,29 +9,30 @@ namespace mmdeploy { class PyClassifier { public: PyClassifier(const char *model_path, const char *device_name, int device_id) { - auto status = mmdeploy_classifier_create_by_path(model_path, device_name, device_id, &handle_); - if (status != MM_SUCCESS) { + auto status = + mmdeploy_classifier_create_by_path(model_path, device_name, device_id, &classifier_); + if (status != MMDEPLOY_SUCCESS) { throw std::runtime_error("failed to create classifier"); } } ~PyClassifier() { - mmdeploy_classifier_destroy(handle_); - handle_ = {}; + mmdeploy_classifier_destroy(classifier_); + classifier_ = {}; } // std::vector> std::vector>> Apply(const std::vector &imgs) { - std::vector mats; + std::vector mats; mats.reserve(imgs.size()); for (const auto &img : imgs) { auto mat = GetMat(img); mats.push_back(mat); } - mm_class_t *results{}; + mmdeploy_classification_t *results{}; int *result_count{}; - auto status = - mmdeploy_classifier_apply(handle_, mats.data(), (int)mats.size(), &results, &result_count); - if (status != MM_SUCCESS) { + auto status = mmdeploy_classifier_apply(classifier_, mats.data(), (int)mats.size(), &results, + &result_count); + if (status != MMDEPLOY_SUCCESS) { throw std::runtime_error("failed to apply classifier, code: " + std::to_string(status)); } auto output = std::vector>>{}; @@ -50,15 +51,18 @@ class PyClassifier { } private: - mm_handle_t handle_{}; + mmdeploy_classifier_t classifier_{}; }; static void register_python_classifier(py::module &m) { py::class_(m, "Classifier") .def(py::init([](const char *model_path, const char *device_name, int device_id) { - return std::make_unique(model_path, device_name, device_id); - })) - .def("__call__", &PyClassifier::Apply); + return std::make_unique(model_path, device_name, device_id); + }), + py::arg("model_path"), py::arg("device_name"), py::arg("device_id") = 0) + .def("__call__", + [](PyClassifier *self, const PyImage &img) { return self->Apply(std::vector{img})[0]; }) + .def("batch", &PyClassifier::Apply); } class PythonClassifierRegisterer { diff --git a/csrc/mmdeploy/apis/python/common.cpp b/csrc/mmdeploy/apis/python/common.cpp index 11a489e4c..ba6960ad7 100644 --- a/csrc/mmdeploy/apis/python/common.cpp +++ b/csrc/mmdeploy/apis/python/common.cpp @@ -9,25 +9,25 @@ std::map& gPythonBindings() { return v; } -mm_mat_t GetMat(const PyImage& img) { +mmdeploy_mat_t GetMat(const PyImage& img) { auto info = img.request(); if (info.ndim != 3) { fprintf(stderr, "info.ndim = %d\n", (int)info.ndim); throw std::runtime_error("continuous uint8 HWC array expected"); } auto channels = (int)info.shape[2]; - mm_mat_t mat{}; + mmdeploy_mat_t mat{}; if (channels == 1) { - mat.format = MM_GRAYSCALE; + mat.format = MMDEPLOY_PIXEL_FORMAT_GRAYSCALE; } else if (channels == 3) { - mat.format = MM_BGR; + mat.format = MMDEPLOY_PIXEL_FORMAT_BGR; } else { throw std::runtime_error("images of 1 or 3 channels are supported"); } mat.height = (int)info.shape[0]; mat.width = (int)info.shape[1]; mat.channel = channels; - mat.type = MM_INT8; + mat.type = MMDEPLOY_DATA_TYPE_UINT8; mat.data = (uint8_t*)info.ptr; return mat; } diff --git a/csrc/mmdeploy/apis/python/common.h b/csrc/mmdeploy/apis/python/common.h index cd4e85cfa..ad691ce19 100644 --- a/csrc/mmdeploy/apis/python/common.h +++ b/csrc/mmdeploy/apis/python/common.h @@ -5,7 +5,7 @@ #include -#include "mmdeploy/apis/c/common.h" +#include "mmdeploy/common.h" #include "pybind11/numpy.h" #include 
"pybind11/pybind11.h" #include "pybind11/stl.h" @@ -18,7 +18,7 @@ namespace mmdeploy { std::map &gPythonBindings(); -mm_mat_t GetMat(const PyImage &img); +mmdeploy_mat_t GetMat(const PyImage &img); class Value; diff --git a/csrc/mmdeploy/apis/python/detector.cpp b/csrc/mmdeploy/apis/python/detector.cpp index fb197d428..645ec820f 100644 --- a/csrc/mmdeploy/apis/python/detector.cpp +++ b/csrc/mmdeploy/apis/python/detector.cpp @@ -1,6 +1,6 @@ // Copyright (c) OpenMMLab. All rights reserved. -#include "mmdeploy/apis/c/detector.h" +#include "mmdeploy/detector.h" #include "common.h" @@ -9,23 +9,23 @@ namespace mmdeploy { class PyDetector { public: PyDetector(const char *model_path, const char *device_name, int device_id) { - auto status = mmdeploy_detector_create_by_path(model_path, device_name, device_id, &handle_); - if (status != MM_SUCCESS) { + auto status = mmdeploy_detector_create_by_path(model_path, device_name, device_id, &detector_); + if (status != MMDEPLOY_SUCCESS) { throw std::runtime_error("failed to create detector"); } } py::list Apply(const std::vector &imgs) { - std::vector mats; + std::vector mats; mats.reserve(imgs.size()); for (const auto &img : imgs) { auto mat = GetMat(img); mats.push_back(mat); } - mm_detect_t *detection{}; + mmdeploy_detection_t *detection{}; int *result_count{}; - auto status = - mmdeploy_detector_apply(handle_, mats.data(), (int)mats.size(), &detection, &result_count); - if (status != MM_SUCCESS) { + auto status = mmdeploy_detector_apply(detector_, mats.data(), (int)mats.size(), &detection, + &result_count); + if (status != MMDEPLOY_SUCCESS) { throw std::runtime_error("failed to apply detector, code: " + std::to_string(status)); } auto output = py::list{}; @@ -57,20 +57,25 @@ class PyDetector { return output; } ~PyDetector() { - mmdeploy_detector_destroy(handle_); - handle_ = {}; + mmdeploy_detector_destroy(detector_); + detector_ = {}; } private: - mm_handle_t handle_{}; + mmdeploy_detector_t detector_{}; }; static void register_python_detector(py::module &m) { py::class_(m, "Detector") .def(py::init([](const char *model_path, const char *device_name, int device_id) { - return std::make_unique(model_path, device_name, device_id); - })) - .def("__call__", &PyDetector::Apply); + return std::make_unique(model_path, device_name, device_id); + }), + py::arg("model_path"), py::arg("device_name"), py::arg("device_id") = 0) + .def("__call__", + [](PyDetector *self, const PyImage &img) -> py::tuple { + return self->Apply(std::vector{img})[0]; + }) + .def("batch", &PyDetector::Apply); } class PythonDetectorRegisterer { diff --git a/csrc/mmdeploy/apis/python/pose_detector.cpp b/csrc/mmdeploy/apis/python/pose_detector.cpp index 8fccb1a1f..54a671e2d 100644 --- a/csrc/mmdeploy/apis/python/pose_detector.cpp +++ b/csrc/mmdeploy/apis/python/pose_detector.cpp @@ -1,6 +1,6 @@ // Copyright (c) OpenMMLab. All rights reserved. 
-#include "mmdeploy/apis/c/pose_detector.h" +#include "mmdeploy/pose_detector.h" #include #include @@ -11,27 +11,27 @@ namespace mmdeploy { using Rect = std::array; -class PyPoseDedector { +class PyPoseDetector { public: - PyPoseDedector(const char *model_path, const char *device_name, int device_id) { + PyPoseDetector(const char *model_path, const char *device_name, int device_id) { auto status = - mmdeploy_pose_detector_create_by_path(model_path, device_name, device_id, &handle_); - if (status != MM_SUCCESS) { + mmdeploy_pose_detector_create_by_path(model_path, device_name, device_id, &detector_); + if (status != MMDEPLOY_SUCCESS) { throw std::runtime_error("failed to create pose_detector"); } } - py::list Apply(const std::vector &imgs, const std::vector> &vboxes) { - if (imgs.size() == 0 && vboxes.size() == 0) { + py::list Apply(const std::vector &imgs, const std::vector> &bboxes) { + if (imgs.size() == 0 && bboxes.size() == 0) { return py::list{}; } - if (vboxes.size() != 0 && vboxes.size() != imgs.size()) { + if (bboxes.size() != 0 && bboxes.size() != imgs.size()) { std::ostringstream os; - os << "imgs length not equal with vboxes [" << imgs.size() << " vs " << vboxes.size() << "]"; + os << "imgs length not equal with vboxes [" << imgs.size() << " vs " << bboxes.size() << "]"; throw std::invalid_argument(os.str()); } - std::vector mats; - std::vector boxes; + std::vector mats; + std::vector boxes; std::vector bbox_count; mats.reserve(imgs.size()); for (const auto &img : imgs) { @@ -39,37 +39,33 @@ class PyPoseDedector { mats.push_back(mat); } - for (auto _boxes : vboxes) { + for (auto _boxes : bboxes) { for (auto _box : _boxes) { - mm_rect_t box = {_box[0], _box[1], _box[2], _box[3]}; + mmdeploy_rect_t box = {_box[0], _box[1], _box[2], _box[3]}; boxes.push_back(box); } bbox_count.push_back(_boxes.size()); } // full image - if (vboxes.size() == 0) { + if (bboxes.size() == 0) { for (int i = 0; i < mats.size(); i++) { - mm_rect_t box = {0.f, 0.f, mats[i].width - 1.f, mats[i].height - 1.f}; + mmdeploy_rect_t box = {0.f, 0.f, mats[i].width - 1.f, mats[i].height - 1.f}; boxes.push_back(box); bbox_count.push_back(1); } } - mm_pose_detect_t *detection{}; - auto status = mmdeploy_pose_detector_apply_bbox(handle_, mats.data(), (int)mats.size(), + mmdeploy_pose_detection_t *detection{}; + auto status = mmdeploy_pose_detector_apply_bbox(detector_, mats.data(), (int)mats.size(), boxes.data(), bbox_count.data(), &detection); - if (status != MM_SUCCESS) { + if (status != MMDEPLOY_SUCCESS) { throw std::runtime_error("failed to apply pose_detector, code: " + std::to_string(status)); } auto output = py::list{}; auto result = detection; for (int i = 0; i < mats.size(); i++) { - if (bbox_count[i] == 0) { - output.append(py::none()); - continue; - } int n_point = result->length; auto pred = py::array_t({bbox_count[i], n_point, 3}); auto dst = pred.mutable_data(); @@ -89,22 +85,44 @@ class PyPoseDedector { mmdeploy_pose_detector_release_result(detection, total); return output; } - ~PyPoseDedector() { - mmdeploy_pose_detector_destroy(handle_); - handle_ = {}; + ~PyPoseDetector() { + mmdeploy_pose_detector_destroy(detector_); + detector_ = {}; } private: - mm_handle_t handle_{}; + mmdeploy_pose_detector_t detector_{}; }; static void register_python_pose_detector(py::module &m) { - py::class_(m, "PoseDetector") + py::class_(m, "PoseDetector") .def(py::init([](const char *model_path, const char *device_name, int device_id) { - return std::make_unique(model_path, device_name, device_id); - })) - 
.def("__call__", &PyPoseDedector::Apply, py::arg("imgs"), - py::arg("vboxes") = std::vector>()); + return std::make_unique(model_path, device_name, device_id); + }), + py::arg("model_path"), py::arg("device_name"), py::arg("device_id") = 0) + .def("__call__", + [](PyPoseDetector *self, const PyImage &img) -> py::array { + return self->Apply({img}, {})[0]; + }) + .def( + "__call__", + [](PyPoseDetector *self, const PyImage &img, const Rect &box) -> py::array { + std::vector> bboxes; + bboxes.push_back({box}); + return self->Apply({img}, bboxes)[0]; + }, + py::arg("img"), py::arg("box")) + .def( + "__call__", + [](PyPoseDetector *self, const PyImage &img, + const std::vector &bboxes) -> py::array { + std::vector> _bboxes; + _bboxes.push_back(bboxes); + return self->Apply({img}, _bboxes)[0]; + }, + py::arg("img"), py::arg("bboxes")) + .def("batch", &PyPoseDetector::Apply, py::arg("imgs"), + py::arg("bboxes") = std::vector>()); } class PythonPoseDetectorRegisterer { diff --git a/csrc/mmdeploy/apis/python/restorer.cpp b/csrc/mmdeploy/apis/python/restorer.cpp index 7abf8dfcc..4a345be2d 100644 --- a/csrc/mmdeploy/apis/python/restorer.cpp +++ b/csrc/mmdeploy/apis/python/restorer.cpp @@ -1,6 +1,6 @@ // Copyright (c) OpenMMLab. All rights reserved. -#include "mmdeploy/apis/c/restorer.h" +#include "mmdeploy/restorer.h" #include "common.h" @@ -9,26 +9,26 @@ namespace mmdeploy { class PyRestorer { public: PyRestorer(const char *model_path, const char *device_name, int device_id) { - auto status = mmdeploy_restorer_create_by_path(model_path, device_name, device_id, &handle_); - if (status != MM_SUCCESS) { + auto status = mmdeploy_restorer_create_by_path(model_path, device_name, device_id, &restorer_); + if (status != MMDEPLOY_SUCCESS) { throw std::runtime_error("failed to create restorer"); } } ~PyRestorer() { - mmdeploy_restorer_destroy(handle_); - handle_ = {}; + mmdeploy_restorer_destroy(restorer_); + restorer_ = {}; } std::vector> Apply(const std::vector &imgs) { - std::vector mats; + std::vector mats; mats.reserve(imgs.size()); for (const auto &img : imgs) { auto mat = GetMat(img); mats.push_back(mat); } - mm_mat_t *results{}; - auto status = mmdeploy_restorer_apply(handle_, mats.data(), (int)mats.size(), &results); - if (status != MM_SUCCESS) { + mmdeploy_mat_t *results{}; + auto status = mmdeploy_restorer_apply(restorer_, mats.data(), (int)mats.size(), &results); + if (status != MMDEPLOY_SUCCESS) { throw std::runtime_error("failed to apply restorer, code: " + std::to_string(status)); } auto output = std::vector>{}; @@ -43,15 +43,20 @@ class PyRestorer { } private: - mm_handle_t handle_{}; + mmdeploy_restorer_t restorer_{}; }; static void register_python_restorer(py::module &m) { py::class_(m, "Restorer") .def(py::init([](const char *model_path, const char *device_name, int device_id) { - return std::make_unique(model_path, device_name, device_id); - })) - .def("__call__", &PyRestorer::Apply); + return std::make_unique(model_path, device_name, device_id); + }), + py::arg("model_path"), py::arg("device_name"), py::arg("device_id") = 0) + .def("__call__", + [](PyRestorer *self, const PyImage &img) -> py::array { + return self->Apply(std::vector{img})[0]; + }) + .def("batch", &PyRestorer::Apply); } class PythonRestorerRegisterer { diff --git a/csrc/mmdeploy/apis/python/rotated_detector.cpp b/csrc/mmdeploy/apis/python/rotated_detector.cpp index 491d65121..df2c9ea7c 100644 --- a/csrc/mmdeploy/apis/python/rotated_detector.cpp +++ b/csrc/mmdeploy/apis/python/rotated_detector.cpp @@ -1,6 +1,6 @@ // 
Copyright (c) OpenMMLab. All rights reserved. -#include "mmdeploy/apis/c/rotated_detector.h" +#include "mmdeploy/rotated_detector.h" #include "common.h" @@ -10,24 +10,24 @@ class PyRotatedDetector { public: PyRotatedDetector(const char *model_path, const char *device_name, int device_id) { auto status = - mmdeploy_rotated_detector_create_by_path(model_path, device_name, device_id, &handle_); - if (status != MM_SUCCESS) { + mmdeploy_rotated_detector_create_by_path(model_path, device_name, device_id, &detector_); + if (status != MMDEPLOY_SUCCESS) { throw std::runtime_error("failed to create rotated detector"); } } py::list Apply(const std::vector<PyImage> &imgs) { - std::vector<mm_mat_t> mats; + std::vector<mmdeploy_mat_t> mats; mats.reserve(imgs.size()); for (const auto &img : imgs) { auto mat = GetMat(img); mats.push_back(mat); } - mm_rotated_detect_t *rbboxes{}; + mmdeploy_rotated_detection_t *rbboxes{}; int *res_count{}; - auto status = mmdeploy_rotated_detector_apply(handle_, mats.data(), (int)mats.size(), &rbboxes, - &res_count); - if (status != MM_SUCCESS) { + auto status = mmdeploy_rotated_detector_apply(detector_, mats.data(), (int)mats.size(), + &rbboxes, &res_count); + if (status != MMDEPLOY_SUCCESS) { throw std::runtime_error("failed to apply rotated detector, code: " + std::to_string(status)); } auto output = py::list{}; @@ -53,20 +53,25 @@ class PyRotatedDetector { return output; } ~PyRotatedDetector() { - mmdeploy_rotated_detector_destroy(handle_); - handle_ = {}; + mmdeploy_rotated_detector_destroy(detector_); + detector_ = {}; } private: - mm_handle_t handle_{}; + mmdeploy_rotated_detector_t detector_{}; }; static void register_python_rotated_detector(py::module &m) { py::class_<PyRotatedDetector>(m, "RotatedDetector") .def(py::init([](const char *model_path, const char *device_name, int device_id) { - return std::make_unique<PyRotatedDetector>(model_path, device_name, device_id); - })) - .def("__call__", &PyRotatedDetector::Apply); + return std::make_unique<PyRotatedDetector>(model_path, device_name, device_id); + }), + py::arg("model_path"), py::arg("device_name"), py::arg("device_id") = 0) + .def("__call__", + [](PyRotatedDetector *self, const PyImage &img) -> py::tuple { + return self->Apply(std::vector<PyImage>{img})[0]; + }) + .def("batch", &PyRotatedDetector::Apply); } class PythonRotatedDetectorRegisterer { diff --git a/csrc/mmdeploy/apis/python/segmentor.cpp b/csrc/mmdeploy/apis/python/segmentor.cpp index a2cae5a97..2132e4c03 100644 --- a/csrc/mmdeploy/apis/python/segmentor.cpp +++ b/csrc/mmdeploy/apis/python/segmentor.cpp @@ -1,6 +1,6 @@ // Copyright (c) OpenMMLab. All rights reserved. 
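The segmentor hunk below gets the same treatment: a single-image __call__ that returns one mask, plus batch(). A minimal sketch, with the mmdeploy_python module name and model path assumed:

    import cv2
    from mmdeploy_python import Segmentor  # module name is an assumption

    segmentor = Segmentor(model_path='mmdeploy_models/fcn', device_name='cpu', device_id=0)
    img = cv2.imread('city.jpg')
    mask = segmentor(img)  # per-pixel class ids for one image
    masks = segmentor.batch([img, img])  # one mask per input
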
-#include "mmdeploy/apis/c/segmentor.h" +#include "mmdeploy/segmentor.h" #include "common.h" @@ -9,26 +9,27 @@ namespace mmdeploy { class PySegmentor { public: PySegmentor(const char *model_path, const char *device_name, int device_id) { - auto status = mmdeploy_segmentor_create_by_path(model_path, device_name, device_id, &handle_); - if (status != MM_SUCCESS) { + auto status = + mmdeploy_segmentor_create_by_path(model_path, device_name, device_id, &segmentor_); + if (status != MMDEPLOY_SUCCESS) { throw std::runtime_error("failed to create segmentor"); } } ~PySegmentor() { - mmdeploy_segmentor_destroy(handle_); - handle_ = {}; + mmdeploy_segmentor_destroy(segmentor_); + segmentor_ = {}; } std::vector> Apply(const std::vector &imgs) { - std::vector mats; + std::vector mats; mats.reserve(imgs.size()); for (const auto &img : imgs) { auto mat = GetMat(img); mats.push_back(mat); } - mm_segment_t *segm{}; - auto status = mmdeploy_segmentor_apply(handle_, mats.data(), (int)mats.size(), &segm); - if (status != MM_SUCCESS) { + mmdeploy_segmentation_t *segm{}; + auto status = mmdeploy_segmentor_apply(segmentor_, mats.data(), (int)mats.size(), &segm); + if (status != MMDEPLOY_SUCCESS) { throw std::runtime_error("failed to apply segmentor, code: " + std::to_string(status)); } auto output = std::vector>{}; @@ -43,15 +44,20 @@ class PySegmentor { } private: - mm_handle_t handle_{}; + mmdeploy_segmentor_t segmentor_{}; }; static void register_python_segmentor(py::module &m) { py::class_(m, "Segmentor") .def(py::init([](const char *model_path, const char *device_name, int device_id) { - return std::make_unique(model_path, device_name, device_id); - })) - .def("__call__", &PySegmentor::Apply); + return std::make_unique(model_path, device_name, device_id); + }), + py::arg("model_path"), py::arg("device_name"), py::arg("device_id") = 0) + .def("__call__", + [](PySegmentor *self, const PyImage &img) -> py::array { + return self->Apply(std::vector{img})[0]; + }) + .def("batch", &PySegmentor::Apply); } class PythonSegmentorRegisterer { diff --git a/csrc/mmdeploy/apis/python/text_detector.cpp b/csrc/mmdeploy/apis/python/text_detector.cpp index 52522ef5f..fb1975370 100644 --- a/csrc/mmdeploy/apis/python/text_detector.cpp +++ b/csrc/mmdeploy/apis/python/text_detector.cpp @@ -1,6 +1,6 @@ // Copyright (c) OpenMMLab. All rights reserved. 
-#include "mmdeploy/apis/c/text_detector.h" +#include "mmdeploy/text_detector.h" #include "common.h" @@ -10,23 +10,23 @@ class PyTextDetector { public: PyTextDetector(const char *model_path, const char *device_name, int device_id) { auto status = - mmdeploy_text_detector_create_by_path(model_path, device_name, device_id, &handle_); - if (status != MM_SUCCESS) { + mmdeploy_text_detector_create_by_path(model_path, device_name, device_id, &detector_); + if (status != MMDEPLOY_SUCCESS) { throw std::runtime_error("failed to create text_detector"); } } std::vector> Apply(const std::vector &imgs) { - std::vector mats; + std::vector mats; mats.reserve(imgs.size()); for (const auto &img : imgs) { auto mat = GetMat(img); mats.push_back(mat); } - mm_text_detect_t *detection{}; + mmdeploy_text_detection_t *detection{}; int *result_count{}; - auto status = mmdeploy_text_detector_apply(handle_, mats.data(), (int)mats.size(), &detection, + auto status = mmdeploy_text_detector_apply(detector_, mats.data(), (int)mats.size(), &detection, &result_count); - if (status != MM_SUCCESS) { + if (status != MMDEPLOY_SUCCESS) { throw std::runtime_error("failed to apply text_detector, code: " + std::to_string(status)); } auto output = std::vector>{}; @@ -47,20 +47,25 @@ class PyTextDetector { return output; } ~PyTextDetector() { - mmdeploy_text_detector_destroy(handle_); - handle_ = {}; + mmdeploy_text_detector_destroy(detector_); + detector_ = {}; } private: - mm_handle_t handle_{}; + mmdeploy_text_detector_t detector_{}; }; static void register_python_text_detector(py::module &m) { py::class_(m, "TextDetector") .def(py::init([](const char *model_path, const char *device_name, int device_id) { - return std::make_unique(model_path, device_name, device_id); - })) - .def("__call__", &PyTextDetector::Apply); + return std::make_unique(model_path, device_name, device_id); + }), + py::arg("model_path"), py::arg("device_name"), py::arg("device_id") = 0) + .def("__call__", + [](PyTextDetector *self, const PyImage &img) -> py::array { + return self->Apply(std::vector{img})[0]; + }) + .def("batch", &PyTextDetector::Apply); } class PythonTextDetectorRegisterer { diff --git a/csrc/mmdeploy/apis/python/text_recognizer.cpp b/csrc/mmdeploy/apis/python/text_recognizer.cpp index 468058a5c..4b6b13434 100644 --- a/csrc/mmdeploy/apis/python/text_recognizer.cpp +++ b/csrc/mmdeploy/apis/python/text_recognizer.cpp @@ -1,6 +1,6 @@ // Copyright (c) OpenMMLab. All rights reserved. 
-#include "mmdeploy/apis/c/text_recognizer.h" +#include "mmdeploy/text_recognizer.h" #include "common.h" @@ -10,21 +10,22 @@ class PyTextRecognizer { public: PyTextRecognizer(const char *model_path, const char *device_name, int device_id) { auto status = - mmdeploy_text_recognizer_create_by_path(model_path, device_name, device_id, &handle_); - if (status != MM_SUCCESS) { + mmdeploy_text_recognizer_create_by_path(model_path, device_name, device_id, &recognizer_); + if (status != MMDEPLOY_SUCCESS) { throw std::runtime_error("failed to create text_recognizer"); } } std::vector>> Apply(const std::vector &imgs) { - std::vector mats; + std::vector mats; mats.reserve(imgs.size()); for (const auto &img : imgs) { auto mat = GetMat(img); mats.push_back(mat); } - mm_text_recognize_t *results{}; - auto status = mmdeploy_text_recognizer_apply(handle_, mats.data(), (int)mats.size(), &results); - if (status != MM_SUCCESS) { + mmdeploy_text_recognition_t *results{}; + auto status = + mmdeploy_text_recognizer_apply(recognizer_, mats.data(), (int)mats.size(), &results); + if (status != MMDEPLOY_SUCCESS) { throw std::runtime_error("failed to apply text_recognizer, code: " + std::to_string(status)); } auto output = std::vector>>{}; @@ -35,21 +36,47 @@ class PyTextRecognizer { mmdeploy_text_recognizer_release_result(results, (int)mats.size()); return output; } + std::vector>> Apply(const PyImage &img, + const std::vector &bboxes) { + if (bboxes.size() * sizeof(float) % sizeof(mmdeploy_text_detection_t)) { + throw std::invalid_argument("bboxes is not a list of 'mmdeploy_text_detection_t'"); + } + auto mat = GetMat(img); + int bbox_count = bboxes.size() * sizeof(float) / sizeof(mmdeploy_text_detection_t); + mmdeploy_text_recognition_t *results{}; + auto status = mmdeploy_text_recognizer_apply_bbox( + recognizer_, &mat, 1, (mmdeploy_text_detection_t *)bboxes.data(), &bbox_count, &results); + if (status != MMDEPLOY_SUCCESS) { + throw std::runtime_error("failed to apply text_recognizer, code: " + std::to_string(status)); + } + auto output = std::vector>>{}; + for (int i = 0; i < bbox_count; ++i) { + std::vector score(results[i].score, results[i].score + results[i].length); + output.emplace_back(results[i].text, std::move(score)); + } + mmdeploy_text_recognizer_release_result(results, bbox_count); + return output; + } ~PyTextRecognizer() { - mmdeploy_text_recognizer_destroy(handle_); - handle_ = {}; + mmdeploy_text_recognizer_destroy(recognizer_); + recognizer_ = {}; } private: - mm_handle_t handle_{}; + mmdeploy_text_recognizer_t recognizer_{}; }; static void register_python_text_recognizer(py::module &m) { py::class_(m, "TextRecognizer") .def(py::init([](const char *model_path, const char *device_name, int device_id) { - return std::make_unique(model_path, device_name, device_id); - })) - .def("__call__", &PyTextRecognizer::Apply); + return std::make_unique(model_path, device_name, device_id); + }), + py::arg("model_path"), py::arg("device_name"), py::arg("device_id") = 0) + .def("__call__", [](PyTextRecognizer *self, + const PyImage &img) { return self->Apply(std::vector{img})[0]; }) + .def("__call__", [](PyTextRecognizer *self, const PyImage &img, + const std::vector &bboxes) { return self->Apply(img, bboxes); }) + .def("batch", py::overload_cast &>(&PyTextRecognizer::Apply)); } class PythonTextRecognizerRegisterer { diff --git a/csrc/mmdeploy/backend_ops/ncnn/onnx2ncnn/CMakeLists.txt b/csrc/mmdeploy/backend_ops/ncnn/onnx2ncnn/CMakeLists.txt index 6901e4fca..62f9dd42d 100755 --- 
a/csrc/mmdeploy/backend_ops/ncnn/onnx2ncnn/CMakeLists.txt +++ b/csrc/mmdeploy/backend_ops/ncnn/onnx2ncnn/CMakeLists.txt @@ -5,15 +5,18 @@ project(onnx2ncnn) find_package(Protobuf) if (PROTOBUF_FOUND) + if (${Protobuf_PROTOC_EXECUTABLE} STREQUAL "") + message(FATAL_ERROR "protoc not found, try `-DProtobuf_PROTOC_EXECUTABLE=/path/to/protoc`") + endif () protobuf_generate_cpp(ONNX_PROTO_SRCS ONNX_PROTO_HDRS ${CMAKE_CURRENT_SOURCE_DIR}/onnx.proto) - add_executable(onnx2ncnn onnx2ncnn.cpp fuse_pass.cpp shape_inference.cpp ${ONNX_PROTO_SRCS} ${ONNX_PROTO_HDRS}) - target_include_directories(onnx2ncnn PRIVATE ${PROTOBUF_INCLUDE_DIR} + add_executable(mmdeploy_onnx2ncnn onnx2ncnn.cpp fuse_pass.cpp shape_inference.cpp ${ONNX_PROTO_SRCS} ${ONNX_PROTO_HDRS}) + target_include_directories(mmdeploy_onnx2ncnn PRIVATE ${PROTOBUF_INCLUDE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) - target_link_libraries(onnx2ncnn PRIVATE ${PROTOBUF_LIBRARIES}) + target_link_libraries(mmdeploy_onnx2ncnn PRIVATE ${PROTOBUF_LIBRARIES}) set(_NCNN_CONVERTER_DIR ${CMAKE_SOURCE_DIR}/mmdeploy/backend/ncnn) - install(TARGETS onnx2ncnn DESTINATION ${_NCNN_CONVERTER_DIR}) + install(TARGETS mmdeploy_onnx2ncnn DESTINATION ${_NCNN_CONVERTER_DIR}) else () message( FATAL_ERROR "Protobuf not found, onnx model convert tool won't be built") diff --git a/csrc/mmdeploy/backend_ops/onnxruntime/CMakeLists.txt b/csrc/mmdeploy/backend_ops/onnxruntime/CMakeLists.txt index 67f8439a4..53c62eabe 100644 --- a/csrc/mmdeploy/backend_ops/onnxruntime/CMakeLists.txt +++ b/csrc/mmdeploy/backend_ops/onnxruntime/CMakeLists.txt @@ -8,6 +8,8 @@ include(${CMAKE_SOURCE_DIR}/cmake/modules/FindONNXRUNTIME.cmake) file(GLOB_RECURSE ORT_OPS_SRCS *.cpp) add_library(${PROJECT_NAME}_obj OBJECT "${ORT_OPS_SRCS}") target_compile_definitions(${PROJECT_NAME}_obj PRIVATE -DMMDEPLOY_API_EXPORTS=1) +target_compile_options(${PROJECT_NAME}_obj PRIVATE + $<$:-fvisibility=hidden>) set_target_properties(${PROJECT_NAME}_obj PROPERTIES POSITION_INDEPENDENT_CODE 1) mmdeploy_export(${PROJECT_NAME}_obj) diff --git a/csrc/mmdeploy/backend_ops/tensorrt/batched_nms/trt_batched_nms.cpp b/csrc/mmdeploy/backend_ops/tensorrt/batched_nms/trt_batched_nms.cpp index b8150b992..431f2dd63 100644 --- a/csrc/mmdeploy/backend_ops/tensorrt/batched_nms/trt_batched_nms.cpp +++ b/csrc/mmdeploy/backend_ops/tensorrt/batched_nms/trt_batched_nms.cpp @@ -18,19 +18,20 @@ static const char* NMS_PLUGIN_VERSION{"1"}; static const char* NMS_PLUGIN_NAME{"TRTBatchedNMS"}; } // namespace -TRTBatchedNMS::TRTBatchedNMS(const std::string& name, NMSParameters params) - : TRTPluginBase(name), param(params) {} +TRTBatchedNMS::TRTBatchedNMS(const std::string& name, NMSParameters params, bool returnIndex) + : TRTPluginBase(name), param(params), mReturnIndex(returnIndex) {} TRTBatchedNMS::TRTBatchedNMS(const std::string& name, const void* data, size_t length) : TRTPluginBase(name) { deserialize_value(&data, &length, ¶m); - deserialize_value(&data, &length, &boxesSize); - deserialize_value(&data, &length, &scoresSize); - deserialize_value(&data, &length, &numPriors); deserialize_value(&data, &length, &mClipBoxes); + deserialize_value(&data, &length, &mReturnIndex); } -int TRTBatchedNMS::getNbOutputs() const TRT_NOEXCEPT { return 2; } +int TRTBatchedNMS::getNbOutputs() const TRT_NOEXCEPT { + int num = mReturnIndex ? 
3 : 2; + return num; +} nvinfer1::DimsExprs TRTBatchedNMS::getOutputDimensions( int outputIndex, const nvinfer1::DimsExprs* inputs, int nbInputs, @@ -51,6 +52,8 @@ nvinfer1::DimsExprs TRTBatchedNMS::getOutputDimensions( case 1: ret.nbDims = 2; break; + case 2: + ret.nbDims = 2; default: break; } @@ -81,6 +84,7 @@ int TRTBatchedNMS::enqueue(const nvinfer1::PluginTensorDesc* inputDesc, void* nmsedDets = outputs[0]; void* nmsedLabels = outputs[1]; + void* nmsedIndex = mReturnIndex ? outputs[2] : nullptr; size_t batch_size = inputDesc[0].dims.d[0]; size_t boxes_size = inputDesc[0].dims.d[1] * inputDesc[0].dims.d[2] * inputDesc[0].dims.d[3]; @@ -94,24 +98,22 @@ int TRTBatchedNMS::enqueue(const nvinfer1::PluginTensorDesc* inputDesc, pluginStatus_t status = nmsInference( stream, batch_size, boxes_size, score_size, shareLocation, param.backgroundLabelId, num_priors, param.numClasses, topk, param.keepTopK, param.scoreThreshold, param.iouThreshold, - DataType::kFLOAT, locData, DataType::kFLOAT, confData, nmsedDets, nmsedLabels, workSpace, - param.isNormalized, false, mClipBoxes, rotated); + DataType::kFLOAT, locData, DataType::kFLOAT, confData, nmsedDets, nmsedLabels, nmsedIndex, + workSpace, param.isNormalized, false, mClipBoxes, rotated); ASSERT(status == STATUS_SUCCESS); return 0; } size_t TRTBatchedNMS::getSerializationSize() const TRT_NOEXCEPT { - // NMSParameters, boxesSize,scoresSize,numPriors - return sizeof(NMSParameters) + sizeof(int) * 3 + sizeof(bool); + // NMSParameters + return sizeof(NMSParameters) + sizeof(mClipBoxes) + sizeof(mReturnIndex); } void TRTBatchedNMS::serialize(void* buffer) const TRT_NOEXCEPT { serialize_value(&buffer, param); - serialize_value(&buffer, boxesSize); - serialize_value(&buffer, scoresSize); - serialize_value(&buffer, numPriors); serialize_value(&buffer, mClipBoxes); + serialize_value(&buffer, mReturnIndex); } void TRTBatchedNMS::configurePlugin(const nvinfer1::DynamicPluginTensorDesc* inputs, int nbInputs, @@ -122,7 +124,7 @@ void TRTBatchedNMS::configurePlugin(const nvinfer1::DynamicPluginTensorDesc* inp bool TRTBatchedNMS::supportsFormatCombination(int pos, const nvinfer1::PluginTensorDesc* ioDesc, int nbInputs, int nbOutputs) TRT_NOEXCEPT { - if (pos == 3) { + if (pos == 3 || pos == 4) { return ioDesc[pos].type == nvinfer1::DataType::kINT32 && ioDesc[pos].format == nvinfer1::TensorFormat::kLINEAR; } @@ -135,10 +137,7 @@ const char* TRTBatchedNMS::getPluginType() const TRT_NOEXCEPT { return NMS_PLUGI const char* TRTBatchedNMS::getPluginVersion() const TRT_NOEXCEPT { return NMS_PLUGIN_VERSION; } IPluginV2DynamicExt* TRTBatchedNMS::clone() const TRT_NOEXCEPT { - auto* plugin = new TRTBatchedNMS(mLayerName, param); - plugin->boxesSize = boxesSize; - plugin->scoresSize = scoresSize; - plugin->numPriors = numPriors; + auto* plugin = new TRTBatchedNMS(mLayerName, param, mReturnIndex); plugin->setPluginNamespace(mNamespace.c_str()); plugin->setClipParam(mClipBoxes); return plugin; @@ -147,7 +146,7 @@ IPluginV2DynamicExt* TRTBatchedNMS::clone() const TRT_NOEXCEPT { nvinfer1::DataType TRTBatchedNMS::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const TRT_NOEXCEPT { ASSERT(index >= 0 && index < this->getNbOutputs()); - if (index == 1) { + if (index == 1 || index == 2) { return nvinfer1::DataType::kINT32; } return inputTypes[0]; @@ -167,6 +166,7 @@ TRTBatchedNMSCreator::TRTBatchedNMSCreator() { PluginField("iou_threshold", nullptr, PluginFieldType::kFLOAT32, 1)); mPluginAttributes.emplace_back(PluginField("is_normalized", nullptr, 
PluginFieldType::kINT32, 1)); mPluginAttributes.emplace_back(PluginField("clip_boxes", nullptr, PluginFieldType::kINT32, 1)); + mPluginAttributes.emplace_back(PluginField("return_index", nullptr, PluginFieldType::kINT32, 1)); mFC.nbFields = mPluginAttributes.size(); mFC.fields = mPluginAttributes.data(); @@ -182,6 +182,7 @@ IPluginV2Ext* TRTBatchedNMSCreator::createPlugin(const char* name, const PluginFieldCollection* fc) TRT_NOEXCEPT { const PluginField* fields = fc->fields; bool clipBoxes = true; + bool returnIndex = false; nvinfer1::plugin::NMSParameters params{}; for (int i = 0; i < fc->nbFields; ++i) { @@ -208,10 +209,12 @@ IPluginV2Ext* TRTBatchedNMSCreator::createPlugin(const char* name, params.isNormalized = *(static_cast<const bool*>(fields[i].data)); } else if (!strcmp(attrName, "clip_boxes")) { clipBoxes = *(static_cast<const bool*>(fields[i].data)); + } else if (!strcmp(attrName, "return_index")) { + returnIndex = *(static_cast<const bool*>(fields[i].data)); + } } - TRTBatchedNMS* plugin = new TRTBatchedNMS(name, params); + TRTBatchedNMS* plugin = new TRTBatchedNMS(name, params, returnIndex); plugin->setClipParam(clipBoxes); plugin->setPluginNamespace(mNamespace.c_str()); return plugin; diff --git a/csrc/mmdeploy/backend_ops/tensorrt/batched_nms/trt_batched_nms.hpp b/csrc/mmdeploy/backend_ops/tensorrt/batched_nms/trt_batched_nms.hpp index f37805213..d1e5d643d 100644 --- a/csrc/mmdeploy/backend_ops/tensorrt/batched_nms/trt_batched_nms.hpp +++ b/csrc/mmdeploy/backend_ops/tensorrt/batched_nms/trt_batched_nms.hpp @@ -9,9 +9,12 @@ #include "NvInferPluginUtils.h" #include "trt_plugin_base.hpp" namespace mmdeploy { + +enum NMSReturnType { RETURN_DETS = 1, RETURN_INDEX = 1 << 1 }; + class TRTBatchedNMS : public TRTPluginBase { public: - TRTBatchedNMS(const std::string& name, nvinfer1::plugin::NMSParameters param); + TRTBatchedNMS(const std::string& name, nvinfer1::plugin::NMSParameters param, bool returnIndex); TRTBatchedNMS(const std::string& name, const void* data, size_t length); @@ -55,10 +58,8 @@ class TRTBatchedNMS : public TRTPluginBase { private: nvinfer1::plugin::NMSParameters param{}; - int boxesSize{}; - int scoresSize{}; - int numPriors{}; bool mClipBoxes{}; + bool mReturnIndex{}; }; class TRTBatchedNMSCreator : public TRTPluginCreatorBase { diff --git a/csrc/mmdeploy/backend_ops/tensorrt/batched_rotated_nms/trt_batched_rotated_nms.cpp b/csrc/mmdeploy/backend_ops/tensorrt/batched_rotated_nms/trt_batched_rotated_nms.cpp index d478ee797..9d977bc93 100644 --- a/csrc/mmdeploy/backend_ops/tensorrt/batched_rotated_nms/trt_batched_rotated_nms.cpp +++ b/csrc/mmdeploy/backend_ops/tensorrt/batched_rotated_nms/trt_batched_rotated_nms.cpp @@ -23,9 +23,6 @@ TRTBatchedRotatedNMS::TRTBatchedRotatedNMS(const std::string& name, NMSParameter TRTBatchedRotatedNMS::TRTBatchedRotatedNMS(const std::string& name, const void* data, size_t length) : TRTPluginBase(name) { deserialize_value(&data, &length, &param); - deserialize_value(&data, &length, &boxesSize); - deserialize_value(&data, &length, &scoresSize); - deserialize_value(&data, &length, &numPriors); deserialize_value(&data, &length, &mClipBoxes); } @@ -94,23 +91,20 @@ int TRTBatchedRotatedNMS::enqueue(const nvinfer1::PluginTensorDesc* inputDesc, pluginStatus_t status = nmsInference( stream, batch_size, boxes_size, score_size, shareLocation, param.backgroundLabelId, num_priors, param.numClasses, topk, param.keepTopK, param.scoreThreshold, param.iouThreshold, - DataType::kFLOAT, locData, DataType::kFLOAT, confData, nmsedDets, nmsedLabels, workSpace, - param.isNormalized, false, 
mClipBoxes, rotated); + DataType::kFLOAT, locData, DataType::kFLOAT, confData, nmsedDets, nmsedLabels, nullptr, + workSpace, param.isNormalized, false, mClipBoxes, rotated); ASSERT(status == STATUS_SUCCESS); return 0; } size_t TRTBatchedRotatedNMS::getSerializationSize() const TRT_NOEXCEPT { - // NMSParameters, boxesSize,scoresSize,numPriors - return sizeof(NMSParameters) + sizeof(int) * 3 + sizeof(bool); + // NMSParameters, + return sizeof(NMSParameters) + sizeof(bool); } void TRTBatchedRotatedNMS::serialize(void* buffer) const TRT_NOEXCEPT { serialize_value(&buffer, param); - serialize_value(&buffer, boxesSize); - serialize_value(&buffer, scoresSize); - serialize_value(&buffer, numPriors); serialize_value(&buffer, mClipBoxes); } @@ -140,9 +134,6 @@ const char* TRTBatchedRotatedNMS::getPluginVersion() const TRT_NOEXCEPT { IPluginV2DynamicExt* TRTBatchedRotatedNMS::clone() const TRT_NOEXCEPT { auto* plugin = new TRTBatchedRotatedNMS(mLayerName, param); - plugin->boxesSize = boxesSize; - plugin->scoresSize = scoresSize; - plugin->numPriors = numPriors; plugin->setPluginNamespace(mNamespace.c_str()); plugin->setClipParam(mClipBoxes); return plugin; diff --git a/csrc/mmdeploy/backend_ops/tensorrt/batched_rotated_nms/trt_batched_rotated_nms.hpp b/csrc/mmdeploy/backend_ops/tensorrt/batched_rotated_nms/trt_batched_rotated_nms.hpp index 9e7de526a..66479eb7e 100644 --- a/csrc/mmdeploy/backend_ops/tensorrt/batched_rotated_nms/trt_batched_rotated_nms.hpp +++ b/csrc/mmdeploy/backend_ops/tensorrt/batched_rotated_nms/trt_batched_rotated_nms.hpp @@ -54,9 +54,6 @@ class TRTBatchedRotatedNMS : public TRTPluginBase { private: nvinfer1::plugin::NMSParameters param{}; - int boxesSize{}; - int scoresSize{}; - int numPriors{}; bool mClipBoxes{}; }; diff --git a/csrc/mmdeploy/backend_ops/tensorrt/common/nms/batched_nms_kernel.hpp b/csrc/mmdeploy/backend_ops/tensorrt/common/nms/batched_nms_kernel.hpp index b4929bb67..22cffa060 100644 --- a/csrc/mmdeploy/backend_ops/tensorrt/common/nms/batched_nms_kernel.hpp +++ b/csrc/mmdeploy/backend_ops/tensorrt/common/nms/batched_nms_kernel.hpp @@ -13,7 +13,7 @@ pluginStatus_t nmsInference(cudaStream_t stream, const int N, const int perBatch const float scoreThreshold, const float iouThreshold, const DataType DT_BBOX, const void* locData, const DataType DT_SCORE, const void* confData, void* nmsedDets, void* nmsedLabels, - void* workspace, bool isNormalized, bool confSigmoid, bool clipBoxes, - bool rotated = false); + void* nmsedIndex, void* workspace, bool isNormalized, bool confSigmoid, + bool clipBoxes, bool rotated = false); #endif diff --git a/csrc/mmdeploy/backend_ops/tensorrt/common/nms/kernel.h b/csrc/mmdeploy/backend_ops/tensorrt/common/nms/kernel.h index 1b9561b19..1b50fa4e9 100644 --- a/csrc/mmdeploy/backend_ops/tensorrt/common/nms/kernel.h +++ b/csrc/mmdeploy/backend_ops/tensorrt/common/nms/kernel.h @@ -76,7 +76,8 @@ pluginStatus_t gatherNMSOutputs(cudaStream_t stream, bool shareLocation, int num int numPredsPerClass, int numClasses, int topK, int keepTopK, DataType DT_BBOX, DataType DT_SCORE, const void* indices, const void* scores, const void* bboxData, void* nmsedDets, - void* nmsedLabels, bool clipBoxes = true, bool rotated = false); + void* nmsedLabels, void* nmsedIndex = nullptr, + bool clipBoxes = true, bool rotated = false); size_t detectionInferenceWorkspaceSize(bool shareLocation, int N, int C1, int C2, int numClasses, int numPredsPerClass, int topK, DataType DT_BBOX, diff --git a/csrc/mmdeploy/backend_ops/tensorrt/common/trt_plugin_base.hpp 
b/csrc/mmdeploy/backend_ops/tensorrt/common/trt_plugin_base.hpp index a98b782d1..8440bb621 100644 --- a/csrc/mmdeploy/backend_ops/tensorrt/common/trt_plugin_base.hpp +++ b/csrc/mmdeploy/backend_ops/tensorrt/common/trt_plugin_base.hpp @@ -26,6 +26,21 @@ class TRTPluginBase : public nvinfer1::IPluginV2DynamicExt { } const char *getPluginNamespace() const TRT_NOEXCEPT override { return mNamespace.c_str(); } + virtual void configurePlugin(const nvinfer1::DynamicPluginTensorDesc *in, int nbInputs, + const nvinfer1::DynamicPluginTensorDesc *out, + int nbOutputs) TRT_NOEXCEPT override {} + + virtual size_t getWorkspaceSize(const nvinfer1::PluginTensorDesc *inputs, int nbInputs, + const nvinfer1::PluginTensorDesc *outputs, + int nbOutputs) const TRT_NOEXCEPT override { + return 0; + } + + virtual void attachToContext(cudnnContext *cudnnContext, cublasContext *cublasContext, + nvinfer1::IGpuAllocator *gpuAllocator) TRT_NOEXCEPT override {} + + virtual void detachFromContext() TRT_NOEXCEPT override {} + protected: const std::string mLayerName; std::string mNamespace; @@ -34,10 +49,8 @@ class TRTPluginBase : public nvinfer1::IPluginV2DynamicExt { protected: // To prevent compiler warnings. using nvinfer1::IPluginV2DynamicExt::canBroadcastInputAcrossBatch; - using nvinfer1::IPluginV2DynamicExt::configurePlugin; using nvinfer1::IPluginV2DynamicExt::enqueue; using nvinfer1::IPluginV2DynamicExt::getOutputDimensions; - using nvinfer1::IPluginV2DynamicExt::getWorkspaceSize; using nvinfer1::IPluginV2DynamicExt::isOutputBroadcastAcrossBatch; using nvinfer1::IPluginV2DynamicExt::supportsFormat; #endif diff --git a/csrc/mmdeploy/backend_ops/tensorrt/common_impl/nms/allClassNMS.cu b/csrc/mmdeploy/backend_ops/tensorrt/common_impl/nms/allClassNMS.cu index 0a1e3e283..d048a36ef 100644 --- a/csrc/mmdeploy/backend_ops/tensorrt/common_impl/nms/allClassNMS.cu +++ b/csrc/mmdeploy/backend_ops/tensorrt/common_impl/nms/allClassNMS.cu @@ -200,6 +200,7 @@ pluginStatus_t allClassNMS_gpu(cudaStream_t stream, const int num, const int num const int GS = num_classes; const int t_size = (top_k + BS - 1) / BS; + ASSERT(t_size <= 10); kernel[t_size - 1]<<>>( num, num_classes, num_preds_per_class, top_k, nms_threshold, share_location, isNormalized, (T_BBOX *)bbox_data, (T_SCORE *)beforeNMS_scores, (int *)beforeNMS_index_array, diff --git a/csrc/mmdeploy/backend_ops/tensorrt/common_impl/nms/allClassRotatedNMS.cu b/csrc/mmdeploy/backend_ops/tensorrt/common_impl/nms/allClassRotatedNMS.cu index a5e102fb5..0edea2bfa 100644 --- a/csrc/mmdeploy/backend_ops/tensorrt/common_impl/nms/allClassRotatedNMS.cu +++ b/csrc/mmdeploy/backend_ops/tensorrt/common_impl/nms/allClassRotatedNMS.cu @@ -295,7 +295,7 @@ __host__ __device__ __forceinline__ T single_box_iou_rotated(T const *const box1 const T area1 = box1.w * box1.h; const T area2 = box2.w * box2.h; if (area1 < 1e-14 || area2 < 1e-14) { - return 0.f; + return 1.0f; } const T intersection = rotated_boxes_intersection(box1, box2); @@ -430,6 +430,7 @@ pluginStatus_t allClassRotatedNMS_gpu(cudaStream_t stream, const int num, const const int GS = num_classes; const int t_size = (top_k + BS - 1) / BS; + ASSERT(t_size <= 10); kernel[t_size - 1]<<>>( num, num_classes, num_preds_per_class, top_k, nms_threshold, share_location, isNormalized, (T_BBOX *)bbox_data, (T_SCORE *)beforeNMS_scores, (int *)beforeNMS_index_array, diff --git a/csrc/mmdeploy/backend_ops/tensorrt/common_impl/nms/batched_nms_kernel.cpp b/csrc/mmdeploy/backend_ops/tensorrt/common_impl/nms/batched_nms_kernel.cpp index 6be5293a3..71cb7a859 
100644 --- a/csrc/mmdeploy/backend_ops/tensorrt/common_impl/nms/batched_nms_kernel.cpp +++ b/csrc/mmdeploy/backend_ops/tensorrt/common_impl/nms/batched_nms_kernel.cpp @@ -10,8 +10,8 @@ pluginStatus_t nmsInference(cudaStream_t stream, const int N, const int perBatch const float scoreThreshold, const float iouThreshold, const DataType DT_BBOX, const void* locData, const DataType DT_SCORE, const void* confData, void* nmsedDets, void* nmsedLabels, - void* workspace, bool isNormalized, bool confSigmoid, bool clipBoxes, - bool rotated) { + void* nmsedIndex, void* workspace, bool isNormalized, bool confSigmoid, + bool clipBoxes, bool rotated) { const int topKVal = topK < 0 ? numPredsPerClass : topK; const int keepTopKVal = keepTopK < 0 ? numPredsPerClass : keepTopK; // locCount = batch_size * number_boxes_per_sample * 4 @@ -117,7 +117,7 @@ pluginStatus_t nmsInference(cudaStream_t stream, const int N, const int perBatch // Gather data from the sorted bounding boxes after NMS status = gatherNMSOutputs(stream, shareLocation, N, numPredsPerClass, numClasses, topKVal, keepTopKVal, DataType::kFLOAT, DataType::kFLOAT, indices, scores, - bboxData, nmsedDets, nmsedLabels, clipBoxes, rotated); + bboxData, nmsedDets, nmsedLabels, nmsedIndex, clipBoxes, rotated); ASSERT_FAILURE(status == STATUS_SUCCESS); diff --git a/csrc/mmdeploy/backend_ops/tensorrt/common_impl/nms/gatherNMSOutputs.cu b/csrc/mmdeploy/backend_ops/tensorrt/common_impl/nms/gatherNMSOutputs.cu index c86ccab5c..8a0ec7bac 100644 --- a/csrc/mmdeploy/backend_ops/tensorrt/common_impl/nms/gatherNMSOutputs.cu +++ b/csrc/mmdeploy/backend_ops/tensorrt/common_impl/nms/gatherNMSOutputs.cu @@ -12,7 +12,7 @@ __launch_bounds__(nthds_per_cta) __global__ const int numPredsPerClass, const int numClasses, const int topK, const int keepTopK, const int *indices, const T_SCORE *scores, const T_BBOX *bboxData, T_BBOX *nmsedDets, int *nmsedLabels, - bool clipBoxes) { + int *nmsedIndex, bool clipBoxes) { if (keepTopK > topK) return; for (int i = blockIdx.x * nthds_per_cta + threadIdx.x; i < numImages * keepTopK; i += gridDim.x * nthds_per_cta) { @@ -23,6 +23,9 @@ __launch_bounds__(nthds_per_cta) __global__ const T_SCORE score = scores[offset + detId]; if (index == -1) { nmsedLabels[i] = -1; + if (nmsedIndex != nullptr) { + nmsedIndex[i] = -1; + } if (rotated) { nmsedDets[i * 6] = 0; nmsedDets[i * 6 + 1] = 0; @@ -46,6 +49,9 @@ __launch_bounds__(nthds_per_cta) __global__ : index % (numClasses * numPredsPerClass)) + bboxOffset) * 5; + if (nmsedIndex != nullptr) { + nmsedIndex[i] = bboxId / 5; + } // clipped bbox xmin nmsedDets[i * 6] = clipBoxes ? max(min(bboxData[bboxId], T_BBOX(1.)), T_BBOX(0.)) : bboxData[bboxId]; @@ -67,6 +73,9 @@ __launch_bounds__(nthds_per_cta) __global__ : index % (numClasses * numPredsPerClass)) + bboxOffset) * 4; + if (nmsedIndex != nullptr) { + nmsedIndex[i] = bboxId / 4; + } // clipped bbox xmin nmsedDets[i * 5] = clipBoxes ? 
max(min(bboxData[bboxId], T_BBOX(1.)), T_BBOX(0.)) : bboxData[bboxId]; @@ -90,12 +99,14 @@ pluginStatus_t gatherNMSOutputs_gpu(cudaStream_t stream, const bool shareLocatio const int numImages, const int numPredsPerClass, const int numClasses, const int topK, const int keepTopK, const void *indices, const void *scores, const void *bboxData, - void *nmsedDets, void *nmsedLabels, bool clipBoxes) { + void *nmsedDets, void *nmsedLabels, void *nmsedIndex, + bool clipBoxes) { const int BS = 32; const int GS = 32; gatherNMSOutputs_kernel<<>>( shareLocation, numImages, numPredsPerClass, numClasses, topK, keepTopK, (int *)indices, - (T_SCORE *)scores, (T_BBOX *)bboxData, (T_BBOX *)nmsedDets, (int *)nmsedLabels, clipBoxes); + (T_SCORE *)scores, (T_BBOX *)bboxData, (T_BBOX *)nmsedDets, (int *)nmsedLabels, + (int *)nmsedIndex, clipBoxes); CSC(cudaGetLastError(), STATUS_FAILURE); return STATUS_SUCCESS; @@ -104,7 +115,7 @@ pluginStatus_t gatherNMSOutputs_gpu(cudaStream_t stream, const bool shareLocatio // gatherNMSOutputs LAUNCH CONFIG {{{ typedef pluginStatus_t (*nmsOutFunc)(cudaStream_t, const bool, const int, const int, const int, const int, const int, const void *, const void *, const void *, - void *, void *, bool); + void *, void *, void *, bool); struct nmsOutLaunchConfig { DataType t_bbox; DataType t_score; @@ -138,14 +149,15 @@ pluginStatus_t gatherNMSOutputs(cudaStream_t stream, const bool shareLocation, c const int numPredsPerClass, const int numClasses, const int topK, const int keepTopK, const DataType DT_BBOX, const DataType DT_SCORE, const void *indices, const void *scores, const void *bboxData, - void *nmsedDets, void *nmsedLabels, bool clipBoxes, bool rotated) { + void *nmsedDets, void *nmsedLabels, void *nmsedIndex, + bool clipBoxes, bool rotated) { nmsOutLaunchConfig lc = nmsOutLaunchConfig(DT_BBOX, DT_SCORE, rotated); for (unsigned i = 0; i < nmsOutFuncVec.size(); ++i) { if (lc == nmsOutFuncVec[i]) { DEBUG_PRINTF("gatherNMSOutputs kernel %d\n", i); return nmsOutFuncVec[i].function(stream, shareLocation, numImages, numPredsPerClass, numClasses, topK, keepTopK, indices, scores, bboxData, - nmsedDets, nmsedLabels, clipBoxes); + nmsedDets, nmsedLabels, nmsedIndex, clipBoxes); } } return STATUS_BAD_PARAM; diff --git a/csrc/mmdeploy/backend_ops/tensorrt/grid_priors/trt_grid_priors.cpp b/csrc/mmdeploy/backend_ops/tensorrt/grid_priors/trt_grid_priors.cpp new file mode 100644 index 000000000..1850fbfc1 --- /dev/null +++ b/csrc/mmdeploy/backend_ops/tensorrt/grid_priors/trt_grid_priors.cpp @@ -0,0 +1,154 @@ +// Copyright (c) OpenMMLab. 
All rights reserved +#include "trt_grid_priors.hpp" + +#include + +#include + +#include "trt_grid_priors_kernel.hpp" +#include "trt_serialize.hpp" + +using namespace nvinfer1; + +namespace mmdeploy { +namespace { +static const char *PLUGIN_VERSION{"1"}; +static const char *PLUGIN_NAME{"GridPriorsTRT"}; +} // namespace + +GridPriorsTRT::GridPriorsTRT(const std::string &name, const nvinfer1::Dims stride) + : TRTPluginBase(name), mStride(stride) {} + +GridPriorsTRT::GridPriorsTRT(const std::string name, const void *data, size_t length) + : TRTPluginBase(name) { + deserialize_value(&data, &length, &mStride); +} +GridPriorsTRT::~GridPriorsTRT() {} + +nvinfer1::IPluginV2DynamicExt *GridPriorsTRT::clone() const TRT_NOEXCEPT { + GridPriorsTRT *plugin = new GridPriorsTRT(mLayerName, mStride); + plugin->setPluginNamespace(getPluginNamespace()); + + return plugin; +} + +nvinfer1::DimsExprs GridPriorsTRT::getOutputDimensions( + int outputIndex, const nvinfer1::DimsExprs *inputs, int nbInputs, + nvinfer1::IExprBuilder &exprBuilder) TRT_NOEXCEPT { + // input[0] == base_anchor + // input[1] == empty_h + // input[2] == empty_w + + nvinfer1::DimsExprs ret; + ret.nbDims = 2; + auto area = + exprBuilder.operation(nvinfer1::DimensionOperation::kPROD, *inputs[2].d[0], *inputs[1].d[0]); + ret.d[0] = exprBuilder.operation(nvinfer1::DimensionOperation::kPROD, *area, *(inputs[0].d[0])); + ret.d[1] = exprBuilder.constant(4); + + return ret; +} + +bool GridPriorsTRT::supportsFormatCombination(int pos, const nvinfer1::PluginTensorDesc *ioDesc, + int nbInputs, int nbOutputs) TRT_NOEXCEPT { + if (pos == 0) { + return (ioDesc[pos].type == nvinfer1::DataType::kFLOAT && + ioDesc[pos].format == nvinfer1::TensorFormat::kLINEAR); + } else if (pos - nbInputs == 0) { + return ioDesc[pos].type == ioDesc[0].type && ioDesc[pos].format == ioDesc[0].format; + } else { + return true; + } +} + +int GridPriorsTRT::enqueue(const nvinfer1::PluginTensorDesc *inputDesc, + const nvinfer1::PluginTensorDesc *outputDesc, const void *const *inputs, + void *const *outputs, void *workSpace, + cudaStream_t stream) TRT_NOEXCEPT { + int num_base_anchors = inputDesc[0].dims.d[0]; + int feat_h = inputDesc[1].dims.d[0]; + int feat_w = inputDesc[2].dims.d[0]; + + const void *base_anchor = inputs[0]; + void *output = outputs[0]; + + auto data_type = inputDesc[0].type; + switch (data_type) { + case nvinfer1::DataType::kFLOAT: + trt_grid_priors_impl((float *)base_anchor, (float *)output, num_base_anchors, feat_w, + feat_h, mStride.d[0], mStride.d[1], stream); + break; + default: + return 1; + } + + return 0; +} + +nvinfer1::DataType GridPriorsTRT::getOutputDataType(int index, const nvinfer1::DataType *inputTypes, + int nbInputs) const TRT_NOEXCEPT { + return inputTypes[0]; +} + +// IPluginV2 Methods +const char *GridPriorsTRT::getPluginType() const TRT_NOEXCEPT { return PLUGIN_NAME; } + +const char *GridPriorsTRT::getPluginVersion() const TRT_NOEXCEPT { return PLUGIN_VERSION; } + +int GridPriorsTRT::getNbOutputs() const TRT_NOEXCEPT { return 1; } + +size_t GridPriorsTRT::getSerializationSize() const TRT_NOEXCEPT { return serialized_size(mStride); } + +void GridPriorsTRT::serialize(void *buffer) const TRT_NOEXCEPT { + serialize_value(&buffer, mStride); + ; +} + +////////////////////// creator ///////////////////////////// + +GridPriorsTRTCreator::GridPriorsTRTCreator() { + mPluginAttributes.clear(); + mPluginAttributes.emplace_back(nvinfer1::PluginField("stride_h")); + mPluginAttributes.emplace_back(nvinfer1::PluginField("stride_w")); + mFC.nbFields = 
mPluginAttributes.size(); + mFC.fields = mPluginAttributes.data(); +} + +const char *GridPriorsTRTCreator::getPluginName() const TRT_NOEXCEPT { return PLUGIN_NAME; } + +const char *GridPriorsTRTCreator::getPluginVersion() const TRT_NOEXCEPT { return PLUGIN_VERSION; } + +nvinfer1::IPluginV2 *GridPriorsTRTCreator::createPlugin( + const char *name, const nvinfer1::PluginFieldCollection *fc) TRT_NOEXCEPT { + int stride_w = 1; + int stride_h = 1; + + for (int i = 0; i < fc->nbFields; i++) { + if (fc->fields[i].data == nullptr) { + continue; + } + std::string field_name(fc->fields[i].name); + + if (field_name.compare("stride_w") == 0) { + stride_w = static_cast(fc->fields[i].data)[0]; + } + if (field_name.compare("stride_h") == 0) { + stride_h = static_cast(fc->fields[i].data)[0]; + } + } + nvinfer1::Dims stride{2, {stride_w, stride_h}}; + + GridPriorsTRT *plugin = new GridPriorsTRT(name, stride); + plugin->setPluginNamespace(getPluginNamespace()); + return plugin; +} + +nvinfer1::IPluginV2 *GridPriorsTRTCreator::deserializePlugin(const char *name, + const void *serialData, + size_t serialLength) TRT_NOEXCEPT { + auto plugin = new GridPriorsTRT(name, serialData, serialLength); + plugin->setPluginNamespace(getPluginNamespace()); + return plugin; +} +REGISTER_TENSORRT_PLUGIN(GridPriorsTRTCreator); +} // namespace mmdeploy diff --git a/csrc/mmdeploy/backend_ops/tensorrt/grid_priors/trt_grid_priors.hpp b/csrc/mmdeploy/backend_ops/tensorrt/grid_priors/trt_grid_priors.hpp new file mode 100644 index 000000000..0036f6258 --- /dev/null +++ b/csrc/mmdeploy/backend_ops/tensorrt/grid_priors/trt_grid_priors.hpp @@ -0,0 +1,66 @@ +// Copyright (c) OpenMMLab. All rights reserved. +#ifndef TRT_GRID_PRIORS_HPP +#define TRT_GRID_PRIORS_HPP +#include + +#include +#include +#include + +#include "trt_plugin_base.hpp" + +namespace mmdeploy { +class GridPriorsTRT : public TRTPluginBase { + public: + GridPriorsTRT(const std::string &name, const nvinfer1::Dims stride); + + GridPriorsTRT(const std::string name, const void *data, size_t length); + + GridPriorsTRT() = delete; + + ~GridPriorsTRT() TRT_NOEXCEPT override; + + // IPluginV2DynamicExt Methods + nvinfer1::IPluginV2DynamicExt *clone() const TRT_NOEXCEPT override; + nvinfer1::DimsExprs getOutputDimensions(int outputIndex, const nvinfer1::DimsExprs *inputs, + int nbInputs, nvinfer1::IExprBuilder &exprBuilder) + TRT_NOEXCEPT override; + bool supportsFormatCombination(int pos, const nvinfer1::PluginTensorDesc *ioDesc, int nbInputs, + int nbOutputs) TRT_NOEXCEPT override; + int enqueue(const nvinfer1::PluginTensorDesc *inputDesc, + const nvinfer1::PluginTensorDesc *outputDesc, const void *const *inputs, + void *const *outputs, void *workspace, cudaStream_t stream) TRT_NOEXCEPT override; + + // IPluginV2Ext Methods + nvinfer1::DataType getOutputDataType(int index, const nvinfer1::DataType *inputTypes, + int nbInputs) const TRT_NOEXCEPT override; + + // IPluginV2 Methods + const char *getPluginType() const TRT_NOEXCEPT override; + const char *getPluginVersion() const TRT_NOEXCEPT override; + int getNbOutputs() const TRT_NOEXCEPT override; + size_t getSerializationSize() const TRT_NOEXCEPT override; + void serialize(void *buffer) const TRT_NOEXCEPT override; + + private: + nvinfer1::Dims mStride; + + cublasHandle_t m_cublas_handle; +}; + +class GridPriorsTRTCreator : public TRTPluginCreatorBase { + public: + GridPriorsTRTCreator(); + + const char *getPluginName() const TRT_NOEXCEPT override; + + const char *getPluginVersion() const TRT_NOEXCEPT override; + + 
nvinfer1::IPluginV2 *createPlugin(const char *name, const nvinfer1::PluginFieldCollection *fc) + TRT_NOEXCEPT override; + + nvinfer1::IPluginV2 *deserializePlugin(const char *name, const void *serialData, + size_t serialLength) TRT_NOEXCEPT override; +}; +} // namespace mmdeploy +#endif // TRT_GRID_PRIORS_HPP diff --git a/csrc/mmdeploy/backend_ops/tensorrt/grid_priors/trt_grid_priors_kernel.cu b/csrc/mmdeploy/backend_ops/tensorrt/grid_priors/trt_grid_priors_kernel.cu new file mode 100644 index 000000000..72c33d179 --- /dev/null +++ b/csrc/mmdeploy/backend_ops/tensorrt/grid_priors/trt_grid_priors_kernel.cu @@ -0,0 +1,43 @@ +// Copyright (c) OpenMMLab. All rights reserved +#include + +#include "common_cuda_helper.hpp" +#include "trt_grid_priors_kernel.hpp" +#include "trt_plugin_helper.hpp" + +template +__global__ void trt_grid_priors_kernel(const scalar_t* base_anchor, scalar_t* output, + int num_base_anchors, int feat_w, int feat_h, int stride_w, + int stride_h) { + // load base anchor into shared memory. + extern __shared__ scalar_t shared_base_anchor[]; + for (int i = threadIdx.x; i < num_base_anchors * 4; i += blockDim.x) { + shared_base_anchor[i] = base_anchor[i]; + } + __syncthreads(); + + CUDA_1D_KERNEL_LOOP(index, num_base_anchors * feat_w * feat_h) { + const int a_offset = (index % num_base_anchors) << 2; + const scalar_t w = scalar_t(((index / num_base_anchors) % feat_w) * stride_w); + const scalar_t h = scalar_t((index / (feat_w * num_base_anchors)) * stride_h); + + auto out_start = output + index * 4; + out_start[0] = shared_base_anchor[a_offset] + w; + out_start[1] = shared_base_anchor[a_offset + 1] + h; + out_start[2] = shared_base_anchor[a_offset + 2] + w; + out_start[3] = shared_base_anchor[a_offset + 3] + h; + } +} + +template +void trt_grid_priors_impl(const scalar_t* base_anchor, scalar_t* output, int num_base_anchors, + int feat_w, int feat_h, int stride_w, int stride_h, cudaStream_t stream) { + trt_grid_priors_kernel<<>>( + base_anchor, output, (int)num_base_anchors, (int)feat_w, (int)feat_h, (int)stride_w, + (int)stride_h); +} + +template void trt_grid_priors_impl(const float* base_anchor, float* output, + int num_base_anchors, int feat_w, int feat_h, + int stride_w, int stride_h, cudaStream_t stream); diff --git a/csrc/mmdeploy/backend_ops/tensorrt/grid_priors/trt_grid_priors_kernel.hpp b/csrc/mmdeploy/backend_ops/tensorrt/grid_priors/trt_grid_priors_kernel.hpp new file mode 100644 index 000000000..77cef58c5 --- /dev/null +++ b/csrc/mmdeploy/backend_ops/tensorrt/grid_priors/trt_grid_priors_kernel.hpp @@ -0,0 +1,10 @@ +// Copyright (c) OpenMMLab. All rights reserved +#ifndef TRT_GRID_PRIORS_KERNEL_HPP +#define TRT_GRID_PRIORS_KERNEL_HPP +#include + +template +void trt_grid_priors_impl(const scalar_t* base_anchor, scalar_t* output, int num_base_anchors, + int feat_w, int feat_h, int stride_w, int stride_h, cudaStream_t stream); + +#endif diff --git a/csrc/mmdeploy/backend_ops/tensorrt/multi_level_rotated_roi_align/trt_multi_level_rotated_roi_align.cpp b/csrc/mmdeploy/backend_ops/tensorrt/multi_level_rotated_roi_align/trt_multi_level_rotated_roi_align.cpp new file mode 100644 index 000000000..663760312 --- /dev/null +++ b/csrc/mmdeploy/backend_ops/tensorrt/multi_level_rotated_roi_align/trt_multi_level_rotated_roi_align.cpp @@ -0,0 +1,228 @@ +// Copyright (c) OpenMMLab. All rights reserved. 
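For reference, the grid priors kernel above enumerates feat_h * feat_w * num_base_anchors anchors with the anchor index varying fastest; a minimal host-side C++ sketch of the same indexing (function name and layout comments are ours, not part of the patch):

#include <vector>

// Host-side reference for the GridPriorsTRT output layout, mirroring the
// CUDA kernel: a = anchor index, then x shift, then y shift.
std::vector<float> grid_priors_ref(const std::vector<float>& base_anchor,
                                   int num_base_anchors, int feat_w, int feat_h,
                                   int stride_w, int stride_h) {
  std::vector<float> out(size_t(num_base_anchors) * feat_w * feat_h * 4);
  for (size_t index = 0; index < out.size() / 4; ++index) {
    const int a = int(index % num_base_anchors) * 4;  // offset into base_anchor
    const float w = float((index / num_base_anchors) % feat_w) * stride_w;
    const float h = float(index / (size_t(feat_w) * num_base_anchors)) * stride_h;
    out[index * 4 + 0] = base_anchor[a + 0] + w;  // x1 shifted by cell x
    out[index * 4 + 1] = base_anchor[a + 1] + h;  // y1 shifted by cell y
    out[index * 4 + 2] = base_anchor[a + 2] + w;  // x2
    out[index * 4 + 3] = base_anchor[a + 3] + h;  // y2
  }
  return out;
}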
+ +#include "trt_multi_level_rotated_roi_align.hpp" + +#include + +#include + +#include "trt_multi_level_rotated_roi_align_kernel.hpp" +#include "trt_plugin_helper.hpp" +#include "trt_serialize.hpp" +namespace mmdeploy { +namespace { +static const char *PLUGIN_VERSION{"1"}; +static const char *PLUGIN_NAME{"MMCVMultiLevelRotatedRoiAlign"}; +} // namespace + +TRTMultiLevelRotatedRoiAlign::TRTMultiLevelRotatedRoiAlign( + const std::string &name, int alignedHeight, int alignedWidth, int clockwise, int sampleNum, + const std::vector &featmapStrides, float roiScaleFactor, int finestScale, bool aligned) + : TRTPluginBase(name), + mAlignedHeight(alignedHeight), + mAlignedWidth(alignedWidth), + mClockwise(clockwise), + mSampleNum(sampleNum), + mFeatmapStrides(featmapStrides), + mRoiScaleFactor(roiScaleFactor), + mFinestScale(finestScale), + mAligned(aligned) {} + +TRTMultiLevelRotatedRoiAlign::TRTMultiLevelRotatedRoiAlign(const std::string name, const void *data, + size_t length) + : TRTPluginBase(name) { + deserialize_value(&data, &length, &mAlignedHeight); + deserialize_value(&data, &length, &mAlignedWidth); + deserialize_value(&data, &length, &mClockwise); + deserialize_value(&data, &length, &mSampleNum); + deserialize_value(&data, &length, &mRoiScaleFactor); + deserialize_value(&data, &length, &mFinestScale); + deserialize_value(&data, &length, &mAligned); + deserialize_value(&data, &length, &mFeatmapStrides); +} + +nvinfer1::IPluginV2DynamicExt *TRTMultiLevelRotatedRoiAlign::clone() const TRT_NOEXCEPT { + TRTMultiLevelRotatedRoiAlign *plugin = new TRTMultiLevelRotatedRoiAlign( + mLayerName, mAlignedHeight, mAlignedWidth, mClockwise, mSampleNum, mFeatmapStrides, + mRoiScaleFactor, mFinestScale, mAligned); + plugin->setPluginNamespace(getPluginNamespace()); + + return plugin; +} + +nvinfer1::DimsExprs TRTMultiLevelRotatedRoiAlign::getOutputDimensions( + int outputIndex, const nvinfer1::DimsExprs *inputs, int nbInputs, + nvinfer1::IExprBuilder &exprBuilder) TRT_NOEXCEPT { + // warning, nbInputs should equal to mFeatmapStrides.size() + 1 + nvinfer1::DimsExprs ret; + ret.nbDims = 4; + ret.d[0] = inputs[0].d[0]; + ret.d[1] = inputs[1].d[1]; + ret.d[2] = exprBuilder.constant(mAlignedHeight); + ret.d[3] = exprBuilder.constant(mAlignedWidth); + + return ret; +} + +bool TRTMultiLevelRotatedRoiAlign::supportsFormatCombination( + int pos, const nvinfer1::PluginTensorDesc *ioDesc, int nbInputs, int nbOutputs) TRT_NOEXCEPT { + return ioDesc[pos].type == nvinfer1::DataType::kFLOAT && + ioDesc[pos].format == nvinfer1::TensorFormat::kLINEAR; +} + +void TRTMultiLevelRotatedRoiAlign::configurePlugin(const nvinfer1::DynamicPluginTensorDesc *inputs, + int nbInputs, + const nvinfer1::DynamicPluginTensorDesc *outputs, + int nbOutputs) TRT_NOEXCEPT { + // Validate input arguments + ASSERT(nbOutputs == 1); + ASSERT(nbInputs >= 1); + mFeatmapStrides = + std::vector(mFeatmapStrides.begin(), mFeatmapStrides.begin() + nbInputs - 1); +} + +size_t TRTMultiLevelRotatedRoiAlign::getWorkspaceSize(const nvinfer1::PluginTensorDesc *inputs, + int nbInputs, + const nvinfer1::PluginTensorDesc *outputs, + int nbOutputs) const TRT_NOEXCEPT { + return 0; +} + +int TRTMultiLevelRotatedRoiAlign::enqueue(const nvinfer1::PluginTensorDesc *inputDesc, + const nvinfer1::PluginTensorDesc *outputDesc, + const void *const *inputs, void *const *outputs, + void *workSpace, cudaStream_t stream) TRT_NOEXCEPT { + int num_rois = inputDesc[0].dims.d[0]; + int batch_size = inputDesc[1].dims.d[0]; + int channels = inputDesc[1].dims.d[1]; + + const int 
kMaxFeatMap = 10; + int heights[kMaxFeatMap]; + int widths[kMaxFeatMap]; + float strides[kMaxFeatMap]; + + int num_feats = mFeatmapStrides.size(); + for (int i = 0; i < num_feats; ++i) { + heights[i] = inputDesc[i + 1].dims.d[2]; + widths[i] = inputDesc[i + 1].dims.d[3]; + strides[i] = mFeatmapStrides[i]; + } + + const void *rois = inputs[0]; + const void *const *feats = inputs + 1; + + multi_level_rotated_roi_align((float *)outputs[0], (const float *)rois, num_rois, feats, + num_feats, batch_size, channels, &heights[0], &widths[0], + &strides[0], mAlignedHeight, mAlignedWidth, mClockwise, + mSampleNum, mRoiScaleFactor, mFinestScale, mAligned, stream); + + return 0; +} + +nvinfer1::DataType TRTMultiLevelRotatedRoiAlign::getOutputDataType( + int index, const nvinfer1::DataType *inputTypes, int nbInputs) const TRT_NOEXCEPT { + return nvinfer1::DataType::kFLOAT; +} + +// IPluginV2 Methods +const char *TRTMultiLevelRotatedRoiAlign::getPluginType() const TRT_NOEXCEPT { return PLUGIN_NAME; } + +const char *TRTMultiLevelRotatedRoiAlign::getPluginVersion() const TRT_NOEXCEPT { + return PLUGIN_VERSION; +} + +int TRTMultiLevelRotatedRoiAlign::getNbOutputs() const TRT_NOEXCEPT { return 1; } + +size_t TRTMultiLevelRotatedRoiAlign::getSerializationSize() const TRT_NOEXCEPT { + return serialized_size(mFeatmapStrides) + serialized_size(mAlignedHeight) + + serialized_size(mAlignedWidth) + serialized_size(mClockwise) + + serialized_size(mSampleNum) + serialized_size(mRoiScaleFactor) + + serialized_size(mFinestScale) + serialized_size(mAligned); +} + +void TRTMultiLevelRotatedRoiAlign::serialize(void *buffer) const TRT_NOEXCEPT { + serialize_value(&buffer, mAlignedHeight); + serialize_value(&buffer, mAlignedWidth); + serialize_value(&buffer, mClockwise); + serialize_value(&buffer, mSampleNum); + serialize_value(&buffer, mRoiScaleFactor); + serialize_value(&buffer, mFinestScale); + serialize_value(&buffer, mAligned); + serialize_value(&buffer, mFeatmapStrides); +} + +TRTMultiLevelRotatedRoiAlignCreator::TRTMultiLevelRotatedRoiAlignCreator() { + mPluginAttributes = std::vector( + {nvinfer1::PluginField("output_height"), nvinfer1::PluginField("output_width"), + nvinfer1::PluginField("clockwise"), nvinfer1::PluginField("sampling_ratio"), + nvinfer1::PluginField("featmap_strides"), nvinfer1::PluginField("roi_scale_factor"), + nvinfer1::PluginField("finest_scale"), nvinfer1::PluginField("aligned")}); + mFC.nbFields = mPluginAttributes.size(); + mFC.fields = mPluginAttributes.data(); +} + +const char *TRTMultiLevelRotatedRoiAlignCreator::getPluginName() const TRT_NOEXCEPT { + return PLUGIN_NAME; +} + +const char *TRTMultiLevelRotatedRoiAlignCreator::getPluginVersion() const TRT_NOEXCEPT { + return PLUGIN_VERSION; +} + +nvinfer1::IPluginV2 *TRTMultiLevelRotatedRoiAlignCreator::createPlugin( + const char *name, const nvinfer1::PluginFieldCollection *fc) TRT_NOEXCEPT { + int alignedHeight = 7; + int alignedWidth = 7; + int clockwise = 0; + int sampleNum = 2; + std::vector featmapStrides; + float roiScaleFactor = -1; + int finestScale = 56; + bool aligned = false; + + for (int i = 0; i < fc->nbFields; i++) { + if (fc->fields[i].data == nullptr) { + continue; + } + std::string field_name(fc->fields[i].name); + + if (field_name.compare("output_height") == 0) { + alignedHeight = static_cast(fc->fields[i].data)[0]; + } else if (field_name.compare("output_width") == 0) { + alignedWidth = static_cast(fc->fields[i].data)[0]; + } else if (field_name.compare("clockwise") == 0) { + clockwise = 
static_cast(fc->fields[i].data)[0]; + } else if (field_name.compare("sampling_ratio") == 0) { + sampleNum = static_cast(fc->fields[i].data)[0]; + } else if (field_name.compare("roi_scale_factor") == 0) { + roiScaleFactor = static_cast(fc->fields[i].data)[0]; + } else if (field_name.compare("finest_scale") == 0) { + finestScale = static_cast(fc->fields[i].data)[0]; + } else if (field_name.compare("featmap_strides") == 0) { + int data_size = (fc->fields[i].length); + const float *data_start = static_cast(fc->fields[i].data); + featmapStrides = std::vector(data_start, data_start + data_size); + } else if (field_name.compare("aligned") == 0) { + int aligned_int = static_cast(fc->fields[i].data)[0]; + aligned = aligned_int != 0; + } + } + + ASSERT(featmapStrides.size() != 0); + + TRTMultiLevelRotatedRoiAlign *plugin = + new TRTMultiLevelRotatedRoiAlign(name, alignedHeight, alignedWidth, clockwise, sampleNum, + featmapStrides, roiScaleFactor, finestScale, aligned); + plugin->setPluginNamespace(getPluginNamespace()); + return plugin; +} + +nvinfer1::IPluginV2 *TRTMultiLevelRotatedRoiAlignCreator::deserializePlugin( + const char *name, const void *serialData, size_t serialLength) TRT_NOEXCEPT { + auto plugin = new TRTMultiLevelRotatedRoiAlign(name, serialData, serialLength); + plugin->setPluginNamespace(getPluginNamespace()); + return plugin; +} + +REGISTER_TENSORRT_PLUGIN(TRTMultiLevelRotatedRoiAlignCreator); +} // namespace mmdeploy diff --git a/csrc/mmdeploy/backend_ops/tensorrt/multi_level_rotated_roi_align/trt_multi_level_rotated_roi_align.hpp b/csrc/mmdeploy/backend_ops/tensorrt/multi_level_rotated_roi_align/trt_multi_level_rotated_roi_align.hpp new file mode 100644 index 000000000..cf0bab758 --- /dev/null +++ b/csrc/mmdeploy/backend_ops/tensorrt/multi_level_rotated_roi_align/trt_multi_level_rotated_roi_align.hpp @@ -0,0 +1,79 @@ +// Copyright (c) OpenMMLab. All rights reserved. 
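A plugin registered this way is normally instantiated through the TensorRT plugin registry rather than by calling the creator directly; a minimal sketch (field values illustrative only, and only a subset of the fields createPlugin() parses):

#include <NvInferRuntime.h>
#include <string>
#include <vector>

// Sketch: look up the creator registered above and build the plugin from
// PluginFields. Field names follow the createPlugin() parser; omitted fields
// fall back to the defaults shown in the parser.
nvinfer1::IPluginV2* make_rotated_roi_align_plugin() {
  auto* creator =
      getPluginRegistry()->getPluginCreator("MMCVMultiLevelRotatedRoiAlign", "1");
  if (creator == nullptr) return nullptr;

  const int out_h = 7, out_w = 7;
  const std::vector<float> strides{4.f, 8.f, 16.f, 32.f};
  std::vector<nvinfer1::PluginField> fields{
      {"output_height", &out_h, nvinfer1::PluginFieldType::kINT32, 1},
      {"output_width", &out_w, nvinfer1::PluginFieldType::kINT32, 1},
      {"featmap_strides", strides.data(), nvinfer1::PluginFieldType::kFLOAT32,
       int(strides.size())}};
  nvinfer1::PluginFieldCollection fc{int(fields.size()), fields.data()};
  return creator->createPlugin("rotated_roi_align", &fc);
}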
+#ifndef TRT_MULTI_LEVEL_ROTATED_ROI_ALIGN_HPP +#define TRT_MULTI_LEVEL_ROTATED_ROI_ALIGN_HPP + +#include + +#include +#include +#include + +#include "trt_plugin_base.hpp" + +namespace mmdeploy { +class TRTMultiLevelRotatedRoiAlign : public TRTPluginBase { + public: + TRTMultiLevelRotatedRoiAlign(const std::string &name, int alignedHeight, int alignedWidth, + int clockwise, int sampleNum, + const std::vector &featmapStrides, float roiScaleFactor = -1, + int finestScale = 56, bool aligned = false); + + TRTMultiLevelRotatedRoiAlign(const std::string name, const void *data, size_t length); + + TRTMultiLevelRotatedRoiAlign() = delete; + + // IPluginV2DynamicExt Methods + nvinfer1::IPluginV2DynamicExt *clone() const TRT_NOEXCEPT override; + nvinfer1::DimsExprs getOutputDimensions(int outputIndex, const nvinfer1::DimsExprs *inputs, + int nbInputs, nvinfer1::IExprBuilder &exprBuilder) + TRT_NOEXCEPT override; + bool supportsFormatCombination(int pos, const nvinfer1::PluginTensorDesc *ioDesc, int nbInputs, + int nbOutputs) TRT_NOEXCEPT override; + void configurePlugin(const nvinfer1::DynamicPluginTensorDesc *in, int nbInputs, + const nvinfer1::DynamicPluginTensorDesc *out, + int nbOutputs) TRT_NOEXCEPT override; + size_t getWorkspaceSize(const nvinfer1::PluginTensorDesc *inputs, int nbInputs, + const nvinfer1::PluginTensorDesc *outputs, + int nbOutputs) const TRT_NOEXCEPT override; + int enqueue(const nvinfer1::PluginTensorDesc *inputDesc, + const nvinfer1::PluginTensorDesc *outputDesc, const void *const *inputs, + void *const *outputs, void *workspace, cudaStream_t stream) TRT_NOEXCEPT override; + + // IPluginV2Ext Methods + nvinfer1::DataType getOutputDataType(int index, const nvinfer1::DataType *inputTypes, + int nbInputs) const TRT_NOEXCEPT override; + + // IPluginV2 Methods + const char *getPluginType() const TRT_NOEXCEPT override; + const char *getPluginVersion() const TRT_NOEXCEPT override; + int getNbOutputs() const TRT_NOEXCEPT override; + size_t getSerializationSize() const TRT_NOEXCEPT override; + void serialize(void *buffer) const TRT_NOEXCEPT override; + + private: + int mAlignedHeight; + int mAlignedWidth; + int mClockwise; + int mSampleNum; + std::vector mFeatmapStrides; + float mRoiScaleFactor; + int mFinestScale; + bool mAligned; +}; + +class TRTMultiLevelRotatedRoiAlignCreator : public TRTPluginCreatorBase { + public: + TRTMultiLevelRotatedRoiAlignCreator(); + + const char *getPluginName() const TRT_NOEXCEPT override; + + const char *getPluginVersion() const TRT_NOEXCEPT override; + + nvinfer1::IPluginV2 *createPlugin(const char *name, const nvinfer1::PluginFieldCollection *fc) + TRT_NOEXCEPT override; + + nvinfer1::IPluginV2 *deserializePlugin(const char *name, const void *serialData, + size_t serialLength) TRT_NOEXCEPT override; +}; +} // namespace mmdeploy +#endif // TRT_MULTI_LEVEL_ROTATED_ROI_ALIGN_HPP diff --git a/csrc/mmdeploy/backend_ops/tensorrt/multi_level_rotated_roi_align/trt_multi_level_rotated_roi_align_kernel.cu b/csrc/mmdeploy/backend_ops/tensorrt/multi_level_rotated_roi_align/trt_multi_level_rotated_roi_align_kernel.cu new file mode 100644 index 000000000..1c6f292ba --- /dev/null +++ b/csrc/mmdeploy/backend_ops/tensorrt/multi_level_rotated_roi_align/trt_multi_level_rotated_roi_align_kernel.cu @@ -0,0 +1,164 @@ +// Copyright (c) OpenMMLab. All rights reserved. 
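The kernel below picks one feature level per RoI from its area, the same finest_scale heuristic mmdet uses for FPN RoI extraction; a scalar C++ sketch of the mapping (names ours):

#include <algorithm>
#include <cmath>

// Level-selection rule used in the device code below:
// scale = sqrt(w * h); level = clamp(floor(log2(scale / finest_scale)), 0, L-1).
int select_fpn_level(float roi_w, float roi_h, int num_levels, int finest_scale = 56) {
  const float scale = std::sqrt(roi_w * roi_h);
  const int lvl = int(std::floor(std::log2(scale / float(finest_scale) + 1e-6f)));
  return std::min(num_levels - 1, std::max(0, lvl));
}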
+#include +#include + +#include +#include + +#include "common_cuda_helper.hpp" +#include "trt_multi_level_rotated_roi_align_kernel.hpp" +#include "trt_plugin_helper.hpp" + +const int kMAX_FEATMAP_SIZE = 10; +struct FeatData { + const void *data[kMAX_FEATMAP_SIZE]; + int batch_size; + int channels; + int h[kMAX_FEATMAP_SIZE]; + int w[kMAX_FEATMAP_SIZE]; + float spatial_scale[kMAX_FEATMAP_SIZE]; + int num_featmap; +}; + +template +__device__ scalar_t roi_align_single(const scalar_t *__restrict__ bottom_data, + const int roi_batch_ind, scalar_t roi_center_w, + scalar_t roi_center_h, scalar_t roi_width, scalar_t roi_height, + scalar_t theta, const scalar_t spatial_scale, const int pw, + const int ph, const int c, const int sample_num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width) { + // Force malformed ROIs to be 1x1 + + roi_width = max(roi_width, (scalar_t)1.); + roi_height = max(roi_height, (scalar_t)1.); + + const scalar_t bin_size_h = roi_height / scalar_t(pooled_height); + const scalar_t bin_size_w = roi_width / scalar_t(pooled_width); + + const scalar_t *offset_bottom_data = + bottom_data + (roi_batch_ind * channels + c) * height * width; + + const int roi_bin_grid_h = (sample_num > 0) ? sample_num : ceil(roi_height / pooled_height); + const int roi_bin_grid_w = (sample_num > 0) ? sample_num : ceil(roi_width / pooled_width); + + const scalar_t roi_start_h = -roi_height / scalar_t(2.0); + const scalar_t roi_start_w = -roi_width / scalar_t(2.0); + const scalar_t cosscalar_theta = cos(theta); + const scalar_t sinscalar_theta = sin(theta); + + // We do average (integral) pooling inside a bin + const scalar_t count = max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. = 4 + + scalar_t output_val = 0.; + + for (int iy = 0; iy < roi_bin_grid_h; iy++) { // e.g., iy = 0, 1 + const scalar_t yy = roi_start_h + ph * bin_size_h + + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const scalar_t xx = + roi_start_w + pw * bin_size_w + + static_cast(ix + .5f) * bin_size_w / static_cast(roi_bin_grid_w); + + // Rotate by theta (counterclockwise) around the center and translate + scalar_t y = yy * cosscalar_theta - xx * sinscalar_theta + roi_center_h; + scalar_t x = yy * sinscalar_theta + xx * cosscalar_theta + roi_center_w; + + scalar_t val = bilinear_interpolate(offset_bottom_data, height, width, y, x); + output_val += val; + } + } + + return output_val / count; +} + +template +__global__ void rotated_roi_extractor_kernel(scalar_t *__restrict__ output, + const scalar_t *__restrict__ bottom_rois, + FeatData feat_data, const int clockwise, + const int sample_num, const float roi_scale_factor, + const int finest_scale, const int pooled_height, + const int pooled_width, int nThreads) { + CUDA_1D_KERNEL_LOOP(index, nThreads) { + const int channels = feat_data.channels; + int tmp_index = index; + const int pw = tmp_index % pooled_width; + tmp_index /= pooled_width; + const int ph = tmp_index % pooled_height; + tmp_index /= pooled_height; + const int c = tmp_index % channels; + const int n = tmp_index / channels; + + const scalar_t *offset_bottom_rois = bottom_rois + n * 6; + + scalar_t roi_offset_x0 = offset_bottom_rois[1]; + scalar_t roi_offset_y0 = offset_bottom_rois[2]; + scalar_t roi_offset_width = offset_bottom_rois[3]; + scalar_t roi_offset_height = offset_bottom_rois[4]; + scalar_t theta = offset_bottom_rois[5]; + + const scalar_t scale = 
sqrtf(roi_offset_width * roi_offset_height); + + const int target_lvls = + min(feat_data.num_featmap - 1, + max(0, int(floorf(log2f(scale / (scalar_t)(finest_scale) + 1e-6))))); + + if (roi_scale_factor > 0.) { + roi_offset_width = roi_offset_width * roi_scale_factor; + roi_offset_height = roi_offset_height * roi_scale_factor; + } + + const scalar_t spatial_scale = (scalar_t)feat_data.spatial_scale[target_lvls]; + const int height = feat_data.h[target_lvls]; + const int width = feat_data.w[target_lvls]; + const scalar_t *bottom_data = (scalar_t *)feat_data.data[target_lvls]; + + const int roi_batch_ind = offset_bottom_rois[0]; + const scalar_t offset = aligned ? (scalar_t)-0.5 : (scalar_t)0.0; + const scalar_t roi_center_w = fma(roi_offset_x0, spatial_scale, offset); + const scalar_t roi_center_h = fma(roi_offset_y0, spatial_scale, offset); + const scalar_t roi_width = roi_offset_width * spatial_scale; + const scalar_t roi_height = roi_offset_height * spatial_scale; + + theta = clockwise > 0 ? -theta : theta; + + const scalar_t output_val = roi_align_single( + bottom_data, roi_batch_ind, roi_center_w, roi_center_h, roi_width, roi_height, theta, + spatial_scale, pw, ph, c, sample_num, channels, height, width, pooled_height, pooled_width); + output[index] = output_val; + } +} + +template <typename T> +void multi_level_rotated_roi_align(T *output, const T *rois, int num_rois, const void *const *feats, + int num_feats, int n, int c, int *h, int *w, float *strides, + int aligned_height, int aligned_width, int clockwise, + int sample_num, float roi_scale_factor, int finest_scale, + bool aligned, cudaStream_t stream) { + FeatData feat_data; + feat_data.batch_size = n; + feat_data.channels = c; + feat_data.num_featmap = num_feats; + for (int i = 0; i < num_feats; ++i) { + feat_data.data[i] = feats[i]; + feat_data.h[i] = h[i]; + feat_data.w[i] = w[i]; + feat_data.spatial_scale[i] = 1. / float(strides[i]); + } + int nThreads = num_rois * c * aligned_height * aligned_width; + if (aligned) { + rotated_roi_extractor_kernel<T, true><<<GET_BLOCKS(nThreads), THREADS_PER_BLOCK, 0, stream>>>( + output, rois, feat_data, clockwise, sample_num, roi_scale_factor, finest_scale, + aligned_height, aligned_width, nThreads); + } else { + rotated_roi_extractor_kernel<T, false><<<GET_BLOCKS(nThreads), THREADS_PER_BLOCK, 0, stream>>>( + output, rois, feat_data, clockwise, sample_num, roi_scale_factor, finest_scale, + aligned_height, aligned_width, nThreads); + } +} + +template void multi_level_rotated_roi_align<float>( + float *output, const float *rois, int num_rois, const void *const *feats, int num_feats, int n, + int c, int *h, int *w, float *strides, int aligned_height, int aligned_width, int clockwise, + int sample_num, float roi_scale_factor, int finest_scale, bool aligned, cudaStream_t stream); diff --git a/csrc/mmdeploy/backend_ops/tensorrt/multi_level_rotated_roi_align/trt_multi_level_rotated_roi_align_kernel.hpp b/csrc/mmdeploy/backend_ops/tensorrt/multi_level_rotated_roi_align/trt_multi_level_rotated_roi_align_kernel.hpp new file mode 100644 index 000000000..fc3700df3 --- /dev/null +++ b/csrc/mmdeploy/backend_ops/tensorrt/multi_level_rotated_roi_align/trt_multi_level_rotated_roi_align_kernel.hpp @@ -0,0 +1,13 @@ +// Copyright (c) OpenMMLab. All rights reserved.
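One implicit contract in the files above: FeatData in the .cu holds at most kMAX_FEATMAP_SIZE = 10 levels, and enqueue() mirrors this with kMaxFeatMap = 10, but neither checks the actual level count. A hypothetical guard (not part of the patch) at the top of enqueue() would fail fast instead of overrunning the fixed-size arrays:

// Hypothetical guard, assuming the shared 10-level cap (kMAX_FEATMAP_SIZE in
// the kernel, kMaxFeatMap in enqueue()): reject configs with more FPN levels
// rather than overrun the local heights/widths/strides arrays.
if (static_cast<int>(mFeatmapStrides.size()) > kMaxFeatMap) {
  return 1;  // enqueue() reports failure with a non-zero return code
}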
+#ifndef TRT_MULTI_LEVEL_ROTATED_ROI_ALIGN_KERNEL_HPP +#define TRT_MULTI_LEVEL_ROTATED_ROI_ALIGN_KERNEL_HPP +#include + +template +void multi_level_rotated_roi_align(T *output, const T *rois, int num_rois, const void *const *feats, + int num_feats, int n, int c, int *h, int *w, float *strides, + int aligned_height, int aligned_width, int clockwise, + int sample_num, float roi_scale_factor, int finest_scale, + bool aligned, cudaStream_t stream); + +#endif // TRT_MULTI_LEVEL_ROTATED_ROI_ALIGN_KERNEL_HPP diff --git a/csrc/mmdeploy/backend_ops/torchscript/CMakeLists.txt b/csrc/mmdeploy/backend_ops/torchscript/CMakeLists.txt index 8d862b941..4b080f621 100644 --- a/csrc/mmdeploy/backend_ops/torchscript/CMakeLists.txt +++ b/csrc/mmdeploy/backend_ops/torchscript/CMakeLists.txt @@ -1,4 +1,3 @@ # Copyright (c) OpenMMLab. All rights reserved. add_subdirectory(ops) -add_subdirectory(optimizer) diff --git a/csrc/mmdeploy/backend_ops/torchscript/optimizer/CMakeLists.txt b/csrc/mmdeploy/backend_ops/torchscript/optimizer/CMakeLists.txt index ead1e61a5..1b5e75ccc 100644 --- a/csrc/mmdeploy/backend_ops/torchscript/optimizer/CMakeLists.txt +++ b/csrc/mmdeploy/backend_ops/torchscript/optimizer/CMakeLists.txt @@ -3,6 +3,7 @@ project(ts_optimizer) find_package(Torch REQUIRED) +find_library(TORCH_PYTHON_LIBRARY torch_python PATHS "${TORCH_INSTALL_PREFIX}/lib") if (NOT TARGET pybind11) add_subdirectory(${CMAKE_SOURCE_DIR}/third_party/pybind11 pybind11) endif () @@ -10,7 +11,7 @@ endif () file(GLOB_RECURSE OPTIMIZER_SRCS *.cpp) pybind11_add_module(${PROJECT_NAME} ${OPTIMIZER_SRCS}) -target_link_libraries(${PROJECT_NAME} PRIVATE ${TORCH_LIBRARIES}) +target_link_libraries(${PROJECT_NAME} PRIVATE ${TORCH_LIBRARIES} ${TORCH_PYTHON_LIBRARY}) target_link_directories(${PROJECT_NAME} PRIVATE mmdeploy::torchscript_ops) set_target_properties( ${PROJECT_NAME} PROPERTIES LIBRARY_OUTPUT_DIRECTORY diff --git a/csrc/mmdeploy/backend_ops/torchscript/optimizer/bind.cpp b/csrc/mmdeploy/backend_ops/torchscript/optimizer/bind.cpp index 21a691f14..3b8bb0f63 100644 --- a/csrc/mmdeploy/backend_ops/torchscript/optimizer/bind.cpp +++ b/csrc/mmdeploy/backend_ops/torchscript/optimizer/bind.cpp @@ -1,10 +1,14 @@ // Copyright (c) OpenMMLab. All rights reserved. 
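The bind.cpp hunk below exposes two new graph passes to Python; from C++ they can also be driven directly, roughly like this (a sketch: graph and params come from the ONNX exporter, construction elided):

#include <memory>
#include <string>
#include <unordered_map>

#include "passes/onnx/common_subgraph_elimination.h"
#include "passes/onnx/fuse_select_assign.h"

// Sketch: run the two new passes on a TorchScript graph captured during
// ONNX export. Types come from the pass headers (torch::jit::Graph,
// torch::Tensor).
void run_new_onnx_passes(std::shared_ptr<torch::jit::Graph>& graph,
                         std::unordered_map<std::string, torch::Tensor>& params) {
  // Deduplicate identical subexpressions and identical graph inputs first...
  mmdeploy::torch_jit::CommonSubgraphElimination(graph, params);
  // ...then rewrite y[x > thres] = z[x > thres] patterns into onnx::Where.
  mmdeploy::torch_jit::FuseSelectAssign(graph, params);
}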
#include +#include +#include #include "optimizer.h" +#include "passes/onnx/common_subgraph_elimination.h" #include "passes/onnx/flatten_cls_head.h" +#include "passes/onnx/fuse_select_assign.h" #include "passes/onnx/merge_shape_concate.h" #include "passes/onnx/onnx_peephole.h" @@ -33,6 +37,10 @@ PYBIND11_MODULE(ts_optimizer, m) { onnx_module.def("_jit_pass_merge_shape_concate", MergeShapeConcate, py::arg("graph")); onnx_module.def("_jit_pass_onnx_peephole", ONNXPeephole, py::arg("graph")); onnx_module.def("_jit_pass_flatten_cls_head", FlattenClsHead, py::arg("graph")); + onnx_module.def("_jit_pass_fuse_select_assign", FuseSelectAssign, py::arg("graph"), + py::arg("params")); + onnx_module.def("_jit_pass_common_subgraph_elimination", CommonSubgraphElimination, + py::arg("graph"), py::arg("params")); } } // namespace torch_jit diff --git a/csrc/mmdeploy/backend_ops/torchscript/optimizer/ir/subgraph_matcher.cpp b/csrc/mmdeploy/backend_ops/torchscript/optimizer/ir/subgraph_matcher.cpp index 97425aa5b..6f188c568 100644 --- a/csrc/mmdeploy/backend_ops/torchscript/optimizer/ir/subgraph_matcher.cpp +++ b/csrc/mmdeploy/backend_ops/torchscript/optimizer/ir/subgraph_matcher.cpp @@ -125,7 +125,7 @@ bool SubgraphMatcher::SubgraphMatcherImpl::matchAttributes(const Node* n1, Node* "' did not match:\n", *n1, *n2); return false; } - std::vector<long> n1is, n2is; + std::vector<int64_t> n1is, n2is; std::vector<double> n1fs, n2fs; switch (n1->kindOf(attr_name)) { case AttributeKind::s: @@ -295,6 +295,8 @@ bool SubgraphMatcher::SubgraphMatcherImpl::matchesSubgraphFromAnchorNode(Node* a SubgraphMatcher::SubgraphMatcher(const Graph& pattern, MatchAttribute match_attribute) : impl_(new SubgraphMatcher::SubgraphMatcherImpl(pattern, match_attribute)) {} +SubgraphMatcher::~SubgraphMatcher() = default; + bool SubgraphMatcher::matchesSubgraphFromAnchorNode(Node* anchor) { return impl_->matchesSubgraphFromAnchorNode(anchor); } diff --git a/csrc/mmdeploy/backend_ops/torchscript/optimizer/ir/subgraph_matcher.h b/csrc/mmdeploy/backend_ops/torchscript/optimizer/ir/subgraph_matcher.h index 6629b598e..e2488e252 100644 --- a/csrc/mmdeploy/backend_ops/torchscript/optimizer/ir/subgraph_matcher.h +++ b/csrc/mmdeploy/backend_ops/torchscript/optimizer/ir/subgraph_matcher.h @@ -17,6 +17,8 @@ class SubgraphMatcher { public: explicit SubgraphMatcher(const Graph& pattern, MatchAttribute match_attribute = TRY_MATCH); + ~SubgraphMatcher(); + bool matchesSubgraphFromAnchorNode(Node* anchor); /** \brief Return match map for nodes.
*/ @@ -27,7 +29,7 @@ class SubgraphMatcher { private: class SubgraphMatcherImpl; - std::unique_ptr<SubgraphMatcherImpl> impl_ = nullptr; + std::unique_ptr<SubgraphMatcherImpl> impl_; }; } // namespace torch_jit diff --git a/csrc/mmdeploy/backend_ops/torchscript/optimizer/passes/onnx/common_subgraph_elimination.cpp b/csrc/mmdeploy/backend_ops/torchscript/optimizer/passes/onnx/common_subgraph_elimination.cpp new file mode 100644 index 000000000..c6541e630 --- /dev/null +++ b/csrc/mmdeploy/backend_ops/torchscript/optimizer/passes/onnx/common_subgraph_elimination.cpp @@ -0,0 +1,138 @@ +// https://github.com/pytorch/pytorch/blob/v1.8.1/torch/csrc/jit/passes/common_subexpression_elimination.cpp +#include "common_subgraph_elimination.h" + +#include +#include + +namespace mmdeploy { +namespace torch_jit { + +using c10::Symbol; +using torch::jit::Block; +using torch::jit::EqualNode; +using torch::jit::HashNode; +using torch::jit::Node; +using torch::jit::Value; + +struct EqualNodeWithParams { + EqualNodeWithParams(std::unordered_map<std::string, Tensor>& params) : params_(params) {} + + bool operator()(const Node* lhs, const Node* rhs) const { + auto lhs_inputs = lhs->inputs(); + auto rhs_inputs = rhs->inputs(); + } + + private: + std::unordered_map<std::string, Tensor>& params_; +}; + +struct CommonSubexpressionEliminator { + using ParamMapType = std::unordered_map<std::string, std::pair<Tensor, Value*>>; + CommonSubexpressionEliminator(std::shared_ptr<Graph> graph, + std::unordered_map<std::string, Tensor>& params) + : graph_(std::move(graph)), params_(params) {} + + bool run(std::function<Node*(Node*)> parent_lookup_fn) { + ParamMapType param_map; + return run(graph_->block(), std::move(parent_lookup_fn), param_map); + } + + // The function implements common subexpression elimination. + // Since the nodes are visited in topological order, one pass is enough. + // Returns true if CSE made changes to the graph. + bool run(Block* block, std::function<Node*(Node*)> parent_lookup_fn, ParamMapType& param_map) { + std::unordered_set<Node*, HashNode, EqualNode> subexprs; + bool changed = false; + for (auto it = block->nodes().begin(); it != block->nodes().end(); ++it) { + auto node = *it; + + // check if inputs come from params (graph inputs) + auto node_inputs = node->inputs(); + for (auto input : node_inputs) { + if (input->node()->kind() == Symbol::fromQualString("prim::Param")) { + auto debug_name = input->debugName(); + + // check if input is in params_ + if (params_.find(debug_name) == params_.end()) continue; + + // check if input is already visited. + if (param_map.find(debug_name) != param_map.end()) continue; + + // check if an existing param has the same value as this input + auto val = params_[debug_name]; + bool update_map = true; + for (auto kv : param_map) { + auto param_val = kv.second.first; + if (val.device() != param_val.device()) continue; + if (val.dtype() != param_val.dtype()) continue; + if (!val.equal(param_val)) continue; + input->replaceAllUsesWith(kv.second.second); + update_map = false; + break; + } + + // add input to param_map + if (update_map) { + param_map.emplace(debug_name, + std::make_pair(std::move(val), std::move(input))); + } + } + } + + if (!node->blocks().empty()) { + // Traverse sub-blocks. + for (auto block : node->blocks()) { + changed |= run( + block, + [&](Node* n) { + auto existing = subexprs.find(n); + if (existing != subexprs.end()) { + return *existing; + } + + return parent_lookup_fn(n); + }, + param_map); + } + + continue; + } + + // Check for CSE opportunities in the parent block.
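+      // A hit from parent_lookup_fn means an identical node already exists in
+      // an enclosing block: reroute this node's uses to it and drop this copy.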
+ auto parent_lookup = parent_lookup_fn(node); + auto g_out = node->owningGraph()->outputs(); + if (parent_lookup != nullptr) { + changed = true; + node->replaceAllUsesWith(parent_lookup); + it.destroyCurrent(); + continue; + } + + // Check whether the same subexpression already exists. + auto subit = subexprs.insert(node); + if (!subit.second) { + // Subexpression exists, replace the uses of node, and destroy it. + auto existing = *subit.first; + + changed = true; + node->replaceAllUsesWith(existing); + // Destroy the node. + it.destroyCurrent(); + } + } + + return changed; + } + + private: + std::shared_ptr graph_; + std::unordered_map& params_; +}; + +void CommonSubgraphElimination(std::shared_ptr& graph, + std::unordered_map& params) { + CommonSubexpressionEliminator cse(graph, params); + cse.run([](Node*) { return nullptr; }); +} +} // namespace torch_jit +} // namespace mmdeploy diff --git a/csrc/mmdeploy/backend_ops/torchscript/optimizer/passes/onnx/common_subgraph_elimination.h b/csrc/mmdeploy/backend_ops/torchscript/optimizer/passes/onnx/common_subgraph_elimination.h new file mode 100644 index 000000000..d90b98073 --- /dev/null +++ b/csrc/mmdeploy/backend_ops/torchscript/optimizer/passes/onnx/common_subgraph_elimination.h @@ -0,0 +1,20 @@ +// Copyright (c) OpenMMLab. All rights reserved. +#ifndef _COMMON_SUBGRAPH_ELIMINATION_H_ +#define _COMMON_SUBGRAPH_ELIMINATION_H_ + +#include +namespace mmdeploy { +namespace torch_jit { +using torch::Tensor; +using torch::jit::Graph; + +// This pass is used eliminate the common subgraph. +// There are two main difference between the one in torch/csrc/jit/pass +// 1. AliasDb is not needed in ONNX model +// 2. params might also participated in the elimination +void CommonSubgraphElimination(std::shared_ptr& graph, + std::unordered_map& params); +} // namespace torch_jit +} // namespace mmdeploy + +#endif diff --git a/csrc/mmdeploy/backend_ops/torchscript/optimizer/passes/onnx/flatten_cls_head.cpp b/csrc/mmdeploy/backend_ops/torchscript/optimizer/passes/onnx/flatten_cls_head.cpp index 5c7082f6a..73f896541 100644 --- a/csrc/mmdeploy/backend_ops/torchscript/optimizer/passes/onnx/flatten_cls_head.cpp +++ b/csrc/mmdeploy/backend_ops/torchscript/optimizer/passes/onnx/flatten_cls_head.cpp @@ -67,7 +67,7 @@ static bool matchClsHead(const Match& match, const std::unordered_mapt(Symbol::attr("value")); if (ival.dim() != 0) return false; - auto ival_dataptr = ival.data_ptr(); + auto ival_dataptr = ival.data_ptr(); if (ival_dataptr[0] != 0) return false; } diff --git a/csrc/mmdeploy/backend_ops/torchscript/optimizer/passes/onnx/fuse_select_assign.cpp b/csrc/mmdeploy/backend_ops/torchscript/optimizer/passes/onnx/fuse_select_assign.cpp new file mode 100644 index 000000000..8dc584775 --- /dev/null +++ b/csrc/mmdeploy/backend_ops/torchscript/optimizer/passes/onnx/fuse_select_assign.cpp @@ -0,0 +1,163 @@ +#include "fuse_select_assign.h" + +#include + +#include "../../ir/subgraph_matcher.h" +#include "common_subgraph_elimination.h" +#include "torch/csrc/jit/ir/irparser.h" + +namespace mmdeploy { +namespace torch_jit { + +using c10::Symbol; +using torch::jit::Block; +using torch::jit::IValue; +using torch::jit::Node; + +bool RemoveBoolCast(Node* node) { + auto bottom_node = node->input()->node(); + if (bottom_node->kind() != Symbol::onnx("Greater") && + bottom_node->kind() != Symbol::onnx("Less")) { + return false; + } + node->output()->replaceAllUsesWith(bottom_node->output()); + return true; +} + +bool FuseSelectAssign(Node* node, std::unordered_map& params, + 
std::unordered_map& vmap, SubgraphMatcher& matcher) { + auto values_map = matcher.values_map(); + + auto cmp1 = values_map[vmap["cmp_1"]]->node(); + auto cmp2 = values_map[vmap["cmp_2"]]->node(); + if (cmp1 != cmp2) { + // cmp_1 == cmp_2, cmp in (Great, Less) + if (cmp1->kind() != cmp2->kind()) return false; + if (!(cmp1->kind() == Symbol::onnx("Greater") || cmp1->kind() == Symbol::onnx("Less"))) + return false; + + // check threshold + Node* cmps[] = {cmp1, cmp2}; + float thres = 0.0f; + Node* x = nullptr; + for (int i = 0; i < 2; ++i) { + auto cmp = cmps[i]; + auto threshold = cmp->inputs()[1]->node(); + if (threshold->kind() != Symbol::onnx("Constant")) return false; + auto thres_val = threshold->t(Symbol::attr("value")); + if (i == 0) { + thres = thres_val.data_ptr()[0]; + x = cmp->inputs()[0]->node(); + } else { + float tmp_val = thres_val.data_ptr()[0]; + if (fabs(thres - tmp_val) > 1e-10) { + return false; + } + if (x != cmp->inputs()[0]->node()) { + return false; + } + } + } + } + + { + // check shape of reshape + Node* shape = values_map[vmap["reshape_1_shape"]]->node(); + auto shape_val = shape->t(Symbol::attr("value")); + if (shape_val.dim() != 1) return false; + if (shape_val.data_ptr()[0] != -1) return false; + } + + { + // check transpose + Node* trans[] = {values_map[vmap["trans_1"]]->node(), values_map[vmap["trans_2"]]->node()}; + for (auto tran : trans) { + auto tran_perm = tran->is(Symbol::attr("perm")); + if (tran_perm.size() != 2) return false; + if (tran_perm[0] != 1 || tran_perm[1] != 0) return false; + } + } + + { + // check gather indice + Node* gather_inds = values_map[vmap["gather_inds_2"]]->node(); + auto inds_val = gather_inds->t(Symbol::attr("value")); + if (inds_val.dim() != 0) return false; + if (inds_val.data_ptr()[0] != 0) return false; + } + + { + // check slice start + Node* slice = values_map[vmap["slice_2"]]->node(); + auto start_name = slice->inputs()[1]->debugName(); + auto start_val = params[start_name]; + if (start_val.dim() != 1) return false; + if (start_val.data_ptr()[0] != 0) return false; + } + + // create new node + auto graph = node->owningGraph(); + auto z = values_map[vmap["z"]]; + auto y = values_map[vmap["y"]]; + auto where_node = graph->create(Symbol::onnx("Where"), {cmp1->output(), z, y}); + where_node->insertBefore(node); + where_node->output()->copyMetadata(node->output()); + node->output()->replaceAllUsesWith(where_node->output()); + return true; +} + +void FuseSelectAssign(Block* block, std::unordered_map& params, + std::unordered_map& vmap, SubgraphMatcher& matcher) { + auto graph = block->owningGraph(); + auto it = block->nodes().begin(); + while (it != block->nodes().end()) { + auto node = *it; + ++it; + for (auto block : node->blocks()) { + FuseSelectAssign(block, params, vmap, matcher); + } + + if (node->kind() == Symbol::onnx("Cast") && node->i(Symbol::attr("to")) == 9) { + RemoveBoolCast(node); + } else if (matcher.matchesSubgraphFromAnchorNode(node)) { + FuseSelectAssign(node, params, vmap, matcher); + } + } +} + +void FuseSelectAssign(std::shared_ptr& graph, + std::unordered_map& params) { + // cse before search + CommonSubgraphElimination(graph, params); + + std::string pattern_str = R"IR( + graph(%y, %z, %cmp_1, %cmp_2, %start, %axes, %shape_2): + %nz_1 = onnx::NonZero(%cmp_1) + %trans_1 = onnx::Transpose(%nz_1) + %gather_1 = onnx::GatherND(%z, %trans_1) + %reshape_1_shape = onnx::Constant() + %reshape_1 = onnx::Reshape(%gather_1, %reshape_1_shape) + %expand_2 = onnx::Expand(%cmp_2, %shape_2) + %nz_2 = 
onnx::NonZero(%expand_2) + %trans_2 = onnx::Transpose(%nz_2) + %trans_shape_2 = onnx::Shape(%trans_2) + %gather_inds_2 = onnx::Constant() + %gather_2 = onnx::Gather(%trans_shape_2, %gather_inds_2) + %unsqueeze_2 = onnx::Unsqueeze(%gather_2) + %slice_2 = onnx::Slice(%reshape_1, %start, %unsqueeze_2, %axes) + %scatter_2 = onnx::ScatterND(%y, %trans_2, %slice_2) + return (%scatter_2) + )IR"; + + Graph pattern; + std::unordered_map vmap; + torch::jit::parseIR(pattern_str, &pattern, vmap); + + SubgraphMatcher matcher(pattern, MatchAttribute::NO_MATCH); + FuseSelectAssign(graph->block(), params, vmap, matcher); + torch::jit::EliminateDeadCode( + graph->block(), true, + torch::jit::DCESideEffectPolicy::ALLOW_DELETING_NODES_WITH_SIDE_EFFECTS); +} +} // namespace torch_jit +} // namespace mmdeploy diff --git a/csrc/mmdeploy/backend_ops/torchscript/optimizer/passes/onnx/fuse_select_assign.h b/csrc/mmdeploy/backend_ops/torchscript/optimizer/passes/onnx/fuse_select_assign.h new file mode 100644 index 000000000..afa0dc56d --- /dev/null +++ b/csrc/mmdeploy/backend_ops/torchscript/optimizer/passes/onnx/fuse_select_assign.h @@ -0,0 +1,17 @@ +// Copyright (c) OpenMMLab. All rights reserved. +#ifndef _FUSE_SELECT_ASSIGN_H_ +#define _FUSE_SELECT_ASSIGN_H_ + +#include +namespace mmdeploy { +namespace torch_jit { +using torch::Tensor; +using torch::jit::Graph; + +// this pass is used to fuse y[x>thres] = z[x>thres] +void FuseSelectAssign(std::shared_ptr& graph, + std::unordered_map& params); +} // namespace torch_jit +} // namespace mmdeploy + +#endif diff --git a/csrc/mmdeploy/backend_ops/torchscript/optimizer/passes/onnx/merge_shape_concate.cpp b/csrc/mmdeploy/backend_ops/torchscript/optimizer/passes/onnx/merge_shape_concate.cpp index 47fc8c205..ad0551fa1 100644 --- a/csrc/mmdeploy/backend_ops/torchscript/optimizer/passes/onnx/merge_shape_concate.cpp +++ b/csrc/mmdeploy/backend_ops/torchscript/optimizer/passes/onnx/merge_shape_concate.cpp @@ -18,7 +18,7 @@ using torch::jit::Value; void MergeShapeConcate(Node* node) { auto inputs = node->inputs(); - std::vector gather_value; + std::vector gather_value; Value* shape_from = nullptr; std::vector node_to_remove{node}; @@ -54,7 +54,7 @@ void MergeShapeConcate(Node* node) { if (!is_kind(constant_node, "onnx::Constant")) return; auto gather_indices_val = constant_node->t(Symbol::attr("value")); - long* data_ptr = gather_indices_val.data_ptr(); + int64_t* data_ptr = gather_indices_val.data_ptr(); if (gather_indices_val.dim() == 0) { gather_value.push_back(data_ptr[0]); } else { diff --git a/csrc/mmdeploy/codebase/common.h b/csrc/mmdeploy/codebase/common.h index 3a965a178..870cab02f 100644 --- a/csrc/mmdeploy/codebase/common.h +++ b/csrc/mmdeploy/codebase/common.h @@ -45,7 +45,8 @@ class CodebaseCreator : public Creator { auto postprocess_type = cfg[key].get(); auto creator = Registry::Get().GetCreator(postprocess_type); if (creator == nullptr) { - MMDEPLOY_ERROR("could not found entry '{}' in {}", postprocess_type, Tag::name); + MMDEPLOY_ERROR("Could not found entry '{}' in {}. 
Available components: {}", postprocess_type, + Tag::name, Registry::Get().List()); throw_exception(eEntryNotFound); } return creator->Create(cfg); diff --git a/csrc/mmdeploy/codebase/mmocr/contour_expand.cpp b/csrc/mmdeploy/codebase/mmocr/contour_expand.cpp index 4c9fe1e02..4199a964c 100644 --- a/csrc/mmdeploy/codebase/mmocr/contour_expand.cpp +++ b/csrc/mmdeploy/codebase/mmocr/contour_expand.cpp @@ -9,7 +9,7 @@ #include #include "mmdeploy/core/tensor.h" -#include "opencv2/opencv.hpp" +#include "opencv2/imgproc/imgproc.hpp" namespace mmdeploy::mmocr { diff --git a/csrc/mmdeploy/codebase/mmocr/cpu/CMakeLists.txt b/csrc/mmdeploy/codebase/mmocr/cpu/CMakeLists.txt index 8d0f60076..38f19f4ae 100644 --- a/csrc/mmdeploy/codebase/mmocr/cpu/CMakeLists.txt +++ b/csrc/mmdeploy/codebase/mmocr/cpu/CMakeLists.txt @@ -5,6 +5,9 @@ project(mmdeploy_mmocr_cpu_impl CXX) if ("cpu" IN_LIST MMDEPLOY_TARGET_DEVICES) add_library(${PROJECT_NAME} OBJECT dbnet.cpp panet.cpp psenet.cpp) set_target_properties(${PROJECT_NAME} PROPERTIES POSITION_INDEPENDENT_CODE 1) + if (NOT (MMDEPLOY_SHARED_LIBS OR MSVC)) + target_compile_options(${PROJECT_NAME} PRIVATE $<$:-fvisibility=hidden>) + endif () target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy_opencv_utils mmdeploy::core) diff --git a/csrc/mmdeploy/codebase/mmocr/cpu/dbnet.cpp b/csrc/mmdeploy/codebase/mmocr/cpu/dbnet.cpp index 8eaec861a..a963c3908 100644 --- a/csrc/mmdeploy/codebase/mmocr/cpu/dbnet.cpp +++ b/csrc/mmdeploy/codebase/mmocr/cpu/dbnet.cpp @@ -3,7 +3,6 @@ #include "mmdeploy/codebase/mmocr/dbnet.h" #include "mmdeploy/core/utils/device_utils.h" -#include "opencv2/imgcodecs.hpp" #include "opencv2/imgproc.hpp" namespace mmdeploy::mmocr { diff --git a/csrc/mmdeploy/codebase/mmocr/cuda/dbnet.cpp b/csrc/mmdeploy/codebase/mmocr/cuda/dbnet.cpp index a96c5de53..c91c760f9 100644 --- a/csrc/mmdeploy/codebase/mmocr/cuda/dbnet.cpp +++ b/csrc/mmdeploy/codebase/mmocr/cuda/dbnet.cpp @@ -6,7 +6,6 @@ #include "mmdeploy/codebase/mmocr/cuda/utils.h" #include "mmdeploy/core/utils/device_utils.h" #include "mmdeploy/device/cuda/cuda_device.h" -#include "opencv2/imgcodecs.hpp" #include "opencv2/imgproc.hpp" namespace mmdeploy::mmocr { diff --git a/csrc/mmdeploy/codebase/mmocr/dbnet.cpp b/csrc/mmdeploy/codebase/mmocr/dbnet.cpp index 5b9d2f0e2..0f3255317 100644 --- a/csrc/mmdeploy/codebase/mmocr/dbnet.cpp +++ b/csrc/mmdeploy/codebase/mmocr/dbnet.cpp @@ -35,7 +35,9 @@ class DBHead : public MMOCR { auto platform = Platform(device_.platform_id()).GetPlatformName(); auto creator = Registry::Get().GetCreator(platform); if (!creator) { - MMDEPLOY_ERROR("DBHead: implementation for platform \"{}\" not found", platform); + MMDEPLOY_ERROR( + "DBHead: implementation for platform \"{}\" not found. 
Available platforms: {}", platform, + Registry::Get().List()); throw_exception(eEntryNotFound); } impl_ = creator->Create(nullptr); diff --git a/csrc/mmdeploy/codebase/mmocr/panet.cpp b/csrc/mmdeploy/codebase/mmocr/panet.cpp index 042d088be..57c71d097 100644 --- a/csrc/mmdeploy/codebase/mmocr/panet.cpp +++ b/csrc/mmdeploy/codebase/mmocr/panet.cpp @@ -3,13 +3,13 @@ #include "mmdeploy/codebase/mmocr/panet.h" #include -#include #include "mmdeploy/codebase/mmocr/mmocr.h" #include "mmdeploy/core/device.h" #include "mmdeploy/core/registry.h" #include "mmdeploy/core/serialization.h" #include "mmdeploy/core/utils/device_utils.h" +#include "opencv2/imgproc/imgproc.hpp" namespace mmdeploy { @@ -37,7 +37,9 @@ class PANHead : public MMOCR { auto platform = Platform(device_.platform_id()).GetPlatformName(); auto creator = Registry::Get().GetCreator(platform); if (!creator) { - MMDEPLOY_ERROR("PANHead: implementation for platform \"{}\" not found", platform); + MMDEPLOY_ERROR( + "PANHead: implementation for platform \"{}\" not found. Available platforms: {}", + platform, Registry::Get().List()); throw_exception(eEntryNotFound); } impl_ = creator->Create(nullptr); diff --git a/csrc/mmdeploy/codebase/mmocr/pixel_group.cpp b/csrc/mmdeploy/codebase/mmocr/pixel_group.cpp index 36239de21..def430ad5 100644 --- a/csrc/mmdeploy/codebase/mmocr/pixel_group.cpp +++ b/csrc/mmdeploy/codebase/mmocr/pixel_group.cpp @@ -8,7 +8,7 @@ #include #include "mmdeploy/core/tensor.h" -#include "opencv2/opencv.hpp" +#include "opencv2/imgproc/imgproc.hpp" namespace mmdeploy::mmocr { diff --git a/csrc/mmdeploy/codebase/mmocr/psenet.cpp b/csrc/mmdeploy/codebase/mmocr/psenet.cpp index 19ab31817..4860b73f4 100644 --- a/csrc/mmdeploy/codebase/mmocr/psenet.cpp +++ b/csrc/mmdeploy/codebase/mmocr/psenet.cpp @@ -3,13 +3,13 @@ #include "mmdeploy/codebase/mmocr/psenet.h" #include -#include #include "mmdeploy/codebase/mmocr/mmocr.h" #include "mmdeploy/core/device.h" #include "mmdeploy/core/registry.h" #include "mmdeploy/core/serialization.h" #include "mmdeploy/core/utils/device_utils.h" +#include "opencv2/imgproc/imgproc.hpp" namespace mmdeploy { namespace mmocr { @@ -34,7 +34,9 @@ class PSEHead : public MMOCR { auto platform = Platform(device_.platform_id()).GetPlatformName(); auto creator = Registry::Get().GetCreator(platform); if (!creator) { - MMDEPLOY_ERROR("PSEHead: implementation for platform \"{}\" not found", platform); + MMDEPLOY_ERROR( + "PSEHead: implementation for platform \"{}\" not found. 
Available platforms: {}", + platform, Registry::Get().List()); throw_exception(eEntryNotFound); } impl_ = creator->Create(nullptr); diff --git a/csrc/mmdeploy/codebase/mmocr/warp.cpp b/csrc/mmdeploy/codebase/mmocr/warp.cpp index a8107a244..6f231662f 100644 --- a/csrc/mmdeploy/codebase/mmocr/warp.cpp +++ b/csrc/mmdeploy/codebase/mmocr/warp.cpp @@ -8,6 +8,7 @@ #include "mmdeploy/core/utils/formatter.h" #include "mmdeploy/core/value.h" #include "mmdeploy/experimental/module_adapter.h" +#include "opencv2/imgproc/imgproc.hpp" #include "opencv_utils.h" namespace mmdeploy { @@ -40,9 +41,9 @@ class WarpBoxes { auto w = static_cast(std::max(e0, e1)); auto h = static_cast(std::min(e0, e1)); std::vector dst{{0, 0}, {w, 0}, {w, h}, {0, h}}; - auto m = getAffineTransform(src.data(), dst.data()); + auto m = cv::getAffineTransform(src.data(), dst.data()); cv::Mat warped; - warpAffine(img, warped, m, {static_cast(w), static_cast(h)}); + cv::warpAffine(img, warped, m, {static_cast(w), static_cast(h)}); return warped; } diff --git a/csrc/mmdeploy/codebase/mmpose/keypoints_from_heatmap.cpp b/csrc/mmdeploy/codebase/mmpose/keypoints_from_heatmap.cpp index 7477836e1..81df42adb 100644 --- a/csrc/mmdeploy/codebase/mmpose/keypoints_from_heatmap.cpp +++ b/csrc/mmdeploy/codebase/mmpose/keypoints_from_heatmap.cpp @@ -1,7 +1,6 @@ // Copyright (c) OpenMMLab. All rights reserved. #include -#include #include #include "mmdeploy/core/device.h" diff --git a/csrc/mmdeploy/codebase/mmpose/keypoints_from_regression.cpp b/csrc/mmdeploy/codebase/mmpose/keypoints_from_regression.cpp index 58fd4c972..6104be047 100644 --- a/csrc/mmdeploy/codebase/mmpose/keypoints_from_regression.cpp +++ b/csrc/mmdeploy/codebase/mmpose/keypoints_from_regression.cpp @@ -1,6 +1,5 @@ // Copyright (c) OpenMMLab. All rights reserved. -#include #include #include "mmdeploy/core/device.h" diff --git a/csrc/mmdeploy/codebase/mmrotate/oriented_object_detection.cpp b/csrc/mmdeploy/codebase/mmrotate/oriented_object_detection.cpp index 45bfd46b6..161d91a08 100644 --- a/csrc/mmdeploy/codebase/mmrotate/oriented_object_detection.cpp +++ b/csrc/mmdeploy/codebase/mmrotate/oriented_object_detection.cpp @@ -1,6 +1,5 @@ // Copyright (c) OpenMMLab. All rights reserved. 
-#include #include #include "mmdeploy/core/device.h" diff --git a/csrc/mmdeploy/core/CMakeLists.txt b/csrc/mmdeploy/core/CMakeLists.txt index 68843cd3c..932993620 100644 --- a/csrc/mmdeploy/core/CMakeLists.txt +++ b/csrc/mmdeploy/core/CMakeLists.txt @@ -14,6 +14,9 @@ else () set(MMDEPLOY_SPDLOG_DIR ${CMAKE_SOURCE_DIR}/third_party/spdlog) add_subdirectory(${MMDEPLOY_SPDLOG_DIR} ${CMAKE_CURRENT_BINARY_DIR}/spdlog EXCLUDE_FROM_ALL) set_target_properties(spdlog PROPERTIES POSITION_INDEPENDENT_CODE ON) + if (NOT (MMDEPLOY_SHARED_LIBS OR MSVC)) + target_compile_options(spdlog PRIVATE $<$:-fvisibility=hidden>) + endif () set(SPDLOG_LIB spdlog::spdlog) mmdeploy_export(spdlog) if (MMDEPLOY_BUILD_SDK_CXX_API) @@ -62,7 +65,7 @@ if (MMDEPLOY_BUILD_SDK_CXX_API) endif () target_link_libraries(${PROJECT_NAME} PUBLIC ${SPDLOG_LIB}) -if (NOT (MSVC OR ANDROID)) +if (NOT (MSVC OR ANDROID OR APPLE)) target_link_libraries(${PROJECT_NAME} PUBLIC stdc++fs) endif () add_library(mmdeploy::core ALIAS ${PROJECT_NAME}) diff --git a/csrc/mmdeploy/core/model.cpp b/csrc/mmdeploy/core/model.cpp index 66dae5634..6a0be43f0 100644 --- a/csrc/mmdeploy/core/model.cpp +++ b/csrc/mmdeploy/core/model.cpp @@ -35,13 +35,13 @@ Result Model::Init(const std::string& model_path) { } OUTCOME_TRY(auto meta, impl->ReadMeta()); - MMDEPLOY_INFO("{} successfully load sdk model {}", entry.name, model_path); + MMDEPLOY_INFO("{} successfully load model {}", entry.name, model_path); impl_ = std::move(impl); meta_ = std::move(meta); return success(); } - MMDEPLOY_ERROR("no ModelImpl can read sdk_model {}", model_path); + MMDEPLOY_ERROR("no ModelImpl can read model {}", model_path); return Status(eNotSupported); } @@ -56,7 +56,7 @@ Result Model::Init(const void* buffer, size_t size) { } OUTCOME_TRY(auto meta, impl->ReadMeta()); - MMDEPLOY_INFO("successfully load sdk model {}", entry.name); + MMDEPLOY_INFO("Successfully load model {}", entry.name); impl_ = std::move(impl); meta_ = std::move(meta); return success(); diff --git a/csrc/mmdeploy/core/mpl/span.h b/csrc/mmdeploy/core/mpl/span.h index 16f719aa1..7aa630bf0 100644 --- a/csrc/mmdeploy/core/mpl/span.h +++ b/csrc/mmdeploy/core/mpl/span.h @@ -58,6 +58,13 @@ class Span { typename = std::void_t()))>> constexpr Span(U& v) : data_(std::data(v)), size_(std::size(v)) {} + template ()))>, + typename = std::void_t()))>> + constexpr Span(const U& v) : data_(std::data(v)), size_(std::size(v)) {} + + template + constexpr Span(std::initializer_list il) noexcept : Span(il.begin(), il.size()) {} + template constexpr Span(element_type (&arr)[N]) noexcept : data_(std::data(arr)), size_(N) {} @@ -79,9 +86,9 @@ class Span { constexpr Span last(size_type count) const { return {end() - count, count}; } constexpr Span subspan(size_type offset, size_type count = -1) const { if (count == -1) { - return {begin() + offset, end()}; + return Span(begin() + offset, end()); } else { - return {begin() + offset, begin() + offset + count}; + return Span(begin() + offset, begin() + offset + count); } } @@ -113,6 +120,9 @@ Span(T (&)[N]) -> Span; template ().data())>, typename = std::void_t().size())>> Span(U& v) -> Span::value_type>; + +template +Span(std::initializer_list) -> Span; // clang-format on } // namespace mmdeploy diff --git a/csrc/mmdeploy/core/registry.cpp b/csrc/mmdeploy/core/registry.cpp index 4d404138a..78a341f6e 100644 --- a/csrc/mmdeploy/core/registry.cpp +++ b/csrc/mmdeploy/core/registry.cpp @@ -8,7 +8,7 @@ Registry::Registry() = default; Registry::~Registry() = default; -bool Registry::AddCreator(Creator 
&creator) { +bool Registry::AddCreator(Creator& creator) { MMDEPLOY_DEBUG("Adding creator: {}", creator.GetName()); auto key = creator.GetName(); if (entries_.find(key) == entries_.end()) { @@ -26,7 +26,7 @@ bool Registry::AddCreator(Creator &creator) { return true; } -Creator *Registry::GetCreator(const std::string &type, int version) { +Creator* Registry::GetCreator(const std::string& type, int version) { auto iter = entries_.find(type); if (iter == entries_.end()) { return nullptr; @@ -43,4 +43,12 @@ Creator *Registry::GetCreator(const std::string &type, int version) return nullptr; } +std::vector Registry::List() { + std::vector list; + for (const auto& [name, _] : entries_) { + list.push_back(name); + } + return list; +} + } // namespace mmdeploy diff --git a/csrc/mmdeploy/core/registry.h b/csrc/mmdeploy/core/registry.h index bde878a35..a6bcc1cf6 100644 --- a/csrc/mmdeploy/core/registry.h +++ b/csrc/mmdeploy/core/registry.h @@ -38,7 +38,7 @@ template <> class Creator { public: virtual ~Creator() = default; - virtual const char *GetName() const = 0; + virtual const char* GetName() const = 0; virtual int GetVersion() const { return 0; } }; @@ -48,7 +48,7 @@ class Creator : public Creator { using ReturnType = detail::get_return_type_t; public: - virtual ReturnType Create(const Value &args) = 0; + virtual ReturnType Create(const Value& args) = 0; }; template @@ -61,25 +61,27 @@ class MMDEPLOY_API Registry { ~Registry(); - bool AddCreator(Creator &creator); + bool AddCreator(Creator& creator); - Creator *GetCreator(const std::string &type, int version = 0); + Creator* GetCreator(const std::string& type, int version = 0); + + std::vector List(); private: - std::multimap *> entries_; + std::multimap*> entries_; }; template class Registry : public Registry { public: - bool AddCreator(Creator &creator) { return Registry::AddCreator(creator); } + bool AddCreator(Creator& creator) { return Registry::AddCreator(creator); } - Creator *GetCreator(const std::string &type, int version = 0) { + Creator* GetCreator(const std::string& type, int version = 0) { auto creator = Registry::GetCreator(type, version); - return static_cast *>(creator); + return static_cast*>(creator); } - static Registry &Get(); + static Registry& Get(); private: Registry() = default; @@ -98,11 +100,11 @@ class Registerer { #define MMDEPLOY_DECLARE_REGISTRY(EntryType) \ template <> \ - Registry &Registry::Get(); + Registry& Registry::Get(); #define MMDEPLOY_DEFINE_REGISTRY(EntryType) \ template <> \ - MMDEPLOY_EXPORT Registry &Registry::Get() { \ + MMDEPLOY_EXPORT Registry& Registry::Get() { \ static Registry v; \ return v; \ } @@ -115,10 +117,10 @@ class Registerer { public: \ module_name##Creator() = default; \ ~module_name##Creator() = default; \ - const char *GetName() const override { return #module_name; } \ + const char* GetName() const override { return #module_name; } \ int GetVersion() const override { return version; } \ \ - std::unique_ptr Create(const Value &value) override { \ + std::unique_ptr Create(const Value& value) override { \ return std::make_unique(value); \ } \ }; \ diff --git a/csrc/mmdeploy/core/status_code.cpp b/csrc/mmdeploy/core/status_code.cpp index 1654f4caf..3747960b5 100644 --- a/csrc/mmdeploy/core/status_code.cpp +++ b/csrc/mmdeploy/core/status_code.cpp @@ -3,6 +3,7 @@ #include "mmdeploy/core/status_code.h" #include "mmdeploy/core/logger.h" +#include "mmdeploy/core/utils/source_location.h" namespace mmdeploy { @@ -21,7 +22,11 @@ string_ref Status::message() const { std::string ret; try { #if 
MMDEPLOY_STATUS_USE_SOURCE_LOCATION
+#if MMDEPLOY_HAS_SOURCE_LOCATION
     ret = fmt::format("{} ({}) @ {}:{}", to_string(ec), (int32_t)ec, file, line);
+#else
+    ret = fmt::format("{} ({})", to_string(ec), (int32_t)ec);
+#endif
 #elif MMDEPLOY_STATUS_USE_STACKTRACE
     ret = fmt::format("{} ({}), stacktrace:\n{}", to_string(ec), (int32_t)ec, st.to_string());
 #else
diff --git a/csrc/mmdeploy/core/types.h b/csrc/mmdeploy/core/types.h
index 3a1daf9b5..32e6c745a 100644
--- a/csrc/mmdeploy/core/types.h
+++ b/csrc/mmdeploy/core/types.h
@@ -37,6 +37,15 @@ class NonCopyable {
   NonCopyable& operator=(const NonCopyable&) = delete;
 };

+class NonMovable {
+ public:
+  NonMovable() = default;
+  NonMovable(const NonMovable&) = delete;
+  NonMovable& operator=(const NonMovable&) = delete;
+  NonMovable(NonMovable&&) noexcept = delete;
+  NonMovable& operator=(NonMovable&&) noexcept = delete;
+};
+
 }  // namespace mmdeploy

 #endif  // !CORE_TYPES_H
diff --git a/csrc/mmdeploy/core/utils/filesystem.h b/csrc/mmdeploy/core/utils/filesystem.h
index 7aca6a8d8..3f2a3286c 100644
--- a/csrc/mmdeploy/core/utils/filesystem.h
+++ b/csrc/mmdeploy/core/utils/filesystem.h
@@ -3,8 +3,7 @@
 #ifndef MMDEPLOY_CSRC_CORE_UTILS_FILESYSTEM_H_
 #define MMDEPLOY_CSRC_CORE_UTILS_FILESYSTEM_H_

-// TODO: what about clang?
-#if __GNUC__ >= 8 || _MSC_VER
+#if __GNUC__ >= 8 || _MSC_VER || __clang_major__ >= 7
 #include <filesystem>
 namespace fs = std::filesystem;
 #else
diff --git a/csrc/mmdeploy/device/cuda/cuda_device.cpp b/csrc/mmdeploy/device/cuda/cuda_device.cpp
index a29bd6f95..83129320c 100644
--- a/csrc/mmdeploy/device/cuda/cuda_device.cpp
+++ b/csrc/mmdeploy/device/cuda/cuda_device.cpp
@@ -58,7 +58,7 @@ Allocator CreateDefaultAllocator() {
   AllocatorImplPtr allocator = std::make_shared<...>();
   allocator = std::make_shared<...>(allocator, -1, .5);
   allocator = std::make_shared<...>(allocator);
-  MMDEPLOY_INFO("Default CUDA allocator initialized");
+  MMDEPLOY_DEBUG("Default CUDA allocator initialized");
   return Access::create(allocator);
 }
diff --git a/csrc/mmdeploy/execution/bulk.h b/csrc/mmdeploy/execution/bulk.h
index 32e4d2be3..aeeeb40c9 100644
--- a/csrc/mmdeploy/execution/bulk.h
+++ b/csrc/mmdeploy/execution/bulk.h
@@ -36,7 +36,7 @@ struct _Receiver<...>::type {
   template <class... As>
   friend void tag_invoke(set_value_t, type&& self, As&&... as) noexcept {
-    MMDEPLOY_WARN("fallback Bulk implementation");
+    MMDEPLOY_DEBUG("fallback Bulk implementation");
     for (Shape i = 0; i < self.shape_; ++i) {
       self.func_(i, as...);
     }
diff --git a/csrc/mmdeploy/execution/expand.h b/csrc/mmdeploy/execution/expand.h
index d9a278cbf..734f9946a 100644
--- a/csrc/mmdeploy/execution/expand.h
+++ b/csrc/mmdeploy/execution/expand.h
@@ -11,66 +11,39 @@ namespace mmdeploy {

 namespace _expand {

-template <...>
-struct _Operation {
-  struct type;
-};
-template <...>
-using operation_t = typename _Operation<...>::type;
-
 template <class Receiver>
 struct _Receiver {
-  struct type;
+  struct type {
+    Receiver receiver_;
+    template <class Tuple>
+    friend void tag_invoke(set_value_t, type&& self, Tuple&& tup) noexcept {
+      std::apply(
+          [&](auto&&...
args) { - SetValue((Receiver &&) self.op_state_->receiver_, (decltype(args)&&)args...); - }, - (Tuple &&) tup); - } -}; - -template -struct _Operation::type { - connect_result_t> op_state2_; - Receiver receiver_; - - template - type(Sender2&& sender, Receiver&& receiver) - : op_state2_(Connect((Sender2 &&) sender, receiver_t{this})), - receiver_((Receiver &&) receiver) {} - - friend void tag_invoke(start_t, type& op_state) { Start(op_state.op_state2_); } -}; +using receiver_t = typename _Receiver>::type; template struct _Sender { - struct type; + struct type { + using value_types = std::tuple_element_t<0, completion_signatures_of_t>; + Sender sender_; + + template = true> + friend auto tag_invoke(connect_t, Self&& self, Receiver&& receiver) { + return Connect(((Self &&) self).sender_, + receiver_t{(Receiver &&) receiver}); + } + }; }; template using sender_t = typename _Sender>::type; -template -struct _Sender::type { - using value_types = std::tuple_element_t<0, completion_signatures_of_t>; - Sender sender_; - - template = true> - friend auto tag_invoke(connect_t, Self&& self, Receiver&& receiver) - -> operation_t { - return operation_t(((Self &&) self).sender_, (Receiver &&) receiver); - } -}; - struct expand_t { template , int> = 0> auto operator()(Sender&& sender) const { diff --git a/csrc/mmdeploy/execution/schedulers/dynamic_batch_scheduler.h b/csrc/mmdeploy/execution/schedulers/dynamic_batch_scheduler.h index 564048b3f..bb439e09e 100644 --- a/csrc/mmdeploy/execution/schedulers/dynamic_batch_scheduler.h +++ b/csrc/mmdeploy/execution/schedulers/dynamic_batch_scheduler.h @@ -77,7 +77,7 @@ struct Context : context_base_t { delay_(scheduler_.timeout_), timer_(scheduler_.timer_) {} - ~Context() { MMDEPLOY_INFO("~Context()"); } + ~Context() { MMDEPLOY_DEBUG("~Context()"); } using _operation_t = operation_t; diff --git a/csrc/mmdeploy/execution/schedulers/schedulers.cpp b/csrc/mmdeploy/execution/schedulers/schedulers.cpp index 5773b9bd3..df0135eeb 100644 --- a/csrc/mmdeploy/execution/schedulers/schedulers.cpp +++ b/csrc/mmdeploy/execution/schedulers/schedulers.cpp @@ -30,7 +30,7 @@ Scheduler CreateFromContext(std::unique_ptr context) { using EraseType = _type_erased::TypeErasedSchedulerImpl; auto sched = new EraseType(context->GetScheduler()); return Scheduler{std::shared_ptr( - sched, [context = std::move(context)](EraseType* p) { delete p; })}; + sched, [context = std::shared_ptr(std::move(context))](EraseType* p) { delete p; })}; } } // namespace diff --git a/csrc/mmdeploy/graph/common.h b/csrc/mmdeploy/graph/common.h index 1b8dd9e5c..64a6e94d3 100644 --- a/csrc/mmdeploy/graph/common.h +++ b/csrc/mmdeploy/graph/common.h @@ -26,16 +26,17 @@ inline std::true_type Check(T&&) { template ::ReturnType> inline Result CreateFromRegistry(const Value& config, const char* key = "type") { - MMDEPLOY_INFO("config: {}", config); + MMDEPLOY_DEBUG("config: {}", config); auto type = config[key].get(); auto creator = Registry::Get().GetCreator(type); if (!creator) { - MMDEPLOY_ERROR("failed to find module creator: {}", type); + MMDEPLOY_ERROR("Failed to find creator: {}. 
Available: {}", type, + Registry::Get().List()); return Status(eEntryNotFound); } auto inst = creator->Create(config); if (!Check(inst)) { - MMDEPLOY_ERROR("failed to create module: {}", type); + MMDEPLOY_ERROR("Failed to create module: {}, config: {}", type, config); return Status(eFail); } return std::move(inst); diff --git a/csrc/mmdeploy/graph/pipeline.cpp b/csrc/mmdeploy/graph/pipeline.cpp index dfa098f24..15f079c54 100644 --- a/csrc/mmdeploy/graph/pipeline.cpp +++ b/csrc/mmdeploy/graph/pipeline.cpp @@ -125,7 +125,6 @@ Result> PipelineParser::Parse(const Value& config) { use_count_.resize(size + 1); - // MMDEPLOY_INFO("pipeline->inputs: {}", pipeline->inputs()); OUTCOME_TRY(UpdateOutputCoords(static_cast(size), pipeline->inputs())); for (auto task_config : task_configs) { auto index = static_cast(nodes.size()); @@ -143,7 +142,7 @@ Result> PipelineParser::Parse(const Value& config) { OUTCOME_TRY(UpdateOutputCoords(index, node->outputs())); nodes.push_back(std::move(node)); } else { - MMDEPLOY_ERROR("could not create {}: {}", name, type); + MMDEPLOY_ERROR("Could not create {}: {}", type, name); return Status(eFail); } } @@ -163,7 +162,6 @@ Result> PipelineParser::Parse(const Value& config) { } Result> PipelineParser::GetInputCoords(const vector& names) { - // MMDEPLOY_INFO("GetInputCoords: {}", names); vector ret; ret.reserve(names.size()); for (int i = 0; i < names.size(); ++i) { @@ -192,7 +190,6 @@ Result PipelineParser::UpdateOutputCoords(int index, const vector& MMDEPLOY_ERROR("duplicate output: ", output); return Status(eNotSupported); } else { - // MMDEPLOY_ERROR("insert: {}", output); output_name_to_coords_.insert({output, {index, i}}); } } @@ -202,9 +199,12 @@ Result PipelineParser::UpdateOutputCoords(int index, const vector& class PipelineCreator : public Creator { public: const char* GetName() const override { return "Pipeline"; } - int GetVersion() const override { return 0; } std::unique_ptr Create(const Value& value) override { - return PipelineParser{}.Parse(value).value(); + try { + return PipelineParser{}.Parse(value).value(); + } catch (...) { + } + return nullptr; } }; diff --git a/csrc/mmdeploy/graph/task.cpp b/csrc/mmdeploy/graph/task.cpp index 41f1ff69d..98a4ad81c 100644 --- a/csrc/mmdeploy/graph/task.cpp +++ b/csrc/mmdeploy/graph/task.cpp @@ -72,9 +72,12 @@ Result> TaskParser::Parse(const Value& config) { class TaskCreator : public Creator { public: const char* GetName() const override { return "Task"; } - int GetVersion() const override { return 0; } std::unique_ptr Create(const Value& value) override { - return TaskParser::Parse(value).value(); + try { + return TaskParser::Parse(value).value(); + } catch (...) 
{ + } + return nullptr; } }; diff --git a/csrc/mmdeploy/model/CMakeLists.txt b/csrc/mmdeploy/model/CMakeLists.txt index 547bcb79e..70e0577e1 100644 --- a/csrc/mmdeploy/model/CMakeLists.txt +++ b/csrc/mmdeploy/model/CMakeLists.txt @@ -10,7 +10,8 @@ endif () foreach (MODEL_NAME ${MODEL_NAMES}) set(TARGET_MODEL_NAME mmdeploy_${MODEL_NAME}) mmdeploy_add_module(${TARGET_MODEL_NAME} ${MODEL_NAME}_impl.cpp) - if (NOT (MSVC OR ANDROID)) + # TODO: stdc++fs only exists in libstdc++ + if (NOT (MSVC OR ANDROID OR APPLE)) target_link_libraries(${TARGET_MODEL_NAME} PUBLIC stdc++fs) endif () if (${MODEL_NAME} STREQUAL "zip_model") diff --git a/csrc/mmdeploy/model/zip_model_impl.cpp b/csrc/mmdeploy/model/zip_model_impl.cpp index a05344104..f215509b0 100644 --- a/csrc/mmdeploy/model/zip_model_impl.cpp +++ b/csrc/mmdeploy/model/zip_model_impl.cpp @@ -34,10 +34,10 @@ class ZipModelImpl : public ModelImpl { int ret = 0; zip_ = zip_open(model_path.c_str(), 0, &ret); if (ret != 0) { - MMDEPLOY_INFO("open zip file {} failed, ret {}", model_path.c_str(), ret); + MMDEPLOY_INFO("Failed to open zip file {}, ret {}", model_path.c_str(), ret); return Status(eInvalidArgument); } - MMDEPLOY_INFO("open sdk model file {} successfully", model_path.c_str()); + MMDEPLOY_INFO("Open model file {} successfully", model_path.c_str()); return InitZip(); } @@ -103,7 +103,7 @@ class ZipModelImpl : public ModelImpl { private: Result InitZip() { int files = zip_get_num_files(zip_); - MMDEPLOY_INFO("there are {} files in sdk model file", files); + MMDEPLOY_INFO("There are {} files in the model", files); if (files == 0) { return Status(eFail); } diff --git a/csrc/mmdeploy/net/CMakeLists.txt b/csrc/mmdeploy/net/CMakeLists.txt index a7cd00d3d..3b42740c2 100644 --- a/csrc/mmdeploy/net/CMakeLists.txt +++ b/csrc/mmdeploy/net/CMakeLists.txt @@ -22,5 +22,9 @@ if ("openvino" IN_LIST MMDEPLOY_TARGET_BACKENDS) add_subdirectory(openvino) endif () +if ("snpe" IN_LIST MMDEPLOY_TARGET_BACKENDS) + add_subdirectory(snpe) +endif () + mmdeploy_add_module(${PROJECT_NAME} net_module.cpp) add_library(mmdeploy::net_module ALIAS ${PROJECT_NAME}) diff --git a/csrc/mmdeploy/net/net_module.cpp b/csrc/mmdeploy/net/net_module.cpp index 206b798c9..c3513a692 100644 --- a/csrc/mmdeploy/net/net_module.cpp +++ b/csrc/mmdeploy/net/net_module.cpp @@ -34,13 +34,15 @@ struct NetModule::Impl { stream_ = context.value("stream", Stream::GetDefault(device_)); auto creator = Registry::Get().GetCreator(config.backend); if (!creator) { - MMDEPLOY_ERROR("Net backend not found: {}", config.backend); + MMDEPLOY_ERROR("Net backend not found: {}, available backends: {}", config.backend, + Registry::Get().List()); return Status(eEntryNotFound); } auto net_cfg = args; net_cfg["context"].update({{"device", device_}, {"stream", stream_}}); net_ = creator->Create(net_cfg); if (!net_) { + MMDEPLOY_ERROR("Failed to create Net backend: {}, config: {}", config.backend, net_cfg); return Status(eFail); } OUTCOME_TRY(InitializeInputTensors(args)); diff --git a/csrc/mmdeploy/net/ort/ort_net.cpp b/csrc/mmdeploy/net/ort/ort_net.cpp index 0ddfd7652..1f5adaca2 100644 --- a/csrc/mmdeploy/net/ort/ort_net.cpp +++ b/csrc/mmdeploy/net/ort/ort_net.cpp @@ -77,7 +77,7 @@ Result OrtNet::Init(const Value& args) { auto input_name = session_.GetInputName(i, allocator); auto type_info = session_.GetInputTypeInfo(i); auto shape = to_shape(type_info); - MMDEPLOY_INFO("input {}, shape = {}", i, shape); + MMDEPLOY_DEBUG("input {}, shape = {}", i, shape); filter_shape(shape); OUTCOME_TRY(auto data_type, 
ConvertElementType(type_info.GetTensorTypeAndShapeInfo().GetElementType())); @@ -91,7 +91,7 @@ Result OrtNet::Init(const Value& args) { auto output_name = session_.GetOutputName(i, allocator); auto type_info = session_.GetOutputTypeInfo(i); auto shape = to_shape(type_info); - MMDEPLOY_INFO("output {}, shape = {}", i, shape); + MMDEPLOY_DEBUG("output {}, shape = {}", i, shape); filter_shape(shape); OUTCOME_TRY(auto data_type, ConvertElementType(type_info.GetTensorTypeAndShapeInfo().GetElementType())); diff --git a/csrc/mmdeploy/net/ppl/CMakeLists.txt b/csrc/mmdeploy/net/ppl/CMakeLists.txt index a16dd7006..91f086468 100644 --- a/csrc/mmdeploy/net/ppl/CMakeLists.txt +++ b/csrc/mmdeploy/net/ppl/CMakeLists.txt @@ -7,10 +7,10 @@ find_package(pplnn REQUIRED) mmdeploy_add_module(${PROJECT_NAME} ppl_net.cpp) target_include_directories(${PROJECT_NAME} PUBLIC $) -if ("cpu" IN_LIST MMDEPLOY_TARGET_DEVICES) +if (PPLNN_USE_X86 AND ("cpu" IN_LIST MMDEPLOY_TARGET_DEVICES)) target_compile_definitions(${PROJECT_NAME} PRIVATE -DPPL_NN_HAS_X86=1) endif () -if ("cuda" IN_LIST MMDEPLOY_TARGET_DEVICES) +if (PPLNN_USE_CUDA AND ("cuda" IN_LIST MMDEPLOY_TARGET_DEVICES)) target_compile_definitions(${PROJECT_NAME} PRIVATE -DPPL_NN_HAS_CUDA=1) target_include_directories(${PROJECT_NAME} PUBLIC ${CUDA_TOOLKIT_ROOT_DIR}/include) target_link_directories(${PROJECT_NAME} PUBLIC ${CUDA_TOOLKIT_ROOT_DIR}/lib64) diff --git a/csrc/mmdeploy/net/ppl/ppl_net.cpp b/csrc/mmdeploy/net/ppl/ppl_net.cpp index f8cfd990e..3f404983d 100644 --- a/csrc/mmdeploy/net/ppl/ppl_net.cpp +++ b/csrc/mmdeploy/net/ppl/ppl_net.cpp @@ -6,14 +6,17 @@ #include "mmdeploy/core/model.h" #include "mmdeploy/core/utils/formatter.h" #include "ppl/nn/common/logger.h" -#include "ppl/nn/models/onnx/onnx_runtime_builder_factory.h" +#include "ppl/nn/models/onnx/runtime_builder_factory.h" #if PPL_NN_HAS_X86 #include "ppl/nn/engines/x86/engine_factory.h" -#include "ppl/nn/engines/x86/x86_options.h" +#include "ppl/nn/engines/x86/engine_options.h" +#include "ppl/nn/engines/x86/ops.h" #endif #if PPL_NN_HAS_CUDA -#include "ppl/nn/engines/cuda/cuda_options.h" #include "ppl/nn/engines/cuda/engine_factory.h" +#include "ppl/nn/engines/cuda/engine_options.h" +#include "ppl/nn/engines/cuda/ops.h" +#define PPL_CUDA_IMPORT_FROM_BUFFER 1 #endif namespace mmdeploy { @@ -35,7 +38,7 @@ Result> ppl_try(T* v) { } Tensor PPLNet::CreateInternalTensor(ppl::nn::Tensor* src, Device device) { - auto desc = src->GetShape(); + const auto& desc = *src->GetShape(); auto name = src->GetName(); std::vector shape{desc.GetDims(), desc.GetDims() + desc.GetDimCount()}; if (std::any_of(begin(shape), end(shape), [](auto x) { return x <= 0; })) { @@ -56,15 +59,37 @@ Result PPLNet::Init(const Value& args) { #if PPL_NN_HAS_CUDA if (device_.is_device()) { - engines_.emplace_back(ppl::nn::CudaEngineFactory::Create({})); - // Use default algorithms until PPL can set algorithms from a memory buffer - // since the optimization process is really slow - engines_.back()->Configure(ppl::nn::CUDA_CONF_USE_DEFAULT_ALGORITHMS, true); + ppl::nn::cuda::RegisterBuiltinOpImpls(); + ppl::nn::cuda::EngineOptions options{}; + options.device_id = device_.device_id(); + options.mm_policy = ppl::nn::cuda::MM_BEST_FIT; + engines_.emplace_back(ppl::nn::cuda::EngineFactory::Create(options)); + + bool import_algo = false; + +#if PPL_CUDA_IMPORT_FROM_BUFFER + auto algo = model.ReadFile(config.weights); + if (algo) { + auto ret = + engines_.back()->Configure(ppl::nn::cuda::ENGINE_CONF_IMPORT_ALGORITHMS_FROM_BUFFER, + 
algo.value().c_str(), algo.value().size()); + if (ret == ppl::common::RC_SUCCESS) { + import_algo = true; + } else { + MMDEPLOY_ERROR("failed to import algorithms ({}), default algorithms will be used", ret); + } + } +#endif + + if (!import_algo) { + engines_.back()->Configure(ppl::nn::cuda::ENGINE_CONF_USE_DEFAULT_ALGORITHMS, true); + } } #endif #if PPL_NN_HAS_X86 if (device_.is_host()) { - engines_.emplace_back(ppl::nn::X86EngineFactory::Create({})); + ppl::nn::x86::RegisterBuiltinOpImpls(); + engines_.emplace_back(ppl::nn::x86::EngineFactory::Create({})); } #endif @@ -73,8 +98,14 @@ Result PPLNet::Init(const Value& args) { engines.push_back(engine.get()); } - OUTCOME_TRY(auto builder, ppl_try(ppl::nn::OnnxRuntimeBuilderFactory::Create( - onnx.data(), onnx.size(), engines.data(), engines.size()))); + OUTCOME_TRY(auto builder, ppl_try(ppl::nn::onnx::RuntimeBuilderFactory::Create())); + OUTCOME_TRY(ppl_try(builder->LoadModel(onnx.data(), onnx.size(), nullptr))); + + ppl::nn::onnx::RuntimeBuilder::Resources resources{}; + resources.engines = engines.data(); + resources.engine_num = engines.size(); + OUTCOME_TRY(ppl_try(builder->SetResources(resources))); + OUTCOME_TRY(ppl_try(builder->Preprocess())); OUTCOME_TRY(auto runtime, ppl_try(builder->CreateRuntime())); @@ -84,7 +115,7 @@ Result PPLNet::Init(const Value& args) { inputs_external_.push_back(CreateInternalTensor(src, device_)); /// debug only - auto& desc = inputs_internal_[i]->GetShape(); + const auto& desc = *inputs_internal_[i]->GetShape(); std::vector shape_(desc.GetDims(), desc.GetDims() + desc.GetDimCount()); MMDEPLOY_DEBUG("input {}: datatype = {}, dataformat = {}, shape = {}", i, ppl::common::GetDataTypeStr(desc.GetDataType()), @@ -96,7 +127,7 @@ Result PPLNet::Init(const Value& args) { outputs_internal_.push_back(src); outputs_external_.push_back(CreateInternalTensor(src, device_)); - auto desc = outputs_internal_[i]->GetShape(); + const auto& desc = *outputs_internal_[i]->GetShape(); std::vector shape_(desc.GetDims(), desc.GetDims() + desc.GetDimCount()); MMDEPLOY_DEBUG("output {}: datatype = {}, dataformat = {}, shape = {}", i, ppl::common::GetDataTypeStr(desc.GetDataType()), @@ -128,7 +159,7 @@ Result PPLNet::Deinit() { } static TensorShape GetShape(const PPLTensor& tensor) { - auto& desc = tensor.GetShape(); + const auto& desc = *tensor.GetShape(); return {desc.GetDims(), desc.GetDims() + desc.GetDimCount()}; } @@ -170,18 +201,17 @@ Result PPLNet::Forward() { OUTCOME_TRY(stream_.Wait()); OUTCOME_TRY(ppl_try(runtime_->Run())); - OUTCOME_TRY(ppl_try(runtime_->Sync())); for (int i = 0; i < outputs_external_.size(); ++i) { auto& internal = *outputs_internal_[i]; - auto format = internal.GetShape().GetDataFormat(); + auto format = internal.GetShape()->GetDataFormat(); if (format != ppl::common::DATAFORMAT_NDARRAY) { MMDEPLOY_ERROR("output {}'s format is {}, only NDARRAY is currently supported", i, ppl::common::GetDataFormatStr(format)); return Status(eNotSupported); } auto& external = outputs_external_[i]; - auto dtype_int = internal.GetShape().GetDataType(); + auto dtype_int = internal.GetShape()->GetDataType(); OUTCOME_TRY(auto dtype_ext, GetPPLDataType(external.data_type())); auto shape_int = GetShape(internal); auto shape_ext = external.shape(); @@ -213,7 +243,7 @@ Result PPLNet::Forward() { Result PPLNet::ForwardAsync(Event* event) { return Status(eNotSupported); } Result ReshapeLike(PPLTensor& dst, Tensor& src) { - auto& dst_desc = dst.GetShape(); + auto& dst_desc = *dst.GetShape(); auto& src_desc = src.desc(); 
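// Note: the new ppl.nn RuntimeBuilder sequence introduced in Init() above is
// load model -> set resources -> preprocess -> create runtime. A condensed
// sketch mirroring the calls in this patch (`onnx_buf` and `engines` are
// illustrative local names, error handling elided):
//
//   auto builder = ppl::nn::onnx::RuntimeBuilderFactory::Create();
//   builder->LoadModel(onnx_buf.data(), onnx_buf.size(), nullptr);
//   ppl::nn::onnx::RuntimeBuilder::Resources resources{};
//   resources.engines = engines.data();
//   resources.engine_num = engines.size();
//   builder->SetResources(resources);
//   builder->Preprocess();
//   auto runtime = builder->CreateRuntime();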
OUTCOME_TRY(auto data_type, GetPPLDataType(src_desc.data_type));
   dst_desc.SetDataType(data_type);
@@ -236,7 +266,7 @@ Result PPLNet::Reshape(Span input_shapes) {

   if (can_infer_output_shapes_) {
     OUTCOME_TRY(auto output_shapes,
                 InferOutputShapes(input_shapes, prev_in_shapes, prev_out_shapes));
-    // MMDEPLOY_ERROR("inferred output shapes: {}", output_shapes);
+    MMDEPLOY_DEBUG("inferred output shapes: {}", output_shapes);
     for (int i = 0; i < outputs_external_.size(); ++i) {
       auto& output = outputs_external_[i];
       output.Reshape(output_shapes[i]);
diff --git a/csrc/mmdeploy/net/snpe/CMakeLists.txt b/csrc/mmdeploy/net/snpe/CMakeLists.txt
new file mode 100644
index 000000000..2f8af24dc
--- /dev/null
+++ b/csrc/mmdeploy/net/snpe/CMakeLists.txt
@@ -0,0 +1,25 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+
+project(mmdeploy_snpe_net)
+
+add_library(snpe SHARED IMPORTED)
+
+if(NOT DEFINED ENV{MMDEPLOY_SNPE_X86_CI})
+  set(sub_dir "aarch64-android-clang6.0")
+else()
+  set(sub_dir "x86_64-linux-clang")
+endif()
+
+if (NOT EXISTS $ENV{SNPE_ROOT}/lib/${sub_dir}/)
+  message(ERROR "SNPE_ROOT directory does not exist: $ENV{SNPE_ROOT}/lib/${sub_dir}/")
+endif()
+message(STATUS "SNPE lib directory $ENV{SNPE_ROOT}/lib/${sub_dir}/")
+
+set_target_properties(snpe PROPERTIES
+  IMPORTED_LOCATION "$ENV{SNPE_ROOT}/lib/${sub_dir}/libSNPE.so"
+  INTERFACE_INCLUDE_DIRECTORIES "$ENV{SNPE_ROOT}/include/zdl"
+)
+
+mmdeploy_add_module(${PROJECT_NAME} snpe_net.cpp)
+target_link_libraries(${PROJECT_NAME} PRIVATE snpe)
+add_library(mmdeploy::snpe_net ALIAS ${PROJECT_NAME})
diff --git a/csrc/mmdeploy/net/snpe/snpe_net.cpp b/csrc/mmdeploy/net/snpe/snpe_net.cpp
new file mode 100644
index 000000000..5fb05b87f
--- /dev/null
+++ b/csrc/mmdeploy/net/snpe/snpe_net.cpp
@@ -0,0 +1,262 @@
+// Copyright (c) OpenMMLab. All rights reserved.
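// Note: SNPE ITensors are laid out NHWC while mmdeploy Tensors are NCHW, so
// the copy_input/copy_output helpers below reorder elements one by one. The
// index mapping copy_output implements, as a standalone sketch (function and
// variable names are illustrative, not part of this patch):
//
//   // chw[c * panel + p] = hwc[i], where panel = H * W
//   void HwcToChw(const float* hwc, float* chw, int channel, int panel) {
//     for (int i = 0; i < channel * panel; ++i) {
//       int c = i % channel;  // channel varies fastest in HWC order
//       int p = i / channel;  // position inside the H*W plane
//       chw[c * panel + p] = hwc[i];
//     }
//   }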
+ +#include "snpe_net.h" + +#include "mmdeploy/core/logger.h" +#include "mmdeploy/core/model.h" +#include "mmdeploy/core/utils/formatter.h" + +namespace mmdeploy { + +SNPENet::~SNPENet() {} + +std::string SNPENet::ShapeStr(zdl::DlSystem::ITensor* pTensor) { + std::string str; + + str += "["; + auto shape = pTensor->getShape(); + for (int i = 0; i < shape.rank(); ++i) { + str += std::to_string(shape[i]); + str += ","; + } + str += ']'; + return str; +} + +void SNPENet::Build(std::unique_ptr& container, + zdl::DlSystem::Runtime_t runtime, zdl::DlSystem::RuntimeList runtimeList, + bool useUserSuppliedBuffers, zdl::DlSystem::PlatformConfig platformConfig) { + zdl::SNPE::SNPEBuilder snpeBuilder(container.get()); + + if (runtimeList.empty()) { + runtimeList.add(runtime); + } + + snpe_ = + snpeBuilder.setOutputLayers({}) + .setRuntimeProcessorOrder(runtimeList) + .setUseUserSuppliedBuffers(useUserSuppliedBuffers) + .setPlatformConfig(platformConfig) + .setPerformanceProfile(zdl::DlSystem::PerformanceProfile_t::SUSTAINED_HIGH_PERFORMANCE) + .build(); + return; +} + +void SNPENet::copy_output(const zdl::DlSystem::ITensor* from, Tensor& to) { + auto hwc_to_chw = [](const zdl::DlSystem::TensorShape& shape) -> bool { + if (shape.rank() != 4 || (shape[1] == 1 && shape[2] > 1 && shape[3] > 1)) { + return false; + } + return true; + }; + + auto output_shape = from->getShape(); + + if (to.size() != from->getSize()) { + TensorShape tensor_shape; + for (int j = 0; j < output_shape.rank(); ++j) { + tensor_shape.push_back(output_shape[j]); + } + + if (hwc_to_chw(output_shape)) { + auto tmp = output_shape[3]; + output_shape[3] = output_shape[1]; + output_shape[1] = tmp; + } + to.Reshape(tensor_shape); + } + + float* pto = to.data(); + + if (output_shape.rank() != 4 || + (output_shape[1] == 1 && output_shape[2] > 1 && output_shape[3] > 1)) { + // skip [1,1,w>1,h>1] for segmentation task + for (auto it = from->cbegin(); it != from->cend(); ++it, ++pto) { + *pto = *it; + } + } else { + const int channel = output_shape[1]; + const int panel = output_shape[2] * output_shape[3]; + + int i = 0; + // HWC to CHW + for (auto it = from->cbegin(); it != from->cend(); ++it, ++i) { + int channel_idx = i % channel; + int panel_idx = i / channel; + pto[channel_idx * panel + panel_idx] = *it; + } + } + return; +} + +void SNPENet::copy_input(const Tensor& from, zdl::DlSystem::ITensor* to) { + if (from.size() != to->getSize()) { + MMDEPLOY_ERROR("input tensor size not match"); + return; + } + + const float* pfrom = from.data(); + + auto input_shape = to->getShape(); + if (input_shape.rank() == 4) { + const int channel = input_shape[3]; + const int panel = input_shape[1] * input_shape[2]; + + int i = 0; + // CHW to HWC + for (auto it = to->begin(); it != to->end(); ++it, ++i) { + int channel_index = i % channel; + int panel_index = (i / channel) % panel; + + *it = pfrom[channel_index * panel + panel_index]; + } + + } else { + for (auto it = to->begin(); it != to->end(); ++it, ++pfrom) { + *it = *pfrom; + } + } +} + +Result SNPENet::Init(const Value& args) { + auto& context = args["context"]; + device_ = context["device"].get(); + stream_ = context["stream"].get(); + if (!device_.is_host()) { + return Status(eNotSupported); + } + + auto name = args["name"].get(); + auto model = context["model"].get(); + OUTCOME_TRY(auto config, model.GetModelConfig(name)); + + std::string content; + OUTCOME_TRY(content, model.ReadFile(config.net)); + char* model_ptr = const_cast(content.data()); + container_ = + 
zdl::DlContainer::IDlContainer::open(reinterpret_cast(model_ptr), content.size()); + if (container_ == nullptr) { + MMDEPLOY_ERROR("Load .dlc failed: {}", config.net); + return Status(eInvalidArgument); + } + + zdl::DlSystem::Runtime_t runtime = zdl::DlSystem::Runtime_t::GPU; + if (!zdl::SNPE::SNPEFactory::isRuntimeAvailable(runtime)) { + MMDEPLOY_WARN("Selected runtime not present. Falling back to CPU.\n"); + runtime = zdl::DlSystem::Runtime_t::CPU; + } + + zdl::DlSystem::RuntimeList runtimeList; + // Add CPU backend to support fallback + runtimeList.add(zdl::DlSystem::Runtime_t::CPU); + runtimeList.add(runtime); + zdl::DlSystem::PlatformConfig platformConfig; + Build(container_, runtime, runtimeList, false, platformConfig); + + // init internal input tensor list + const auto& inputTensorNamesRef = snpe_->getInputTensorNames(); + const auto& inputTensorNames = *inputTensorNamesRef; + inputs_internal_.resize(inputTensorNames.size()); + + for (int i = 0; i < inputTensorNames.size(); ++i) { + const auto& inputShape_opt = snpe_->getInputDimensions(inputTensorNames.at(i)); + const auto& inputShape = *inputShape_opt; + + inputs_internal_[i] = zdl::SNPE::SNPEFactory::getTensorFactory().createTensor(inputShape); + + std::string info = + std::string(inputTensorNames.at(i)) + " shape: " + ShapeStr(inputs_internal_[i].get()); + MMDEPLOY_INFO(info); + + input_tensor_map_.add(inputTensorNames.at(i), inputs_internal_[i].get()); + + input_tensors_.emplace_back(TensorDesc{ + Device("cpu"), + DataType::kFLOAT, + {}, + std::string(inputTensorNames.at(i)), + }); + } + + const auto& outputTensorNamesRef = snpe_->getOutputTensorNames(); + const auto& outputTensorNames = *outputTensorNamesRef; + for (int i = 0; i < outputTensorNames.size(); ++i) { + output_tensors_.emplace_back(TensorDesc{ + Device("cpu"), + DataType::kFLOAT, + {}, + std::string(outputTensorNames.at(i)), + }); + } + + return success(); +} + +Result SNPENet::Deinit() { return success(); } + +Result SNPENet::Reshape(Span input_shapes) { + for (size_t i = 0; i < input_shapes.size(); ++i) { + input_tensors_[i].Reshape(input_shapes[i]); + } + return success(); +} + +Result> SNPENet::GetInputTensors() { return input_tensors_; } + +Result> SNPENet::GetOutputTensors() { return output_tensors_; } + +Result SNPENet::Forward() { + OUTCOME_TRY(stream_.Wait()); + + { + // copy input to itensor buffer + for (auto& tensor : input_tensors_) { + const auto& name = tensor.desc().name; + auto pbuffer = input_tensor_map_.getTensor(name.c_str()); + + copy_input(tensor, pbuffer); + } + } + + // A tensor map for SNPE execution outputs + zdl::DlSystem::TensorMap output_map; + { + // real inference + bool success = snpe_->execute(input_tensor_map_, output_map); + if (!success) { + MMDEPLOY_ERROR("snpe Inference error: {}", std::string(zdl::DlSystem::getLastErrorString())); + return Status(eFail); + } + } + + { + // extract output buffer to tensor + auto names = output_map.getTensorNames(); + for (size_t i = 0; i < names.size(); ++i) { + const zdl::DlSystem::ITensor* pbuffer = output_map.getTensor(names.at(i)); + + auto& tensor = output_tensors_[i]; + copy_output(pbuffer, tensor); + } + } + return success(); +} + +class SNPENetCreator : public Creator { + public: + const char* GetName() const override { return "snpe"; } + int GetVersion() const override { return 0; } + std::unique_ptr Create(const Value& args) override { + auto p = std::make_unique(); + if (auto r = p->Init(args)) { + return p; + } else { + MMDEPLOY_ERROR("error creating SNPENet: {}", 
r.error().message().c_str()); + return nullptr; + } + } +}; + +REGISTER_MODULE(Net, SNPENetCreator); + +} // namespace mmdeploy diff --git a/csrc/mmdeploy/net/snpe/snpe_net.h b/csrc/mmdeploy/net/snpe/snpe_net.h new file mode 100644 index 000000000..405855861 --- /dev/null +++ b/csrc/mmdeploy/net/snpe/snpe_net.h @@ -0,0 +1,61 @@ +// Copyright (c) OpenMMLab. All rights reserved. + +#ifndef MMDEPLOY_SRC_NET_SNPE_SNPE_NET_H_ +#define MMDEPLOY_SRC_NET_SNPE_SNPE_NET_H_ + +#include +#include +#include + +#include "DiagLog/IDiagLog.hpp" +#include "DlContainer/IDlContainer.hpp" +#include "DlSystem/DlEnums.hpp" +#include "DlSystem/DlError.hpp" +#include "DlSystem/ITensorFactory.hpp" +#include "DlSystem/IUserBuffer.hpp" +#include "DlSystem/PlatformConfig.hpp" +#include "DlSystem/RuntimeList.hpp" +#include "DlSystem/UserBufferMap.hpp" +#include "SNPE/SNPE.hpp" +#include "SNPE/SNPEBuilder.hpp" +#include "SNPE/SNPEFactory.hpp" +#include "mmdeploy/core/net.h" + +namespace mmdeploy { + +class SNPENet : public Net { + public: + ~SNPENet() override; + Result Init(const Value& args) override; + Result Deinit() override; + Result> GetInputTensors() override; + Result> GetOutputTensors() override; + Result Reshape(Span input_shapes) override; + Result Forward() override; + Result ForwardAsync(Event* event) override { return Status(eNotSupported); }; + + private: + void Build(std::unique_ptr& container, + zdl::DlSystem::Runtime_t runtime, zdl::DlSystem::RuntimeList runtimeList, + bool useUserSuppliedBuffers, zdl::DlSystem::PlatformConfig platformConfig); + + std::string ShapeStr(zdl::DlSystem::ITensor* pTensor); + + void copy_output(const zdl::DlSystem::ITensor* from, Tensor& to); + void copy_input(const Tensor& from, zdl::DlSystem::ITensor* to); + + Device device_; + Stream stream_; + std::vector input_tensors_; + std::vector output_tensors_; + + std::unique_ptr snpe_; + std::unique_ptr container_; + + std::vector> inputs_internal_; + zdl::DlSystem::TensorMap input_tensor_map_; +}; + +} // namespace mmdeploy + +#endif // MMDEPLOY_SRC_NET_SNPE_SNPE_NET_H_ diff --git a/csrc/mmdeploy/net/trt/trt_net.cpp b/csrc/mmdeploy/net/trt/trt_net.cpp index e70cb8397..3482785f7 100644 --- a/csrc/mmdeploy/net/trt/trt_net.cpp +++ b/csrc/mmdeploy/net/trt/trt_net.cpp @@ -18,7 +18,7 @@ class TRTLogger : public nvinfer1::ILogger { void log(Severity severity, const char* msg) noexcept override { switch (severity) { case Severity::kINFO: - // MMDEPLOY_INFO("TRTNet: {}", msg); + MMDEPLOY_DEBUG("TRTNet: {}", msg); break; case Severity::kWARNING: MMDEPLOY_WARN("TRTNet: {}", msg); @@ -169,7 +169,7 @@ Result TRTNet::Reshape(Span input_shapes) { } for (int i = 0; i < input_tensors_.size(); ++i) { auto dims = to_dims(input_shapes[i]); - // MMDEPLOY_ERROR("input shape: {}", to_string(dims)); + MMDEPLOY_DEBUG("input shape: {}", to_string(dims)); TRT_TRY(context_->setBindingDimensions(input_ids_[i], dims)); input_tensors_[i].Reshape(input_shapes[i]); } @@ -179,7 +179,7 @@ Result TRTNet::Reshape(Span input_shapes) { } for (int i = 0; i < output_tensors_.size(); ++i) { auto dims = context_->getBindingDimensions(output_ids_[i]); - // MMDEPLOY_ERROR("output shape: {}", to_string(dims)); + MMDEPLOY_DEBUG("output shape: {}", to_string(dims)); output_tensors_[i].Reshape(to_shape(dims)); } return success(); diff --git a/csrc/mmdeploy/preprocess/cuda/pad_impl.cpp b/csrc/mmdeploy/preprocess/cuda/pad_impl.cpp index ff6bc63e2..2ba4b315f 100644 --- a/csrc/mmdeploy/preprocess/cuda/pad_impl.cpp +++ b/csrc/mmdeploy/preprocess/cuda/pad_impl.cpp @@ -14,18 
+14,10 @@ namespace cuda { class PadImpl : public ::mmdeploy::PadImpl { public: explicit PadImpl(const Value& args) : ::mmdeploy::PadImpl(args) { -#if PPLCV_VERSION_MAJOR >= 0 && PPLCV_VERSION_MINOR >= 6 && PPLCV_VERSION_PATCH >= 2 map border_map{{"constant", ppl::cv::BORDER_CONSTANT}, {"edge", ppl::cv::BORDER_REPLICATE}, {"reflect", ppl::cv::BORDER_REFLECT_101}, - { "symmetric", - ppl::cv::BORDER_REFLECT }}; -#else - map border_map{{"constant", ppl::cv::BORDER_TYPE_CONSTANT}, - {"edge", ppl::cv::BORDER_TYPE_REPLICATE}, - {"reflect", ppl::cv::BORDER_TYPE_REFLECT_101}, - {"symmetric", ppl::cv::BORDER_TYPE_REFLECT}}; -#endif + {"symmetric", ppl::cv::BORDER_REFLECT}}; if (border_map.find(arg_.padding_mode) == border_map.end()) { MMDEPLOY_ERROR("unsupported padding_mode '{}'", arg_.padding_mode); throw_exception(eNotSupported); diff --git a/csrc/mmdeploy/preprocess/cuda/resize_impl.cpp b/csrc/mmdeploy/preprocess/cuda/resize_impl.cpp index 5b1b61a67..57c9d485d 100644 --- a/csrc/mmdeploy/preprocess/cuda/resize_impl.cpp +++ b/csrc/mmdeploy/preprocess/cuda/resize_impl.cpp @@ -45,7 +45,6 @@ class ResizeImpl final : public ::mmdeploy::ResizeImpl { private: template ppl::common::RetCode DispatchImpl(Args&&... args) { -#if PPLCV_VERSION_MAJOR >= 0 && PPLCV_VERSION_MINOR >= 6 && PPLCV_VERSION_PATCH >= 2 if (arg_.interpolation == "bilinear") { return ppl::cv::cuda::Resize(std::forward(args)..., ppl::cv::INTERPOLATION_LINEAR); @@ -54,16 +53,6 @@ class ResizeImpl final : public ::mmdeploy::ResizeImpl { return ppl::cv::cuda::Resize(std::forward(args)..., ppl::cv::INTERPOLATION_NEAREST_POINT); } -#else - if (arg_.interpolation == "bilinear") { - return ppl::cv::cuda::Resize(std::forward(args)..., - ppl::cv::INTERPOLATION_TYPE_LINEAR); - } - if (arg_.interpolation == "nearest") { - return ppl::cv::cuda::Resize(std::forward(args)..., - ppl::cv::INTERPOLATION_TYPE_NEAREST_POINT); - } -#endif return ppl::common::RC_UNSUPPORTED; } diff --git a/csrc/mmdeploy/preprocess/transform/collect.cpp b/csrc/mmdeploy/preprocess/transform/collect.cpp index 119415cd7..2a63a9b57 100644 --- a/csrc/mmdeploy/preprocess/transform/collect.cpp +++ b/csrc/mmdeploy/preprocess/transform/collect.cpp @@ -45,8 +45,7 @@ Result CollectImpl::Process(const Value &input) { } for (auto &key : arg_.keys) { if (!input.contains(key)) { - MMDEPLOY_ERROR("missed key '{}' in input", key); - // return Status(eInvalidArgument); + MMDEPLOY_INFO("missed key '{}' in input", key); return Status(eInvalidArgument); } else { output[key] = input[key]; diff --git a/csrc/mmdeploy/preprocess/transform/compose.cpp b/csrc/mmdeploy/preprocess/transform/compose.cpp index a6d9da2f6..3675da7bf 100644 --- a/csrc/mmdeploy/preprocess/transform/compose.cpp +++ b/csrc/mmdeploy/preprocess/transform/compose.cpp @@ -20,13 +20,14 @@ Compose::Compose(const Value& args, int version) : Transform(args) { MMDEPLOY_DEBUG("creating transform: {} with cfg: {}", type, mmdeploy::to_json(cfg).dump(2)); auto creator = Registry::Get().GetCreator(type, version); if (!creator) { - MMDEPLOY_ERROR("unable to find creator: {}", type); - throw std::invalid_argument("unable to find creator"); + MMDEPLOY_ERROR("Unable to find Transform creator: {}. 
Available transforms: {}", type, + Registry::Get().List()); + throw_exception(eEntryNotFound); } auto transform = creator->Create(cfg); if (!transform) { - MMDEPLOY_ERROR("failed to create transform: {}", type); - throw std::invalid_argument("failed to create transform"); + MMDEPLOY_ERROR("Failed to create transform: {}, config: {}", type, cfg); + throw_exception(eFail); } transforms_.push_back(std::move(transform)); } diff --git a/csrc/mmdeploy/preprocess/transform/transform.h b/csrc/mmdeploy/preprocess/transform/transform.h index f4fdc8953..446b3a8cb 100644 --- a/csrc/mmdeploy/preprocess/transform/transform.h +++ b/csrc/mmdeploy/preprocess/transform/transform.h @@ -41,12 +41,12 @@ class MMDEPLOY_API Transform : public Module { std::unique_ptr impl(nullptr); auto impl_creator = Registry::Get().GetCreator(specified_platform_, version); if (nullptr == impl_creator) { - MMDEPLOY_WARN("cannot find {} implementation on specific platform {} ", transform_type, + MMDEPLOY_WARN("Cannot find {} implementation on platform {}", transform_type, specified_platform_); for (auto& name : candidate_platforms_) { impl_creator = Registry::Get().GetCreator(name); if (impl_creator) { - MMDEPLOY_INFO("fallback {} implementation to platform {}", transform_type, name); + MMDEPLOY_INFO("Fallback {} implementation to platform {}", transform_type, name); break; } } diff --git a/csrc/mmdeploy/preprocess/transform_module.cpp b/csrc/mmdeploy/preprocess/transform_module.cpp index e843dd78d..33f149a0d 100644 --- a/csrc/mmdeploy/preprocess/transform_module.cpp +++ b/csrc/mmdeploy/preprocess/transform_module.cpp @@ -18,7 +18,8 @@ TransformModule::TransformModule(const Value& args) { const auto type = "Compose"; auto creator = Registry::Get().GetCreator(type, 1); if (!creator) { - MMDEPLOY_ERROR("unable to find creator: {}", type); + MMDEPLOY_ERROR("Unable to find Transform creator: {}. 
Available transforms: {}", type, + Registry::Get().List()); throw_exception(eEntryNotFound); } auto cfg = args; diff --git a/csrc/mmdeploy/utils/opencv/opencv_utils.cpp b/csrc/mmdeploy/utils/opencv/opencv_utils.cpp index f04086b5d..7f0501ffb 100644 --- a/csrc/mmdeploy/utils/opencv/opencv_utils.cpp +++ b/csrc/mmdeploy/utils/opencv/opencv_utils.cpp @@ -6,6 +6,7 @@ #include "mmdeploy/core/logger.h" #include "mmdeploy/core/utils/formatter.h" +#include "opencv2/imgproc/imgproc.hpp" namespace mmdeploy { namespace cpu { diff --git a/csrc/mmdeploy/utils/opencv/opencv_utils.h b/csrc/mmdeploy/utils/opencv/opencv_utils.h index ebd398ce7..f1bf6b3d5 100644 --- a/csrc/mmdeploy/utils/opencv/opencv_utils.h +++ b/csrc/mmdeploy/utils/opencv/opencv_utils.h @@ -7,7 +7,7 @@ #include "mmdeploy/core/mpl/type_traits.h" #include "mmdeploy/core/serialization.h" #include "mmdeploy/core/tensor.h" -#include "opencv2/opencv.hpp" +#include "opencv2/core/core.hpp" namespace mmdeploy { namespace cpu { diff --git a/demo/csrc/CMakeLists.txt b/demo/csrc/CMakeLists.txt index fad5ceaac..4ce89e215 100644 --- a/demo/csrc/CMakeLists.txt +++ b/demo/csrc/CMakeLists.txt @@ -2,29 +2,46 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy-example) -find_package(MMDeploy REQUIRED) +if (NOT (${CMAKE_PROJECT_NAME} STREQUAL "MMDeploy")) + find_package(MMDeploy REQUIRED) +endif () function(add_example task name) - if (TARGET mmdeploy_${task}) - # Search for c/cpp sources - file(GLOB _SRCS ${name}.c*) - add_executable(${name} ${_SRCS}) - if (NOT MSVC) - # Disable new dtags so that executables can run even without LD_LIBRARY_PATH set - target_link_libraries(${name} PRIVATE -Wl,--disable-new-dtags) + if (TARGET mmdeploy_${task}) + # Search for c/cpp sources + file(GLOB _SRCS ${name}.c*) + add_executable(${name} ${_SRCS}) + if (NOT (MSVC OR APPLE)) + # Disable new dtags so that executables can run even without LD_LIBRARY_PATH set + target_link_libraries(${name} PRIVATE -Wl,--disable-new-dtags) + endif () + if (MMDEPLOY_BUILD_SDK_MONOLITHIC) + target_link_libraries(${name} PRIVATE mmdeploy ${OpenCV_LIBS}) + else () + # Load MMDeploy modules + mmdeploy_load_static(${name} MMDeployStaticModules) + mmdeploy_load_dynamic(${name} MMDeployDynamicModules) + # Link to MMDeploy libraries + target_link_libraries(${name} PRIVATE MMDeployLibs ${OpenCV_LIBS}) + endif () + install(TARGETS ${name} RUNTIME DESTINATION bin) endif () - # Load MMDeploy modules - mmdeploy_load_static(${name} MMDeployStaticModules) - mmdeploy_load_dynamic(${name} MMDeployDynamicModules) - # Link to MMDeploy libraries - target_link_libraries(${name} PRIVATE MMDeployLibs ${OpenCV_LIBS}) - endif () endfunction() -add_example(classifier image_classification) -add_example(detector object_detection) -add_example(segmentor image_segmentation) -add_example(restorer image_restorer) -add_example(text_detector ocr) -add_example(pose_detector pose_detection) +add_example(classifier image_classification) +add_example(detector object_detection) +add_example(segmentor image_segmentation) +add_example(restorer image_restorer) +add_example(text_detector ocr) +add_example(pose_detector pose_detection) add_example(rotated_detector rotated_object_detection) + +if (MMDEPLOY_BUILD_SDK_CXX_API) + add_example(classifier classifier) + add_example(detector detector) + add_example(segmentor segmentor) + add_example(restorer restorer) + add_example(text_detector text_ocr) + add_example(pose_detector pose_detector) + add_example(rotated_detector rotated_detector) +endif () diff --git 
a/demo/csrc/async_ocr.cpp b/demo/csrc/async_ocr.cpp index 81af94dda..d87e7d67d 100644 --- a/demo/csrc/async_ocr.cpp +++ b/demo/csrc/async_ocr.cpp @@ -5,19 +5,19 @@ #include #include -#include "model.h" -#include "text_detector.h" -#include "text_recognizer.h" +#include "mmdeploy/model.h" +#include "mmdeploy/text_detector.h" +#include "mmdeploy/text_recognizer.h" struct ctx_t { - mm_mat_t* mat; - mm_text_detect_t* dets{}; + mmdeploy_mat_t* mat; + mmdeploy_text_detection_t* dets{}; int* det_count; }; mmdeploy_value_t cont(mmdeploy_value_t det_output, void* context) { auto* ctx = static_cast(context); - int ec = MM_SUCCESS; + int ec = MMDEPLOY_SUCCESS; ec = mmdeploy_text_detector_get_result(det_output, &ctx->dets, &ctx->det_count); if (ec) { fprintf(stderr, "failed to get detection result, code = %d\n", ec); @@ -55,26 +55,26 @@ int main(int argc, char* argv[]) { mmdeploy_exec_info dbnet_exec_info{&prep_exec_info, "dbnet", thread}; mmdeploy_exec_info post_exec_info{&dbnet_exec_info, "postprocess", pool}; - mm_handle_t text_detector{}; + mmdeploy_text_detector_t text_detector{}; int status{}; - mm_model_t det_model{}; + mmdeploy_model_t det_model{}; status = mmdeploy_model_create_by_path(det_model_path, &det_model); - if (status != MM_SUCCESS) { + if (status != MMDEPLOY_SUCCESS) { fprintf(stderr, "failed to create model %s\n", det_model_path); return 1; } - mm_model_t reg_model{}; + mmdeploy_model_t reg_model{}; status = mmdeploy_model_create_by_path(reg_model_path, ®_model); - if (status != MM_SUCCESS) { + if (status != MMDEPLOY_SUCCESS) { fprintf(stderr, "failed to create model %s\n", det_model_path); return 1; } status = mmdeploy_text_detector_create_v2(det_model, device_name, 0, &post_exec_info, &text_detector); - if (status != MM_SUCCESS) { + if (status != MMDEPLOY_SUCCESS) { fprintf(stderr, "failed to create text_detector, code: %d\n", (int)status); return 1; } @@ -82,15 +82,16 @@ int main(int argc, char* argv[]) { mmdeploy_exec_info crnn_exec_info{&prep_exec_info, "crnnnet", thread}; post_exec_info.next = &crnn_exec_info; - mm_handle_t text_recognizer{}; + mmdeploy_text_recognizer_t text_recognizer{}; status = mmdeploy_text_recognizer_create_v2(reg_model, device_name, 0, &post_exec_info, &text_recognizer); - if (status != MM_SUCCESS) { + if (status != MMDEPLOY_SUCCESS) { fprintf(stderr, "failed to create text_recognizer, code: %d\n", (int)status); return 1; } - mm_mat_t mat{img.data, img.rows, img.cols, 3, MM_BGR, MM_INT8}; + mmdeploy_mat_t mat{ + img.data, img.rows, img.cols, 3, MMDEPLOY_PIXEL_FORMAT_BGR, MMDEPLOY_DATA_TYPE_UINT8}; mmdeploy_value_t input{}; if ((status = mmdeploy_text_detector_create_input(&mat, 1, &input)) != 0) { @@ -121,7 +122,7 @@ int main(int argc, char* argv[]) { return 1; } - mm_text_recognize_t* texts{}; + mmdeploy_text_recognition_t* texts{}; mmdeploy_text_recognizer_get_result(output, &texts); if (!texts) { fprintf(stderr, "failed to gettext recognizer result\n"); diff --git a/demo/csrc/async_ocr_v2.cpp b/demo/csrc/async_ocr_v2.cpp index 08db9e5e5..756a9b52a 100644 --- a/demo/csrc/async_ocr_v2.cpp +++ b/demo/csrc/async_ocr_v2.cpp @@ -5,19 +5,19 @@ #include #include -#include "model.h" -#include "text_detector.h" -#include "text_recognizer.h" +#include "mmdeploy/model.h" +#include "mmdeploy/text_detector.h" +#include "mmdeploy/text_recognizer.h" struct ctx_t { - mm_mat_t* mat; - mm_text_detect_t* dets{}; + mmdeploy_mat_t* mat; + mmdeploy_text_detection_t* dets{}; int* det_count; - mm_text_recognize_t* regs{}; - mm_handle_t recognizer; + 
mmdeploy_text_recognition_t* regs{}; + mmdeploy_text_recognizer_t recognizer; }; -int det_to_reg(mm_text_detect_t* results, int* result_count, void* context, +int det_to_reg(mmdeploy_text_detection_t* results, int* result_count, void* context, mmdeploy_sender_t* output) { auto ctx = static_cast(context); ctx->dets = results; @@ -27,7 +27,7 @@ int det_to_reg(mm_text_detect_t* results, int* result_count, void* context, return ec; } -int reg_cont(mm_text_recognize_t* results, void* context, mmdeploy_sender_t*) { +int reg_cont(mmdeploy_text_recognition_t* results, void* context, mmdeploy_sender_t*) { static_cast(context)->regs = results; return 0; } @@ -54,26 +54,25 @@ int main(int argc, char* argv[]) { mmdeploy_exec_info dbnet_exec_info{&prep_exec_info, "dbnet", thread}; mmdeploy_exec_info post_exec_info{&dbnet_exec_info, "postprocess", pool}; - mm_handle_t text_detector{}; + mmdeploy_text_detector_t text_detector{}; int status{}; - mm_model_t det_model{}; + mmdeploy_model_t det_model{}; status = mmdeploy_model_create_by_path(det_model_path, &det_model); - if (status != MM_SUCCESS) { + if (status != MMDEPLOY_SUCCESS) { fprintf(stderr, "failed to create model %s\n", det_model_path); return 1; } - mm_model_t reg_model{}; + mmdeploy_model_t reg_model{}; status = mmdeploy_model_create_by_path(reg_model_path, ®_model); - if (status != MM_SUCCESS) { + if (status != MMDEPLOY_SUCCESS) { fprintf(stderr, "failed to create model %s\n", det_model_path); return 1; } - status = - mmdeploy_text_detector_create_v2(det_model, device_name, 0, nullptr, &text_detector); - if (status != MM_SUCCESS) { + status = mmdeploy_text_detector_create_v2(det_model, device_name, 0, nullptr, &text_detector); + if (status != MMDEPLOY_SUCCESS) { fprintf(stderr, "failed to create text_detector, code: %d\n", (int)status); return 1; } @@ -81,15 +80,15 @@ int main(int argc, char* argv[]) { mmdeploy_exec_info crnn_exec_info{&prep_exec_info, "crnnnet", thread}; post_exec_info.next = &crnn_exec_info; - mm_handle_t text_recognizer{}; - status = mmdeploy_text_recognizer_create_v2(reg_model, device_name, 0, nullptr, - &text_recognizer); - if (status != MM_SUCCESS) { + mmdeploy_text_recognizer_t text_recognizer{}; + status = mmdeploy_text_recognizer_create_v2(reg_model, device_name, 0, nullptr, &text_recognizer); + if (status != MMDEPLOY_SUCCESS) { fprintf(stderr, "failed to create text_recognizer, code: %d\n", (int)status); return 1; } - mm_mat_t mat{img.data, img.rows, img.cols, 3, MM_BGR, MM_INT8}; + mmdeploy_mat_t mat{ + img.data, img.rows, img.cols, 3, MMDEPLOY_PIXEL_FORMAT_BGR, MMDEPLOY_DATA_TYPE_UINT8}; mmdeploy_sender_t sender{}; diff --git a/demo/csrc/classifier.cxx b/demo/csrc/classifier.cxx new file mode 100644 index 000000000..2d8b2d1e2 --- /dev/null +++ b/demo/csrc/classifier.cxx @@ -0,0 +1,32 @@ + +#include "mmdeploy/classifier.hpp" + +#include + +#include "opencv2/imgcodecs/imgcodecs.hpp" + +int main(int argc, char* argv[]) { + if (argc != 4) { + fprintf(stderr, "usage:\n image_classification device_name model_path image_path\n"); + return 1; + } + auto device_name = argv[1]; + auto model_path = argv[2]; + auto image_path = argv[3]; + cv::Mat img = cv::imread(image_path); + if (!img.data) { + fprintf(stderr, "failed to load image: %s\n", image_path); + return 1; + } + + mmdeploy::Model model(model_path); + mmdeploy::Classifier classifier(model, mmdeploy::Device{device_name, 0}); + + auto res = classifier.Apply(img); + + for (const auto& cls : res) { + fprintf(stderr, "label: %d, score: %.4f\n", cls.label_id, cls.score); + } 
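// Note: `res` above behaves like a container of {label_id, score} entries, so
// standard algorithms apply to it directly; e.g. a hypothetical top-1 pick
// (requires <algorithm>; not part of the demo itself):
//
//   auto top = *std::max_element(res.begin(), res.end(),
//       [](const auto& a, const auto& b) { return a.score < b.score; });
//   fprintf(stderr, "top-1: label %d (%.4f)\n", top.label_id, top.score);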
+ + return 0; +} diff --git a/demo/csrc/detector.cxx b/demo/csrc/detector.cxx new file mode 100644 index 000000000..7009d2fd2 --- /dev/null +++ b/demo/csrc/detector.cxx @@ -0,0 +1,69 @@ +#include "mmdeploy/detector.hpp" + +#include +#include +#include + +int main(int argc, char* argv[]) { + if (argc != 4) { + fprintf(stderr, "usage:\n object_detection device_name model_path image_path\n"); + return 1; + } + auto device_name = argv[1]; + auto model_path = argv[2]; + auto image_path = argv[3]; + cv::Mat img = cv::imread(image_path); + if (!img.data) { + fprintf(stderr, "failed to load image: %s\n", image_path); + return 1; + } + + mmdeploy::Model model(model_path); + mmdeploy::Detector detector(model, mmdeploy::Device{device_name, 0}); + + auto dets = detector.Apply(img); + + fprintf(stdout, "bbox_count=%d\n", (int)dets.size()); + + for (int i = 0; i < dets.size(); ++i) { + const auto& box = dets[i].bbox; + const auto& mask = dets[i].mask; + + fprintf(stdout, "box %d, left=%.2f, top=%.2f, right=%.2f, bottom=%.2f, label=%d, score=%.4f\n", + i, box.left, box.top, box.right, box.bottom, dets[i].label_id, dets[i].score); + + // skip detections with invalid bbox size (bbox height or width < 1) + if ((box.right - box.left) < 1 || (box.bottom - box.top) < 1) { + continue; + } + + // skip detections less than specified score threshold + if (dets[i].score < 0.3) { + continue; + } + + // generate mask overlay if model exports masks + if (mask != nullptr) { + fprintf(stdout, "mask %d, height=%d, width=%d\n", i, mask->height, mask->width); + + cv::Mat imgMask(mask->height, mask->width, CV_8UC1, &mask->data[0]); + auto x0 = std::max(std::floor(box.left) - 1, 0.f); + auto y0 = std::max(std::floor(box.top) - 1, 0.f); + cv::Rect roi((int)x0, (int)y0, mask->width, mask->height); + + // split the RGB channels, overlay mask to a specific color channel + cv::Mat ch[3]; + split(img, ch); + int col = 0; // int col = i % 3; + cv::bitwise_or(imgMask, ch[col](roi), ch[col](roi)); + merge(ch, 3, img); + } + + cv::rectangle(img, cv::Point{(int)box.left, (int)box.top}, + cv::Point{(int)box.right, (int)box.bottom}, cv::Scalar{0, 255, 0}); + } + + cv::imwrite("output_detection.png", img); + + return 0; +} diff --git a/demo/csrc/image_classification.cpp b/demo/csrc/image_classification.cpp index af2c13420..5e64581b9 100644 --- a/demo/csrc/image_classification.cpp +++ b/demo/csrc/image_classification.cpp @@ -2,11 +2,11 @@ #include #include -#include "classifier.h" +#include "mmdeploy/classifier.h" -int main(int argc, char *argv[]) { +int main(int argc, char* argv[]) { if (argc != 4) { - fprintf(stderr, "usage:\n image_classification device_name model_path image_path\n"); + fprintf(stderr, "usage:\n image_classification device_name dump_model_directory image_path\n"); return 1; } auto device_name = argv[1]; @@ -18,20 +18,21 @@ int main(int argc, char *argv[]) { return 1; } - mm_handle_t classifier{}; + mmdeploy_classifier_t classifier{}; int status{}; status = mmdeploy_classifier_create_by_path(model_path, device_name, 0, &classifier); - if (status != MM_SUCCESS) { + if (status != MMDEPLOY_SUCCESS) { fprintf(stderr, "failed to create classifier, code: %d\n", (int)status); return 1; } - mm_mat_t mat{img.data, img.rows, img.cols, 3, MM_BGR, MM_INT8}; + mmdeploy_mat_t mat{ + img.data, img.rows, img.cols, 3, MMDEPLOY_PIXEL_FORMAT_BGR, MMDEPLOY_DATA_TYPE_UINT8}; - mm_class_t *res{}; - int *res_count{}; + mmdeploy_classification_t* res{}; + int* res_count{}; status = mmdeploy_classifier_apply(classifier, &mat, 1, &res, 
&res_count); - if (status != MM_SUCCESS) { + if (status != MMDEPLOY_SUCCESS) { fprintf(stderr, "failed to apply classifier, code: %d\n", (int)status); return 1; } diff --git a/demo/csrc/image_restorer.cpp b/demo/csrc/image_restorer.cpp index 3984c88a8..ed12eefa7 100644 --- a/demo/csrc/image_restorer.cpp +++ b/demo/csrc/image_restorer.cpp @@ -5,9 +5,9 @@ #include #include -#include "restorer.h" +#include "mmdeploy/restorer.h" -int main(int argc, char *argv[]) { +int main(int argc, char* argv[]) { if (argc != 4) { fprintf(stderr, "usage:\n image_restorer device_name model_path image_path\n"); return 1; @@ -21,19 +21,20 @@ int main(int argc, char *argv[]) { return 1; } - mm_handle_t restorer{}; + mmdeploy_restorer_t restorer{}; int status{}; status = mmdeploy_restorer_create_by_path(model_path, device_name, 0, &restorer); - if (status != MM_SUCCESS) { + if (status != MMDEPLOY_SUCCESS) { fprintf(stderr, "failed to create restorer, code: %d\n", (int)status); return 1; } - mm_mat_t mat{img.data, img.rows, img.cols, 3, MM_BGR, MM_INT8}; + mmdeploy_mat_t mat{ + img.data, img.rows, img.cols, 3, MMDEPLOY_PIXEL_FORMAT_BGR, MMDEPLOY_DATA_TYPE_UINT8}; - mm_mat_t *result{}; + mmdeploy_mat_t* result{}; status = mmdeploy_restorer_apply(restorer, &mat, 1, &result); - if (status != MM_SUCCESS) { + if (status != MMDEPLOY_SUCCESS) { fprintf(stderr, "failed to apply restorer, code: %d\n", (int)status); return 1; } diff --git a/demo/csrc/image_segmentation.cpp b/demo/csrc/image_segmentation.cpp index 8502ecec0..fae446b4f 100644 --- a/demo/csrc/image_segmentation.cpp +++ b/demo/csrc/image_segmentation.cpp @@ -7,7 +7,7 @@ #include #include -#include "segmentor.h" +#include "mmdeploy/segmentor.h" using namespace std; @@ -23,7 +23,7 @@ vector gen_palette(int num_classes) { return palette; } -int main(int argc, char *argv[]) { +int main(int argc, char* argv[]) { if (argc != 4) { fprintf(stderr, "usage:\n image_segmentation device_name model_path image_path\n"); return 1; @@ -37,19 +37,20 @@ int main(int argc, char *argv[]) { return 1; } - mm_handle_t segmentor{}; + mmdeploy_segmentor_t segmentor{}; int status{}; status = mmdeploy_segmentor_create_by_path(model_path, device_name, 0, &segmentor); - if (status != MM_SUCCESS) { + if (status != MMDEPLOY_SUCCESS) { fprintf(stderr, "failed to create segmentor, code: %d\n", (int)status); return 1; } - mm_mat_t mat{img.data, img.rows, img.cols, 3, MM_BGR, MM_INT8}; + mmdeploy_mat_t mat{ + img.data, img.rows, img.cols, 3, MMDEPLOY_PIXEL_FORMAT_BGR, MMDEPLOY_DATA_TYPE_UINT8}; - mm_segment_t *result{}; + mmdeploy_segmentation_t* result{}; status = mmdeploy_segmentor_apply(segmentor, &mat, 1, &result); - if (status != MM_SUCCESS) { + if (status != MMDEPLOY_SUCCESS) { fprintf(stderr, "failed to apply segmentor, code: %d\n", (int)status); return 1; } diff --git a/demo/csrc/object_detection.cpp b/demo/csrc/object_detection.cpp index 3ed7ac4e5..07f652067 100644 --- a/demo/csrc/object_detection.cpp +++ b/demo/csrc/object_detection.cpp @@ -3,9 +3,9 @@ #include #include -#include "detector.h" +#include "mmdeploy/detector.h" -int main(int argc, char *argv[]) { +int main(int argc, char* argv[]) { if (argc != 4) { fprintf(stderr, "usage:\n object_detection device_name model_path image_path\n"); return 1; @@ -19,20 +19,21 @@ int main(int argc, char *argv[]) { return 1; } - mm_handle_t detector{}; + mmdeploy_detector_t detector{}; int status{}; status = mmdeploy_detector_create_by_path(model_path, device_name, 0, &detector); - if (status != MM_SUCCESS) { + if (status != MMDEPLOY_SUCCESS) { 
fprintf(stderr, "failed to create detector, code: %d\n", (int)status); return 1; } - mm_mat_t mat{img.data, img.rows, img.cols, 3, MM_BGR, MM_INT8}; + mmdeploy_mat_t mat{ + img.data, img.rows, img.cols, 3, MMDEPLOY_PIXEL_FORMAT_BGR, MMDEPLOY_DATA_TYPE_UINT8}; - mm_detect_t *bboxes{}; - int *res_count{}; + mmdeploy_detection_t* bboxes{}; + int* res_count{}; status = mmdeploy_detector_apply(detector, &mat, 1, &bboxes, &res_count); - if (status != MM_SUCCESS) { + if (status != MMDEPLOY_SUCCESS) { fprintf(stderr, "failed to apply detector, code: %d\n", (int)status); return 1; } @@ -40,8 +41,8 @@ int main(int argc, char *argv[]) { fprintf(stdout, "bbox_count=%d\n", *res_count); for (int i = 0; i < *res_count; ++i) { - const auto &box = bboxes[i].bbox; - const auto &mask = bboxes[i].mask; + const auto& box = bboxes[i].bbox; + const auto& mask = bboxes[i].mask; fprintf(stdout, "box %d, left=%.2f, top=%.2f, right=%.2f, bottom=%.2f, label=%d, score=%.4f\n", i, box.left, box.top, box.right, box.bottom, bboxes[i].label_id, bboxes[i].score); diff --git a/demo/csrc/ocr.cpp b/demo/csrc/ocr.cpp index e2d5c10fd..90ac1f8dc 100644 --- a/demo/csrc/ocr.cpp +++ b/demo/csrc/ocr.cpp @@ -3,10 +3,10 @@ #include #include -#include "text_detector.h" -#include "text_recognizer.h" +#include "mmdeploy/text_detector.h" +#include "mmdeploy/text_recognizer.h" -int main(int argc, char *argv[]) { +int main(int argc, char* argv[]) { if (argc != 5) { fprintf(stderr, "usage:\n ocr device_name det_model_path reg_model_path image_path\n"); return 1; @@ -21,36 +21,38 @@ int main(int argc, char *argv[]) { return 1; } - mm_handle_t text_detector{}; + mmdeploy_text_detector_t text_detector{}; int status{}; status = mmdeploy_text_detector_create_by_path(det_model_path, device_name, 0, &text_detector); - if (status != MM_SUCCESS) { + if (status != MMDEPLOY_SUCCESS) { fprintf(stderr, "failed to create text_detector, code: %d\n", (int)status); return 1; } - mm_handle_t text_recognizer{}; - status = mmdeploy_text_recognizer_create_by_path(reg_model_path, device_name, 0, &text_recognizer); - if (status != MM_SUCCESS) { + mmdeploy_text_recognizer_t text_recognizer{}; + status = + mmdeploy_text_recognizer_create_by_path(reg_model_path, device_name, 0, &text_recognizer); + if (status != MMDEPLOY_SUCCESS) { fprintf(stderr, "failed to create text_recognizer, code: %d\n", (int)status); return 1; } - mm_mat_t mat{img.data, img.rows, img.cols, 3, MM_BGR, MM_INT8}; + mmdeploy_mat_t mat{ + img.data, img.rows, img.cols, 3, MMDEPLOY_PIXEL_FORMAT_BGR, MMDEPLOY_DATA_TYPE_UINT8}; - mm_text_detect_t *bboxes{}; - int *bbox_count{}; + mmdeploy_text_detection_t* bboxes{}; + int* bbox_count{}; status = mmdeploy_text_detector_apply(text_detector, &mat, 1, &bboxes, &bbox_count); - if (status != MM_SUCCESS) { + if (status != MMDEPLOY_SUCCESS) { fprintf(stderr, "failed to apply text_detector, code: %d\n", (int)status); return 1; } fprintf(stdout, "bbox_count=%d\n", *bbox_count); - mm_text_recognize_t *texts{}; + mmdeploy_text_recognition_t* texts{}; status = mmdeploy_text_recognizer_apply_bbox(text_recognizer, &mat, 1, bboxes, bbox_count, &texts); - if (status != MM_SUCCESS) { + if (status != MMDEPLOY_SUCCESS) { fprintf(stderr, "failed to apply text_recognizer, code: %d\n", (int)status); return 1; } @@ -59,7 +61,7 @@ int main(int argc, char *argv[]) { fprintf(stdout, "box[%d]: %s\n", i, texts[i].text); std::vector poly_points; for (int j = 0; j < 4; ++j) { - auto const &pt = bboxes[i].bbox[j]; + auto const& pt = bboxes[i].bbox[j]; fprintf(stdout, "x: %.2f, y: 
%.2f, ", pt.x, pt.y); poly_points.push_back({(int)pt.x, (int)pt.y}); } diff --git a/demo/csrc/pose_detection.cpp b/demo/csrc/pose_detection.cpp index 253e965a8..11d0ca648 100644 --- a/demo/csrc/pose_detection.cpp +++ b/demo/csrc/pose_detection.cpp @@ -4,7 +4,7 @@ #include #include -#include "pose_detector.h" +#include "mmdeploy/pose_detector.h" int main(int argc, char *argv[]) { if (argc != 4) { @@ -20,19 +20,20 @@ int main(int argc, char *argv[]) { return 1; } - mm_handle_t pose_estimator{}; + mmdeploy_pose_detector_t pose_detector{}; int status{}; - status = mmdeploy_pose_detector_create_by_path(model_path, device_name, 0, &pose_estimator); - if (status != MM_SUCCESS) { + status = mmdeploy_pose_detector_create_by_path(model_path, device_name, 0, &pose_detector); + if (status != MMDEPLOY_SUCCESS) { fprintf(stderr, "failed to create pose_estimator, code: %d\n", (int)status); return 1; } - mm_mat_t mat{img.data, img.rows, img.cols, 3, MM_BGR, MM_INT8}; + mmdeploy_mat_t mat{ + img.data, img.rows, img.cols, 3, MMDEPLOY_PIXEL_FORMAT_BGR, MMDEPLOY_DATA_TYPE_UINT8}; - mm_pose_detect_t *res{}; - status = mmdeploy_pose_detector_apply(pose_estimator, &mat, 1, &res); - if (status != MM_SUCCESS) { + mmdeploy_pose_detection_t *res{}; + status = mmdeploy_pose_detector_apply(pose_detector, &mat, 1, &res); + if (status != MMDEPLOY_SUCCESS) { fprintf(stderr, "failed to apply pose estimator, code: %d\n", (int)status); return 1; } @@ -43,7 +44,7 @@ int main(int argc, char *argv[]) { cv::imwrite("output_pose.png", img); mmdeploy_pose_detector_release_result(res, 1); - mmdeploy_pose_detector_destroy(pose_estimator); + mmdeploy_pose_detector_destroy(pose_detector); return 0; } diff --git a/demo/csrc/pose_detector.cxx b/demo/csrc/pose_detector.cxx new file mode 100644 index 000000000..6b0803d1b --- /dev/null +++ b/demo/csrc/pose_detector.cxx @@ -0,0 +1,33 @@ + +#include "mmdeploy/pose_detector.hpp" + +#include +#include +#include + +int main(int argc, char *argv[]) { + if (argc != 4) { + fprintf(stderr, "usage:\n pose_detection device_name model_path image_path\n"); + return 1; + } + auto device_name = argv[1]; + auto model_path = argv[2]; + auto image_path = argv[3]; + cv::Mat img = cv::imread(image_path); + if (!img.data) { + fprintf(stderr, "failed to load image: %s\n", image_path); + return 1; + } + + using namespace mmdeploy; + + PoseDetector detector{Model(model_path), Device(device_name)}; + auto res = detector.Apply(img); + + for (int i = 0; i < res[0].length; i++) { + cv::circle(img, {(int)res[0].point[i].x, (int)res[0].point[i].y}, 1, {0, 255, 0}, 2); + } + cv::imwrite("output_pose.png", img); + + return 0; +} diff --git a/demo/csrc/restorer.cxx b/demo/csrc/restorer.cxx new file mode 100644 index 000000000..7c3eefd82 --- /dev/null +++ b/demo/csrc/restorer.cxx @@ -0,0 +1,34 @@ +// Copyright (c) OpenMMLab. All rights reserved. 
+ +#include "mmdeploy/restorer.hpp" + +#include +#include +#include + +int main(int argc, char* argv[]) { + if (argc != 4) { + fprintf(stderr, "usage:\n image_restorer device_name model_path image_path\n"); + return 1; + } + auto device_name = argv[1]; + auto model_path = argv[2]; + auto image_path = argv[3]; + cv::Mat img = cv::imread(image_path); + if (!img.data) { + fprintf(stderr, "failed to load image: %s\n", image_path); + return 1; + } + + using namespace mmdeploy; + + Restorer restorer{Model{model_path}, Device{device_name}}; + + auto result = restorer.Apply(img); + + cv::Mat sr_img(result->height, result->width, CV_8UC3, result->data); + cv::cvtColor(sr_img, sr_img, cv::COLOR_RGB2BGR); + cv::imwrite("output_restorer.bmp", sr_img); + + return 0; +} diff --git a/demo/csrc/rotated_detector.cxx b/demo/csrc/rotated_detector.cxx new file mode 100644 index 000000000..d590273d3 --- /dev/null +++ b/demo/csrc/rotated_detector.cxx @@ -0,0 +1,51 @@ + +#include "mmdeploy/rotated_detector.hpp" + +#include +#include +#include + +int main(int argc, char* argv[]) { + if (argc != 4) { + fprintf(stderr, "usage:\n oriented_object_detection device_name model_path image_path\n"); + return 1; + } + auto device_name = argv[1]; + auto model_path = argv[2]; + auto image_path = argv[3]; + cv::Mat img = cv::imread(image_path); + if (!img.data) { + fprintf(stderr, "failed to load image: %s\n", image_path); + return 1; + } + + mmdeploy::Model model(model_path); + mmdeploy::RotatedDetector detector(model, mmdeploy::Device{device_name, 0}); + + auto dets = detector.Apply(img); + + for (const auto& det : dets) { + if (det.score < 0.1) { + continue; + } + auto& rbbox = det.rbbox; + float xc = rbbox[0]; + float yc = rbbox[1]; + float w = rbbox[2]; + float h = rbbox[3]; + float ag = rbbox[4]; + float wx = w / 2 * std::cos(ag); + float wy = w / 2 * std::sin(ag); + float hx = -h / 2 * std::sin(ag); + float hy = h / 2 * std::cos(ag); + cv::Point p1 = {int(xc - wx - hx), int(yc - wy - hy)}; + cv::Point p2 = {int(xc + wx - hx), int(yc + wy - hy)}; + cv::Point p3 = {int(xc + wx + hx), int(yc + wy + hy)}; + cv::Point p4 = {int(xc - wx + hx), int(yc - wy + hy)}; + cv::drawContours(img, std::vector>{{p1, p2, p3, p4}}, -1, {0, 255, 0}, + 2); + } + cv::imwrite("output_rotated_detection.png", img); + + return 0; +} diff --git a/demo/csrc/rotated_object_detection.cpp b/demo/csrc/rotated_object_detection.cpp index 6735cf6fa..937c65fdb 100644 --- a/demo/csrc/rotated_object_detection.cpp +++ b/demo/csrc/rotated_object_detection.cpp @@ -4,7 +4,7 @@ #include #include -#include "rotated_detector.h" +#include "mmdeploy/rotated_detector.h" int main(int argc, char *argv[]) { if (argc != 4) { @@ -20,20 +20,21 @@ int main(int argc, char *argv[]) { return 1; } - mm_handle_t detector{}; + mmdeploy_rotated_detector_t detector{}; int status{}; status = mmdeploy_rotated_detector_create_by_path(model_path, device_name, 0, &detector); - if (status != MM_SUCCESS) { + if (status != MMDEPLOY_SUCCESS) { fprintf(stderr, "failed to create rotated detector, code: %d\n", (int)status); return 1; } - mm_mat_t mat{img.data, img.rows, img.cols, 3, MM_BGR, MM_INT8}; + mmdeploy_mat_t mat{ + img.data, img.rows, img.cols, 3, MMDEPLOY_PIXEL_FORMAT_BGR, MMDEPLOY_DATA_TYPE_UINT8}; - mm_rotated_detect_t *rbboxes{}; + mmdeploy_rotated_detection_t *rbboxes{}; int *res_count{}; status = mmdeploy_rotated_detector_apply(detector, &mat, 1, &rbboxes, &res_count); - if (status != MM_SUCCESS) { + if (status != MMDEPLOY_SUCCESS) { fprintf(stderr, "failed to apply rotated 
detector, code: %d\n", (int)status); return 1; } diff --git a/demo/csrc/segmentor.cxx b/demo/csrc/segmentor.cxx new file mode 100644 index 000000000..0c1dde49d --- /dev/null +++ b/demo/csrc/segmentor.cxx @@ -0,0 +1,58 @@ +// Copyright (c) OpenMMLab. All rights reserved. + +#include "mmdeploy/segmentor.hpp" + +#include +#include +#include +#include +#include +#include + +using namespace std; + +vector gen_palette(int num_classes) { + std::mt19937 gen; + std::uniform_int_distribution uniform_dist(0, 255); + + vector palette; + palette.reserve(num_classes); + for (auto i = 0; i < num_classes; ++i) { + palette.emplace_back(uniform_dist(gen), uniform_dist(gen), uniform_dist(gen)); + } + return palette; +} + +int main(int argc, char* argv[]) { + if (argc != 4) { + fprintf(stderr, "usage:\n image_segmentation device_name model_path image_path\n"); + return 1; + } + auto device_name = argv[1]; + auto model_path = argv[2]; + auto image_path = argv[3]; + cv::Mat img = cv::imread(image_path); + if (!img.data) { + fprintf(stderr, "failed to load image: %s\n", image_path); + return 1; + } + + using namespace mmdeploy; + + Segmentor segmentor{Model{model_path}, Device{device_name}}; + + auto result = segmentor.Apply(img); + + auto palette = gen_palette(result->classes + 1); + + cv::Mat color_mask = cv::Mat::zeros(result->height, result->width, CV_8UC3); + int pos = 0; + for (auto iter = color_mask.begin(); iter != color_mask.end(); ++iter) { + *iter = palette[result->mask[pos++]]; + } + + img = img * 0.5 + color_mask * 0.5; + cv::imwrite("output_segmentation.png", img); + + return 0; +} diff --git a/demo/csrc/text_ocr.cxx b/demo/csrc/text_ocr.cxx new file mode 100644 index 000000000..853d68107 --- /dev/null +++ b/demo/csrc/text_ocr.cxx @@ -0,0 +1,46 @@ + +#include +#include +#include + +#include "mmdeploy/text_detector.hpp" +#include "mmdeploy/text_recognizer.hpp" + +int main(int argc, char* argv[]) { + if (argc != 5) { + fprintf(stderr, "usage:\n ocr device_name det_model_path reg_model_path image_path\n"); + return 1; + } + const auto device_name = argv[1]; + auto det_model_path = argv[2]; + auto reg_model_path = argv[3]; + auto image_path = argv[4]; + cv::Mat img = cv::imread(image_path); + if (!img.data) { + fprintf(stderr, "failed to load image: %s\n", image_path); + return 1; + } + + using namespace mmdeploy; + + TextDetector detector{Model(det_model_path), Device(device_name)}; + TextRecognizer recognizer{Model(reg_model_path), Device(device_name)}; + + auto bboxes = detector.Apply(img); + auto texts = recognizer.Apply(img, {bboxes.begin(), bboxes.size()}); + + for (int i = 0; i < bboxes.size(); ++i) { + fprintf(stdout, "box[%d]: %s\n", i, texts[i].text); + std::vector poly_points; + for (const auto& pt : bboxes[i].bbox) { + fprintf(stdout, "x: %.2f, y: %.2f, ", pt.x, pt.y); + poly_points.emplace_back((int)pt.x, (int)pt.y); + } + fprintf(stdout, "\n"); + cv::polylines(img, poly_points, true, cv::Scalar{0, 255, 0}); + } + + cv::imwrite("output_ocr.png", img); + + return 0; +} diff --git a/demo/java/ImageClassification.java b/demo/java/ImageClassification.java new file mode 100644 index 000000000..6016f53b7 --- /dev/null +++ b/demo/java/ImageClassification.java @@ -0,0 +1,48 @@ +import mmdeploy.Classifier; +import mmdeploy.PixelFormat; +import mmdeploy.DataType; +import mmdeploy.Mat; + +import javax.imageio.ImageIO; +import java.awt.image.BufferedImage; +import java.awt.image.DataBufferByte; +import java.io.File; +import java.io.IOException; + +public class ImageClassification { + + public static 
void main(String[] args) { + // Parse arguments + if (args.length != 3) { + System.out.println("usage:\njava ImageClassification deviceName modelPath imagePath"); + return; + } + String deviceName = args[0]; + String modelPath = args[1]; + String imagePath = args[2]; + + // create classifier + Classifier classifier = null; + + try { + classifier = new Classifier(modelPath, deviceName, 0); + // load image + Mat img = Utils.loadImage(imagePath); + + // apply classifier + Classifier.Result[] result = classifier.apply(img); + + // print results + for (Classifier.Result value : result) { + System.out.printf("label: %d, score: %.4f\n", value.label_id, value.score); + } + } catch (Exception e) { + System.out.println("exception: " + e.getMessage()); + } finally { + // release classifier + if (classifier != null) { + classifier.release(); + } + } + } +} diff --git a/demo/java/ImageRestorer.java b/demo/java/ImageRestorer.java new file mode 100644 index 000000000..957a494b0 --- /dev/null +++ b/demo/java/ImageRestorer.java @@ -0,0 +1,48 @@ +import mmdeploy.Restorer; +import mmdeploy.PixelFormat; +import mmdeploy.DataType; +import mmdeploy.Mat; + +import javax.imageio.ImageIO; +import java.awt.image.BufferedImage; +import java.awt.image.DataBufferByte; +import java.io.File; +import java.io.IOException; + +public class ImageRestorer { + + public static void main(String[] args) { + // Parse arguments + if (args.length != 3) { + System.out.println("usage:\njava ImageRestorer deviceName modelPath imagePath"); + return; + } + String deviceName = args[0]; + String modelPath = args[1]; + String imagePath = args[2]; + + // create restorer + Restorer restorer = null; + + try { + restorer = new Restorer(modelPath, deviceName, 0); + // load image + Mat img = Utils.loadImage(imagePath); + + // apply restorer + Restorer.Result[] result = restorer.apply(img); + + // print results + for (Restorer.Result value : result) { + System.out.printf("Restore image height=%d, width=%d\n", value.res.shape[0], value.res.shape[1]); + } + } catch (Exception e) { + System.out.println("exception: " + e.getMessage()); + } finally { + // release restorer + if (restorer != null) { + restorer.release(); + } + } + } +} diff --git a/demo/java/ImageSegmentation.java b/demo/java/ImageSegmentation.java new file mode 100644 index 000000000..86953fb0e --- /dev/null +++ b/demo/java/ImageSegmentation.java @@ -0,0 +1,48 @@ +import mmdeploy.Segmentor; +import mmdeploy.PixelFormat; +import mmdeploy.DataType; +import mmdeploy.Mat; + +import javax.imageio.ImageIO; +import java.awt.image.BufferedImage; +import java.awt.image.DataBufferByte; +import java.io.File; +import java.io.IOException; + +public class ImageSegmentation { + + public static void main(String[] args) { + // Parse arguments + if (args.length != 3) { + System.out.println("usage:\njava ImageSegmentation deviceName modelPath imagePath"); + return; + } + String deviceName = args[0]; + String modelPath = args[1]; + String imagePath = args[2]; + + // create segmentor + Segmentor segmentor = null; + + try { + segmentor = new Segmentor(modelPath, deviceName, 0); + // load image + Mat img = Utils.loadImage(imagePath); + + // apply segmentor + Segmentor.Result[] result = segmentor.apply(img); + + // print results + for (Segmentor.Result value : result) { + System.out.printf("mask height=%d, width=%d\n", value.height, value.width); + } + } catch (Exception e) { + System.out.println("exception: " + e.getMessage()); + } finally { + // release segmentor + if (segmentor != null) { + 
segmentor.release(); + } + } + } +} diff --git a/demo/java/ObjectDetection.java b/demo/java/ObjectDetection.java new file mode 100644 index 000000000..01eee41b4 --- /dev/null +++ b/demo/java/ObjectDetection.java @@ -0,0 +1,68 @@ +import mmdeploy.Detector; +import mmdeploy.PixelFormat; +import mmdeploy.DataType; +import mmdeploy.Mat; + +import javax.imageio.ImageIO; + +import java.awt.Color; +import java.awt.image.BufferedImage; +import java.awt.image.DataBufferByte; +import java.awt.Graphics; +import java.io.File; +import java.io.IOException; + +public class ObjectDetection { + + public static void main(String[] args) { + // Parse arguments + if (args.length != 3) { + System.out.println("usage:\njava ObjectDetection deviceName modelPath imagePath"); + return; + } + String deviceName = args[0]; + String modelPath = args[1]; + String imagePath = args[2]; + + // create detector + Detector detector = null; + try { + detector = new Detector(modelPath, deviceName, 0); + // load image + BufferedImage srcImg = ImageIO.read(new File(imagePath)); + Mat img = Utils.bufferedImage2Mat(srcImg); + + // apply detector + Detector.Result[] result = detector.apply(img); + // print results + Graphics ghandle = srcImg.createGraphics(); + for (int i = 0; i < result.length; i++) { + Detector.Result value = result[i]; + System.out.printf("box %d, left=%.2f, top=%.2f, right=%.2f, bottom=%.2f, label=%d, score=%.4f\n", + i, value.bbox.left, value.bbox.top, value.bbox.right, value.bbox.bottom, value.label_id, value.score); + if ((value.bbox.right - value.bbox.left) < 1 || (value.bbox.bottom - value.bbox.top) < 1) { + continue; + } + + // skip detections less than specified score threshold + if (value.score < 0.3) { + continue; + } + if (value.mask != null) { + System.out.printf("mask %d, height=%d, width=%d\n", i, value.mask.shape[0], value.mask.shape[1]); + } + ghandle.setColor(new Color(0, 255, 0)); + ghandle.drawRect((int)value.bbox.left, (int)value.bbox.top, (int)value.bbox.right - (int)value.bbox.left + 1, (int)value.bbox.bottom - (int)value.bbox.top + 1); + } + ghandle.dispose(); + ImageIO.write(srcImg, "png", new File("output_detection.png")); + } catch (Exception e) { + System.out.println("exception: " + e.getMessage()); + } finally { + // release detector + if (detector != null) { + detector.release(); + } + } + } +} diff --git a/demo/java/Ocr.java b/demo/java/Ocr.java new file mode 100644 index 000000000..1edc04d77 --- /dev/null +++ b/demo/java/Ocr.java @@ -0,0 +1,60 @@ +import mmdeploy.TextDetector; +import mmdeploy.TextRecognizer; +import mmdeploy.PixelFormat; +import mmdeploy.DataType; +import mmdeploy.Mat; + +import javax.imageio.ImageIO; +import java.awt.image.BufferedImage; +import java.awt.image.DataBufferByte; +import java.io.File; +import java.io.IOException; + +public class Ocr { + + public static void main(String[] args) { + // Parse arguments + if (args.length != 4) { + System.out.println("usage:\njava TextDetection deviceName detModelPath recModelPath imagePath"); + return; + } + String deviceName = args[0]; + String detModelPath = args[1]; + String recModelPath = args[2]; + String imagePath = args[3]; + + // create text detector and recognizer + TextDetector text_detector = null; + TextRecognizer text_recognizer = null; + + try { + text_detector = new TextDetector(detModelPath, deviceName, 0); + text_recognizer = new TextRecognizer(recModelPath, deviceName, 0); + // load image + Mat img = Utils.loadImage(imagePath); + + // apply text detector + TextDetector.Result[] detResult = 
text_detector.apply(img); + int [] detResultCount = {detResult.length}; + TextRecognizer.Result[] recResult = text_recognizer.applyBbox(img, detResult, detResultCount); + // print results + for (int i = 0; i < detResultCount[0]; ++i) { + System.out.printf("box[%d]: %s\n", i, new String(recResult[i].text)); + for (int j = 0; j < 4; ++j) { + System.out.printf("x: %.2f, y: %.2f, ", detResult[i].bbox[j].x, detResult[i].bbox[j].y); + } + System.out.printf("\n"); + } + } catch (Exception e) { + System.out.println("exception: " + e.getMessage()); + } finally { + // release text detector and recognizer + if (text_recognizer != null) { + text_recognizer.release(); + } + if (text_detector != null) { + text_detector.release(); + } + } + } +} diff --git a/demo/java/PoseDetection.java b/demo/java/PoseDetection.java new file mode 100644 index 000000000..e2c381ac3 --- /dev/null +++ b/demo/java/PoseDetection.java @@ -0,0 +1,50 @@ +import mmdeploy.PoseDetector; +import mmdeploy.PixelFormat; +import mmdeploy.DataType; +import mmdeploy.Mat; + +import javax.imageio.ImageIO; +import java.awt.image.BufferedImage; +import java.awt.image.DataBufferByte; +import java.io.File; +import java.io.IOException; + +public class PoseDetection { + + public static void main(String[] args) { + // Parse arguments + if (args.length != 3) { + System.out.println("usage:\njava PoseDetection deviceName modelPath imagePath"); + return; + } + String deviceName = args[0]; + String modelPath = args[1]; + String imagePath = args[2]; + + // create pose estimator + PoseDetector pose_estimator = null; + + try { + pose_estimator = new PoseDetector(modelPath, deviceName, 0); + // load image + Mat img = Utils.loadImage(imagePath); + + // apply pose estimator + PoseDetector.Result[] result = pose_estimator.apply(img); + + // print results + for (PoseDetector.Result value : result) { + for (int i = 0; i < value.point.length; i++) { + System.out.printf("point %d, x: %d, y: %d\n", i, (int)value.point[i].x, (int)value.point[i].y); + } + } + } catch (Exception e) { + System.out.println("exception: " + e.getMessage()); + } finally { + // release pose estimator + if (pose_estimator != null) { + pose_estimator.release(); + } + } + } +} diff --git a/demo/java/README.md b/demo/java/README.md new file mode 100644 index 000000000..b1e3dc42b --- /dev/null +++ b/demo/java/README.md @@ -0,0 +1,22 @@ +# Usage + +**step 1.** Compile Utils with Java APIs. + +``` +cd demo/java +javac --class-path ../../csrc/mmdeploy/apis/java/ Utils.java +cd ../.. +``` + +**step 2.** Run the demo in the console. + +Use **ImageClassification** as example. + +First, you should set your model path and image path to `${MODEL_PATH}` and `${IMAGE_PATH}`. And then follow the bash codes. 
+ +```bash +export TASK=ImageClassification +export LD_LIBRARY_PATH=${PWD}/build/lib:${LD_LIBRARY_PATH} +cd demo/java +java -cp ../../csrc/mmdeploy/apis/java:./ ${TASK}.java cpu ${MODEL_PATH} ${IMAGE_PATH} +``` diff --git a/demo/java/Utils.java b/demo/java/Utils.java new file mode 100644 index 000000000..daf0607f1 --- /dev/null +++ b/demo/java/Utils.java @@ -0,0 +1,21 @@ +import mmdeploy.PixelFormat; +import mmdeploy.DataType; +import mmdeploy.Mat; + +import javax.imageio.ImageIO; +import java.awt.image.BufferedImage; +import java.awt.image.DataBufferByte; +import java.io.File; +import java.io.IOException; + +public class Utils { + public static Mat loadImage(String path) throws IOException { + BufferedImage img = ImageIO.read(new File(path)); + return bufferedImage2Mat(img); + } + public static Mat bufferedImage2Mat(BufferedImage img) { + byte[] data = ((DataBufferByte) img.getData().getDataBuffer()).getData(); + return new Mat(img.getHeight(), img.getWidth(), img.getColorModel().getNumComponents(), + PixelFormat.BGR, DataType.INT8, data); + } +} diff --git a/demo/python/image_classification.py b/demo/python/image_classification.py index aae3f744b..9ef5ce103 100644 --- a/demo/python/image_classification.py +++ b/demo/python/image_classification.py @@ -8,11 +8,11 @@ from mmdeploy_python import Classifier def parse_args(): parser = argparse.ArgumentParser( description='show how to use sdk python api') + parser.add_argument('device_name', help='name of device, cuda or cpu') parser.add_argument( - 'model_path', help='the directory path of mmdeploy model') - parser.add_argument('image_path', help='the path of an image') - parser.add_argument( - '--device-name', default='cpu', help='the name of device, cuda or cpu') + 'model_path', + help='path of mmdeploy SDK model dumped by model converter') + parser.add_argument('image_path', help='path of an image') args = parser.parse_args() return args @@ -21,9 +21,10 @@ def main(): args = parse_args() img = cv2.imread(args.image_path) - classifier = Classifier(args.model_path, args.device_name, 0) - result = classifier([img]) - for label_id, score in result[0]: + classifier = Classifier( + model_path=args.model_path, device_name=args.device_name, device_id=0) + result = classifier(img) + for label_id, score in result: print(label_id, score) diff --git a/demo/python/image_restorer.py b/demo/python/image_restorer.py index 8b0274c62..ed10b153f 100644 --- a/demo/python/image_restorer.py +++ b/demo/python/image_restorer.py @@ -8,11 +8,10 @@ from mmdeploy_python import Restorer def parse_args(): parser = argparse.ArgumentParser( description='show how to use sdk python api') + parser.add_argument('device_name', help='name of device, cuda or cpu') parser.add_argument( - 'model_path', help='the directory path of mmdeploy model') - parser.add_argument('image_path', help='the path of an image') - parser.add_argument( - '--device-name', default='cpu', help='the name of device, cuda or cpu') + 'model_path', help='path of SDK model dumped by model converter') + parser.add_argument('image_path', help='path of an image') args = parser.parse_args() return args @@ -22,8 +21,9 @@ def main(): img = cv2.imread(args.image_path) - restorer = Restorer(args.model_path, args.device_name, 0) - result = restorer([img])[0] + restorer = Restorer( + model_path=args.model_path, device_name=args.device_name, device_id=0) + result = restorer(img) # convert to BGR result = result[..., ::-1] diff --git a/demo/python/image_segmentation.py b/demo/python/image_segmentation.py index 
3c106a565..32391f434 100644 --- a/demo/python/image_segmentation.py +++ b/demo/python/image_segmentation.py @@ -9,11 +9,11 @@ from mmdeploy_python import Segmentor def parse_args(): parser = argparse.ArgumentParser( description='show how to use sdk python api') + parser.add_argument('device_name', help='name of device, cuda or cpu') parser.add_argument( - 'model_path', help='the directory path of mmdeploy model') - parser.add_argument('image_path', help='the path of an image') - parser.add_argument( - '--device-name', default='cpu', help='the name of device, cuda or cpu') + 'model_path', + help='path of mmdeploy SDK model dumped by model converter') + parser.add_argument('image_path', help='path of an image') args = parser.parse_args() return args @@ -32,8 +32,9 @@ def main(): img = cv2.imread(args.image_path) - segmentor = Segmentor(args.model_path, args.device_name, 0) - seg = segmentor([img])[0] + segmentor = Segmentor( + model_path=args.model_path, device_name=args.device_name, device_id=0) + seg = segmentor(img) palette = get_palette() color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8) diff --git a/demo/python/object_detection.py b/demo/python/object_detection.py index 8a9df839e..a584d4dd4 100644 --- a/demo/python/object_detection.py +++ b/demo/python/object_detection.py @@ -2,18 +2,17 @@ import argparse import cv2 -import numpy as np from mmdeploy_python import Detector def parse_args(): parser = argparse.ArgumentParser( description='show how to use sdk python api') + parser.add_argument('device_name', help='name of device, cuda or cpu') parser.add_argument( - 'model_path', help='the directory path of mmdeploy model') - parser.add_argument('image_path', help='the path of an image') - parser.add_argument( - '--device-name', default='cpu', help='the name of device, cuda or cpu') + 'model_path', + help='path of mmdeploy SDK model dumped by model converter') + parser.add_argument('image_path', help='path of an image') args = parser.parse_args() return args @@ -22,11 +21,9 @@ def main(): args = parse_args() img = cv2.imread(args.image_path) - detector = Detector(args.model_path, args.device_name, 0) - bboxes, labels, masks = detector([img])[0] - assert (isinstance(bboxes, np.ndarray)) - assert (isinstance(labels, np.ndarray)) - assert (isinstance(masks, list)) + detector = Detector( + model_path=args.model_path, device_name=args.device_name, device_id=0) + bboxes, labels, masks = detector(img) indices = [i for i in range(len(bboxes))] for index, bbox, label_id in zip(indices, bboxes, labels): diff --git a/demo/python/ocr.py b/demo/python/ocr.py index b6d2dda08..6f02b5b04 100644 --- a/demo/python/ocr.py +++ b/demo/python/ocr.py @@ -2,23 +2,26 @@ import argparse import cv2 -from mmdeploy_python import TextDetector +from mmdeploy_python import TextDetector, TextRecognizer def parse_args(): parser = argparse.ArgumentParser( description='show how to use sdk python api') + parser.add_argument('device_name', help='name of device, cuda or cpu') + parser.add_argument('image_path', help='path of an image') parser.add_argument( '--textdet', default='', - help='the directory path of mmdeploy text-detector sdk model') + help='path of mmdeploy text-detector SDK model dumped by' + 'model converter', + ) parser.add_argument( '--textrecog', default='', - help='the directory path of mmdeploy text-recognizer sdk model') - parser.add_argument('image_path', help='the path of an image') - parser.add_argument( - '--device-name', default='cpu', help='the name of device, cuda or cpu') + 
help='path of mmdeploy text-recognizer SDK model dumped by' + 'model converter', + ) args = parser.parse_args() return args @@ -29,15 +32,37 @@ def main(): img = cv2.imread(args.image_path) if args.textdet: - detector = TextDetector(args.textdet, args.device_name, 0) - bboxes = detector([img])[0] + detector = TextDetector( + model_path=args.textdet, device_name=args.device_name, device_id=0) + bboxes = detector(img) + print(f'bboxes.shape={bboxes.shape}') + print(f'bboxes={bboxes}') + if len(bboxes) > 0: + pts = ((bboxes[:, 0:8] + 0.5).reshape(len(bboxes), -1, + 2).astype(int)) + cv2.polylines(img, pts, True, (0, 255, 0), 2) + cv2.imwrite('output_ocr.png', img) - pts = (bboxes[:, 0:8] + 0.5).reshape(len(bboxes), -1, 2).astype(int) - cv2.polylines(img, pts, True, (0, 255, 0), 2) - cv2.imwrite('output_ocr.png', img) + if len(bboxes) > 0 and args.textrecog: + recognizer = TextRecognizer( + model_path=args.textrecog, + device_name=args.device_name, + device_id=0, + ) + texts = recognizer(img, bboxes.flatten().tolist()) + print(texts) - if args.textrecog: - print('API of TextRecognizer does not support bbox as argument yet') + elif args.textrecog: + recognizer = TextRecognizer( + model_path=args.textrecog, + device_name=args.device_name, + device_id=0, + ) + texts = recognizer(img) + print(texts) + else: + print('do nothing since neither text detection sdk model or ' + 'text recognition sdk model in input') if __name__ == '__main__': diff --git a/demo/python/pose_detection.py b/demo/python/pose_detection.py index d5656b5af..2eebd12bb 100644 --- a/demo/python/pose_detection.py +++ b/demo/python/pose_detection.py @@ -9,16 +9,17 @@ from mmdeploy_python import PoseDetector def parse_args(): parser = argparse.ArgumentParser( description='show how to use sdk python api') + parser.add_argument('device_name', help='name of device, cuda or cpu') parser.add_argument( - 'model_path', help='the directory path of mmdeploy model') - parser.add_argument('image_path', help='the path of an image') + 'model_path', + help='path of mmdeploy SDK model dumped by model converter') + parser.add_argument('image_path', help='path of an image') parser.add_argument( '--bbox', default=None, nargs='+', + type=int, help='bounding box of an object in format (x, y, w, h)') - parser.add_argument( - '--device-name', default='cpu', help='the name of device, cuda or cpu') args = parser.parse_args() return args @@ -28,17 +29,18 @@ def main(): img = cv2.imread(args.image_path) - bboxes = [] + detector = PoseDetector( + model_path=args.model_path, device_name=args.device_name, device_id=0) + if args.bbox is None: - bbox = [0, 0, img.shape[1], img.shape[0]] + result = detector(img) else: - # x, y, w, h -> left, top, right, bottom + # converter (x, y, w, h) -> (left, top, right, bottom) + print(args.bbox) bbox = np.array(args.bbox, dtype=int) bbox[2:] += bbox[:2] - bboxes.append(bbox) - - detector = PoseDetector(args.model_path, args.device_name, 0) - result = detector([img], [bboxes])[0] + result = detector(img, bbox) + print(result) _, point_num, _ = result.shape points = result[:, :, :2].reshape(point_num, 2) diff --git a/demo/python/rotated_object_detection.py b/demo/python/rotated_object_detection.py index 3ac288cb4..4f02d5d1f 100644 --- a/demo/python/rotated_object_detection.py +++ b/demo/python/rotated_object_detection.py @@ -10,11 +10,10 @@ from mmdeploy_python import RotatedDetector def parse_args(): parser = argparse.ArgumentParser( description='show how to use sdk python api') + parser.add_argument('device_name', help='name 
of device, cuda or cpu') parser.add_argument( - 'model_path', help='the directory path of mmdeploy model') - parser.add_argument('image_path', help='the path of an image') - parser.add_argument( - '--device-name', default='cpu', help='the name of device, cuda or cpu') + 'model_path', help='path of SDK model dumped by model converter') + parser.add_argument('image_path', help='path of an image') args = parser.parse_args() return args @@ -23,9 +22,10 @@ def main(): args = parse_args() img = cv2.imread(args.image_path) - detector = RotatedDetector(args.model_path, args.device_name, 0) - rbboxes, labels = detector([img])[0] - # print(rbboxes, labels) + detector = RotatedDetector( + model_path=args.model_path, device_name=args.device_name, device_id=0) + rbboxes, labels = detector(img) + indices = [i for i in range(len(rbboxes))] for index, rbbox, label_id in zip(indices, rbboxes, labels): [cx, cy, w, h, angle], score = rbbox[0:5], rbbox[-1] diff --git a/docker/CPU/Dockerfile b/docker/CPU/Dockerfile index 8ebaab9e4..f076f26e9 100644 --- a/docker/CPU/Dockerfile +++ b/docker/CPU/Dockerfile @@ -1,9 +1,9 @@ FROM openvino/ubuntu18_dev:2021.4.2 ARG PYTHON_VERSION=3.7 -ARG TORCH_VERSION=1.8.0 -ARG TORCHVISION_VERSION=0.9.0 +ARG TORCH_VERSION=1.10.0 +ARG TORCHVISION_VERSION=0.11.0 ARG ONNXRUNTIME_VERSION=1.8.1 -ARG MMCV_VERSION=1.4.0 +ARG MMCV_VERSION=1.5.3 USER root ### change the system source for installing libs @@ -93,6 +93,7 @@ RUN git clone https://github.com/open-mmlab/mmdeploy.git &&\ ENV LD_LIBRARY_PATH="/root/workspace/mmdeploy/build/lib:/opt/intel/openvino/deployment_tools/ngraph/lib:/opt/intel/openvino/deployment_tools/inference_engine/lib/intel64:${LD_LIBRARY_PATH}" RUN cd mmdeploy && rm -rf build/CM* && mkdir -p build && cd build && cmake .. \ -DMMDEPLOY_BUILD_SDK=ON \ + -DMMDEPLOY_BUILD_EXAMPLES=ON \ -DCMAKE_CXX_COMPILER=g++-7 \ -DONNXRUNTIME_DIR=${ONNXRUNTIME_DIR} \ -Dncnn_DIR=/root/workspace/ncnn/build/install/lib/cmake/ncnn \ @@ -102,9 +103,5 @@ RUN cd mmdeploy && rm -rf build/CM* && mkdir -p build && cd build && cmake .. \ -DMMDEPLOY_TARGET_BACKENDS="ort;ncnn;openvino" \ -DMMDEPLOY_CODEBASES=all &&\ cmake --build . -- -j$(nproc) && cmake --install . &&\ - cd install/example && mkdir -p build && cd build &&\ - cmake .. -DMMDeploy_DIR=/root/workspace/mmdeploy/build/install/lib/cmake/MMDeploy \ - -DInferenceEngine_DIR=/opt/intel/openvino/deployment_tools/inference_engine/share \ - -Dncnn_DIR=/root/workspace/ncnn/build/install/lib/cmake/ncnn &&\ - cmake --build . && export SPDLOG_LEVEL=warn &&\ + export SPDLOG_LEVEL=warn &&\ if [ -z ${VERSION} ] ; then echo "Built MMDeploy master for CPU devices successfully!" ; else echo "Built MMDeploy version v${VERSION} for CPU devices successfully!" 
; fi diff --git a/docker/GPU/Dockerfile b/docker/GPU/Dockerfile index 9d08e144c..fa9319289 100644 --- a/docker/GPU/Dockerfile +++ b/docker/GPU/Dockerfile @@ -1,12 +1,12 @@ -FROM nvcr.io/nvidia/tensorrt:21.04-py3 +FROM nvcr.io/nvidia/tensorrt:22.04-py3 -ARG CUDA=10.2 +ARG CUDA=11.3 ARG PYTHON_VERSION=3.8 -ARG TORCH_VERSION=1.8.0 -ARG TORCHVISION_VERSION=0.9.0 +ARG TORCH_VERSION=1.10.0 +ARG TORCHVISION_VERSION=0.11.0 ARG ONNXRUNTIME_VERSION=1.8.1 -ARG MMCV_VERSION=1.4.0 -ARG PPLCV_VERSION=0.6.2 +ARG MMCV_VERSION=1.5.3 +ARG PPLCV_VERSION=0.7.0 ENV FORCE_CUDA="1" ENV DEBIAN_FRONTEND=noninteractive @@ -72,13 +72,14 @@ RUN git clone https://github.com/openppl-public/ppl.cv.git &&\ ./build.sh cuda ENV BACKUP_LD_LIBRARY_PATH=$LD_LIBRARY_PATH -ENV LD_LIBRARY_PATH=/usr/local/cuda-11.3/compat/lib.real/:$LD_LIBRARY_PATH +ENV LD_LIBRARY_PATH=/usr/local/cuda/compat/lib.real/:$LD_LIBRARY_PATH RUN cd /root/workspace/mmdeploy &&\ rm -rf build/CM* build/cmake-install.cmake build/Makefile build/csrc &&\ mkdir -p build && cd build &&\ cmake .. \ -DMMDEPLOY_BUILD_SDK=ON \ + -DMMDEPLOY_BUILD_EXAMPLES=ON \ -DCMAKE_CXX_COMPILER=g++ \ -Dpplcv_DIR=/root/workspace/ppl.cv/cuda-build/install/lib/cmake/ppl \ -DTENSORRT_DIR=${TENSORRT_DIR} \ @@ -88,9 +89,7 @@ RUN cd /root/workspace/mmdeploy &&\ -DMMDEPLOY_TARGET_BACKENDS="ort;trt" \ -DMMDEPLOY_CODEBASES=all &&\ make -j$(nproc) && make install &&\ - cd install/example && mkdir -p build && cd build &&\ - cmake -DMMDeploy_DIR=/root/workspace/mmdeploy/build/install/lib/cmake/MMDeploy .. &&\ - make -j$(nproc) && export SPDLOG_LEVEL=warn &&\ + export SPDLOG_LEVEL=warn &&\ if [ -z ${VERSION} ] ; then echo "Built MMDeploy master for GPU devices successfully!" ; else echo "Built MMDeploy version v${VERSION} for GPU devices successfully!" ; fi ENV LD_LIBRARY_PATH="/root/workspace/mmdeploy/build/lib:${BACKUP_LD_LIBRARY_PATH}" diff --git a/docs/en/01-how-to-build/android.md b/docs/en/01-how-to-build/android.md index c35938ede..0285f9cf2 100644 --- a/docs/en/01-how-to-build/android.md +++ b/docs/en/01-how-to-build/android.md @@ -7,8 +7,7 @@ - [Install Dependencies for SDK](#install-dependencies-for-sdk) - [Build MMDeploy](#build-mmdeploy) - [Build Options Spec](#build-options-spec) - - [Build SDK](#build-sdk) - - [Build Demo](#build-demo) + - [Build SDK and Demos](#build-sdk-and-demos) ______________________________________________________________________ @@ -40,12 +39,12 @@ This doc is only for how to build SDK using android toolchain on linux. - ANDROID NDK 19+ - **Make sure android ndk version >= 19.0**. If not, you can follow instructions below to install android ndk r23b. For more versions of android ndk, please refer to [android ndk website](https://developer.android.com/ndk/downloads). + **Make sure android ndk version >= 19.0**. If not, you can follow instructions below to install android ndk r23c. For more versions of android ndk, please refer to [android ndk website](https://developer.android.com/ndk/downloads). ```bash - wget https://dl.google.com/android/repository/android-ndk-r23b-linux.zip - unzip android-ndk-r23b-linux.zip - cd android-ndk-r23b + wget https://dl.google.com/android/repository/android-ndk-r23c-linux.zip + unzip android-ndk-r23c-linux.zip + cd android-ndk-r23c export NDK_PATH=${PWD} ``` @@ -67,7 +66,7 @@ You can skip this chapter if only interested in model converter. OpenCV
(>=3.0)

-export OPENCV_VERSION=4.5.4
+export OPENCV_VERSION=4.6.0
 wget https://github.com/opencv/opencv/releases/download/${OPENCV_VERSION}/opencv-${OPENCV_VERSION}-android-sdk.zip
 unzip opencv-${OPENCV_VERSION}-android-sdk.zip
 export OPENCV_ANDROID_SDK_DIR=${PWD}/OpenCV-android-sdk
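For orientation, the SDK recipe later in this patch points `OpenCV_DIR` at the per-ABI cmake config inside this unpacked SDK. A quick hedged sanity check (the `abi-arm64-v8a` path reflects the standard OpenCV Android SDK layout):

```bash
# Confirm the per-ABI OpenCV cmake package exists before the SDK build
# consumes it via -DOpenCV_DIR=${OPENCV_ANDROID_SDK_DIR}/sdk/native/jni/abi-<ABI>.
ls ${OPENCV_ANDROID_SDK_DIR}/sdk/native/jni/abi-arm64-v8a
```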
@@ -78,18 +77,29 @@ export OPENCV_ANDROID_SDK_DIR=${PWD}/OpenCV-android-sdk
   
     ncnn 
    A high-performance neural network inference computing framework with support for android.
- Now, MMDeploy supports v20220216 and has to use git clone to download it.
+ Now, MMDeploy supports ncnn 20220721, which has to be downloaded via git clone. For the supported android ABIs, see here .

-git clone -b 20220216 https://github.com/Tencent/ncnn.git
+git clone -b 20220721 https://github.com/Tencent/ncnn.git
 cd ncnn
 git submodule update --init
 export NCNN_DIR=${PWD}
-mkdir -p build
-cd build
-cmake -DCMAKE_TOOLCHAIN_FILE=${NDK_PATH}/build/cmake/android.toolchain.cmake -DANDROID_ABI="arm64-v8a" -DANDROID_PLATFORM=android-30 -DNCNN_VULKAN=ON -DNCNN_DISABLE_EXCEPTION=OFF -DNCNN_DISABLE_RTTI=OFF ..
-make install
+
+export ANDROID_ABI=arm64-v8a
+
+mkdir -p build_${ANDROID_ABI}
+cd build_${ANDROID_ABI}
+
+cmake -DCMAKE_TOOLCHAIN_FILE=${NDK_PATH}/build/cmake/android.toolchain.cmake -DANDROID_ABI="${ANDROID_ABI}" -DANDROID_PLATFORM=android-30 -DNCNN_VULKAN=ON -DNCNN_DISABLE_EXCEPTION=OFF -DNCNN_DISABLE_RTTI=OFF ..
+make -j$(nproc) install
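Because each ABI now builds into its own `build_${ANDROID_ABI}` directory, several ncnn builds can coexist. A minimal sketch, assuming the same NDK toolchain setup as above, for adding an `armeabi-v7a` build alongside the `arm64-v8a` one:

```bash
# Sketch: repeat the recipe above for a second ABI; installs land in
# separate build_${ANDROID_ABI} trees and do not overwrite each other.
export ANDROID_ABI=armeabi-v7a
cd ${NCNN_DIR}
mkdir -p build_${ANDROID_ABI} && cd build_${ANDROID_ABI}
cmake -DCMAKE_TOOLCHAIN_FILE=${NDK_PATH}/build/cmake/android.toolchain.cmake \
      -DANDROID_ABI="${ANDROID_ABI}" -DANDROID_PLATFORM=android-30 \
      -DNCNN_VULKAN=ON -DNCNN_DISABLE_EXCEPTION=OFF -DNCNN_DISABLE_RTTI=OFF ..
make -j$(nproc) install
```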
 
-
+
+
+
+
+ OpenJDK
+ It is necessary for building Java API.
+ See Java API build for building tutorials.
+
@@ -120,6 +130,12 @@ make install
 OFF
 switch to build MMDeploy SDK python package
+
+ MMDEPLOY_BUILD_SDK_JAVA_API
+ {ON, OFF}
+ OFF
+ switch to build MMDeploy SDK Java API
+
 MMDEPLOY_BUILD_TEST
 {ON, OFF}
@@ -139,14 +155,14 @@ make install
 Enabling inference engine.
By default, no target inference engine is set, since it highly depends on the use case.
Only ncnn backend is supported for android platform now.
After specifying the inference engine, its package path has to be passed to cmake as follows,<br>
1. ncnn: ncnn. ncnn_DIR is needed. -
-Dncnn_DIR=${NCNN_DIR}/build/install/lib/cmake/ncnn
+
-Dncnn_DIR=${NCNN_DIR}/build_${ANDROID_ABI}/install/lib/cmake/ncnn
MMDEPLOY_CODEBASES {"mmcls", "mmdet", "mmseg", "mmedit", "mmocr", "all"} N/A - Enable codebase's postprocess modules. It MUST be set by a semicolon separated list of codebase names. The currently supported codebases are 'mmcls', 'mmdet', 'mmedit', 'mmseg', 'mmocr'. Instead of listing them one by one, you can also pass all to enable them all, i.e., -DMMDEPLOY_CODEBASES=all + Enable codebase's postprocess modules. It MUST be set by a semicolon separated list of codebase names. The currently supported codebases are 'mmcls', 'mmdet', 'mmedit', 'mmseg', 'mmocr'. Instead of listing them one by one, you can also pass all to enable them all, i.e., -DMMDEPLOY_CODEBASES=allPlease manually edit csrc/mmdeploy/apis/java/native/CMakeLists.txt to avoid compilation errors. MMDEPLOY_SHARED_LIBS @@ -157,40 +173,28 @@ make install -#### Build SDK +#### Build SDK and Demos MMDeploy provides a recipe as shown below for building SDK with ncnn as inference engine for android. - cpu + ncnn ```Bash + export ANDROID_ABI=arm64-v8a cd ${MMDEPLOY_DIR} - mkdir -p build && cd build + mkdir -p build_${ANDROID_ABI} && cd build_${ANDROID_ABI} cmake .. \ -DMMDEPLOY_BUILD_SDK=ON \ - -DOpenCV_DIR=${OPENCV_ANDROID_SDK_DIR}/sdk/native/jni/abi-arm64-v8a \ - -Dncnn_DIR=${NCNN_DIR}/build/install/lib/cmake/ncnn \ + -DMMDEPLOY_BUILD_SDK_JAVA_API=ON \ + -DMMDEPLOY_BUILD_EXAMPLES=ON \ + -DOpenCV_DIR=${OPENCV_ANDROID_SDK_DIR}/sdk/native/jni/abi-${ANDROID_ABI} \ + -Dncnn_DIR=${NCNN_DIR}/build_${ANDROID_ABI}/install/lib/cmake/ncnn \ -DMMDEPLOY_TARGET_BACKENDS=ncnn \ -DMMDEPLOY_CODEBASES=all \ - -DMMDEPLOY_SHARED_LIBS=OFF \ + -DMMDEPLOY_SHARED_LIBS=ON \ -DCMAKE_TOOLCHAIN_FILE=${NDK_PATH}/build/cmake/android.toolchain.cmake \ - -DANDROID_ABI=arm64-v8a \ + -DANDROID_ABI=${ANDROID_ABI} \ -DANDROID_PLATFORM=android-30 \ -DANDROID_CPP_FEATURES="rtti exceptions" make -j$(nproc) && make install ``` - -#### Build Demo - -```Bash -cd ${MMDEPLOY_DIR}/build/install/example -mkdir -p build && cd build -cmake .. \ - -DOpenCV_DIR=${OPENCV_ANDROID_SDK_DIR}/sdk/native/jni/abi-arm64-v8a \ - -Dncnn_DIR=${NCNN_DIR}/build/install/lib/cmake/ncnn \ - -DMMDeploy_DIR=${MMDEPLOY_DIR}/build/install/lib/cmake/MMDeploy \ - -DCMAKE_TOOLCHAIN_FILE=${NDK_PATH}/build/cmake/android.toolchain.cmake \ - -DANDROID_ABI=arm64-v8a \ - -DANDROID_PLATFORM=android-30 -make -j$(nproc) -``` diff --git a/docs/en/01-how-to-build/build_from_docker.md b/docs/en/01-how-to-build/build_from_docker.md index 0157c92e0..b2f759bfa 100644 --- a/docs/en/01-how-to-build/build_from_docker.md +++ b/docs/en/01-how-to-build/build_from_docker.md @@ -1,8 +1,8 @@ -## Use Docker Image +# Use Docker Image We provide two dockerfiles for CPU and GPU respectively. For CPU users, we install MMDeploy with ONNXRuntime, ncnn and OpenVINO backends. For GPU users, we install MMDeploy with TensorRT backend. Besides, users can install mmdeploy with different versions when building the docker image. -### Build docker image +## Build docker image For CPU users, we can build the docker image with the latest MMDeploy through: @@ -37,15 +37,15 @@ cd mmdeploy docker build docker/CPU/ -t mmdeploy:inside --build-arg USE_SRC_INSIDE=true ``` -### Run docker container +## Run docker container After building the docker image succeed, we can use `docker run` to launch the docker service. GPU docker image for example: ``` -docker run --gpus all -it -p 8080:8081 mmdeploy:master-gpu +docker run --gpus all -it mmdeploy:master-gpu ``` -### FAQs +## FAQs 1. 
CUDA error: the provided PTX was compiled with an unsupported toolchain: diff --git a/docs/en/01-how-to-build/build_from_source.md b/docs/en/01-how-to-build/build_from_source.md index d20b8a456..2aa6ecc85 100644 --- a/docs/en/01-how-to-build/build_from_source.md +++ b/docs/en/01-how-to-build/build_from_source.md @@ -37,3 +37,4 @@ Please visit the following links to find out how to build MMDeploy according to - [Windows](windows.md) - [Android-aarch64](android.md) - [NVIDIA Jetson](jetsons.md) +- [SNPE](snpe.md) diff --git a/docs/en/01-how-to-build/jetsons.md b/docs/en/01-how-to-build/jetsons.md index a780bfd6a..2accf7534 100644 --- a/docs/en/01-how-to-build/jetsons.md +++ b/docs/en/01-how-to-build/jetsons.md @@ -235,7 +235,7 @@ You can find a full list of custom plugins from [here](../ops/tensorrt.md). # build TensorRT custom operators mkdir -p build && cd build cmake .. -DMMDEPLOY_TARGET_BACKENDS="trt" -make -j$(nproc) +make -j$(nproc) && make install # install model converter cd ${MMDEPLOY_DIR} @@ -251,13 +251,14 @@ It takes about 5 minutes to install model converter on a Jetson Nano. So, please ### Install C/C++ Inference SDK -1. Build SDK Libraries +Build SDK Libraries and its demo as below: ```shell mkdir -p build && cd build cmake .. \ -DMMDEPLOY_BUILD_SDK=ON \ -DMMDEPLOY_BUILD_SDK_PYTHON_API=ON \ + -DMMDEPLOY_BUILD_EXAMPLES=ON \ -DMMDEPLOY_TARGET_DEVICES="cuda;cpu" \ -DMMDEPLOY_TARGET_BACKENDS="trt" \ -DMMDEPLOY_CODEBASES=all \ @@ -269,15 +270,6 @@ make -j$(nproc) && make install It takes about 9 minutes to build SDK libraries on a Jetson Nano. So, please be patient until the installation is complete. ``` -2. Build SDK demos - -```shell -cd ${MMDEPLOY_DIR}/build/install/example -mkdir -p build && cd build -cmake .. -DMMDeploy_DIR=${MMDEPLOY_DIR}/build/install/lib/cmake/MMDeploy -make -j$(nproc) -``` - ### Run a Demo #### Object Detection demo diff --git a/docs/en/01-how-to-build/linux-x86_64.md b/docs/en/01-how-to-build/linux-x86_64.md index 101cbc99e..126a19242 100644 --- a/docs/en/01-how-to-build/linux-x86_64.md +++ b/docs/en/01-how-to-build/linux-x86_64.md @@ -11,8 +11,7 @@ - [Build Model Converter](#build-model-converter) - [Build Custom Ops](#build-custom-ops) - [Install Model Converter](#install-model-converter) - - [Build SDK](#build-sdk) - - [Build Demo](#build-demo) + - [Build SDK and Demo](#build-sdk-and-demo) ______________________________________________________________________ @@ -110,13 +109,12 @@ sudo apt-get install libopencv-dev pplcv A high-performance image processing library of openPPL.
- It is optional which only be needed if cuda platform is required. - Now, MMDeploy supports v0.6.2 and has to use git clone to download it.
+ It is optional, and only needed if the cuda platform is required.<br>

 git clone https://github.com/openppl-public/ppl.cv.git
 cd ppl.cv
 export PPLCV_DIR=$(pwd)
-git checkout tags/v0.6.2 -b v0.6.2
+git checkout tags/v0.7.0 -b v0.7.0
 ./build.sh cuda
 
@@ -249,7 +247,7 @@ If you want to make the above environment variables permanent, you could add the ```bash echo '# set env for onnxruntime' >> ~/.bashrc echo "export ONNXRUNTIME_DIR=${ONNXRUNTIME_DIR}" >> ~/.bashrc -echo 'export LD_LIBRARY_PATH=$ONNXRUNTIME_DIR/lib:$LD_LIBRARY_PATH' >> ~/.bashrc +echo "export LD_LIBRARY_PATH=$ONNXRUNTIME_DIR/lib:$LD_LIBRARY_PATH" >> ~/.bashrc source ~/.bashrc ``` @@ -284,6 +282,11 @@ export MMDEPLOY_DIR=$(pwd) OFF switch to build MMDeploy SDK python package + + MMDEPLOY_BUILD_SDK_JAVA_API + {ON, OFF} + switch to build MMDeploy SDK Java API + MMDEPLOY_BUILD_TEST {ON, OFF} @@ -348,7 +351,7 @@ If one of inference engines among ONNXRuntime, TensorRT, ncnn and libtorch is se cd ${MMDEPLOY_DIR} mkdir -p build && cd build cmake -DCMAKE_CXX_COMPILER=g++-7 -DMMDEPLOY_TARGET_BACKENDS=ort -DONNXRUNTIME_DIR=${ONNXRUNTIME_DIR} .. - make -j$(nproc) + make -j$(nproc) && make install ``` - **TensorRT** Custom Ops @@ -357,7 +360,7 @@ If one of inference engines among ONNXRuntime, TensorRT, ncnn and libtorch is se cd ${MMDEPLOY_DIR} mkdir -p build && cd build cmake -DCMAKE_CXX_COMPILER=g++-7 -DMMDEPLOY_TARGET_BACKENDS=trt -DTENSORRT_DIR=${TENSORRT_DIR} -DCUDNN_DIR=${CUDNN_DIR} .. - make -j$(nproc) + make -j$(nproc) && make install ``` - **ncnn** Custom Ops @@ -366,7 +369,7 @@ If one of inference engines among ONNXRuntime, TensorRT, ncnn and libtorch is se cd ${MMDEPLOY_DIR} mkdir -p build && cd build cmake -DCMAKE_CXX_COMPILER=g++-7 -DMMDEPLOY_TARGET_BACKENDS=ncnn -Dncnn_DIR=${NCNN_DIR}/build/install/lib/cmake/ncnn .. - make -j$(nproc) + make -j$(nproc) && make install ``` - **TorchScript** Custom Ops @@ -375,7 +378,7 @@ If one of inference engines among ONNXRuntime, TensorRT, ncnn and libtorch is se cd ${MMDEPLOY_DIR} mkdir -p build && cd build cmake -DCMAKE_CXX_COMPILER=g++-7 -DMMDEPLOY_TARGET_BACKENDS=torchscript -DTorch_DIR=${Torch_DIR} .. - make -j$(nproc) + make -j$(nproc) && make install ``` #### Install Model Converter @@ -391,7 +394,7 @@ pip install -e . To use optional dependencies, install them manually with `pip install -r requirements/optional.txt` or specify desired extras when calling `pip` (e.g. `pip install -e .[optional]`). Valid keys for the extras field are: `all`, `tests`, `build`, `optional`. -### Build SDK +### Build SDK and Demo MMDeploy provides two recipes as shown below for building SDK with ONNXRuntime and TensorRT as inference engines respectively. You can also activate other engines after the model. @@ -405,6 +408,7 @@ You can also activate other engines after the model. -DCMAKE_CXX_COMPILER=g++-7 \ -DMMDEPLOY_BUILD_SDK=ON \ -DMMDEPLOY_BUILD_SDK_PYTHON_API=ON \ + -DMMDEPLOY_BUILD_EXAMPLES=ON \ -DMMDEPLOY_TARGET_DEVICES=cpu \ -DMMDEPLOY_TARGET_BACKENDS=ort \ -DMMDEPLOY_CODEBASES=all \ @@ -422,6 +426,7 @@ You can also activate other engines after the model. -DCMAKE_CXX_COMPILER=g++-7 \ -DMMDEPLOY_BUILD_SDK=ON \ -DMMDEPLOY_BUILD_SDK_PYTHON_API=ON \ + -DMMDEPLOY_BUILD_EXAMPLES=ON \ -DMMDEPLOY_TARGET_DEVICES="cuda;cpu" \ -DMMDEPLOY_TARGET_BACKENDS=trt \ -DMMDEPLOY_CODEBASES=all \ @@ -431,12 +436,3 @@ You can also activate other engines after the model. make -j$(nproc) && make install ``` - -### Build Demo - -```Bash -cd ${MMDEPLOY_DIR}/build/install/example -mkdir -p build && cd build -cmake .. 
-DMMDeploy_DIR=${MMDEPLOY_DIR}/build/install/lib/cmake/MMDeploy -make -j$(nproc) -``` diff --git a/docs/en/01-how-to-build/snpe.md b/docs/en/01-how-to-build/snpe.md new file mode 100644 index 000000000..81aa5ca07 --- /dev/null +++ b/docs/en/01-how-to-build/snpe.md @@ -0,0 +1,194 @@ +# Build for SNPE + +It is quite simple to support snpe backend: Client/Server mode. + +this mode + +1. Can split `model convert` and `inference` environments; + +- Inference irrelevant matters are done on host +- We can get the real running results of gpu/npu instead of cpu simulation values + +2. Can cover cost-sensitive device, armv7/risc-v/mips chips meet product requirements, but often have limited support for Python; + +3. Can simplify mmdeploy installation steps. If you only want to convert snpe model and test, you don't need to compile the .whl package. + +## 1. Run inference server + +Download the prebuilt snpe inference server package, `adb push` it to the phone and execute. +Note that **the phone must have a qcom chip**. + +```bash +$ wget https://media.githubusercontent.com/media/tpoisonooo/mmdeploy_snpe_testdata/main/snpe-inference-server-1.59.tar.gz +... +$ sudo apt install adb +$ adb push snpe-inference-server-1.59.tar.gz /data/local/tmp/ + +# decompress and execute +$ adb shell +venus:/ $ cd /data/local/tmp +130|venus:/data/local/tmp $ tar xvf snpe-inference-server-1.59.tar.gz +... +130|venus:/data/local/tmp $ source export1.59.sh +130|venus:/data/local/tmp $ ./inference_server +... + Server listening on [::]:60000 +``` + +At this point the inference service should print all the ipv6 and ipv4 addresses of the device and listen on the port. + +tips: + +- If `adb devices` cannot find the device, may be: + - Some cheap cables can only charge and cannot transmit data + - or the "developer mode" of the phone is not turned on +- If you need to compile the binary by self, please refer to [NDK Cross Compiling snpe Inference Service](../appendix/cross_build_snpe_service.md) +- If a `segmentation fault` occurs when listening on a port, it may be because: + - The port number is already occupied, use another port + +## 2. Build mmdeploy + +### 1) Environment + +| Matters | Version | Remarks | +| ------- | ------------------ | ---------------------- | +| host OS | ubuntu18.04 x86_64 | snpe specified version | +| Python | **3.6.0** | snpe specified version | + +### 2) Installation + +Download [snpe-1.59 from the official website](https://developer.qualcomm.com/qfile/69652/snpe-1.59.0.zip) + +```bash +$ unzip snpe-1.59.0.zip +$ export SNPE_ROOT=${PWD}/snpe-1.59.0.3230 +$ cd /path/to/mmdeploy +$ export PYTHONPATH=${PWD}/service/snpe/client:${SNPE_ROOT}/lib/python:${PYTHONPATH} +$ export LD_LIBRARY_PATH=${SNPE_ROOT}/lib/x86_64-linux-clang:${LD_LIBRARY_PATH} +$ export PATH=${SNPE_ROOT}/bin/x86_64-linux-clang:${PATH} +$ python3 -m pip install -e . +``` + +## 3. Test the model + +Take Resnet-18 as an example. First refer to [documentation to install mmcls](https://github.com/open-mmlab/mmclassification) and use `tools/deploy.py` to convert the model. 
+ +```bash +$ export MODEL_CONFIG=/path/to/mmclassification/configs/resnet/resnet18_8xb16_cifar10.py +$ export MODEL_PATH=https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_b16x8_cifar10_20210528-bd6371c8.pth + +# Convert the model +$ cd /path/to/mmdeploy +$ python3 tools/deploy.py configs/mmcls/classification_snpe_static.py $MODEL_CONFIG $MODEL_PATH /path/to/test.png --work-dir resnet18 --device cpu --uri 10.0.0.1\:60000 --dump-info + +# Test +$ python3 tools/test.py configs/mmcls/classification_snpe_static.py $MODEL_CONFIG --model reset18/end2end.dlc --metrics accuracy precision f1_score recall --uri 10.0.0.1\:60000 +``` + +Note that `--uri` is required to specify the ip and port of the snpe inference service, ipv4 and ipv6 addresses can be used. + +## 4. Build SDK with Android SDK + +If you also need to compile mmdeploy SDK with Android NDK, please continue reading. + +### 1) Download NDK and OpenCV package and setup environment + +```bash +# Download android OCV +$ export OPENCV_VERSION=4.5.4 +$ wget https://github.com/opencv/opencv/releases/download/${OPENCV_VERSION}/opencv-${OPENCV_VERSION}-android-sdk.zip +$ unzip opencv-${OPENCV_VERSION}-android-sdk.zip + +$ export ANDROID_OCV_ROOT=`realpath opencv-${OPENCV_VERSION}-android-sdk` + +# Download ndk r23b +$ wget https://dl.google.com/android/repository/android-ndk-r23b-linux.zip +$ unzip android-ndk-r23b-linux.zip + +$ export ANDROID_NDK_ROOT=`realpath android-ndk-r23b` +``` + +### 2) Compile mmdeploy SDK + +```bash +$ cd /path/to/mmdeploy +$ mkdir build && cd build +$ cmake .. \ + -DMMDEPLOY_BUILD_SDK=ON -DMMDEPLOY_CODEBASES=all \ + -DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK_ROOT}/build/cmake/android.toolchain.cmake \ + -DMMDEPLOY_CODEBASES=all -DMMDEPLOY_TARGET_BACKENDS=snpe \ + -DANDROID_ABI=arm64-v8a -DANDROID_PLATFORM=android-30 \ + -DANDROID_STL=c++_static \ + -DOpenCV_DIR=${ANDROID_OCV_ROOT}/sdk/native/jni/abi-arm64-v8a \ + -DMMDEPLOY_SHARED_LIBS=ON + + $ make && make install +``` + +| Options | Description | +| ----------------------------- | ------------------------------------------------------------ | +| DMMDEPLOY_CODEBASES=all | Compile all algorithms' post-process | +| CMAKE_TOOLCHAIN_FILE | Load NDK parameters, mainly used to select compiler | +| MMDEPLOY_TARGET_BACKENDS=snpe | Inference backend | +| ANDROID_STL=c++\_static | In case of NDK environment can not find suitable c++ library | +| MMDEPLOY_SHARED_LIBS=ON | snpe does not provide static library | + +### 3) Compile demo + +```bash +$ cd /path/to/install/example +$ mkdir build && cd build + +$ cmake .. \ + -DMMDEPLOY_BUILD_SDK=ON -DMMDEPLOY_CODEBASES=all \ + -DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK_ROOT}/build/cmake/android.toolchain.cmake \ + -DMMDEPLOY_CODEBASES=all -DMMDEPLOY_TARGET_BACKENDS=snpe \ + -DANDROID_ABI=arm64-v8a -DANDROID_PLATFORM=android-30 \ + -DANDROID_STL=c++_static \ + -DOpenCV_DIR=${ANDROID_OCV_ROOT}/sdk/native/jni/abi-arm64-v8a \ + -DMMDEPLOY_SHARED_LIBS=ON \ + -DMMDeploy_DIR=${PWD}/../../lib/cmake/MMDeploy + +$ make +$ tree -L 1 +... +├── image_restorer +├── image_segmentation +├── object_detection +├── ocr +├── pose_detection +└── rotated_object_detection +``` + +Just `adb push` the binary file and .so to the device and execute. + +### 4) Run the demo + +First make sure that`--dump-info`is used during convert model, so that the `resnet18` directory has the files required by the SDK such as `pipeline.json`. + +`adb push` the model directory, executable file and .so to the device. 
+
+```bash
+$ cd /path/to/mmdeploy
+$ adb push resnet18 /data/local/tmp
+$ adb push tests/data/tiger.jpeg /data/local/tmp/resnet18/
+
+$ cd /path/to/install/
+$ adb push lib /data/local/tmp
+
+$ cd /path/to/install/example/build
+$ adb push image_classification /data/local/tmp/resnet18/
+```
+
+Set up the environment variable and execute the sample.
+
+```bash
+$ adb push /path/to/mmcls/demo/demo.JPEG /data/local/tmp
+$ adb shell
+venus:/ $ cd /data/local/tmp/resnet18
+venus:/data/local/tmp/resnet18 $ export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/data/local/tmp/lib
+
+venus:/data/local/tmp/resnet18 $ ./image_classification cpu ./ tiger.jpeg
+..
+label: 3, score: 0.3214
+```
diff --git a/docs/en/01-how-to-build/windows.md b/docs/en/01-how-to-build/windows.md
index e41cc5a6d..5154fc328 100644
--- a/docs/en/01-how-to-build/windows.md
+++ b/docs/en/01-how-to-build/windows.md
@@ -12,14 +12,11 @@
   - [Build Model Converter](#build-model-converter)
     - [Build Custom Ops](#build-custom-ops)
     - [Install Model Converter](#install-model-converter)
-  - [Build SDK](#build-sdk)
-  - [Build Demo](#build-demo)
+  - [Build SDK and Demos](#build-sdk-and-demos)
   - [Note](#note)
 
 ______________________________________________________________________
 
-Currently, MMDeploy only provides build-from-source method for windows platform. Prebuilt package will be released in the future.
-
 ## Build From Source
 
 All the commands listed in the following chapters are verified on **Windows 10**.
@@ -97,16 +94,15 @@ You can skip this chapter if you are only interested in the model converter.
 pplcv A high-performance image processing library of openPPL.
- It is optional which only be needed if cuda platform is required. - Now, MMDeploy supports v0.6.2 and has to use git clone to download it.
+ It is optional, and only needed when the cuda platform is required.

 git clone https://github.com/openppl-public/ppl.cv.git
 cd ppl.cv
-git checkout tags/v0.6.2 -b v0.6.2
+git checkout tags/v0.7.0 -b v0.7.0
 $env:PPLCV_DIR = "$pwd"
 mkdir pplcv-build
 cd pplcv-build
-cmake .. -G "Visual Studio 16 2019" -T v142 -A x64 -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=install -DHPCC_USE_CUDA=ON -DHPCC_MSVC_MD=ON
+cmake .. -G "Visual Studio 16 2019" -T v142 -A x64 -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=install -DHPCC_USE_CUDA=ON -DPPLCV_USE_MSVC_STATIC_RUNTIME=OFF
 cmake --build . --config Release -- /m
 cmake --install . --config Release
 cd ../..
@@ -286,6 +282,7 @@ mkdir build -ErrorAction SilentlyContinue
 cd build
 cmake .. -G "Visual Studio 16 2019" -A x64 -T v142 -DMMDEPLOY_TARGET_BACKENDS="ort" -DONNXRUNTIME_DIR="$env:ONNXRUNTIME_DIR"
 cmake --build . --config Release -- /m
+cmake --install . --config Release
 ```
 
 - **TensorRT** Custom Ops
@@ -295,6 +292,7 @@ mkdir build -ErrorAction SilentlyContinue
 cd build
 cmake .. -G "Visual Studio 16 2019" -A x64 -T v142 -DMMDEPLOY_TARGET_BACKENDS="trt" -DTENSORRT_DIR="$env:TENSORRT_DIR" -DCUDNN_DIR="$env:CUDNN_DIR"
 cmake --build . --config Release -- /m
+cmake --install . --config Release
 ```
 
 - **ncnn** Custom Ops
@@ -314,7 +312,7 @@ pip install -e .
   To use optional dependencies, install them manually with `pip install -r requirements/optional.txt` or specify desired extras when calling `pip` (e.g. `pip install -e .[optional]`).
   Valid keys for the extras field are: `all`, `tests`, `build`, `optional`.
 
-#### Build SDK
+#### Build SDK and Demos
 
 MMDeploy provides two recipes, as shown below, for building the SDK with ONNXRuntime and TensorRT as inference engines respectively.
 You can also activate other engines in a similar way.
@@ -327,6 +325,8 @@ You can also activate other engines after the model.
   cd build
   cmake .. -G "Visual Studio 16 2019" -A x64 -T v142 `
       -DMMDEPLOY_BUILD_SDK=ON `
+      -DMMDEPLOY_BUILD_EXAMPLES=ON `
+      -DMMDEPLOY_BUILD_SDK_PYTHON_API=ON `
       -DMMDEPLOY_TARGET_DEVICES="cpu" `
       -DMMDEPLOY_TARGET_BACKENDS="ort" `
       -DMMDEPLOY_CODEBASES="all" `
@@ -344,6 +344,8 @@ You can also activate other engines after the model.
   cd build
   cmake .. -G "Visual Studio 16 2019" -A x64 -T v142 `
     -DMMDEPLOY_BUILD_SDK=ON `
+    -DMMDEPLOY_BUILD_EXAMPLES=ON `
+    -DMMDEPLOY_BUILD_SDK_PYTHON_API=ON `
     -DMMDEPLOY_TARGET_DEVICES="cuda" `
     -DMMDEPLOY_TARGET_BACKENDS="trt" `
     -DMMDEPLOY_CODEBASES="all" `
@@ -355,20 +357,6 @@ You can also activate other engines after the model.
   cmake --install . --config Release
   ```
 
-#### Build Demo
-
-```PowerShell
-cd $env:MMDEPLOY_DIR\build\install\example
-mkdir build -ErrorAction SilentlyContinue
-cd build
-cmake .. -G "Visual Studio 16 2019" -A x64 -T v142 `
-  -DMMDeploy_DIR="$env:MMDEPLOY_DIR/build/install/lib/cmake/MMDeploy"
-
-cmake --build . --config Release -- /m
-
-$env:path = "$env:MMDEPLOY_DIR/build/install/bin;" + $env:path
-```
-
 ### Note
 
 1. Release / Debug libraries cannot be mixed. If MMDeploy is built in Release mode, all its dependent third-party libraries have to be built in Release mode too, and vice versa.
diff --git a/docs/en/02-how-to-run/how_to_evaluate_a_model.md b/docs/en/02-how-to-run/how_to_evaluate_a_model.md
index afe91c58f..b6cea2429 100644
--- a/docs/en/02-how-to-run/how_to_evaluate_a_model.md
+++ b/docs/en/02-how-to-run/how_to_evaluate_a_model.md
@@ -24,6 +24,7 @@ ${MODEL_CFG} \
 [--cfg-options ${CFG_OPTIONS}] \
 [--metric-options ${METRIC_OPTIONS}]
 [--log2file work_dirs/output.txt]
+[--batch-size ${BATCH_SIZE}]
 ```
 
 ## Description of all arguments
@@ -42,6 +43,7 @@ ${MODEL_CFG} \
 - `--metric-options`: Custom options for evaluation. The key-value pairs in xxx=yyy
   format will be kwargs for the dataset.evaluate() function.
 - `--log2file`: log evaluation results (and speed) to file.
+- `--batch-size`: the batch size for inference, which will override `samples_per_gpu` in the data config. Defaults to `1`. Note that not all models support `batch_size>1`.
 
 \* Other arguments in `tools/test.py` are used for speed testing. They are not related to evaluation.
 
diff --git a/docs/en/02-how-to-run/prebuilt_package_windows.md b/docs/en/02-how-to-run/prebuilt_package_windows.md
new file mode 100644
index 000000000..30ad795a1
--- /dev/null
+++ b/docs/en/02-how-to-run/prebuilt_package_windows.md
@@ -0,0 +1,384 @@
+# How to use prebuilt package on Windows10
+
+- [How to use prebuilt package on Windows10](#how-to-use-prebuilt-package-on-windows10)
+  - [Prerequisite](#prerequisite)
+    - [ONNX Runtime](#onnx-runtime)
+    - [TensorRT](#tensorrt)
+  - [Model Convert](#model-convert)
+    - [ONNX Runtime Example](#onnx-runtime-example)
+    - [TensorRT Example](#tensorrt-example)
+  - [Model Inference](#model-inference)
+    - [Backend Inference](#backend-inference)
+      - [ONNXRuntime](#onnxruntime)
+      - [TensorRT](#tensorrt-1)
+    - [Python SDK](#python-sdk)
+      - [ONNXRuntime](#onnxruntime-1)
+      - [TensorRT](#tensorrt-2)
+    - [C SDK](#c-sdk)
+      - [ONNXRuntime](#onnxruntime-2)
+      - [TensorRT](#tensorrt-3)
+  - [Troubleshooting](#troubleshooting)
+
+______________________________________________________________________
+
+This tutorial takes `mmdeploy-0.7.0-windows-amd64-onnxruntime1.8.1.zip` and `mmdeploy-0.7.0-windows-amd64-cuda11.1-tensorrt8.2.3.0.zip` as examples to show how to use the prebuilt packages.
+
+The directory structure of the prebuilt package is as follows: the `dist` folder contains the model converter, and the `sdk` folder contains everything related to model inference.
+
+```
+.
+|-- dist
+`-- sdk
+    |-- bin
+    |-- example
+    |-- include
+    |-- lib
+    `-- python
+```
+
+## Prerequisite
+
+In order to use the prebuilt package, you need to install some third-party dependency libraries.
+
+1. Follow the [get_started](../get_started.md) documentation to create a virtual python environment and install pytorch, torchvision and mmcv-full. To use the C interface of the SDK, you also need to install [vs2019+](https://visualstudio.microsoft.com/) and [OpenCV](https://github.com/opencv/opencv/releases).
+
+   :point_right: It is recommended to use `pip` instead of `conda` to install pytorch and torchvision
+
+2. Clone the mmdeploy repository
+
+   ```bash
+   git clone https://github.com/open-mmlab/mmdeploy.git
+   ```
+
+   :point_right: The main purpose here is to use the configs, so there is no need to compile `mmdeploy`.
+
+3. Install mmclassification
+
+   ```bash
+   git clone https://github.com/open-mmlab/mmclassification.git
+   cd mmclassification
+   pip install -e .
+   ```
+
+4. Prepare a PyTorch model as our example
+
+   Download the pth [resnet18_8xb32_in1k_20210831-fbbb1da6.pth](https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_8xb32_in1k_20210831-fbbb1da6.pth). The corresponding config of the model is [resnet18_8xb32_in1k.py](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet18_8xb32_in1k.py)
+
+After the above work is done, the structure of the current working directory should be:
+
+```
+.
+|-- mmclassification
+|-- mmdeploy
+|-- resnet18_8xb32_in1k_20210831-fbbb1da6.pth
+```
+
+### ONNX Runtime
+
+In order to use the `ONNX Runtime` backend, you should also complete the following steps.
+
+5. Install `mmdeploy` (Model Converter) and `mmdeploy_python` (SDK Python API).
+
+   ```bash
+   # download mmdeploy-0.7.0-windows-amd64-onnxruntime1.8.1.zip
+   pip install .\mmdeploy-0.7.0-windows-amd64-onnxruntime1.8.1\dist\mmdeploy-0.7.0-py38-none-win_amd64.whl
+   pip install .\mmdeploy-0.7.0-windows-amd64-onnxruntime1.8.1\sdk\python\mmdeploy_python-0.7.0-cp38-none-win_amd64.whl
+   ```
+
+   :point_right: If you have installed it before, please uninstall it first.
+
+6. Install onnxruntime package
+
+   ```
+   pip install onnxruntime==1.8.1
+   ```
+
+7. Download [`onnxruntime`](https://github.com/microsoft/onnxruntime/releases/tag/v1.8.1) and set up the environment variable.
+
+   As shown in the figure, add the lib directory of onnxruntime to the `PATH`.
+
+   ![sys-path](https://user-images.githubusercontent.com/16019484/181463801-1d7814a8-b256-46e9-86f2-c08de0bc150b.png)
+   :exclamation: Restart powershell to make the environment variable settings take effect. You can check whether the settings are in effect with `echo $env:PATH`.
+
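+Optionally, you can sanity-check the installation from python. This snippet is an extra check, not part of the official steps; `get_available_providers` is a standard onnxruntime API.
+
+```python
+import onnxruntime
+
+# should print 1.8.1, the version the prebuilt package targets
+print(onnxruntime.__version__)
+# the CPU package should list 'CPUExecutionProvider'
+print(onnxruntime.get_available_providers())
+```
+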
+### TensorRT
+
+In order to use the `TensorRT` backend, you should also complete the following steps.
+
+5. Install `mmdeploy` (Model Converter) and `mmdeploy_python` (SDK Python API).
+
+   ```bash
+   # download mmdeploy-0.7.0-windows-amd64-cuda11.1-tensorrt8.2.3.0.zip
+   pip install .\mmdeploy-0.7.0-windows-amd64-cuda11.1-tensorrt8.2.3.0\dist\mmdeploy-0.7.0-py38-none-win_amd64.whl
+   pip install .\mmdeploy-0.7.0-windows-amd64-cuda11.1-tensorrt8.2.3.0\sdk\python\mmdeploy_python-0.7.0-cp38-none-win_amd64.whl
+   ```
+
+   :point_right: If you have installed it before, please uninstall it first.
+
+6. Install the TensorRT-related packages and set the environment variables
+
+   - CUDA Toolkit 11.1
+   - TensorRT 8.2.3.0
+   - cuDNN 8.2.1.0
+
+   Add the runtime libraries of TensorRT and cuDNN to the `PATH`. You can refer to the PATH setup for onnxruntime. Don't forget to install the TensorRT python package.
+
+   :exclamation: Restart powershell to make the environment variable settings take effect. You can check whether the settings are in effect with `echo $env:PATH`.
+
+   :exclamation: It is recommended to add only one version of the TensorRT/cuDNN runtime libraries to the `PATH`. It is better not to copy the runtime libraries of TensorRT/cuDNN to the cuda directory in `C:\`.
+
+7. Install pycuda by `pip install pycuda`
+
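+Optionally, you can verify from python that the TensorRT and pycuda packages are importable. This snippet is an extra check, not part of the official steps:
+
+```python
+import pycuda.driver as cuda
+import tensorrt
+
+# should print 8.2.3.0 for the package used in this tutorial
+print(tensorrt.__version__)
+cuda.init()
+# the CUDA device that will build and run the engine
+print(cuda.Device(0).name())
+```
+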
+## Model Convert
+
+### ONNX Runtime Example
+
+The following describes how to use the prebuilt package to do model conversion based on the previously downloaded pth.
+
+After the preparation work, the structure of the current working directory should be:
+
+```
+..
+|-- mmdeploy-0.7.0-windows-amd64-onnxruntime1.8.1
+|-- mmclassification
+|-- mmdeploy
+`-- resnet18_8xb32_in1k_20210831-fbbb1da6.pth
+```
+
+Model conversion can be performed as below:
+
+```python
+from mmdeploy.apis import torch2onnx
+from mmdeploy.backend.sdk.export_info import export2SDK
+
+img = 'mmclassification/demo/demo.JPEG'
+work_dir = 'work_dir/onnx/resnet'
+save_file = 'end2end.onnx'
+deploy_cfg = 'mmdeploy/configs/mmcls/classification_onnxruntime_dynamic.py'
+model_cfg = 'mmclassification/configs/resnet/resnet18_8xb32_in1k.py'
+model_checkpoint = 'resnet18_8xb32_in1k_20210831-fbbb1da6.pth'
+device = 'cpu'
+
+# 1. convert model to onnx
+torch2onnx(img, work_dir, save_file, deploy_cfg, model_cfg,
+  model_checkpoint, device)
+
+# 2. extract pipeline info for sdk use (dump-info)
+export2SDK(deploy_cfg, model_cfg, work_dir, pth=model_checkpoint)
+```
+
+The structure of the converted model directory:
+
+```bash
+.\work_dir\
+`-- onnx
+    `-- resnet
+        |-- deploy.json
+        |-- detail.json
+        |-- end2end.onnx
+        `-- pipeline.json
+```
+
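+As an optional sanity check, not part of the original steps, you can verify the exported file with the `onnx` package:
+
+```python
+import onnx
+
+model = onnx.load('work_dir/onnx/resnet/end2end.onnx')
+# raises an exception if the exported graph is malformed
+onnx.checker.check_model(model)
+```
+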
+### TensorRT Example
+
+The following describes how to use the prebuilt package to do model conversion based on the previously downloaded pth.
+
+After installing the mmdeploy-tensorrt prebuilt package, the structure of the current working directory should be:
+
+```
+..
+|-- mmdeploy-0.7.0-windows-amd64-cuda11.1-tensorrt8.2.3.0
+|-- mmclassification
+|-- mmdeploy
+`-- resnet18_8xb32_in1k_20210831-fbbb1da6.pth
+```
+
+Model conversion can be performed as below:
+
+```python
+from mmdeploy.apis import torch2onnx
+from mmdeploy.apis.tensorrt import onnx2tensorrt
+from mmdeploy.backend.sdk.export_info import export2SDK
+import os
+
+img = 'mmclassification/demo/demo.JPEG'
+work_dir = 'work_dir/trt/resnet'
+save_file = 'end2end.onnx'
+deploy_cfg = 'mmdeploy/configs/mmcls/classification_tensorrt_static-224x224.py'
+model_cfg = 'mmclassification/configs/resnet/resnet18_8xb32_in1k.py'
+model_checkpoint = 'resnet18_8xb32_in1k_20210831-fbbb1da6.pth'
+device = 'cpu'
+
+# 1. convert model to IR(onnx)
+torch2onnx(img, work_dir, save_file, deploy_cfg, model_cfg,
+  model_checkpoint, device)
+
+# 2. convert IR to tensorrt
+onnx_model = os.path.join(work_dir, save_file)
+save_file = 'end2end.engine'
+model_id = 0
+device = 'cuda'
+onnx2tensorrt(work_dir, save_file, model_id, deploy_cfg, onnx_model, device)
+
+# 3. extract pipeline info for sdk use (dump-info)
+export2SDK(deploy_cfg, model_cfg, work_dir, pth=model_checkpoint)
+```
+
+The structure of the converted model directory:
+
+```
+.\work_dir\
+`-- trt
+    `-- resnet
+        |-- deploy.json
+        |-- detail.json
+        |-- end2end.engine
+        |-- end2end.onnx
+        `-- pipeline.json
+```
+
+## Model Inference
+
+You can obtain two model folders after model conversion.
+
+```
+.\work_dir\onnx\resnet
+.\work_dir\trt\resnet
+```
+
+The structure of the current working directory:
+
+```
+.
+|-- mmdeploy-0.7.0-windows-amd64-cuda11.1-tensorrt8.2.3.0
+|-- mmdeploy-0.7.0-windows-amd64-onnxruntime1.8.1
+|-- mmclassification
+|-- mmdeploy
+|-- resnet18_8xb32_in1k_20210831-fbbb1da6.pth
+`-- work_dir
+```
+
+### Backend Inference
+
+:exclamation: It should be emphasized that `inference_model` is not for deployment; it hides the differences between the backend inference APIs (`TensorRT`, `ONNX Runtime`, etc.). The main purpose of this API is to check whether the converted model can run inference normally.
+
+#### ONNXRuntime
+
+```python
+from mmdeploy.apis import inference_model
+
+model_cfg = 'mmclassification/configs/resnet/resnet18_8xb32_in1k.py'
+deploy_cfg = 'mmdeploy/configs/mmcls/classification_onnxruntime_dynamic.py'
+backend_files = ['work_dir/onnx/resnet/end2end.onnx']
+img = 'mmclassification/demo/demo.JPEG'
+device = 'cpu'
+result = inference_model(model_cfg, deploy_cfg, backend_files, img, device)
+```
+
+#### TensorRT
+
+```python
+from mmdeploy.apis import inference_model
+
+model_cfg = 'mmclassification/configs/resnet/resnet18_8xb32_in1k.py'
+deploy_cfg = 'mmdeploy/configs/mmcls/classification_tensorrt_static-224x224.py'
+backend_files = ['work_dir/trt/resnet/end2end.engine']
+img = 'mmclassification/demo/demo.JPEG'
+device = 'cuda'
+result = inference_model(model_cfg, deploy_cfg, backend_files, img, device)
+```
+
+### Python SDK
+
+The following describes how to use the SDK's Python API for inference.
+
+#### ONNXRuntime
+
+```bash
+python .\mmdeploy\demo\python\image_classification.py cpu .\work_dir\onnx\resnet\ .\mmclassification\demo\demo.JPEG
+```
+
+#### TensorRT
+
+```
+ python .\mmdeploy\demo\python\image_classification.py cuda .\work_dir\trt\resnet\ .\mmclassification\demo\demo.JPEG
+```
+
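+The demo script above is a thin wrapper around the `mmdeploy_python` module. The following sketch shows roughly what it does with the `Classifier` API; it assumes the constructor signature used by the 0.7.0 demos, which may differ in other versions.
+
+```python
+import cv2
+from mmdeploy_python import Classifier
+
+# directory produced by model conversion (contains pipeline.json, etc.)
+classifier = Classifier('work_dir/onnx/resnet', 'cpu', 0)
+img = cv2.imread('mmclassification/demo/demo.JPEG')
+# returns (label_id, score) pairs for the predicted classes
+for label_id, score in classifier(img):
+    print(label_id, score)
+```
+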
+### C SDK
+
+The following describes how to use the SDK's C API for inference.
+
+#### ONNXRuntime
+
+1. Build examples
+
+   Under the `mmdeploy-0.7.0-windows-amd64-onnxruntime1.8.1\sdk\example` directory:
+
+   ```
+   # Paths should be modified according to the actual locations
+   mkdir build
+   cd build
+   cmake .. -A x64 -T v142 `
+     -DOpenCV_DIR=C:\Deps\opencv\build\x64\vc15\lib `
+     -DMMDeploy_DIR=C:\workspace\mmdeploy-0.7.0-windows-amd64-onnxruntime1.8.1\sdk\lib\cmake\MMDeploy `
+     -DONNXRUNTIME_DIR=C:\Deps\onnxruntime\onnxruntime-win-gpu-x64-1.8.1
+
+   cmake --build . --config Release
+   ```
+
+2. Add environment variables or copy the runtime libraries to the same directory as the exe
+
+   :point_right: The purpose is to let the exe find the relevant dlls
+
+   If you choose to add environment variables, add the runtime library path of `mmdeploy` (`mmdeploy-0.7.0-windows-amd64-onnxruntime1.8.1\sdk\bin`) to the `PATH`.
+
+   If you choose to copy the dynamic libraries, copy the dlls in the bin directory to the directory of the just-built exe (build/Release).
+
+3. Inference:
+
+   It is recommended to use `CMD` here.
+
+   Under the `mmdeploy-0.7.0-windows-amd64-onnxruntime1.8.1\sdk\example\build\Release` directory:
+
+   ```
+   .\image_classification.exe cpu C:\workspace\work_dir\onnx\resnet\ C:\workspace\mmclassification\demo\demo.JPEG
+   ```
+
+#### TensorRT
+
+1. Build examples
+
+   Under the `mmdeploy-0.7.0-windows-amd64-cuda11.1-tensorrt8.2.3.0\sdk\example` directory:
+
+   ```
+   # Paths should be modified according to the actual locations
+   mkdir build
+   cd build
+   cmake .. -A x64 -T v142 `
+     -DOpenCV_DIR=C:\Deps\opencv\build\x64\vc15\lib `
+     -DMMDeploy_DIR=C:\workspace\mmdeploy-0.7.0-windows-amd64-cuda11.1-tensorrt8.2.3.0\sdk\lib\cmake\MMDeploy `
+     -DTENSORRT_DIR=C:\Deps\tensorrt\TensorRT-8.2.3.0 `
+     -DCUDNN_DIR=C:\Deps\cudnn\8.2.1
+   cmake --build . --config Release
+   ```
+
+2. Add environment variables or copy the runtime libraries to the same directory as the exe
+
+   :point_right: The purpose is to let the exe find the relevant dlls
+
+   If you choose to add environment variables, add the runtime library path of `mmdeploy` (`mmdeploy-0.7.0-windows-amd64-cuda11.1-tensorrt8.2.3.0\sdk\bin`) to the `PATH`.
+
+   If you choose to copy the dynamic libraries, copy the dlls in the bin directory to the directory of the just-built exe (build/Release).
+
+3. Inference
+
+   It is recommended to use `CMD` here.
+
+   Under the `mmdeploy-0.7.0-windows-amd64-cuda11.1-tensorrt8.2.3.0\sdk\example\build\Release` directory:
+
+   ```
+   .\image_classification.exe cuda C:\workspace\work_dir\trt\resnet C:\workspace\mmclassification\demo\demo.JPEG
+   ```
+
+## Troubleshooting
+
+If you encounter problems, please refer to the [FAQ](../faq.md).
diff --git a/docs/en/02-how-to-run/write_config.md b/docs/en/02-how-to-run/write_config.md
index 8eef707d2..92297d4de 100644
--- a/docs/en/02-how-to-run/write_config.md
+++ b/docs/en/02-how-to-run/write_config.md
@@ -1,4 +1,4 @@
-## How to write config
+# How to write config
 
 This tutorial describes how to write a config for model conversion and deployment. A deployment config includes `onnx config`, `codebase config` and `backend config`.
 
@@ -24,11 +24,11 @@ This tutorial describes how to write a config for model conversion and deploymen
 
 
 
-### 1. How to write onnx config
+## 1. How to write onnx config
 
 The onnx config describes how to export a model from pytorch to onnx.
 
-#### Description of onnx config arguments
+### Description of onnx config arguments
 
 - `type`: Type of config dict. Default is `onnx`.
 - `export_params`: If specified, all parameters will be exported. Set this to False if you want to export an untrained model.
@@ -39,7 +39,7 @@ Onnx config to describe how to export a model from pytorch to onnx.
 - `output_names`: Names to assign to the output nodes of the graph.
 - `input_shape`: The height and width of input tensor to the model.
 
-##### Example
+### Example
 
 ```python
 onnx_config = dict(
@@ -53,13 +53,13 @@ onnx_config = dict(
     input_shape=None)
 ```
 
-#### If you need to use dynamic axes
+### If you need to use dynamic axes
 
 If dynamic shapes of the inputs and outputs are required, you need to add the dynamic_axes dict to the onnx config.
 
 - `dynamic_axes`: Describe the dimensional information about input and output.
 
-##### Example
+#### Example
 
 ```python
     dynamic_axes={
@@ -79,28 +79,28 @@ If the dynamic shape of inputs and outputs is required, you need to add dynamic_
     }
 ```
 
-### 2. How to write codebase config
+## 2. How to write codebase config
 
 The codebase config part contains information like the codebase type and task type.
 
-#### Description of codebase config arguments
+### Description of codebase config arguments
 
 - `type`: Model's codebase, including `mmcls`, `mmdet`, `mmseg`, `mmocr`, `mmedit`.
 - `task`: Model's task type, referring to [List of tasks in all codebases](#list-of-tasks-in-all-codebases).
 
-##### Example
+#### Example
 
 ```python
 codebase_config = dict(type='mmcls', task='Classification')
 ```
 
-### 3. How to write backend config
+## 3. How to write backend config
 
 The backend config is mainly used to specify the backend on which the model runs and to provide the information needed when the model runs on the backend, referring to [ONNX Runtime](../05-supported-backends/onnxruntime.md), [TensorRT](../05-supported-backends/tensorrt.md), [ncnn](../05-supported-backends/ncnn.md), [PPLNN](../05-supported-backends/pplnn.md).
 
 - `type`: Model's backend, including `onnxruntime`, `ncnn`, `pplnn`, `tensorrt`, `openvino`.
 
-#### Example
+### Example
 
 ```python
 backend_config = dict(
@@ -117,7 +117,7 @@ backend_config = dict(
     ])
 ```
 
-### 4. A complete example of mmcls on TensorRT
+## 4. A complete example of mmcls on TensorRT
 
 Here we provide a complete deployment config for mmcls on TensorRT.
 
@@ -159,7 +159,7 @@ onnx_config = dict(
     input_shape=[224, 224])
 ```
 
-### 5. The name rules of our deployment config
+## 5. The name rules of our deployment config
 
 There is a specific naming convention for the filename of deployment config files.
 
@@ -171,20 +171,12 @@ There is a specific naming convention for the filename of deployment config file
 - `backend name`: Backend's name. Note if you use the quantization function, you need to indicate the quantization type. Just like `tensorrt-int8`.
 - `dynamic or static`: Dynamic or static export. Note if the backend needs explicit shape information, you need to add a description of input size with `height x width` format. Just like `dynamic-512x1024-2048x2048`, it means that the min input shape is `512x1024` and the max input shape is `2048x2048`.
 
-#### Example
+### Example
 
 ```bash
 detection_tensorrt-int8_dynamic-320x320-1344x1344.py
 ```
 
-### 6. How to write model config
+## 6. How to write model config
 
 Write the model config file according to the model's codebase. The model config file is used to initialize the model, referring to [MMClassification](https://github.com/open-mmlab/mmclassification/blob/master/docs/tutorials/config.md), [MMDetection](https://github.com/open-mmlab/mmdetection/blob/master/docs_zh-CN/tutorials/config.md), [MMSegmentation](https://github.com/open-mmlab/mmsegmentation/blob/master/docs_zh-CN/tutorials/config.md), [MMOCR](https://github.com/open-mmlab/mmocr/tree/main/configs), [MMEditing](https://github.com/open-mmlab/mmediting/blob/master/docs_zh-CN/config.md).
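+
+For instance, a stripped-down mmcls-style model config looks like the following. This is only an illustrative sketch; real configs in each codebase carry many more fields, such as data pipelines and training schedules.
+
+```python
+# minimal mmcls-style model config (illustrative sketch)
+model = dict(
+    type='ImageClassifier',
+    backbone=dict(type='ResNet', depth=18, num_stages=4, out_indices=(3, )),
+    neck=dict(type='GlobalAveragePooling'),
+    head=dict(
+        type='LinearClsHead',
+        num_classes=1000,
+        in_channels=512,
+        loss=dict(type='CrossEntropyLoss', loss_weight=1.0)))
+```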
-
-### 7. Reminder
-
-None
-
-### 8. FAQs
-
-None
diff --git a/docs/en/03-benchmark/benchmark.md b/docs/en/03-benchmark/benchmark.md
index 14580c8c1..8b44c32ee 100644
--- a/docs/en/03-benchmark/benchmark.md
+++ b/docs/en/03-benchmark/benchmark.md
@@ -559,6 +559,27 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](../
     <td align="center">89.85</td>
     <td align="center">90.41</td>
   </tr>
+  <tr>
+    <td align="center" rowspan="2">Vision Transformer</td>
+    <td align="center">top-1</td>
+    <td align="center">85.43</td>
+    <td align="center">85.43</td>
+    <td align="center">-</td>
+    <td align="center">85.43</td>
+    <td align="center">85.42</td>
+    <td align="center">-</td>
+    <td align="center">-</td>
+  </tr>
+  <tr>
+    <td align="center">top-5</td>
+    <td align="center">97.77</td>
+    <td align="center">97.77</td>
+    <td align="center">-</td>
+    <td align="center">97.77</td>
+    <td align="center">97.76</td>
+    <td align="center">-</td>
+    <td align="center">-</td>
+  </tr>
 
 
 
@@ -754,6 +775,29 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](../
     <td align="center">-</td>
     <td align="center">-</td>
   </tr>
+  <tr>
+    <td align="center" rowspan="2">Swin-Transformer</td>
+    <td align="center" rowspan="2">Instance Segmentation</td>
+    <td align="center" rowspan="2">COCO2017</td>
+    <td align="center">box AP</td>
+    <td align="center">42.7</td>
+    <td align="center">-</td>
+    <td align="center">42.7</td>
+    <td align="center">42.5</td>
+    <td align="center">37.7</td>
+    <td align="center">-</td>
+    <td align="center">-</td>
+  </tr>
+  <tr>
+    <td align="center">mask AP</td>
+    <td align="center">39.3</td>
+    <td align="center">-</td>
+    <td align="center">39.3</td>
+    <td align="center">39.3</td>
+    <td align="center">35.4</td>
+    <td align="center">-</td>
+    <td align="center">-</td>
+  </tr>
@@ -1441,6 +1485,18 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](../
     <td align="center">-</td>
     <td align="center">-</td>
   </tr>
+  <tr>
+    <td align="center">Segmenter</td>
+    <td align="center">ADE20K</td>
+    <td align="center">mIoU</td>
+    <td align="center">44.32</td>
+    <td align="center">44.29</td>
+    <td align="center">44.29</td>
+    <td align="center">44.29</td>
+    <td align="center">43.34</td>
+    <td align="center">43.35</td>
+    <td align="center">-</td>
+  </tr>
@@ -1580,8 +1636,32 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](../
     <td align="center">mAP</td>
     <td align="center">0.756</td>
     <td align="center">0.756</td>
+    <td align="center">0.758</td>
+    <td align="center">0.730</td>
     <td align="center">-</td>
     <td align="center">-</td>
   </tr>
+  <tr>
+    <td align="center">GlidingVertex</td>
+    <td align="center">Rotated Detection</td>
+    <td align="center">DOTA-v1.0</td>
+    <td align="center">mAP</td>
+    <td align="center">0.732</td>
+    <td align="center">-</td>
+    <td align="center">0.733</td>
+    <td align="center">0.731</td>
+    <td align="center">-</td>
+    <td align="center">-</td>
+  </tr>
+  <tr>
+    <td align="center">RoI Transformer</td>
+    <td align="center">Rotated Detection</td>
+    <td align="center">DOTA-v1.0</td>
+    <td align="center">mAP</td>
+    <td align="center">0.761</td>
+    <td align="center">-</td>
+    <td align="center">0.758</td>
+    <td align="center">-</td>
+    <td align="center">-</td>
+    <td align="center">-</td>
+  </tr>
diff --git a/docs/en/03-benchmark/benchmark_edge.md b/docs/en/03-benchmark/benchmark_edge.md
new file mode 100644
index 000000000..5f9ec0782
--- /dev/null
+++ b/docs/en/03-benchmark/benchmark_edge.md
@@ -0,0 +1,57 @@
+# Test on embedded device
+
+Here are the test results on our edge devices. You can directly obtain the results for your own environment with [model profiling](../02-how-to-run/how_to_evaluate_a_model.md).
+
+## Software and hardware environment
+
+- host OS ubuntu 18.04
+- backend SNPE-1.59
+- device Mi11 (qcom 888)
+
+## mmcls
+
+| model | dataset | spatial | fp32 top-1 (%) | snpe gpu hybrid fp32 top-1 (%) | latency (ms) |
+| :------------------------------------------------------------------------------------------------------------------------------: | :---------: | :-----: | :------------: | :----------------------------: | :----------: |
+| [ShuffleNetV2](https://github.com/open-mmlab/mmclassification/blob/master/configs/shufflenet_v2/shufflenet-v2-1x_16xb64_in1k.py) | ImageNet-1k | 224x224 | 69.55 | 69.83\* | 20±7 |
+| [MobilenetV2](https://github.com/open-mmlab/mmclassification/blob/master/configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py) | ImageNet-1k | 224x224 | 71.86 | 72.14\* | 15±6 |
+
+tips:
+
+1. The ImageNet-1k dataset is too large to test in full, so only part of the dataset is used (8000/50000)
+2. Device heating downgrades the clock frequency, so the latency actually fluctuates. The values here are the stable ones after running for a period of time, which are closer to real-world usage.
+
+## mmocr detection
+
+| model | dataset | spatial | fp32 hmean | snpe gpu hybrid hmean | latency(ms) |
+| :---------------------------------------------------------------------------------------------------------------: | :-------: | :------: | :--------: | :-------------------: | :---------: |
+| [PANet](https://github.com/open-mmlab/mmocr/blob/main/configs/textdet/panet/panet_r18_fpem_ffm_600e_icdar2015.py) | ICDAR2015 | 1312x736 | 0.795 | 0.785 @thr=0.9 | 3100±100 |
+
+## mmpose
+
+| model | dataset | spatial | snpe hybrid AR@IoU=0.50 | snpe hybrid AP@IoU=0.50 | latency(ms) |
+| :---------------------------------------------------------------------------------------------------------------------------------------------------------------: | :--------: | :-----: | :---------------------: | :---------------------: | :---------: |
+| [pose_hrnet_w32](https://github.com/open-mmlab/mmpose/blob/master/configs/animal/2d_kpt_sview_rgb_img/topdown_heatmap/animalpose/hrnet_w32_animalpose_256x256.py) | Animalpose | 256x256 | 0.997 | 0.989 | 630±50 |
+
+tips:
+
+- `pose_hrnet` was tested on AnimalPose's test dataset instead of the val dataset.
+
+## mmseg
+
+| model | dataset | spatial | mIoU | latency(ms) |
+| :---------------------------------------------------------------------------------------------------------------: | :--------: | :------: | :---: | :---------: |
+| [fcn](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r18-d8_512x1024_80k_cityscapes.py) | Cityscapes | 512x1024 | 71.11 | 4915±500 |
+
+tips:
+
+- `fcn` works fine with the 512x1024 input size. The Cityscapes dataset uses a 1024x2048 resolution, which causes the device to reboot.
+
+## Notes
+
+- We need to manually split the mmdet model into two parts, because
+  - In the snpe source code, `onnx_to_ir.py` can only parse onnx input while `ir_to_dlc.py` does not support the `topk` operator
+  - UDO (User Defined Operator) does not work with `snpe-onnx-to-dlc`
+- mmedit models
+  - `srcnn` requires cubic resize, which snpe does not support
+  - `esrgan` converts fine, but loading the model causes the device to reboot
+- mmrotate depends on [e2cnn](https://pypi.org/project/e2cnn/); you need to manually install [its Python3.6-compatible branch](https://github.com/QUVA-Lab/e2cnn)
diff --git a/docs/en/03-benchmark/supported_models.md b/docs/en/03-benchmark/supported_models.md
index 93972d5f7..bace6e2a4 100644
--- a/docs/en/03-benchmark/supported_models.md
+++ b/docs/en/03-benchmark/supported_models.md
@@ -2,75 +2,79 @@
 The table below lists the models that are guaranteed to be exportable to other backends.
 
-| Model | Codebase | TorchScript | OnnxRuntime | TensorRT | ncnn | PPLNN | OpenVINO | Model config |
-| :------------------------- | :--------------- | :---------: | :---------: | :------: | :--: | :---: | :------: | :---------------------------------------------------------------------------------------------: |
-| RetinaNet | MMDetection | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet) |
-| Faster R-CNN | MMDetection | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn) |
-| YOLOv3 | MMDetection | Y | Y | Y | Y | N | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolo) |
-| YOLOX | MMDetection | Y | Y | Y | Y | N | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolox) |
-| FCOS | MMDetection | Y | Y | Y | Y | N | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos) |
-| FSAF | MMDetection | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fsaf) |
-| Mask R-CNN | MMDetection | Y | Y | Y | N | N | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn) |
-| SSD[\*](#note) | MMDetection | Y | Y | Y | Y | N | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ssd) |
-| FoveaBox | MMDetection | Y | Y | N | N | N | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/foveabox) |
-| ATSS | MMDetection | N | Y | Y | N | N | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/atss) |
-| GFL | MMDetection | N | Y | Y | N | ?
| Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gfl) | -| Cascade R-CNN | MMDetection | N | Y | Y | N | Y | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn) | -| Cascade Mask R-CNN | MMDetection | N | Y | Y | N | N | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn) | -| VFNet | MMDetection | N | N | N | N | N | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/vfnet) | -| ResNet | MMClassification | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/resnet) | -| ResNeXt | MMClassification | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/resnext) | -| SE-ResNet | MMClassification | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/seresnet) | -| MobileNetV2 | MMClassification | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/mobilenet_v2) | -| ShuffleNetV1 | MMClassification | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/shufflenet_v1) | -| ShuffleNetV2 | MMClassification | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/shufflenet_v2) | -| VisionTransformer | MMClassification | Y | Y | ? | Y | ? | ? | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/vision_transformer) | -| FCN | MMSegmentation | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/fcn) | -| PSPNet[\*static](#note) | MMSegmentation | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/pspnet) | -| DeepLabV3 | MMSegmentation | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/deeplabv3) | -| DeepLabV3+ | MMSegmentation | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/deeplabv3plus) | -| Fast-SCNN[\*static](#note) | MMSegmentation | Y | Y | Y | N | Y | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/fastscnn) | -| UNet | MMSegmentation | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/unet) | -| ANN[\*](#note) | MMSegmentation | ? | Y | Y | N | N | N | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/ann) | -| APCNet | MMSegmentation | ? | Y | Y | Y | N | N | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/apcnet) | -| BiSeNetV1 | MMSegmentation | ? | Y | Y | Y | N | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/bisenetv1) | -| BiSeNetV2 | MMSegmentation | ? | Y | Y | Y | N | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/bisenetv2) | -| CGNet | MMSegmentation | ? | Y | Y | Y | N | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/cgnet) | -| DMNet | MMSegmentation | ? | Y | N | N | N | N | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/dmnet) | -| DNLNet | MMSegmentation | ? | Y | Y | Y | N | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/dnlnet) | -| EMANet | MMSegmentation | ? 
| Y | Y | N | N | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/emanet) | -| EncNet | MMSegmentation | ? | Y | Y | N | N | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/encnet) | -| ERFNet | MMSegmentation | ? | Y | Y | Y | N | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/erfnet) | -| FastFCN | MMSegmentation | ? | Y | Y | Y | N | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/fastfcn) | -| GCNet | MMSegmentation | ? | Y | Y | N | N | N | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/gcnet) | -| ICNet[\*](#note) | MMSegmentation | ? | Y | Y | N | N | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/icnet) | -| ISANet | MMSegmentation | ? | Y | Y | N | N | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/isanet) | -| NonLocal Net | MMSegmentation | ? | Y | Y | Y | N | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/nonlocal_net) | -| OCRNet | MMSegmentation | ? | Y | Y | Y | N | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/ocrnet) | -| PointRend | MMSegmentation | ? | Y | Y | N | N | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/point_rend) | -| Semantic FPN | MMSegmentation | ? | Y | Y | Y | N | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/sem_fpn) | -| STDC | MMSegmentation | ? | Y | Y | Y | N | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/stdc) | -| UPerNet[\*](#note) | MMSegmentation | ? | Y | Y | N | N | N | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/upernet) | -| DANet | MMSegmentation | ? | Y | Y | N | N | N | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/danet) | -| SRCNN | MMEditing | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmediting/tree/master/configs/restorers/srcnn) | -| ESRGAN | MMEditing | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmediting/tree/master/configs/restorers/esrgan) | -| SRGAN | MMEditing | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmediting/tree/master/configs/restorers/srresnet_srgan) | -| SRResNet | MMEditing | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmediting/tree/master/configs/restorers/srresnet_srgan) | -| Real-ESRGAN | MMEditing | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmediting/tree/master/configs/restorers/real_esrgan) | -| EDSR | MMEditing | Y | Y | Y | Y | N | Y | [config](https://github.com/open-mmlab/mmediting/tree/master/configs/restorers/edsr) | -| RDN | MMEditing | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmediting/tree/master/configs/restorers/rdn) | -| DBNet | MMOCR | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmocr/tree/main/configs/textdet/dbnet) | -| PANet | MMOCR | Y | Y | Y | Y | ? | Y | [config](https://github.com/open-mmlab/mmocr/tree/main/configs/textdet/panet) | -| DBNet | MMOCR | Y | Y | Y | Y | ? 
| Y | [config](https://github.com/open-mmlab/mmocr/tree/main/configs/textdet/psenet) | -| CRNN | MMOCR | Y | Y | Y | Y | Y | N | [config](https://github.com/open-mmlab/mmocr/tree/main/configs/textrecog/crnn) | -| SAR | MMOCR | N | Y | N | N | N | N | [config](https://github.com/open-mmlab/mmocr/tree/main/configs/textrecog/sar) | -| SATRN | MMOCR | Y | Y | Y | N | N | N | [config](https://github.com/open-mmlab/mmocr/tree/main/configs/textrecog/satrn) | -| HRNet | MMPose | N | Y | Y | Y | N | Y | [config](https://mmpose.readthedocs.io/en/latest/papers/backbones.html#hrnet-cvpr-2019) | -| MSPN | MMPose | N | Y | Y | Y | N | Y | [config](https://mmpose.readthedocs.io/en/latest/papers/backbones.html#mspn-arxiv-2019) | -| LiteHRNet | MMPose | N | Y | Y | N | N | Y | [config](https://mmpose.readthedocs.io/en/latest/papers/backbones.html#litehrnet-cvpr-2021) | -| PointPillars | MMDetection3d | ? | Y | Y | N | N | Y | [config](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/pointpillars) | -| CenterPoint (pillar) | MMDetection3d | ? | Y | Y | N | N | Y | [config](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/centerpoint) | -| RotatedRetinaNet | RotatedDetection | N | Y | Y | N | N | N | [config](https://github.com/open-mmlab/mmrotate/blob/main/configs/rotated_retinanet/README.md) | +| Model | Codebase | TorchScript | OnnxRuntime | TensorRT | ncnn | PPLNN | OpenVINO | Model config | +| :-------------------------- | :--------------- | :---------: | :---------: | :------: | :--: | :---: | :------: | :---------------------------------------------------------------------------------------------: | +| RetinaNet | MMDetection | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet) | +| Faster R-CNN | MMDetection | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn) | +| YOLOv3 | MMDetection | Y | Y | Y | Y | N | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolo) | +| YOLOX | MMDetection | Y | Y | Y | Y | N | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolox) | +| FCOS | MMDetection | Y | Y | Y | Y | N | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos) | +| FSAF | MMDetection | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fsaf) | +| Mask R-CNN | MMDetection | Y | Y | Y | N | N | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn) | +| SSD[\*](#note) | MMDetection | Y | Y | Y | Y | N | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ssd) | +| FoveaBox | MMDetection | Y | Y | N | N | N | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/foveabox) | +| ATSS | MMDetection | N | Y | Y | N | N | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/atss) | +| GFL | MMDetection | N | Y | Y | N | ? 
| Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gfl) | +| Cascade R-CNN | MMDetection | N | Y | Y | N | Y | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn) | +| Cascade Mask R-CNN | MMDetection | N | Y | Y | N | N | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn) | +| Swin Transformer[\*](#note) | MMDetection | N | Y | Y | N | N | N | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/swin) | +| VFNet | MMDetection | N | N | N | N | N | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/vfnet) | +| ResNet | MMClassification | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/resnet) | +| ResNeXt | MMClassification | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/resnext) | +| SE-ResNet | MMClassification | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/seresnet) | +| MobileNetV2 | MMClassification | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/mobilenet_v2) | +| ShuffleNetV1 | MMClassification | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/shufflenet_v1) | +| ShuffleNetV2 | MMClassification | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/shufflenet_v2) | +| VisionTransformer | MMClassification | Y | Y | Y | Y | ? | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/vision_transformer) | +| FCN | MMSegmentation | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/fcn) | +| PSPNet[\*static](#note) | MMSegmentation | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/pspnet) | +| DeepLabV3 | MMSegmentation | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/deeplabv3) | +| DeepLabV3+ | MMSegmentation | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/deeplabv3plus) | +| Fast-SCNN[\*static](#note) | MMSegmentation | Y | Y | Y | N | Y | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/fastscnn) | +| UNet | MMSegmentation | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/unet) | +| ANN[\*](#note) | MMSegmentation | ? | Y | Y | N | N | N | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/ann) | +| APCNet | MMSegmentation | ? | Y | Y | Y | N | N | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/apcnet) | +| BiSeNetV1 | MMSegmentation | ? | Y | Y | Y | N | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/bisenetv1) | +| BiSeNetV2 | MMSegmentation | ? | Y | Y | Y | N | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/bisenetv2) | +| CGNet | MMSegmentation | ? | Y | Y | Y | N | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/cgnet) | +| DMNet | MMSegmentation | ? | Y | N | N | N | N | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/dmnet) | +| DNLNet | MMSegmentation | ? 
| Y | Y | Y | N | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/dnlnet) |
+| EMANet | MMSegmentation | ? | Y | Y | N | N | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/emanet) |
+| EncNet | MMSegmentation | ? | Y | Y | N | N | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/encnet) |
+| ERFNet | MMSegmentation | ? | Y | Y | Y | N | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/erfnet) |
+| FastFCN | MMSegmentation | ? | Y | Y | Y | N | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/fastfcn) |
+| GCNet | MMSegmentation | ? | Y | Y | N | N | N | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/gcnet) |
+| ICNet[\*](#note) | MMSegmentation | ? | Y | Y | N | N | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/icnet) |
+| ISANet | MMSegmentation | ? | Y | Y | N | N | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/isanet) |
+| NonLocal Net | MMSegmentation | ? | Y | Y | Y | N | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/nonlocal_net) |
+| OCRNet | MMSegmentation | ? | Y | Y | Y | N | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/ocrnet) |
+| PointRend | MMSegmentation | ? | Y | Y | N | N | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/point_rend) |
+| Semantic FPN | MMSegmentation | ? | Y | Y | Y | N | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/sem_fpn) |
+| STDC | MMSegmentation | ? | Y | Y | Y | N | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/stdc) |
+| UPerNet[\*](#note) | MMSegmentation | ? | Y | Y | N | N | N | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/upernet) |
+| DANet | MMSegmentation | ? | Y | Y | N | N | N | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/danet) |
+| Segmenter | MMSegmentation | Y | Y | Y | Y | N | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/segmenter) |
+| SRCNN | MMEditing | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmediting/tree/master/configs/restorers/srcnn) |
+| ESRGAN | MMEditing | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmediting/tree/master/configs/restorers/esrgan) |
+| SRGAN | MMEditing | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmediting/tree/master/configs/restorers/srresnet_srgan) |
+| SRResNet | MMEditing | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmediting/tree/master/configs/restorers/srresnet_srgan) |
+| Real-ESRGAN | MMEditing | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmediting/tree/master/configs/restorers/real_esrgan) |
+| EDSR | MMEditing | Y | Y | Y | Y | N | Y | [config](https://github.com/open-mmlab/mmediting/tree/master/configs/restorers/edsr) |
+| RDN | MMEditing | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmediting/tree/master/configs/restorers/rdn) |
+| DBNet | MMOCR | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmocr/tree/main/configs/textdet/dbnet) |
+| PANet | MMOCR | Y | Y | Y | Y | ? | Y | [config](https://github.com/open-mmlab/mmocr/tree/main/configs/textdet/panet) |
+| PSENet | MMOCR | Y | Y | Y | Y | ?
| Y | [config](https://github.com/open-mmlab/mmocr/tree/main/configs/textdet/psenet) | +| CRNN | MMOCR | Y | Y | Y | Y | Y | N | [config](https://github.com/open-mmlab/mmocr/tree/main/configs/textrecog/crnn) | +| SAR | MMOCR | N | Y | N | N | N | N | [config](https://github.com/open-mmlab/mmocr/tree/main/configs/textrecog/sar) | +| SATRN | MMOCR | Y | Y | Y | N | N | N | [config](https://github.com/open-mmlab/mmocr/tree/main/configs/textrecog/satrn) | +| HRNet | MMPose | N | Y | Y | Y | N | Y | [config](https://mmpose.readthedocs.io/en/latest/papers/backbones.html#hrnet-cvpr-2019) | +| MSPN | MMPose | N | Y | Y | Y | N | Y | [config](https://mmpose.readthedocs.io/en/latest/papers/backbones.html#mspn-arxiv-2019) | +| LiteHRNet | MMPose | N | Y | Y | N | N | Y | [config](https://mmpose.readthedocs.io/en/latest/papers/backbones.html#litehrnet-cvpr-2021) | +| PointPillars | MMDetection3d | ? | Y | Y | N | N | Y | [config](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/pointpillars) | +| CenterPoint (pillar) | MMDetection3d | ? | Y | Y | N | N | Y | [config](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/centerpoint) | +| RotatedRetinaNet | RotatedDetection | N | Y | Y | N | N | N | [config](https://github.com/open-mmlab/mmrotate/blob/main/configs/rotated_retinanet/README.md) | +| Oriented RCNN | RotatedDetection | N | Y | Y | N | N | N | [config](https://github.com/open-mmlab/mmrotate/blob/main/configs/oriented_rcnn/README.md) | +| Gliding Vertex | RotatedDetection | N | N | Y | N | N | N | [config](https://github.com/open-mmlab/mmrotate/blob/main/configs/gliding_vertex/README.md) | ### Note @@ -78,3 +82,4 @@ The table below lists the models that are guaranteed to be exportable to other b - static: This model only support static export. Please use `static` deploy config, just like $MMDEPLOY_DIR/configs/mmseg/segmentation_tensorrt_static-1024x2048.py. - SSD: When you convert SSD model, you need to use min shape deploy config just like 300x300-512x512 rather than 320x320-1344x1344, for example $MMDEPLOY_DIR/configs/mmdet/detection/detection_tensorrt_dynamic-300x300-512x512.py. - YOLOX: YOLOX with ncnn only supports static shape. +- Swin Transformer: For TensorRT, only version 8.4+ is supported. 
diff --git a/docs/en/04-supported-codebases/mmcls.md b/docs/en/04-supported-codebases/mmcls.md index 8495b8f54..582a2cae2 100644 --- a/docs/en/04-supported-codebases/mmcls.md +++ b/docs/en/04-supported-codebases/mmcls.md @@ -8,11 +8,12 @@ Please refer to [install.md](https://github.com/open-mmlab/mmclassification/blob ## List of MMClassification models supported by MMDeploy -| Model | ONNX Runtime | TensorRT | ncnn | PPLNN | OpenVINO | Model config | -| :----------- | :----------: | :------: | :--: | :---: | :------: | :----------------------------------------------------------------------------------------: | -| ResNet | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/resnet) | -| ResNeXt | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/resnext) | -| SE-ResNet | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/seresnet) | -| MobileNetV2 | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/mobilenet_v2) | -| ShuffleNetV1 | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/shufflenet_v1) | -| ShuffleNetV2 | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/shufflenet_v2) | +| Model | ONNX Runtime | TensorRT | ncnn | PPLNN | OpenVINO | Model config | +| :---------------- | :----------: | :------: | :--: | :---: | :------: | :---------------------------------------------------------------------------------------------: | +| ResNet | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/resnet) | +| ResNeXt | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/resnext) | +| SE-ResNet | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/seresnet) | +| MobileNetV2 | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/mobilenet_v2) | +| ShuffleNetV1 | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/shufflenet_v1) | +| ShuffleNetV2 | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/shufflenet_v2) | +| VisionTransformer | Y | Y | Y | ? | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/vision_transformer) | diff --git a/docs/en/04-supported-codebases/mmdet.md b/docs/en/04-supported-codebases/mmdet.md index 6b06d4529..ba9b36b5e 100644 --- a/docs/en/04-supported-codebases/mmdet.md +++ b/docs/en/04-supported-codebases/mmdet.md @@ -26,3 +26,4 @@ Please refer to [get_started.md](https://github.com/open-mmlab/mmdetection/blob/ | RepPoints | ObjectDetection | N | Y | N | ? 
| Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/reppoints) | | Cascade Mask R-CNN | InstanceSegmentation | Y | N | N | N | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn) | | Mask R-CNN | InstanceSegmentation | Y | Y | N | N | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn) | +| Swin Transformer | InstanceSegmentation | Y | Y | N | N | N | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/swin) | diff --git a/docs/en/04-supported-codebases/mmrotate.md b/docs/en/04-supported-codebases/mmrotate.md index 22f15a293..554503c88 100644 --- a/docs/en/04-supported-codebases/mmrotate.md +++ b/docs/en/04-supported-codebases/mmrotate.md @@ -11,7 +11,9 @@ Please refer to [official installation guide](https://mmrotate.readthedocs.io/en | Model | Task | ONNX Runtime | TensorRT | NCNN | PPLNN | OpenVINO | Model config | | :--------------- | :--------------- | :----------: | :------: | :--: | :---: | :------: | :--------------------------------------------------------------------------------------------: | | RotatedRetinaNet | RotatedDetection | Y | Y | N | N | N | [config](https://github.com/open-mmlab/mmrotate/blob/main/configs/rotated_retinanet/README.md) | -| Oriented RCNN | RotatedDetection | Y | N | N | N | N | [config](https://github.com/open-mmlab/mmrotate/blob/main/configs/oriented_rcnn/README.md) | +| Oriented RCNN | RotatedDetection | Y | Y | N | N | N | [config](https://github.com/open-mmlab/mmrotate/blob/main/configs/oriented_rcnn/README.md) | +| Gliding Vertex | RotatedDetection | N | Y | N | N | N | [config](https://github.com/open-mmlab/mmrotate/blob/main/configs/gliding_vertex/README.md) | +| RoI Transformer | RotatedDetection | Y | Y | N | N | N | [config](https://github.com/open-mmlab/mmrotate/blob/main/configs/roi_trans/README.md) | ### Example diff --git a/docs/en/04-supported-codebases/mmseg.md b/docs/en/04-supported-codebases/mmseg.md index 0f4ac32aa..65e4c3fb9 100644 --- a/docs/en/04-supported-codebases/mmseg.md +++ b/docs/en/04-supported-codebases/mmseg.md @@ -37,7 +37,7 @@ Please refer to [get_started.md](https://github.com/open-mmlab/mmsegmentation/bl | STDC | Y | Y | Y | N | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/stdc) | | UPerNet[\*](#static_shape) | Y | Y | N | N | N | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/upernet) | | DANet | Y | Y | N | N | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/danet) | -| Segmenter[\*](#static_shape) | Y | Y | N | N | N | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/segmenter) | +| Segmenter[\*](#static_shape) | Y | Y | Y | N | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/segmenter) | | SegFormer[\*](#static_shape) | Y | Y | N | N | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/segformer) | | SETR | Y | N | N | N | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/setr) | | CCNet | N | N | N | N | N | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/ccnet) | diff --git a/docs/en/05-supported-backends/pplnn.md b/docs/en/05-supported-backends/pplnn.md index 8c0b32a0e..c0875ce9f 100644 --- a/docs/en/05-supported-backends/pplnn.md +++ b/docs/en/05-supported-backends/pplnn.md @@ -1,6 +1,6 @@ # PPLNN Support -This tutorial is based on Linux systems like Ubuntu-18.04. 
+MMDeploy supports ppl.nn v0.8.1 and later. This tutorial is based on Linux systems like Ubuntu-18.04.
 
 ## Installation
 
diff --git a/docs/en/06-developer-guide/add_test_units_for_backend_ops.md b/docs/en/06-developer-guide/add_test_units_for_backend_ops.md
index 5f9fa300b..8c517857b 100644
--- a/docs/en/06-developer-guide/add_test_units_for_backend_ops.md
+++ b/docs/en/06-developer-guide/add_test_units_for_backend_ops.md
@@ -1,16 +1,16 @@
-## How to add test units for backend ops
+# How to add test units for backend ops
 
 This tutorial introduces how to add unit tests for backend ops. When you add a custom op under `backend_ops`, you need to add the corresponding test unit. Test units of ops are included in `tests/test_ops/test_ops.py`.
 
-### Prerequisite
+## Prerequisite
 
 - `Compile new ops`: After adding a new custom op, you need to recompile the relevant backend, referring to [build.md](../01-how-to-build/build_from_source.md).
 
-### 1. Add the test program test_XXXX()
+## 1. Add the test program test_XXXX()
 
 You can put unit tests for ops in `tests/test_ops/`. Usually, the following program template can be used for your custom op.
 
-#### example of ops unit test
+### example of ops unit test
 
 ```python
 @pytest.mark.parametrize('backend', [TEST_TENSORRT, TEST_ONNXRT]) # 1.1 backend test class
@@ -49,26 +49,26 @@ def test_roi_align(backend,
                        save_dir=save_dir)
 ```
 
-#### 1.1 backend test class
+### 1.1 backend test class
 
 We provide some functions and classes for different backends, such as `TestOnnxRTExporter`, `TestTensorRTExporter`, `TestNCNNExporter`.
 
-#### 1.2 set parameters of op
+### 1.2 set parameters of op
 
 Set some parameters of the op, such as `pool_h`, `pool_w`, `spatial_scale`, `sampling_ratio` in roi_align. You can set multiple parameters to test the op.
 
-#### 1.3 op input data initialization
+### 1.3 op input data initialization
 
 Initialize the required input data.
 
-#### 1.4 initialize op model to be tested
+### 1.4 initialize op model to be tested
 
 The model containing the custom op usually has two forms.
 
- - `torch model`: Torch model with custom operators. Python code related to the op is required, refer to the `roi_align` unit test.
- - `onnx model`: Onnx model with custom operators. You need to call the onnx api to build it, refer to the `multi_level_roi_align` unit test.
 
-#### 1.5 call the backend test class interface
+### 1.5 call the backend test class interface
 
 Call the backend test class `run_and_validate` to run and verify the result output by the op on the backend.
 
@@ -86,7 +86,7 @@ Call the backend test class `run_and_validate` to run and verify the result outp
                          save_dir=None):
 ```
 
-##### Parameter Description
+#### Parameter Description
 
 - `model`: The input model to be tested; it can be a torch model or any other backend model.
 - `input_list`: List of test data, which is mapped to the order of input_names.
@@ -99,7 +99,7 @@ Call the backend test class `run_and_validate` to run and verify the result outp
 - `expected_result`: Expected ground truth values for verification.
 - `save_dir`: The folder used to save the output files.
 
-### 2. Test Methods
+## 2. Test Methods
 
 Use pytest to call the test function to test ops.
 
diff --git a/docs/en/06-developer-guide/partition_model.md b/docs/en/06-developer-guide/partition_model.md
new file mode 100644
index 000000000..3cfd27c1f
--- /dev/null
+++ b/docs/en/06-developer-guide/partition_model.md
@@ -0,0 +1,89 @@
+# How to get partitioned ONNX models
+
+MMDeploy supports exporting PyTorch models to partitioned onnx models.
With this feature, users can define their partition policy and get partitioned onnx models at ease. In this tutorial, we will briefly introduce how to support partition a model step by step. In the example, we would break YOLOV3 model into two parts and extract the first part without the post-processing (such as anchor generating and NMS) in the onnx model. + +## Step 1: Mark inputs/outpupts + +To support the model partition, we need to add `Mark` nodes in the ONNX model. This could be done with mmdeploy's `@mark` decorator. Note that to make the `mark` work, the marking operation should be included in a rewriting function. + +At first, we would mark the model input, which could be done by marking the input tensor `img` in the `forward` method of `BaseDetector` class, which is the parent class of all detector classes. Thus we name this marking point as `detector_forward` and mark the inputs as `input`. Since there could be three outputs for detectors such as `Mask RCNN`, the outputs are marked as `dets`, `labels`, and `masks`. The following code shows the idea of adding mark functions and calling the mark functions in the rewrite. For source code, you could refer to [mmdeploy/codebase/mmdet/models/detectors/base.py](https://github.com/open-mmlab/mmdeploy/blob/86a50e343a3a45d7bc2ba3256100accc4973e71d/mmdeploy/codebase/mmdet/models/detectors/base.py) + +```python +from mmdeploy.core import FUNCTION_REWRITER, mark + +@mark( + 'detector_forward', inputs=['input'], outputs=['dets', 'labels', 'masks']) +def __forward_impl(ctx, self, img, img_metas=None, **kwargs): + ... + + +@FUNCTION_REWRITER.register_rewriter( + 'mmdet.models.detectors.base.BaseDetector.forward') +def base_detector__forward(ctx, self, img, img_metas=None, **kwargs): + ... + # call the mark function + return __forward_impl(...) +``` + +Then, we have to mark the output feature of `YOLOV3Head`, which is the input argument `pred_maps` in `get_bboxes` method of `YOLOV3Head` class. We could add a internal function to only mark the `pred_maps` inside [`yolov3_head__get_bboxes`](https://github.com/open-mmlab/mmdeploy/blob/86a50e343a3a45d7bc2ba3256100accc4973e71d/mmdeploy/codebase/mmdet/models/dense_heads/yolo_head.py#L14) function as following. + +```python +from mmdeploy.core import FUNCTION_REWRITER, mark + +@FUNCTION_REWRITER.register_rewriter( + func_name='mmdet.models.dense_heads.YOLOV3Head.get_bboxes') +def yolov3_head__get_bboxes(ctx, + self, + pred_maps, + img_metas, + cfg=None, + rescale=False, + with_nms=True): + # mark pred_maps + @mark('yolo_head', inputs=['pred_maps']) + def __mark_pred_maps(pred_maps): + return pred_maps + pred_maps = __mark_pred_maps(pred_maps) + ... +``` + +Note that `pred_maps` is a list of `Tensor` and it has three elements. Thus, three `Mark` nodes with op name as `pred_maps.0`, `pred_maps.1`, `pred_maps.2` would be added in the onnx model. + +## Step 2: Add partition config + +After marking necessary nodes that would be used to split the model, we could add a deployment config file `configs/mmdet/detection/yolov3_partition_onnxruntime_static.py`. If you are not familiar with how to write config, you could check [write_config.md](../02-how-to-run/write_config.md). + +In the config file, we need to add `partition_config`. The key part is `partition_cfg`, which contains elements of dict that designates the start nodes and end nodes of each model segments. 
Since we only want to keep `YOLOV3` without post-processing, we could set the `start` as `['detector_forward:input']`, and `end` as `['yolo_head:input']`. Note that `start` and `end` can have multiple marks. + +```python +_base_ = ['./detection_onnxruntime_static.py'] + +onnx_config = dict(input_shape=[608, 608]) +partition_config = dict( + type='yolov3_partition', # the partition policy name + apply_marks=True, # should always be set to True + partition_cfg=[ + dict( + save_file='yolov3.onnx', # filename to save the partitioned onnx model + start=['detector_forward:input'], # [mark_name:input/output, ...] + end=['yolo_head:input']) # [mark_name:input/output, ...] + ]) + +``` + +## Step 3: Get partitioned onnx models + +Once we have marks of nodes and the deployment config with `parition_config` being set properly, we could use the [tool](../useful_tools.md) `torch2onnx` to export the model to onnx and get the partition onnx files. + +```shell +python tools/torch2onnx.py \ +configs/mmdet/detection/yolov3_partition_onnxruntime_static.py \ +../mmdetection/configs/yolo/yolov3_d53_mstrain-608_273e_coco.py \ +https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_mstrain-608_273e_coco/yolov3_d53_mstrain-608_273e_coco_20210518_115020-a2c3acb8.pth \ +../mmdetection/demo/demo.jpg \ +--work-dir ./work-dirs/mmdet/yolov3/ort/partition +``` + +After run the script above, we would have the partitioned onnx file `yolov3.onnx` in the `work-dir`. You can use the visualization tool [netron](https://netron.app/) to check the model structure. + +With the partitioned onnx file, you could refer to [useful_tools.md](../useful_tools.md) to do the following procedures such as `mmdeploy_onnx2ncnn`, `onnx2tensorrt`. diff --git a/docs/en/06-developer-guide/support_new_backend.md b/docs/en/06-developer-guide/support_new_backend.md index d5ccce4e4..7872334a8 100644 --- a/docs/en/06-developer-guide/support_new_backend.md +++ b/docs/en/06-developer-guide/support_new_backend.md @@ -1,8 +1,8 @@ -## How to support new backends +# How to support new backends MMDeploy supports a number of backend engines. We welcome the contribution of new backends. In this tutorial, we will introduce the general procedures to support a new backend in MMDeploy. -### Prerequisites +## Prerequisites Before contributing the codes, there are some requirements for the new backend that need to be checked: @@ -10,7 +10,7 @@ Before contributing the codes, there are some requirements for the new backend t - If the backend requires model files or weight files other than a ".onnx" file, a conversion tool that converts the ".onnx" file to model files and weight files is required. The tool can be a Python API, a script, or an executable program. - It is highly recommended that the backend provides a Python interface to load the backend files and inference for validation. -### Support backend conversion +## Support backend conversion The backends in MMDeploy must support the ONNX. The backend loads the ".onnx" file directly, or converts the ".onnx" to its own format using the conversion tool. In this section, we will introduce the steps to support backend conversion. @@ -155,7 +155,7 @@ The backends in MMDeploy must support the ONNX. The backend loads the ".onnx" fi 7. Add docstring and unit tests for new code :). -### Support backend inference +## Support backend inference Although the backend engines are usually implemented in C/C++, it is convenient for testing and debugging if the backend provides Python inference interface. 
We encourage the contributors to support backend inference in the Python interface of MMDeploy. In this section we will introduce the steps to support backend inference. @@ -203,7 +203,7 @@ Although the backend engines are usually implemented in C/C++, it is convenient # Postprocess data # ... - @TimeCounter.count_time() + @TimeCounter.count_time('onnxruntime') def __ort_execute(self, io_binding: ort.IOBinding): # Only do the inference self.sess.run_with_iobinding(io_binding) @@ -230,7 +230,7 @@ Although the backend engines are usually implemented in C/C++, it is convenient 5. Add docstring and unit tests for new code :). -### Support new backends using MMDeploy as a third party +## Support new backends using MMDeploy as a third party Previous parts show how to add a new backend in MMDeploy, which requires changing its source codes. However, if we treat MMDeploy as a third party, the methods above are no longer efficient. To this end, adding a new backend requires us pre-install another package named `aenum`. We can install it directly through `pip install aenum`. diff --git a/docs/en/06-developer-guide/support_new_model.md b/docs/en/06-developer-guide/support_new_model.md index 740680807..ae456a45b 100644 --- a/docs/en/06-developer-guide/support_new_model.md +++ b/docs/en/06-developer-guide/support_new_model.md @@ -1,8 +1,8 @@ -## How to support new models +# How to support new models We provide several tools to support model conversion. -### Function Rewriter +## Function Rewriter The PyTorch neural network is written in python that eases the development of the algorithm. But the use of Python control flow and third-party libraries make it difficult to export the network to an intermediate representation. We provide a 'monkey patch' tool to rewrite the unsupported function to another one that can be exported. Here is an example: @@ -26,7 +26,7 @@ It is easy to use the function rewriter. Just add a decorator with arguments: The arguments are the same as the original function, except a context `ctx` as the first argument. The context provides some useful information such as the deployment config `ctx.cfg` and the original function (which has been overridden) `ctx.origin_func`. -### Module Rewriter +## Module Rewriter If you want to replace a whole module with another one, we have another rewriter as follows: @@ -66,7 +66,7 @@ Just like function rewriter, add a decorator with arguments: All instances of the module in the network will be replaced with instances of this new class. The original module and the deployment config will be passed as the first two arguments. -### Custom Symbolic +## Custom Symbolic The mappings between PyTorch and ONNX are defined in PyTorch with symbolic functions. The custom symbolic function can help us to bypass some ONNX nodes which are unsupported by inference engine. diff --git a/docs/en/appendix/cross_build_snpe_service.md b/docs/en/appendix/cross_build_snpe_service.md new file mode 100644 index 000000000..f5aba17d8 --- /dev/null +++ b/docs/en/appendix/cross_build_snpe_service.md @@ -0,0 +1,166 @@ +# Cross compile snpe inference server on Ubuntu 18 + +mmdeploy has provided a prebuilt package, if you want to compile it by self, or need to modify the `.proto` file, you can refer to this document. + +Note that the official gRPC documentation does not have complete support for the NDK. + +## 1. 
Environment + +| Item | Version | Remarks | +| ------------------ | -------------- | --------------------------------------------------------- | +| snpe | 1.59 | 1.60 uses clang-8.0, which may cause compatibility issues | +| host OS | ubuntu18.04 | snpe1.59 specified version | +| NDK | r17c | snpe1.59 specified version | +| gRPC | commit 6f698b5 | - | +| Hardware equipment | qcom888 | qcom chip required | + +## 2. Cross compile gRPC with NDK + +1. Pull gRPC repo, compile `protoc` and `grpc_cpp_plugin` on host + +```bash +# Install dependencies +$ apt-get update && apt-get install -y libssl-dev +# Compile +$ git clone https://github.com/grpc/grpc --recursive=1 --depth=1 +$ mkdir -p cmake/build +$ pushd cmake/build + +$ cmake \ + -DCMAKE_BUILD_TYPE=Release \ + -DgRPC_INSTALL=ON \ + -DgRPC_BUILD_TESTS=OFF \ + -DgRPC_SSL_PROVIDER=package \ + ../.. +# Install to host +$ make -j +$ sudo make install +``` + +2. Download the NDK and cross-compile the static libraries with android aarch64 format + +```bash +$ wget https://dl.google.com/android/repository/android-ndk-r17c-linux-x86_64.zip +$ unzip android-ndk-r17c-linux-x86_64.zip + +$ export ANDROID_NDK=/path/to/android-ndk-r17c + +$ cd /path/to/grpc +$ mkdir -p cmake/build_aarch64 && pushd cmake/build_aarch64 + +$ cmake ../.. \ + -DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK}/build/cmake/android.toolchain.cmake \ + -DANDROID_ABI=arm64-v8a \ + -DANDROID_PLATFORM=android-26 \ + -DANDROID_TOOLCHAIN=clang \ + -DANDROID_STL=c++_shared \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_INSTALL_PREFIX=/tmp/android_grpc_install_shared + +$ make -j +$ make install +``` + +3. At this point `/tmp/android_grpc_install` should have the complete installation file + +```bash +$ cd /tmp/android_grpc_install +$ tree -L 1 +. +├── bin +├── include +├── lib +└── share +``` + +## 3. \[Skipable\] Self-test whether NDK gRPC is available + +1. Compile the helloworld that comes with gRPC + +```bash +$ cd /path/to/grpc/examples/cpp/helloworld/ +$ mkdir cmake/build_aarch64 -p && pushd cmake/build_aarch64 + +$ cmake ../.. \ + -DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK}/build/cmake/android.toolchain.cmake \ + -DANDROID_ABI=arm64-v8a \ + -DANDROID_PLATFORM=android-26 \ + -DANDROID_STL=c++_shared \ + -DANDROID_TOOLCHAIN=clang \ + -DCMAKE_BUILD_TYPE=Release \ + -Dabsl_DIR=/tmp/android_grpc_install_shared/lib/cmake/absl \ + -DProtobuf_DIR=/tmp/android_grpc_install_shared/lib/cmake/protobuf \ + -DgRPC_DIR=/tmp/android_grpc_install_shared/lib/cmake/grpc + +$ make -j +$ ls greeter* +greeter_async_client greeter_async_server greeter_callback_server greeter_server +greeter_async_client2 greeter_callback_client greeter_client +``` + +2. Turn on debug mode on your phone, push the binary to `/data/local/tmp` + +```bash +$ adb push greeter* /data/local/tmp +``` + +3. `adb shell` into the phone, execute client/server + +```bash +/data/local/tmp $ ./greeter_client +Greeter received: Hello world +``` + +## 4. Cross compile snpe inference server + +1. Open the [snpe tools website](https://developer.qualcomm.com/software/qualcomm-neural-processing-sdk/tools) and download version 1.59. Unzip and set environment variables + +> Note that snpe >= 1.60 starts using `clang-8.0`, which may cause incompatibility with `libc++_shared.so` on older devices. + +```bash +$ export SNPE_ROOT=/path/to/snpe-1.59.0.3230 +``` + +2. 
Open the snpe server directory within mmdeploy, use the options when cross-compiling gRPC + +```bash +$ cd /path/to/mmdeploy +$ cd service/snpe/server + +$ mkdir -p build && cd build +$ export ANDROID_NDK=/path/to/android-ndk-r17c +$ cmake .. \ + -DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK}/build/cmake/android.toolchain.cmake \ + -DANDROID_ABI=arm64-v8a \ + -DANDROID_PLATFORM=android-26 \ + -DANDROID_STL=c++_shared \ + -DANDROID_TOOLCHAIN=clang \ + -DCMAKE_BUILD_TYPE=Release \ + -Dabsl_DIR=/tmp/android_grpc_install_shared/lib/cmake/absl \ + -DProtobuf_DIR=/tmp/android_grpc_install_shared/lib/cmake/protobuf \ + -DgRPC_DIR=/tmp/android_grpc_install_shared/lib/cmake/grpc + + $ make -j + $ file inference_server +inference_server: ELF 64-bit LSB shared object, ARM aarch64, version 1 (SYSV), dynamically linked, interpreter /system/bin/linker64, BuildID[sha1]=252aa04e2b982681603dacb74b571be2851176d2, with debug_info, not stripped +``` + +Finally, you can see `infernece_server`, `adb push` it to the device and execute. + +## 5. Regenerate the proto interface + +If you have changed `inference.proto`, you need to regenerate the .cpp and .py interfaces + +```Shell +$ python3 -m pip install grpc_tools --user +$ python3 -m grpc_tools.protoc -I./ --python_out=./client/ --grpc_python_out=./client/ inference.proto + +$ ln -s `which protoc-gen-grpc` +$ protoc --cpp_out=./ --grpc_out=./ --plugin=protoc-gen-grpc=grpc_cpp_plugin inference.proto +``` + +## Reference + +- snpe tutorial https://developer.qualcomm.com/sites/default/files/docs/snpe/cplus_plus_tutorial.html +- gRPC cross build script https://raw.githubusercontent.com/grpc/grpc/master/test/distrib/cpp/run_distrib_test_cmake_aarch64_cross.sh +- stackoverflow https://stackoverflow.com/questions/54052229/build-grpc-c-for-android-using-ndk-arm-linux-androideabi-clang-compiler diff --git a/docs/en/conf.py b/docs/en/conf.py index a0a825ff2..6dcaeb0c1 100644 --- a/docs/en/conf.py +++ b/docs/en/conf.py @@ -55,6 +55,7 @@ extensions = [ 'sphinx_markdown_tables', 'myst_parser', 'sphinx_copybutton', + 'sphinxcontrib.mermaid' ] # yapf: disable autodoc_mock_imports = ['tensorrt'] diff --git a/docs/en/faq.md b/docs/en/faq.md index dec0174d3..1eea9b08e 100644 --- a/docs/en/faq.md +++ b/docs/en/faq.md @@ -50,6 +50,40 @@ print(torch.__file__) ``` +- enable_language(CUDA) error + + ``` + -- Selecting Windows SDK version 10.0.19041.0 to target Windows 10.0.19044. + -- Found CUDA: C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.1 (found version "11.1") + CMake Error at C:/Software/cmake/cmake-3.23.1-windows-x86_64/share/cmake-3.23/Modules/CMakeDetermineCompilerId.cmake:491 (message): + No CUDA toolset found. + Call Stack (most recent call first): + C:/Software/cmake/cmake-3.23.1-windows-x86_64/share/cmake-3.23/Modules/CMakeDetermineCompilerId.cmake:6 (CMAKE_DETERMINE_COMPILER_ID_BUILD) + C:/Software/cmake/cmake-3.23.1-windows-x86_64/share/cmake-3.23/Modules/CMakeDetermineCompilerId.cmake:59 (__determine_compiler_id_test) + C:/Software/cmake/cmake-3.23.1-windows-x86_64/share/cmake-3.23/Modules/CMakeDetermineCUDACompiler.cmake:339 (CMAKE_DETERMINE_COMPILER_ID) + C:/workspace/mmdeploy-0.6.0-windows-amd64-cuda11.1-tensorrt8.2.3.0/sdk/lib/cmake/MMDeploy/MMDeployConfig.cmake:27 (enable_language) + CMakeLists.txt:5 (find_package) + ``` + + **Cause:** CUDA Toolkit 11.1 was installed before Visual Studio, so the VS plugin was not installed. 
Or the version of VS is too new, so that the installation of the VS plugin is skipped during the installation of the CUDA Toolkit + + **Solution:** This problem can be solved by manually copying the four files in `C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.1\extras\visual_studio_integration\MSBuildExtensions` to `C:\Software\Microsoft Visual Studio\2022\Community\Msbuild\Microsoft\VC\v170\BuildCustomizations` The specific path should be changed according to the actual situation. + +### ONNX Runtime + +- Under Windows system, when visualizing model inference result failed with the following error: + ``` + onnxruntime.capi.onnxruntime_pybind11_state.Fail: [ONNXRuntimeError] : 1 : FAIL : Failed to load library, error code: 193 + ``` + **Cause:** In latest Windows systems, there are two `onnxruntime.dll` under the system path, and they will be loaded first, causing conflicts. + ``` + C:\Windows\SysWOW64\onnxruntime.dll + C:\Windows\System32\onnxruntime.dll + ``` + **Solution:** Choose one of the following two options + 1. Copy the dll in the lib directory of the downloaded onnxruntime to the directory where mmdeploy_onnxruntime_ops.dll locates (It is recommended to use Everything to search the ops dll) + 2. Rename the two dlls in the system path so that they cannot be loaded. + ### Pip - pip installed package but could not `import` them. diff --git a/docs/en/get_started.md b/docs/en/get_started.md index a28ec9e20..e5a004c5c 100644 --- a/docs/en/get_started.md +++ b/docs/en/get_started.md @@ -4,8 +4,6 @@ MMDeploy provides useful tools for deploying OpenMMLab models to various platfor With the help of them, you can not only do model deployment using our pre-defined pipelines but also customize your own deployment pipeline. -In the following chapters, we will describe the general routine and demonstrate a "hello-world" example - deploying Faster R-CNN model from [MMDetection](https://github.com/open-mmlab/mmdetection) to NVIDIA TensorRT. - ## Introduction In MMDeploy, the deployment pipeline can be illustrated by a sequential modules, i.e., Model Converter, MMDeploy Model and Inference SDK. @@ -36,8 +34,7 @@ In order to do an end-to-end model deployment, MMDeploy requires Python 3.6+ and **Step 1.** Create a conda environment and activate it. ```shell -export PYTHON_VERSION=3.7 -conda create --name mmdeploy python=${PYTHON_VERSION} -y +conda create --name mmdeploy python=3.8 -y conda activate mmdeploy ``` @@ -46,18 +43,17 @@ conda activate mmdeploy On GPU platforms: ```shell - export PYTORCH_VERSION=1.8.0 - export TORCHVISION_VERSION=0.9.0 - export CUDA_VERSION=11.1 - conda install pytorch==${PYTORCH_VERSION} torchvision==${TORCHVISION_VERSION} cudatoolkit=${CUDA_VERSION} -c pytorch -c conda-forge +conda install pytorch=={pytorch_version} torchvision=={torchvision_version} cudatoolkit={cudatoolkit_version} -c pytorch -c conda-forge ``` On CPU platforms: ```shell -export PYTORCH_VERSION=1.8.0 -export TORCHVISION_VERSION=0.9.0 -conda install pytorch==${PYTORCH_VERSION} torchvision==${TORCHVISION_VERSION} cpuonly -c pytorch +conda install pytorch=={pytorch_version} torchvision=={torchvision_version} cpuonly -c pytorch +``` + +```{note} +On GPU platform, please ensure that {cudatoolkit_version} matches your host CUDA toolkit version. Otherwise, it probably brings in conflicts when deploying model with TensorRT. ``` ## Installation @@ -67,125 +63,150 @@ We recommend that users follow our best practices installing MMDeploy. **Step 0.** Install [MMCV](https://github.com/open-mmlab/mmcv). 
```shell - export MMCV_VERSION=1.5.0 - export CUDA_STRING="${CUDA_VERSION/./""}" - python -m pip install mmcv-full==${MMCV_VERSION} -f https://download.openmmlab.com/mmcv/dist/cu${CUDA_STRING}/torch${PYTORCH_VERSION}/index.html +pip install -U openmim +mim install mmcv-full ``` -**Step 1.** Install MMDeploy. +**Step 1.** Install MMDeploy and inference engine -Since v0.5.0, MMDeploy provides prebuilt packages, which can be found from [here](https://github.com/open-mmlab/mmdeploy/releases). -You can download them according to your target platform and device. +We recommend using MMDeploy precompiled package as our best practice. +You can download them from [here](https://github.com/open-mmlab/mmdeploy/releases) according to your target platform and device. -Take the MMDeploy-TensorRT package on NVIDIA for example: +The supported platform and device matrix is presented as following: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
OS-ArchDeviceONNX RuntimeTensorRT
Linux-x86_64CPUYN/A
CUDANY
Windows-x86_64CPUYN/A
CUDANY
+ +**Note: if MMDeploy prebuilt package doesn't meet your target platforms or devices, please [build MMDeploy from source](./01-how-to-build/build_from_source.md)** + +Take the latest precompiled package as example, you can install it as follows: + +
+Linux-x86_64, CPU, ONNX Runtime 1.8.1 ```shell -export MMDEPLOY_VERSION=0.5.0 -export TENSORRT_VERSION=8.2.3.0 -export PYTHON_VERSION=3.7 -export PYTHON_STRING="${PYTHON_VERSION/./""}" - -wget https://github.com/open-mmlab/mmdeploy/releases/download/v${MMDEPLOY_VERSION}/mmdeploy-${MMDEPLOY_VERSION}-linux-x86_64-cuda${CUDA_VERSION}-tensorrt${TENSORRT_VERSION}.tar.gz -tar -zxvf mmdeploy-${MMDEPLOY_VERSION}-linux-x86_64-cuda${CUDA_VERSION}-tensorrt${TENSORRT_VERSION}.tar.gz -cd mmdeploy-${MMDEPLOY_VERSION}-linux-x86_64-cuda${CUDA_VERSION}-tensorrt${TENSORRT_VERSION} -python -m pip install dist/mmdeploy-*-py${PYTHON_STRING}*.whl -python -m pip install sdk/python/mmdeploy_python-*-cp${PYTHON_STRING}*.whl -export LD_LIBRARY_PATH=$(pwd)/sdk/lib:$LD_LIBRARY_PATH +# install MMDeploy +wget https://github.com/open-mmlab/mmdeploy/releases/download/v0.7.0/mmdeploy-0.7.0-linux-x86_64-onnxruntime1.8.1.tar.gz +tar -zxvf mmdeploy-0.7.0-linux-x86_64-onnxruntime1.8.1.tar.gz +cd mmdeploy-0.7.0-linux-x86_64-onnxruntime1.8.1 +pip install dist/mmdeploy-0.7.0-py3-none-linux_x86_64.whl +pip install sdk/python/mmdeploy_python-0.7.0-cp38-none-linux_x86_64.whl cd .. +# install inference engine: ONNX Runtime +pip install onnxruntime==1.8.1 +wget https://github.com/microsoft/onnxruntime/releases/download/v1.8.1/onnxruntime-linux-x64-1.8.1.tgz +tar -zxvf onnxruntime-linux-x64-1.8.1.tgz +export ONNXRUNTIME_DIR=$(pwd)/onnxruntime-linux-x64-1.8.1 +export LD_LIBRARY_PATH=$ONNXRUNTIME_DIR/lib:$LD_LIBRARY_PATH ``` -```{note} -If MMDeploy prebuilt package doesn meet your target platforms or devices, please build MMDeploy from its source by following the build documents -``` +
-**step 2.** Install the inference backend - -Based on the above MMDeploy-TensorRT package, we need to download and install [TensorRT](https://docs.nvidia.com/deeplearning/tensorrt/install-guide/index.html#installing-tar), including [cuDNN](https://developer.nvidia.com/cudnn). - -**Be aware that TensorRT version and cuDNN version must matches your CUDA Toolkit version** - -The following shows an example of installing TensorRT 8.2.3.0 and cuDNN 8.2: +
+Linux-x86_64, CUDA 11.x, TensorRT 8.2.3.0 ```shell -export TENSORRT_VERSION=8.2.3.0 -CUDA_MAJOR="${CUDA_VERSION/\.*/""}" - -# !!! Download tensorrt package from NVIDIA that matches your CUDA Toolkit version to the current working directory -tar -zxvf TensorRT-${TENSORRT_VERSION}*cuda-${CUDA_MAJOR}*.tar.gz -python -m pip install TensorRT-${TENSORRT_VERSION}/python/tensorrt-*-cp${PYTHON_STRING}*.whl -python -m pip install pycuda -export TENSORRT_DIR=$(pwd)/TensorRT-${TENSORRT_VERSION} +# install MMDeploy +wget https://github.com/open-mmlab/mmdeploy/releases/download/v0.7.0/mmdeploy-0.7.0-linux-x86_64-cuda11.1-tensorrt8.2.3.0.tar.gz +tar -zxvf mmdeploy-v0.7.0-linux-x86_64-cuda11.1-tensorrt8.2.3.0.tar.gz +cd mmdeploy-0.7.0-linux-x86_64-cuda11.1-tensorrt8.2.3.0 +pip install dist/mmdeploy-0.7.0-py3-none-linux_x86_64.whl +pip install sdk/python/mmdeploy_python-0.7.0-cp38-none-linux_x86_64.whl +cd .. +# install inference engine: TensorRT +# !!! Download TensorRT-8.2.3.0 CUDA 11.x tar package from NVIDIA, and extract it to the current directory +pip install TensorRT-8.2.3.0/python/tensorrt-8.2.3.0-cp38-none-linux_x86_64.whl +pip install pycuda +export TENSORRT_DIR=$(pwd)/TensorRT-8.2.3.0 export LD_LIBRARY_PATH=${TENSORRT_DIR}/lib:$LD_LIBRARY_PATH - -# !!! Download cuDNN package from NVIDIA that matches your CUDA Toolkit and TensorRT version to the current working directory -tar -zxvf cudnn-${CUDA_MAJOR}.*-linux-x64*.tgz +# !!! Download cuDNN 8.2.1 CUDA 11.x tar package from NVIDIA, and extract it to the current directory export CUDNN_DIR=$(pwd)/cuda export LD_LIBRARY_PATH=$CUDNN_DIR/lib64:$LD_LIBRARY_PATH ``` -In the next chapters, we are going to present our 'Hello, world' example based on the above settings. +
-For the installation of all inference backends supported by MMDeploy right now, please refer to: +
+Windows-x86_64 +
-- [ONNX Runtime](05-supported-backends/onnxruntime.md) -- [TensorRT](05-supported-backends/tensorrt.md) -- [PPL.NN](05-supported-backends/pplnn.md) -- [ncnn](05-supported-backends/ncnn.md) -- [OpenVINO](05-supported-backends/openvino.md) -- [LibTorch](05-supported-backends/torchscript.md) +Please learn its prebuilt package from [this](./02-how-to-run/prebuilt_package_windows.md) guide. ## Convert Model -After the installation, you can enjoy the model deployment journey starting from converting PyTorch model to backend model. +After the installation, you can enjoy the model deployment journey starting from converting PyTorch model to backend model by running `tools/deploy.py`. Based on the above settings, we provide an example to convert the Faster R-CNN in [MMDetection](https://github.com/open-mmlab/mmdetection) to TensorRT as below: ```shell -# clone mmdeploy repo. We are going to use the pre-defined pipeline config from the source code -git clone --recursive https://github.com/open-mmlab/mmdeploy.git -python -m pip install -r mmdeploy/requirements/runtime.txt -export MMDEPLOY_DIR=$(pwd)/mmdeploy +# clone mmdeploy to get the deployment config. `--recursive` is not necessary +git clone https://github.com/open-mmlab/mmdeploy.git # clone mmdetection repo. We have to use the config file to build PyTorch nn module -python -m pip install mmdet==2.24.0 git clone https://github.com/open-mmlab/mmdetection.git -export MMDET_DIR=$(pwd)/mmdetection +cd mmdetection +pip install -v -e . +cd .. # download Faster R-CNN checkpoint -export CHECKPOINT_DIR=$(pwd)/checkpoints -wget -P ${CHECKPOINT_DIR} https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth - -# set working directory, where the mmdeploy model is saved -export WORK_DIR=$(pwd)/mmdeploy_models +wget -P checkpoints https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth # run the command to start model conversion -python ${MMDEPLOY_DIR}/tools/deploy.py \ - ${MMDEPLOY_DIR}/configs/mmdet/detection/detection_tensorrt_dynamic-320x320-1344x1344.py \ - ${MMDET_DIR}/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \ - ${CHECKPOINT_DIR}/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ - ${MMDET_DIR}/demo/demo.jpg \ - --work-dir ${WORK_DIR} \ - --device cuda:0 \ +python mmdeploy/tools/deploy.py \ + mmdeploy/configs/mmdet/detection/detection_tensorrt_dynamic-320x320-1344x1344.py \ + mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \ + checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ + mmdetection/demo/demo.jpg \ + --work-dir mmdeploy_model/faster-rcnn \ + --device cuda \ --dump-info ``` -`${MMDEPLOY_DIR}/tools/deploy.py` does everything you need to convert a model. Read [how_to_convert_model](./02-how-to-run/convert_model.md) for more details. The converted model and its meta info will be found in the path specified by `--work-dir`. And they make up of MMDeploy Model that can be fed to MMDeploy SDK to do model inference. -`detection_tensorrt_dynamic-320x320-1344x1344.py` is a config file that contains all arguments you need to customize the conversion pipeline. The name is formed as: +For more details about model conversion, you can read [how_to_convert_model](./02-how-to-run/convert_model.md). If you want to customize the conversion pipeline, you can edit the config file by following [this](./02-how-to-run/write_config.md) tutorial. 
-```bash -_-[backend options]_.py +```{tip} +If MMDeploy-ONNXRuntime prebuild package is installed, you can convert the above model to onnx model and perform ONNX Runtime inference +just by 'changing detection_tensorrt_dynamic-320x320-1344x1344.py' to 'detection_onnxruntime_dynamic.py' and making '--device' as 'cpu'. ``` -If you want to customize the conversion pipeline, you can edit the config file by following [this](./02-how-to-run/write_config.md) tutorial. - ## Inference Model -After model conversion, we can perform inference both by Model Converter and Inference SDK. - -The former is developed by Python, while the latter is mainly written by C/C++. +After model conversion, we can perform inference not only by Model Converter but also by Inference SDK. ### Inference by Model Converter @@ -194,43 +215,50 @@ Take the previous converted Faster R-CNN tensorrt model for example, ```python from mmdeploy.apis import inference_model -import os - -model_cfg = os.getenv('MMDET_DIR') + '/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' -deploy_cfg = os.getenv('MMDEPLOY_DIR') + '/configs/mmdet/detection/detection_tensorrt_dynamic-320x320-1344x1344.py' -backend_files = os.getenv('WORK_DIR') + '/end2end.engine' - -result = inference_model(model_cfg, deploy_cfg, backend_files, img=img, device=device) +result = inference_model( + model_cfg='mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py', + deploy_cfg='mmdeploy/configs/mmdet/detection/detection_tensorrt_dynamic-320x320-1344x1344.py', + backend_files=['mmdeploy_model/faster-rcnn/end2end.engine'], + img='mmdetection/demo/demo.jpg', + device='cuda:0') ``` -The data type and data layout is exactly the same with the OpenMMLab PyTorch model inference results. - ```{note} -You can certainly use the infernce backend API directly to do inference. But since MMDeploy has being developed several custom operators, it's necessary to load them first before calling the infernce backend API. +'backend_files' in this API refers to backend engine file path, which MUST be put in a list, since some inference engines like OpenVINO and ncnn separate the network structure and its weights into two files. ``` ### Inference by SDK -You can use SDK API to do model inference with the mmdeploy model generated by Model Converter. +You can directly run MMDeploy demo programs in the precompiled package to get inference results. -In the following section, we will provide examples of deploying the converted Faster R-CNN model talked above with different FFI. +```shell +cd mmdeploy-0.7.0-linux-x86_64-cuda11.1-tensorrt8.2.3.0 +# run python demo +python sdk/example/python/object_detection.py cuda ../mmdeploy_model/faster-rcnn ../mmdetection/demo/demo.jpg +# run C/C++ demo +export LD_LIBRARY_PATH=$(pwd)/sdk/lib:$LD_LIBRARY_PATH +./sdk/bin/object_detection cuda ../mmdeploy_model/faster-rcnn ../mmdetection/demo/demo.jpg +``` + +```{note} +In the above command, the input model is SDK Model path. It is NOT engine file path but actually the path passed to --work-dir. It not only includes engine files but also meta information like 'deploy.json' and 'pipeline.json'. +``` + +In the next section, we will provide examples of deploying the converted Faster R-CNN model talked above with SDK different FFI (Foreign Function Interface). 
#### Python API ```python from mmdeploy_python import Detector -import os import cv2 -# get mmdeploy model path of faster r-cnn -model_path = os.getenv('WORK_DIR') -# use mmdetection demo image as an input image -image_path = '/'.join((os.getenv('MMDET_DIR'), 'demo/demo.jpg')) - -img = cv2.imread(image_path) -detector = Detector(model_path, 'cuda', 0) -bboxes, labels, _ = detector([img])[0] +img = cv2.imread('mmdetection/demo/demo.jpg') +# create a detector +detector = Detector(model_path='mmdeploy_models/faster-rcnn', device_name='cuda', device_id=0) +# run the inference +bboxes, labels, _ = detector(img) +# Filter the result according to threshold indices = [i for i in range(len(bboxes))] for index, bbox, label_id in zip(indices, bboxes, labels): [left, top, right, bottom], score = bbox[0:4].astype(int), bbox[4] @@ -243,73 +271,45 @@ cv2.imwrite('output_detection.png', img) You can find more examples from [here](https://github.com/open-mmlab/mmdeploy/tree/master/demo/python). -```{note} -If you build MMDeploy from the source, please add ${MMDEPLOY_DIR}/build/lib to the environment variable PYTHONPATH. -Otherwise, you will run into an error like ’ModuleNotFoundError: No module named 'mmdeploy_python' -``` +#### C++ API -#### C API +Using SDK C++ API should follow next pattern, -Using SDK C API should follow next pattern, - -```mermaid -graph LR - A[create inference handle] --> B(read image) - B --> C(apply handle) - C --> D[deal with inference result] - D -->E[destroy result buffer] - E -->F[destroy handle] -``` +![image](https://user-images.githubusercontent.com/4560679/182554739-7fff57fc-5c84-44ed-b139-4749fae27404.png) Now let's apply this procedure on the above Faster R-CNN model. ```C++ #include #include -#include "detector.h" +#include "mmdeploy/detector.hpp" int main() { const char* device_name = "cuda"; int device_id = 0; + std::string model_path = "mmdeploy_model/faster-rcnn"; + std::string image_path = "mmdetection/demo/demo.jpg"; - // get mmdeploy model path of faster r-cnn - std::string model_path = std::getenv("WORK_DIR"); - // use mmdetection demo image as an input image - std::string image_path = std::getenv("MMDET_DIR") + "/demo/demo.jpg"; - - // create inference handle - mm_handle_t detector{}; - int status{}; - status = mmdeploy_detector_create_by_path(model_path, device_name, device_id, &detector); - assert(status == MM_SUCCESS); - - // read image + // 1. load model + mmdeploy::Model model(model_path); + // 2. create predictor + mmdeploy::Detector detector(model, mmdeploy::Device{device_name, device_id}); + // 3. read image cv::Mat img = cv::imread(image_path); - assert(img.data); - - // apply handle and get the inference result - mm_mat_t mat{img.data, img.rows, img.cols, 3, MM_BGR, MM_INT8}; - mm_detect_t *bboxes{}; - int *res_count{}; - status = mmdeploy_detector_apply(detector, &mat, 1, &bboxes, &res_count); - assert (status == MM_SUCCESS); - - // deal with the result. Here we choose to visualize it - for (int i = 0; i < *res_count; ++i) { - const auto &box = bboxes[i].bbox; + // 4. inference + auto dets = detector.Apply(img); + // 5. deal with the result. 
Here we choose to visualize it + for (int i = 0; i < dets.size(); ++i) { + const auto& box = dets[i].bbox; + fprintf(stdout, "box %d, left=%.2f, top=%.2f, right=%.2f, bottom=%.2f, label=%d, score=%.4f\n", + i, box.left, box.top, box.right, box.bottom, dets[i].label_id, dets[i].score); if (bboxes[i].score < 0.3) { continue; } cv::rectangle(img, cv::Point{(int)box.left, (int)box.top}, cv::Point{(int)box.right, (int)box.bottom}, cv::Scalar{0, 255, 0}); } - - cv::imwrite('output_detection.png', img); - - // destroy result buffer - mmdeploy_detector_release_result(bboxes, res_count, 1); - // destroy inference handle - mmdeploy_detector_destroy(detector); + cv::imwrite("output_detection.png", img); return 0; } ``` @@ -318,16 +318,13 @@ When you build this example, try to add MMDeploy package in your CMake project a ```Makefile find_package(MMDeploy REQUIRED) -mmdeploy_load_static(${YOUR_AWESOME_TARGET} MMDeployStaticModules) -mmdeploy_load_dynamic(${YOUR_AWESOME_TARGET} MMDeployDynamicModules) -target_link_libraries(${YOUR_AWESOME_TARGET} PRIVATE MMDeployLibs) +target_link_libraries(${name} PRIVATE mmdeploy ${OpenCV_LIBS}) ``` -For more SDK C API usages, please read these [samples](https://github.com/open-mmlab/mmdeploy/tree/master/demo/csrc). +For more SDK C++ API usages, please read these [samples](https://github.com/open-mmlab/mmdeploy/tree/master/demo/csrc). -#### C# API - -Due to limitations on space, we will not present a specific example. But you can find all of them [here](https://github.com/open-mmlab/mmdeploy/tree/master/demo/csharp). +For the rest C, C# and Java API usages, please read [C demos](https://github.com/open-mmlab/mmdeploy/tree/master/demo/csrc), [C# demos](https://github.com/open-mmlab/mmdeploy/tree/master/demo/csharp) and [Java demos](https://github.com/open-mmlab/mmdeploy/tree/master/demo/java) respectively. +We'll talk about them more in our next release. ## Evaluate Model diff --git a/docs/en/index.rst b/docs/en/index.rst index ca1e50d6c..0bc36a642 100644 --- a/docs/en/index.rst +++ b/docs/en/index.rst @@ -31,6 +31,7 @@ You can switch between Chinese and English documents in the lower-left corner of 03-benchmark/supported_models.md 03-benchmark/benchmark.md + 03-benchmark/benchmark_edge.md .. toctree:: :maxdepth: 1 @@ -72,11 +73,17 @@ You can switch between Chinese and English documents in the lower-left corner of 06-developer-guide/support_new_backend.md 06-developer-guide/add_test_units_for_backend_ops.md 06-developer-guide/test_rewritten_models.md + 06-developer-guide/partition_model.md .. toctree:: :maxdepth: 1 :caption: Tutorials on Model Deployment +.. toctree:: + :maxdepth: 1 + :caption: Appendix + + appendix/cross_build_snpe_service.md .. toctree:: :maxdepth: 1 diff --git a/docs/en/ops/tensorrt.md b/docs/en/ops/tensorrt.md index d1feae59c..35c196940 100644 --- a/docs/en/ops/tensorrt.md +++ b/docs/en/ops/tensorrt.md @@ -51,6 +51,12 @@ - [Inputs](#inputs-7) - [Outputs](#outputs-7) - [Type Constraints](#type-constraints-7) + - [GridPriorsTRT](#gridpriorstrt) + - [Description](#description-8) + - [Parameters](#parameters-8) + - [Inputs](#inputs-8) + - [Outputs](#outputs-8) + - [Type Constraints](#type-constraints-8) @@ -363,3 +369,39 @@ Batched rotated NMS with a fixed number of output bounding boxes. #### Type Constraints - T:tensor(float32, Linear) + +### GridPriorsTRT + +#### Description + +Generate the anchors for object detection task. 
+ +#### Parameters + +| Type | Parameter | Description | +| ----- | ---------- | --------------------------------- | +| `int` | `stride_w` | The stride of the feature width. | +| `int` | `stride_h` | The stride of the feature height. | + +#### Inputs + +
+
inputs[0]: T
+
The base anchors; 2-D tensor with shape [num_base_anchor, 4].
+
inputs[1]: TAny
+
height provider; 1-D tensor with shape [featmap_height]. The data will never been used.
+
inputs[2]: TAny
+
width provider; 1-D tensor with shape [featmap_width]. The data will never been used.
+
+ +#### Outputs + +
+
outputs[0]: T
+
output anchors; 2-D tensor of shape (num_base_anchor*featmap_height*featmap_widht, 4).
+
+ +#### Type Constraints + +- T:tensor(float32, Linear) +- TAny: Any diff --git a/docs/en/useful_tools.md b/docs/en/useful_tools.md index 896a89263..92b012110 100644 --- a/docs/en/useful_tools.md +++ b/docs/en/useful_tools.md @@ -12,7 +12,7 @@ python tools/torch2onnx.py \ ${MODEL_CFG} \ ${CHECKPOINT} \ ${INPUT_IMG} \ - ${OUTPUT} \ + --work-dir ${WORK_DIR} \ --device cpu \ --log-level INFO ``` @@ -23,7 +23,7 @@ python tools/torch2onnx.py \ - `model_cfg` : The path of model config file in OpenMMLab codebase. - `checkpoint` : The path of the model checkpoint file. - `img` : The path of the image file used to convert the model. -- `output` : The path of the output ONNX model. +- `--work-dir` : Directory to save output ONNX models Default is `./work-dir`. - `--device` : The device used for conversion. If not specified, it will be set to `cpu`. - `--log-level` : To set log level which in `'CRITICAL', 'FATAL', 'ERROR', 'WARN', 'WARNING', 'INFO', 'DEBUG', 'NOTSET'`. If not specified, it will be set to `INFO`. @@ -128,3 +128,69 @@ python tools/onnx2ncnn.py \ - `output_param` : The converted `ncnn` param path. - `output_bin` : The converted `ncnn` bin path. - `--log-level` : To set log level which in `'CRITICAL', 'FATAL', 'ERROR', 'WARN', 'WARNING', 'INFO', 'DEBUG', 'NOTSET'`. If not specified, it will be set to `INFO`. + +## profile + +This tool helps to test latency of models with PyTorch, TensorRT and other backends. Note that the pre- and post-processing is excluded when computing inference latency. + +### Usage + +```bash +python tools/profile.py \ + ${DEPLOY_CFG} \ + ${MODEL_CFG} \ + ${IMAGE_DIR} \ + --model ${MODEL} \ + --device ${DEVICE} \ + --shape ${SHAPE} \ + --num-iter {NUM_ITER} \ + --warmup {WARMUP} + --cfg-options ${CFG_OPTIONS} +``` + +### Description of all arguments + +- `deploy_cfg` : The path of the deploy config file in MMDeploy codebase. +- `model_cfg` : The path of model config file in OpenMMLab codebase. +- `image_dir` : The directory to image files that used to test the model. +- `--model` : The path of the model to be tested. +- `--shape` : Input shape of the model by `HxW`, e.g., `800x1344`. If not specified, it would use `input_shape` from deploy config. +- `--num-iter` : Number of iteration to run inference. Default is `100`. +- `--warmup` : Number of iteration to warm-up the machine. Default is `10`. +- `--device` : The device type. If not specified, it will be set to `cuda:0`. +- `--cfg-options` : Optional key-value pairs to be overrode for model config. 
+ +### Example: + +```shell +python tools/profile.py \ + configs/mmcls/classification_tensorrt_dynamic-224x224-224x224.py \ + ../mmclassification/configs/resnet/resnet18_8xb32_in1k.py \ + ../mmdetection/demo \ + --model work-dirs/mmcls/resnet/trt/end2end.engine \ + --device cuda \ + --shape 224x224 \ + --num-iter 100 \ + --warmup 10 \ +``` + +And the output look like this: + +```text +----- Settings: ++------------+---------+ +| batch size | 1 | +| shape | 224x224 | +| iterations | 100 | +| warmup | 10 | ++------------+---------+ +----- Results: ++--------+------------+---------+ +| Stats | Latency/ms | FPS | ++--------+------------+---------+ +| Mean | 1.535 | 651.656 | +| Median | 1.665 | 600.569 | +| Min | 1.308 | 764.341 | +| Max | 1.689 | 591.983 | ++--------+------------+---------+ +``` diff --git a/docs/zh_cn/01-how-to-build/android.md b/docs/zh_cn/01-how-to-build/android.md index 1d86e22d8..40828a9b7 100644 --- a/docs/zh_cn/01-how-to-build/android.md +++ b/docs/zh_cn/01-how-to-build/android.md @@ -7,8 +7,7 @@ - [安装 MMDeploy SDK 依赖](#安装-mmdeploy-sdk-依赖) - [编译 MMDeploy](#编译-mmdeploy) - [编译选项说明](#编译选项说明) - - [编译 SDK](#编译-sdk) - - [编译 Demo](#编译-demo) + - [编译 SDK 和 Demos](#编译-sdk-和-demos) ______________________________________________________________________ @@ -40,12 +39,12 @@ MMDeploy 的交叉编译分为两步: - ANDROID NDK 19+ - **保证 android ndk 的版本 >= 19.0**. 如果不是,可以参考以下命令安装 r23b 版本. 如要获取其他版本,请参考 [这里](https://developer.android.com/ndk/downloads) + **保证 android ndk 的版本 >= 19.0**. 如果不是,可以参考以下命令安装 r23c 版本. 如要获取其他版本,请参考 [这里](https://developer.android.com/ndk/downloads) ```bash - wget https://dl.google.com/android/repository/android-ndk-r23b-linux.zip - unzip android-ndk-r23b-linux.zip - cd android-ndk-r23b + wget https://dl.google.com/android/repository/android-ndk-r23c-linux.zip + unzip android-ndk-r23c-linux.zip + cd android-ndk-r23c export NDK_PATH=${PWD} ``` @@ -67,7 +66,7 @@ MMDeploy 的交叉编译分为两步: OpenCV
(>=3.0)

-export OPENCV_VERSION=4.5.4
+export OPENCV_VERSION=4.6.0
 wget https://github.com/opencv/opencv/releases/download/${OPENCV_VERSION}/opencv-${OPENCV_VERSION}-android-sdk.zip
 unzip opencv-${OPENCV_VERSION}-android-sdk.zip
 export OPENCV_ANDROID_SDK_DIR=${PWD}/OpenCV-android-sdk
@@ -78,18 +77,30 @@ export OPENCV_ANDROID_SDK_DIR=${PWD}/OpenCV-android-sdk
   
     ncnn 
     ncnn 是支持 android 平台的高效神经网络推理计算框架
- 目前, MMDeploy 支持 ncnn 的 20220216 版本, 且必须使用git clone 下载源码的方式安装
+ 目前, MMDeploy 支持 ncnn 的 20220721 版本, 且必须使用git clone 下载源码的方式安装。请到 这里 查询 ncnn 支持的 android ABI。
+

-git clone -b 20220216 https://github.com/Tencent/ncnn.git
+git clone -b 20220721 https://github.com/Tencent/ncnn.git
 cd ncnn
 git submodule update --init
 export NCNN_DIR=${PWD}
-mkdir -p build
-cd build
-cmake -DCMAKE_TOOLCHAIN_FILE=${NDK_PATH}/build/cmake/android.toolchain.cmake -DANDROID_ABI="arm64-v8a" -DANDROID_PLATFORM=android-30 -DNCNN_VULKAN=ON -DNCNN_DISABLE_EXCEPTION=OFF -DNCNN_DISABLE_RTTI=OFF ..
-make install
+
+export ANDROID_ABI=arm64-v8a
+
+mkdir -p build_${ANDROID_ABI}
+cd build_${ANDROID_ABI}
+
+cmake -DCMAKE_TOOLCHAIN_FILE=${NDK_PATH}/build/cmake/android.toolchain.cmake -DANDROID_ABI="${ANDROID_ABI}" -DANDROID_PLATFORM=android-30 -DNCNN_VULKAN=ON -DNCNN_DISABLE_EXCEPTION=OFF -DNCNN_DISABLE_RTTI=OFF ..
+make -j$(nproc) install
 
- + + + + + OpenJDK + 编译Java API之前需要先准备OpenJDK开发环境
+ 请参考 Java API 编译 进行构建. + @@ -114,6 +125,18 @@ make install OFF MMDeploy SDK 编译开关 + + MMDEPLOY_BUILD_SDK_PYTHON_API + {ON, OFF} + OFF + MMDeploy SDK Python API的编译开关 + + + MMDEPLOY_BUILD_SDK_JAVA_API + {ON, OFF} + OFF + MMDeploy SDK Java API的编译开关 + MMDEPLOY_BUILD_TEST {ON, OFF} @@ -133,14 +156,14 @@ make install 设置推理后端.
默认情况下,SDK不设置任何后端, 因为它与应用场景高度相关.
Android 端目前只支持ncnn一个后端
构建时,几乎每个后端,都需传入一些路径变量,用来查找依赖包.
1. ncnn: 表示 ncnn. 需要设置ncnn_DIR. -
-Dncnn_DIR=${NCNN_DIR}/build/install/lib/cmake/ncnn
+
-Dncnn_DIR=${NCNN_DIR}/build_${ANDROID_ABI}/install/lib/cmake/ncnn
MMDEPLOY_CODEBASES {"mmcls", "mmdet", "mmseg", "mmedit", "mmocr", "all"} N/A - 用来设置 SDK 后处理组件,加载 OpenMMLab 算法仓库的后处理功能. 已支持的算法仓库有'mmcls','mmdet','mmedit','mmseg'和'mmocr'. 如果选择多个codebase,中间使用分号隔开. 比如, 'mmcls', 'mmdet', 'mmedit', 'mmseg', 'mmocr'. 也可以通过 all 的方式, 加载所有codebase, 即 -DMMDEPLOY_CODEBASES=all. + 用来设置 SDK 后处理组件,加载 OpenMMLab 算法仓库的后处理功能. 已支持的算法仓库有'mmcls','mmdet','mmedit','mmseg'和'mmocr'. 如果选择多个codebase,中间使用分号隔开. 比如, 'mmcls', 'mmdet', 'mmedit', 'mmseg', 'mmocr'. 也可以通过 all 的方式, 加载所有codebase, 即 -DMMDEPLOY_CODEBASES=all.。请同时手动编辑 csrc/mmdeploy/apis/java/native/CMakeLists.txt以避免编译错误。 MMDEPLOY_SHARED_LIBS @@ -151,40 +174,28 @@ make install -#### 编译 SDK +#### 编译 SDK 和 Demos 下文展示构建SDK的样例,用 ncnn 作为推理引擎。 - cpu + ncnn ```Bash + export ANDROID_ABI=arm64-v8a cd ${MMDEPLOY_DIR} - mkdir -p build && cd build + mkdir -p build_${ANDROID_ABI} && cd build_${ANDROID_ABI} cmake .. \ -DMMDEPLOY_BUILD_SDK=ON \ - -DOpenCV_DIR=${OPENCV_ANDROID_SDK_DIR}/sdk/native/jni/abi-arm64-v8a \ - -Dncnn_DIR=${NCNN_DIR}/build/install/lib/cmake/ncnn \ + -DMMDEPLOY_BUILD_EXAMPLES=ON \ + -DMMDEPLOY_BUILD_SDK_JAVA_API=ON \ + -DOpenCV_DIR=${OPENCV_ANDROID_SDK_DIR}/sdk/native/jni/abi-${ANDROID_ABI} \ + -Dncnn_DIR=${NCNN_DIR}/build_${ANDROID_ABI}/install/lib/cmake/ncnn \ -DMMDEPLOY_TARGET_BACKENDS=ncnn \ -DMMDEPLOY_CODEBASES=all \ - -DMMDEPLOY_SHARED_LIBS=OFF \ + -DMMDEPLOY_SHARED_LIBS=ON \ -DCMAKE_TOOLCHAIN_FILE=${NDK_PATH}/build/cmake/android.toolchain.cmake \ - -DANDROID_ABI=arm64-v8a \ + -DANDROID_ABI=${ANDROID_ABI} \ -DANDROID_PLATFORM=android-30 \ -DANDROID_CPP_FEATURES="rtti exceptions" make -j$(nproc) && make install ``` - -#### 编译 Demo - -```Bash -cd ${MMDEPLOY_DIR}/build/install/example -mkdir -p build && cd build -cmake .. \ - -DOpenCV_DIR=${OPENCV_ANDROID_SDK_DIR}/sdk/native/jni/abi-arm64-v8a \ - -Dncnn_DIR=${NCNN_DIR}/build/install/lib/cmake/ncnn \ - -DMMDeploy_DIR=${MMDEPLOY_DIR}/build/install/lib/cmake/MMDeploy \ - -DCMAKE_TOOLCHAIN_FILE=${NDK_PATH}/build/cmake/android.toolchain.cmake \ - -DANDROID_ABI=arm64-v8a \ - -DANDROID_PLATFORM=android-30 -make -j$(nproc) -``` diff --git a/docs/zh_cn/01-how-to-build/build_from_docker.md b/docs/zh_cn/01-how-to-build/build_from_docker.md index 232fc7a23..082526290 100644 --- a/docs/zh_cn/01-how-to-build/build_from_docker.md +++ b/docs/zh_cn/01-how-to-build/build_from_docker.md @@ -42,7 +42,7 @@ docker build docker/CPU/ -t mmdeploy:inside --build-arg USE_SRC_INSIDE=true 构建 docker 镜像成功后,我们可以使用 `docker run` 启动 docker 服务。 GPU 镜像为例: ``` -docker run --gpus all -it -p 8080:8081 mmdeploy:master-gpu +docker run --gpus all -it mmdeploy:master-gpu ``` ## 常见问答 diff --git a/docs/zh_cn/01-how-to-build/build_from_source.md b/docs/zh_cn/01-how-to-build/build_from_source.md index 5e18c9959..66b6907d0 100644 --- a/docs/zh_cn/01-how-to-build/build_from_source.md +++ b/docs/zh_cn/01-how-to-build/build_from_source.md @@ -30,8 +30,6 @@ git clone -b master git@github.com:open-mmlab/mmdeploy.git --recursive git clone -b master https://github.com/open-mmlab/mmdeploy.git MMDeploy cd MMDeploy git submodule update --init --recursive - - ``` ## 编译 @@ -42,3 +40,4 @@ git clone -b master git@github.com:open-mmlab/mmdeploy.git --recursive - [Windows](windows.md) - [Android-aarch64](android.md) - [NVIDIA Jetson](jetsons.md) +- [Qcom SNPE](snpe.md) diff --git a/docs/zh_cn/01-how-to-build/jetsons.md b/docs/zh_cn/01-how-to-build/jetsons.md index fb994f68c..c952da898 100644 --- a/docs/zh_cn/01-how-to-build/jetsons.md +++ b/docs/zh_cn/01-how-to-build/jetsons.md @@ -213,7 +213,7 @@ export MMDEPLOY_DIR=$(pwd) # 编译 
TensorRT 自定义算子 mkdir -p build && cd build cmake .. -DMMDEPLOY_TARGET_BACKENDS="trt" -make -j$(nproc) +make -j$(nproc) && make install # 安装 model converter cd ${MMDEPLOY_DIR} @@ -227,13 +227,14 @@ pip install -v -e . 如果你不需要使用 MMDeploy C/C++ Inference SDK 则可以跳过本步骤。 -1. 编译 SDK Libraries +1. 编译 SDK Libraries 和 Demos ```shell mkdir -p build && cd build cmake .. \ -DMMDEPLOY_BUILD_SDK=ON \ -DMMDEPLOY_BUILD_SDK_PYTHON_API=ON \ + -DMMDEPLOY_BUILD_EXAMPLES=ON \ -DMMDEPLOY_TARGET_DEVICES="cuda;cpu" \ -DMMDEPLOY_TARGET_BACKENDS="trt" \ -DMMDEPLOY_CODEBASES=all \ @@ -241,16 +242,7 @@ pip install -v -e . make -j$(nproc) && make install ``` -2. 编译 SDK demos - - ```shell - cd ${MMDEPLOY_DIR}/build/install/example - mkdir -p build && cd build - cmake .. -DMMDeploy_DIR=${MMDEPLOY_DIR}/build/install/lib/cmake/MMDeploy - make -j$(nproc) - ``` - -3. 运行 demo +2. 运行 demo 以目标检测为例: diff --git a/docs/zh_cn/01-how-to-build/linux-x86_64.md b/docs/zh_cn/01-how-to-build/linux-x86_64.md index 63bdf2a0e..42dd18235 100644 --- a/docs/zh_cn/01-how-to-build/linux-x86_64.md +++ b/docs/zh_cn/01-how-to-build/linux-x86_64.md @@ -12,8 +12,7 @@ - [编译安装 Model Converter](#编译安装-model-converter) - [编译自定义算子](#编译自定义算子) - [安装 Model Converter](#安装-model-converter) - - [编译SDK](#编译sdk) - - [编译 Demo](#编译-demo) + - [编译 SDK 和 Demos](#编译-sdk-和-demos) ______________________________________________________________________ @@ -110,12 +109,12 @@ sudo apt-get install libopencv-dev pplcv - pplcv 是 openPPL 开发的高性能图像处理库。 此依赖项为可选项,只有在 cuda 平台下,才需安装。而且,目前必须使用 v0.6.2,且需要使用 git clone 的方式下载源码并编译安装
+ pplcv 是 openPPL 开发的高性能图像处理库。 此依赖项为可选项,只有在 cuda 平台下,才需安装。

 git clone https://github.com/openppl-public/ppl.cv.git
 cd ppl.cv
 export PPLCV_DIR=$(pwd)
-git checkout tags/v0.6.2 -b v0.6.2
+git checkout tags/v0.7.0 -b v0.7.0
 ./build.sh cuda
 
@@ -245,7 +244,7 @@ export LD_LIBRARY_PATH=$Torch_DIR/lib:$LD_LIBRARY_PATH ```bash echo '# set env for onnxruntime' >> ~/.bashrc echo "export ONNXRUNTIME_DIR=${ONNXRUNTIME_DIR}" >> ~/.bashrc -echo 'export LD_LIBRARY_PATH=$ONNXRUNTIME_DIR/lib:$LD_LIBRARY_PATH' >> ~/.bashrc +echo "export LD_LIBRARY_PATH=$ONNXRUNTIME_DIR/lib:$LD_LIBRARY_PATH" >> ~/.bashrc source ~/.bashrc ``` @@ -280,6 +279,12 @@ export MMDEPLOY_DIR=$(pwd) OFF MMDeploy SDK python package的编译开关 + + MMDEPLOY_BUILD_SDK_JAVA_API + {ON, OFF} + OFF + MMDeploy SDK Java API的编译开关 + MMDEPLOY_BUILD_TEST {ON, OFF} @@ -343,7 +348,7 @@ export MMDEPLOY_DIR=$(pwd) cd ${MMDEPLOY_DIR} mkdir -p build && cd build cmake -DCMAKE_CXX_COMPILER=g++-7 -DMMDEPLOY_TARGET_BACKENDS=ort -DONNXRUNTIME_DIR=${ONNXRUNTIME_DIR} .. - make -j$(nproc) + make -j$(nproc) && make install ``` - **TensorRT** 自定义算子 @@ -352,7 +357,7 @@ export MMDEPLOY_DIR=$(pwd) cd ${MMDEPLOY_DIR} mkdir -p build && cd build cmake -DCMAKE_CXX_COMPILER=g++-7 -DMMDEPLOY_TARGET_BACKENDS=trt -DTENSORRT_DIR=${TENSORRT_DIR} -DCUDNN_DIR=${CUDNN_DIR} .. - make -j$(nproc) + make -j$(nproc) && make install ``` - **ncnn** 自定义算子 @@ -361,7 +366,7 @@ export MMDEPLOY_DIR=$(pwd) cd ${MMDEPLOY_DIR} mkdir -p build && cd build cmake -DCMAKE_CXX_COMPILER=g++-7 -DMMDEPLOY_TARGET_BACKENDS=ncnn -Dncnn_DIR=${NCNN_DIR}/build/install/lib/cmake/ncnn .. - make -j$(nproc) + make -j$(nproc) && make install ``` - **torchscript** 自定义算子 @@ -370,7 +375,7 @@ export MMDEPLOY_DIR=$(pwd) cd ${MMDEPLOY_DIR} mkdir -p build && cd build cmake -DMMDEPLOY_TARGET_BACKENDS=torchscript -DTorch_DIR=${Torch_DIR} .. - make -j$(nproc) + make -j$(nproc) && make install ``` ##### 安装 Model Converter @@ -385,7 +390,7 @@ pip install -e . - 有些依赖项是可选的。运行 `pip install -e .` 将进行最小化依赖安装。 如果需安装其他可选依赖项,请执行`pip install -r requirements/optional.txt`, 或者 `pip install -e .[optional]`。其中,`[optional]`可以替换为:`all`、`tests`、`build` 或 `optional`。 -#### 编译SDK +#### 编译 SDK 和 Demos 下文展示2个构建SDK的样例,分别用 ONNXRuntime 和 TensorRT 作为推理引擎。您可以参考它们,激活其他的推理引擎。 @@ -397,6 +402,7 @@ pip install -e . cmake .. \ -DCMAKE_CXX_COMPILER=g++-7 \ -DMMDEPLOY_BUILD_SDK=ON \ + -DMMDEPLOY_BUILD_EXAMPLES=ON \ -DMMDEPLOY_BUILD_SDK_PYTHON_API=ON \ -DMMDEPLOY_TARGET_DEVICES=cpu \ -DMMDEPLOY_TARGET_BACKENDS=ort \ @@ -414,6 +420,7 @@ pip install -e . cmake .. \ -DCMAKE_CXX_COMPILER=g++-7 \ -DMMDEPLOY_BUILD_SDK=ON \ + -DMMDEPLOY_BUILD_EXAMPLES=ON \ -DMMDEPLOY_BUILD_SDK_PYTHON_API=ON \ -DMMDEPLOY_TARGET_DEVICES="cuda;cpu" \ -DMMDEPLOY_TARGET_BACKENDS=trt \ @@ -422,14 +429,5 @@ pip install -e . -DTENSORRT_DIR=${TENSORRT_DIR} \ -DCUDNN_DIR=${CUDNN_DIR} - make -j$(nproc) && make -install + make -j$(nproc) && make install ``` - -#### 编译 Demo - -```Bash -cd ${MMDEPLOY_DIR}/build/install/example -mkdir -p build && cd build -cmake .. -DMMDeploy_DIR=${MMDEPLOY_DIR}/build/install/lib/cmake/MMDeploy -make -j$(nproc) -``` diff --git a/docs/zh_cn/01-how-to-build/snpe.md b/docs/zh_cn/01-how-to-build/snpe.md new file mode 100644 index 000000000..8664c97be --- /dev/null +++ b/docs/zh_cn/01-how-to-build/snpe.md @@ -0,0 +1,198 @@ +# 支持 SNPE + +mmdeploy 集成 snpe 的方式简单且有效: Client/Server 模式。 + +这种模式 + +1. 能剥离`模型转换`和`推理`环境: + +- 推理无关事项在算力更高的设备上完成; +- 对于推理计算,能拿到 gpu/npu 真实运行结果,而非 cpu 模拟数值。 + +2. 能覆盖成本敏感的设备。armv7/risc-v/mips 芯片满足产品需求,但往往对 Python 支持有限; + +3. 能简化 mmdeploy 安装步骤。如果只想转 snpe 模型测试精度,不需要编译 .whl 包。 + +## 一、运行推理服务 + +下载预编译 snpe 推理服务包, `adb push` 到手机、执行。 +注意**手机要有 qcom 芯片**。 + +```bash +$ wget https://media.githubusercontent.com/media/tpoisonooo/mmdeploy_snpe_testdata/main/snpe-inference-server-1.59.tar.gz +... 
+$ sudo apt install adb +$ adb push snpe-inference-server-1.59.tar.gz /data/local/tmp/ + +# 解压运行 +$ adb shell +venus:/ $ cd /data/local/tmp +130|venus:/data/local/tmp $ tar xvf snpe-inference-server-1.59.tar.gz +... +130|venus:/data/local/tmp $ source export1.59.sh +130|venus:/data/local/tmp $ ./inference_server 60000 +... + Server listening on [::]:60000 +``` + +此时推理服务应打印设备所有 ipv6 和 ipv4 地址,并监听端口。 + +tips: + +- 如果 `adb devices` 找不到设备,可能因为: + - 有些廉价线只能充电、不能传输数据 + - 或者没有打开手机的“开发者模式” +- 如果需要自己编译,可参照 [NDK 交叉编译 snpe 推理服务](../appendix/cross_build_snpe_service.md) +- 如果监听端口时 `segmentation fault`,可能是因为: + - 端口号已占用,换一个端口 + +## 二、安装 mmdeploy + +1. 环境要求 + +| 事项 | 版本 | 备注 | +| ------- | ------------------ | ------------- | +| host OS | ubuntu18.04 x86_64 | snpe 指定版本 | +| Python | **3.6.0** | snpe 指定版本 | + +2. 安装 + +[官网下载 snpe-1.59](https://developer.qualcomm.com/qfile/69652/snpe-1.59.0.zip),解压设置环境变量 + +```bash +$ unzip snpe-1.59.0.zip +$ export SNPE_ROOT=${PWD}/snpe-1.59.0.3230 +$ cd /path/to/mmdeploy +$ export PYTHONPATH=${PWD}/service/snpe/client:${SNPE_ROOT}/lib/python:${PYTHONPATH} +$ export LD_LIBRARY_PATH=${SNPE_ROOT}/lib/x86_64-linux-clang:${LD_LIBRARY_PATH} +$ export PATH=${SNPE_ROOT}/bin/x86_64-linux-clang:${PATH} +$ python3 -m pip install -e . +``` + +tips: + +- 如果网络不好,[这个 .tar.gz](https://github.com/tpoisonooo/mmdeploy_snpe_testdata/blob/main/snpe-1.59.tar.gz) 仅减小官方包体积,没有修改原始内容。 + +## 三、测试模型 + +以 Resnet-18 为例。先参照[文档安装 mmcls](https://github.com/open-mmlab/mmclassification),然后使用 `tools/deploy.py` 转换模型。 + +```bash +$ export MODEL_CONFIG=/path/to/mmclassification/configs/resnet/resnet18_8xb16_cifar10.py +$ export MODEL_PATH=https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_b16x8_cifar10_20210528-bd6371c8.pth + +# 模型转换 +$ cd /path/to/mmdeploy +$ python3 tools/deploy.py configs/mmcls/classification_snpe_static.py $MODEL_CONFIG $MODEL_PATH /path/to/test.png --work-dir resnet18 --device cpu --uri 192.168.1.1\:60000 --dump-info + +# 精度测试 +$ python3 tools/test.py configs/mmcls/classification_snpe_static.py $MODEL_CONFIG --model reset18/end2end.dlc --metrics accuracy precision f1_score recall --uri 192.168.1.1\:60000 +``` + +注意需要 `--uri` 指明 snpe 推理服务的 ip 和端口号,可以使用 ipv4 和 ipv6 地址。 + +## 四、Android NDK 编译 SDK + +如果你还需要用 Android NDK 编译 mmdeploy SDK,请继续阅读本章节。 + +### 1. 下载 OCV、NDK,设置环境变量 + +```bash +# 下载 android OCV +$ export OPENCV_VERSION=4.5.4 +$ wget https://github.com/opencv/opencv/releases/download/${OPENCV_VERSION}/opencv-${OPENCV_VERSION}-android-sdk.zip +$ unzip opencv-${OPENCV_VERSION}-android-sdk.zip + +$ export ANDROID_OCV_ROOT=`realpath opencv-${OPENCV_VERSION}-android-sdk` + +# 下载 ndk r23b +$ wget https://dl.google.com/android/repository/android-ndk-r23b-linux.zip +$ unzip android-ndk-r23b-linux.zip + +$ export ANDROID_NDK_ROOT=`realpath android-ndk-r23b` +``` + +### 2. 编译 mmdeploy SDK + +```bash +$ cd /path/to/mmdeploy +$ mkdir build && cd build +$ cmake .. 
\
+    -DMMDEPLOY_BUILD_SDK=ON -DMMDEPLOY_CODEBASES=all \
+    -DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK_ROOT}/build/cmake/android.toolchain.cmake \
+    -DMMDEPLOY_TARGET_BACKENDS=snpe \
+    -DANDROID_ABI=arm64-v8a -DANDROID_PLATFORM=android-30 \
+    -DANDROID_STL=c++_static \
+    -DOpenCV_DIR=${ANDROID_OCV_ROOT}/sdk/native/jni/abi-arm64-v8a \
+    -DMMDEPLOY_SHARED_LIBS=ON
+
+$ make && make install
+```
+
+选项说明
+
+| 选项                          | 说明                                  |
+| ----------------------------- | ------------------------------------- |
+| MMDEPLOY_CODEBASES=all        | 编译所有算法后处理                    |
+| CMAKE_TOOLCHAIN_FILE          | 加载 NDK 参数,主要用于选择编译器版本 |
+| MMDEPLOY_TARGET_BACKENDS=snpe | 使用 snpe 推理                        |
+| ANDROID_STL=c++\_static       | 避免 NDK 环境找不到合适的 c++ lib     |
+| MMDEPLOY_SHARED_LIBS=ON       | 官方 snpe 没有提供静态库              |
+
+### 3. 编译 demo
+
+```bash
+$ cd /path/to/install/example
+$ mkdir build && cd build
+
+$ cmake .. \
+    -DMMDEPLOY_CODEBASES=all \
+    -DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK_ROOT}/build/cmake/android.toolchain.cmake \
+    -DMMDEPLOY_TARGET_BACKENDS=snpe \
+    -DANDROID_ABI=arm64-v8a -DANDROID_PLATFORM=android-30 \
+    -DANDROID_STL=c++_static \
+    -DOpenCV_DIR=${ANDROID_OCV_ROOT}/sdk/native/jni/abi-arm64-v8a \
+    -DMMDeploy_DIR=${PWD}/../../lib/cmake/MMDeploy
+
+$ make
+$ tree -L 1
+.
+├── image_classification
+├── image_restorer
+├── image_segmentation
+├── object_detection
+├── ocr
+├── pose_detection
+└── rotated_object_detection
+```
+
+### 4. 运行 demo
+
+先确认测试模型用了 `--dump-info`,这样 `resnet18` 目录才有 `pipeline.json` 等 SDK 所需文件。
+
+把 dump 好的模型目录、可执行文件和 lib 都 `adb push` 到设备里
+
+```bash
+$ cd /path/to/mmdeploy
+$ adb push resnet18 /data/local/tmp
+$ adb push tests/data/tiger.jpeg /data/local/tmp/resnet18/
+
+$ cd /path/to/install/
+$ adb push lib /data/local/tmp
+
+$ cd /path/to/install/example/build
+$ adb push image_classification /data/local/tmp/resnet18/
+```
+
+设置环境变量,执行样例
+
+```bash
+$ adb push /path/to/mmcls/demo/demo.JPEG /data/local/tmp
+$ adb shell
+venus:/ $ cd /data/local/tmp/resnet18
+venus:/data/local/tmp/resnet18 $ export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/data/local/tmp/lib
+
+venus:/data/local/tmp/resnet18 $ ./image_classification cpu ./ tiger.jpeg
+..
+label: 3, score: 0.3214
+```
diff --git a/docs/zh_cn/01-how-to-build/windows.md b/docs/zh_cn/01-how-to-build/windows.md
index a634cdf95..0257a2fd7 100644
--- a/docs/zh_cn/01-how-to-build/windows.md
+++ b/docs/zh_cn/01-how-to-build/windows.md
@@ -12,14 +12,11 @@
 - [编译安装 Model Converter](#编译安装-model-converter)
   - [编译自定义算子](#编译自定义算子)
   - [安装 Model Converter](#安装-model-converter)
-  - [编译 SDK](#编译-sdk)
-  - [编译 Demo](#编译-demo)
+  - [编译 SDK 和 Demos](#编译-sdk-和-demos)
 - [注意事项](#注意事项)

______________________________________________________________________

-目前,MMDeploy 在 Windows 平台下仅提供源码编译安装方式。未来会提供预编译包方式。
-
 ## 源码安装

 下述安装方式,均是在 **Windows 10** 下进行,使用 **PowerShell Preview** 版本。

@@ -94,15 +91,15 @@ pip install mmcv-full==1.4.0 -f https://download.openmmlab.com/mmcv/dist/$env:cu
    pplcv
-   pplcv 是 openPPL 开发的高性能图像处理库。 此依赖项为可选项,只有在 cuda 平台下,才需安装。而且,目前必须使用 v0.6.2,且需要使用 git clone 的方式下载源码并编译安装
+ pplcv 是 openPPL 开发的高性能图像处理库。此依赖项为可选项,只有在 cuda 平台下才需安装。

 git clone https://github.com/openppl-public/ppl.cv.git
 cd ppl.cv
-git checkout tags/v0.6.2 -b v0.6.2
+git checkout tags/v0.7.0 -b v0.7.0
 $env:PPLCV_DIR = "$pwd"
 mkdir pplcv-build
 cd pplcv-build
-cmake .. -G "Visual Studio 16 2019" -T v142 -A x64 -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=install -DHPCC_USE_CUDA=ON -DHPCC_MSVC_MD=ON
+cmake .. -G "Visual Studio 16 2019" -T v142 -A x64 -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=install -DPPLCV_USE_CUDA=ON -DPPLCV_USE_MSVC_STATIC_RUNTIME=OFF
 cmake --build . --config Release -- /m
 cmake --install . --config Release
 cd ../..
@@ -278,6 +275,7 @@ mkdir build -ErrorAction SilentlyContinue
 cd build
 cmake .. -G "Visual Studio 16 2019" -A x64 -T v142 -DMMDEPLOY_TARGET_BACKENDS="ort" -DONNXRUNTIME_DIR="$env:ONNXRUNTIME_DIR"
 cmake --build . --config Release -- /m
+cmake --install . --config Release
 ```
 
 - **TensorRT** 自定义算子
@@ -287,6 +285,7 @@ mkdir build -ErrorAction SilentlyContinue
 cd build
 cmake .. -G "Visual Studio 16 2019" -A x64 -T v142 -DMMDEPLOY_TARGET_BACKENDS="trt" -DTENSORRT_DIR="$env:TENSORRT_DIR" -DCUDNN_DIR="$env:CUDNN_DIR"
 cmake --build . --config Release -- /m
+cmake --install . --config Release
 ```
 
 - **ncnn** 自定义算子
@@ -305,7 +304,7 @@ pip install -e .
 - 有些依赖项是可选的。运行 `pip install -e .` 将进行最小化依赖安装。 如果需安装其他可选依赖项,请执行`pip install -r requirements/optional.txt`,
   或者 `pip install -e .[optional]`。其中,`[optional]`可以替换为:`all`、`tests`、`build` 或 `optional`。
 
-#### 编译 SDK
+#### 编译 SDK 和 Demos
 
 下文展示2个构建SDK的样例,分别用 ONNXRuntime 和 TensorRT 作为推理引擎。您可以参考它们,并结合前文 SDK 的编译选项说明,激活其他的推理引擎。
 
@@ -317,6 +316,8 @@ pip install -e .
   cd build
   cmake .. -G "Visual Studio 16 2019" -A x64 -T v142 `
       -DMMDEPLOY_BUILD_SDK=ON `
+      -DMMDEPLOY_BUILD_EXAMPLES=ON `
+      -DMMDEPLOY_BUILD_SDK_PYTHON_API=ON `
       -DMMDEPLOY_TARGET_DEVICES="cpu" `
       -DMMDEPLOY_TARGET_BACKENDS="ort" `
       -DMMDEPLOY_CODEBASES="all" `
@@ -334,6 +335,8 @@ pip install -e .
   cd build
   cmake .. -G "Visual Studio 16 2019" -A x64 -T v142 `
     -DMMDEPLOY_BUILD_SDK=ON `
+    -DMMDEPLOY_BUILD_EXAMPLES=ON `
+    -DMMDEPLOY_BUILD_SDK_PYTHON_API=ON `
     -DMMDEPLOY_TARGET_DEVICES="cuda" `
     -DMMDEPLOY_TARGET_BACKENDS="trt" `
     -DMMDEPLOY_CODEBASES="all" `
@@ -345,20 +348,6 @@ pip install -e .
   cmake --install . --config Release
   ```
 
-#### 编译 Demo
-
-```PowerShell
-cd $env:MMDEPLOY_DIR\build\install\example
-mkdir build -ErrorAction SilentlyContinue
-cd build
-cmake .. -G "Visual Studio 16 2019" -A x64 -T v142 `
-  -DMMDeploy_DIR="$env:MMDEPLOY_DIR/build/install/lib/cmake/MMDeploy"
-
-cmake --build . --config Release -- /m
-
-$env:path = "$env:MMDEPLOY_DIR/build/install/bin;" + $env:path
-```
-
 ### 注意事项
 
 1. Release / Debug 库不能混用。MMDeploy 要是编译 Release 版本,所有第三方依赖都要是 Release 版本。反之亦然。
diff --git a/docs/zh_cn/02-how-to-run/prebuilt_package_windows.md b/docs/zh_cn/02-how-to-run/prebuilt_package_windows.md
new file mode 100644
index 000000000..4b84f403f
--- /dev/null
+++ b/docs/zh_cn/02-how-to-run/prebuilt_package_windows.md
@@ -0,0 +1,400 @@
+# Win10 下预编译包的使用
+
+- [Win10 下预编译包的使用](#win10-下预编译包的使用)
+  - [准备工作](#准备工作)
+    - [ONNX Runtime](#onnx-runtime)
+    - [TensorRT](#tensorrt)
+  - [模型转换](#模型转换)
+    - [ONNX Runtime Example](#onnx-runtime-example)
+    - [TensorRT Example](#tensorrt-example)
+  - [模型推理](#模型推理)
+    - [Backend Inference](#backend-inference)
+      - [ONNXRuntime](#onnxruntime)
+      - [TensorRT](#tensorrt-1)
+    - [Python SDK](#python-sdk)
+      - [ONNXRuntime](#onnxruntime-1)
+      - [TensorRT](#tensorrt-2)
+    - [C SDK](#c-sdk)
+      - [ONNXRuntime](#onnxruntime-2)
+      - [TensorRT](#tensorrt-3)
+  - [可能遇到的问题](#可能遇到的问题)
+
+______________________________________________________________________
+
+目前,`MMDeploy`在`Windows`平台下提供`TensorRT`以及`ONNX Runtime`两种预编译包,可以从[Releases](https://github.com/open-mmlab/mmdeploy/releases)获取。
+
+本篇教程以`mmdeploy-0.7.0-windows-amd64-onnxruntime1.8.1.zip`和`mmdeploy-0.7.0-windows-amd64-cuda11.1-tensorrt8.2.3.0.zip`为例,展示预编译包的使用方法。
+
+为了方便使用者快速上手,本教程以分类模型(mmclassification)为例,展示两种预编译包的使用方法。
+
+预编译包的目录结构如下,其中`dist`文件夹为模型转换相关内容,`sdk`文件夹为模型推理相关内容。
+
+```
+.
+|-- dist
+`-- sdk
+    |-- bin
+    |-- example
+    |-- include
+    |-- lib
+    `-- python
+```
+
+## 准备工作
+
+使用预编译包来进行`模型转换`以及`模型推理`,除了预编译包中的内容外,还需要安装一些第三方依赖库,下面分别介绍以`ONNX Runtime`、`TensorRT`为推理后端所要进行的准备工作。
+
+两种推理后端的环境准备工作中,有一些操作是共有的。下面先介绍这些共有的操作,再分别介绍各自特有的操作。
+
+首先新建一个工作目录workspace
+
+1. 请按照[get_started](../get_started.md)文档,准备虚拟环境,安装pytorch、torchvision、mmcv-full。若要使用SDK的C接口,需要安装vs2019+ 和 OpenCV。
+
+   :point_right: 这里建议使用`pip`而不是`conda`安装pytorch、torchvision
+
+2. 克隆mmdeploy仓库
+
+   ```bash
+   git clone https://github.com/open-mmlab/mmdeploy.git
+   ```
+
+   :point_right: 这里主要为了使用configs文件,所以没有加`--recursive`来下载submodule,也不需要编译`mmdeploy`
+
+3. 安装mmclassification
+
+   ```bash
+   git clone https://github.com/open-mmlab/mmclassification.git
+   cd mmclassification
+   pip install -e .
+   ```
+
+4. 准备一个PyTorch的模型文件当作我们的示例
+
+   这里选择了[resnet18_8xb32_in1k_20210831-fbbb1da6.pth](https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_8xb32_in1k_20210831-fbbb1da6.pth),对应的训练config为[resnet18_8xb32_in1k.py](https://github.com/open-mmlab/mmclassification/blob/master/configs/resnet/resnet18_8xb32_in1k.py)
+
+做好以上工作后,当前工作目录的结构应为:
+
+```
+.
+|-- mmclassification
+|-- mmdeploy
+|-- resnet18_8xb32_in1k_20210831-fbbb1da6.pth
+```
+
+### ONNX Runtime
+
+本节介绍`mmdeploy`使用`ONNX Runtime`推理所特有的环境准备工作
+
+5. 安装`mmdeploy`(模型转换)以及`mmdeploy_python`(模型推理Python API)的预编译包
+
+   ```bash
+   # 先下载 mmdeploy-0.7.0-windows-amd64-onnxruntime1.8.1.zip
+   pip install .\mmdeploy-0.7.0-windows-amd64-onnxruntime1.8.1\dist\mmdeploy-0.7.0-py38-none-win_amd64.whl
+   pip install .\mmdeploy-0.7.0-windows-amd64-onnxruntime1.8.1\sdk\python\mmdeploy_python-0.7.0-cp38-none-win_amd64.whl
+   ```
+
+   :point_right: 如果之前安装过,需要先卸载后再安装。
+
+6. 安装onnxruntime package
+
+   ```bash
+   pip install onnxruntime==1.8.1
+   ```
+
+7. 下载[`onnxruntime`](https://github.com/microsoft/onnxruntime/releases/tag/v1.8.1),添加环境变量
+
+   将onnxruntime的lib目录添加到PATH里面,如图所示,具体的路径根据个人情况更改。
+
+   ![sys-path](https://user-images.githubusercontent.com/16019484/181463801-1d7814a8-b256-46e9-86f2-c08de0bc150b.png)
+   :exclamation: 重启powershell让环境变量生效,可以通过 echo $env:PATH 来检查是否设置成功。
+
+### TensorRT
+
+本节介绍`mmdeploy`使用`TensorRT`推理所特有的环境准备工作
+
+5. 安装`mmdeploy`(模型转换)以及`mmdeploy_python`(模型推理Python API)的预编译包
+
+   ```bash
+   # 先下载 mmdeploy-0.7.0-windows-amd64-cuda11.1-tensorrt8.2.3.0.zip
+   pip install .\mmdeploy-0.7.0-windows-amd64-cuda11.1-tensorrt8.2.3.0\dist\mmdeploy-0.7.0-py38-none-win_amd64.whl
+   pip install .\mmdeploy-0.7.0-windows-amd64-cuda11.1-tensorrt8.2.3.0\sdk\python\mmdeploy_python-0.7.0-cp38-none-win_amd64.whl
+   ```
+
+   :point_right: 如果之前安装过,需要先卸载后再安装
+
+6. 安装CUDA相关内容,并设置环境变量
+
+   - CUDA Toolkit 11.1
+   - TensorRT 8.2.3.0 (python包 + 环境变量)
+   - cuDNN 8.2.1.0
+
+   其中CUDA的环境变量在安装CUDA Toolkit后会自动添加,TensorRT以及cuDNN解压后需要自行添加运行库的路径到PATH,可参考onnxruntime的设置图例
+
+   :exclamation: 重启powershell让环境变量生效,可以通过 echo $env:PATH 来检查是否设置成功
+
+   :exclamation: 建议只添加一个版本的TensorRT的lib到PATH里面。不建议把TensorRT的dll拷贝到C盘的cuda目录;保持路径独立,在某些情况下更容易暴露dll的版本问题
+
+7. 安装pycuda `pip install pycuda`
+
+## 模型转换
+
+### ONNX Runtime Example
+
+下面根据之前下载的ckpt,展示如何使用`mmdeploy`预编译包进行模型转换
+
+经过之前的准备工作,当前的工作目录结构应该为:
+
+```
+..
+|-- mmdeploy-0.7.0-windows-amd64-onnxruntime1.8.1
+|-- mmclassification
+|-- mmdeploy
+`-- resnet18_8xb32_in1k_20210831-fbbb1da6.pth
+```
+
+python 转换代码
+
+```python
+from mmdeploy.apis import torch2onnx
+from mmdeploy.backend.sdk.export_info import export2SDK
+
+img = 'mmclassification/demo/demo.JPEG'
+work_dir = 'work_dir/onnx/resnet'
+save_file = 'end2end.onnx'
+deploy_cfg = 'mmdeploy/configs/mmcls/classification_onnxruntime_dynamic.py'
+model_cfg = 'mmclassification/configs/resnet/resnet18_8xb32_in1k.py'
+model_checkpoint = 'resnet18_8xb32_in1k_20210831-fbbb1da6.pth'
+device = 'cpu'
+
+# 1. convert model to onnx
+torch2onnx(img, work_dir, save_file, deploy_cfg, model_cfg,
+  model_checkpoint, device)
+
+# 2. extract pipeline info for sdk use (dump-info)
+export2SDK(deploy_cfg, model_cfg, work_dir, pth=model_checkpoint)
+```
+
+转换后的模型目录结构应该为:
+
+```bash
+.\work_dir\
+`-- onnx
+    `-- resnet
+        |-- deploy.json
+        |-- detail.json
+        |-- end2end.onnx
+        `-- pipeline.json
+```
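+
+转换完成后,可以先用 `onnx` 包对导出的模型做一次快速自检(一个最小示意,假设已 `pip install onnx`;路径沿用上文的 work_dir):
+
+```python
+import onnx
+
+# 加载导出的模型;结构非法时 check_model 会抛出异常
+model = onnx.load('work_dir/onnx/resnet/end2end.onnx')
+onnx.checker.check_model(model)
+
+# 打印输入输出名,确认与部署配置的预期一致
+print([i.name for i in model.graph.input])
+print([o.name for o in model.graph.output])
+```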
+
+### TensorRT Example
+
+下面根据之前下载的ckpt,展示如何使用mmdeploy预编译包进行模型转换
+
+经过之前的准备工作,当前的工作目录结构应该为:
+
+```
+..
+|-- mmdeploy-0.7.0-windows-amd64-cuda11.1-tensorrt8.2.3.0
+|-- mmclassification
+|-- mmdeploy
+`-- resnet18_8xb32_in1k_20210831-fbbb1da6.pth
+```
+
+python 转换代码
+
+```python
+from mmdeploy.apis import torch2onnx
+from mmdeploy.apis.tensorrt import onnx2tensorrt
+from mmdeploy.backend.sdk.export_info import export2SDK
+import os
+
+img = 'mmclassification/demo/demo.JPEG'
+work_dir = 'work_dir/trt/resnet'
+save_file = 'end2end.onnx'
+deploy_cfg = 'mmdeploy/configs/mmcls/classification_tensorrt_static-224x224.py'
+model_cfg = 'mmclassification/configs/resnet/resnet18_8xb32_in1k.py'
+model_checkpoint = 'resnet18_8xb32_in1k_20210831-fbbb1da6.pth'
+device = 'cpu'
+
+# 1. convert model to IR(onnx)
+torch2onnx(img, work_dir, save_file, deploy_cfg, model_cfg,
+  model_checkpoint, device)
+
+# 2. convert IR to tensorrt
+onnx_model = os.path.join(work_dir, save_file)
+save_file = 'end2end.engine'
+model_id = 0
+device = 'cuda'
+onnx2tensorrt(work_dir, save_file, model_id, deploy_cfg, onnx_model, device)
+
+# 3. extract pipeline info for sdk use (dump-info)
+export2SDK(deploy_cfg, model_cfg, work_dir, pth=model_checkpoint)
+```
+
+转换后的模型目录结构应该为:
+
+```
+.\work_dir\
+`-- trt
+    `-- resnet
+        |-- deploy.json
+        |-- detail.json
+        |-- end2end.engine
+        |-- end2end.onnx
+        `-- pipeline.json
+```
+
+## 模型推理
+
+以下内容假定已完成了上述模型转换的两个Example,并得到了上述模型转换后的两个文件夹其中之一或者全部:
+
+```
+.\work_dir\onnx\resnet
+.\work_dir\trt\resnet
+```
+
+当前的工作目录应为:
+
+```
+.
+|-- mmdeploy-0.7.0-windows-amd64-cuda11.1-tensorrt8.2.3.0
+|-- mmdeploy-0.7.0-windows-amd64-onnxruntime1.8.1
+|-- mmclassification
+|-- mmdeploy
+|-- resnet18_8xb32_in1k_20210831-fbbb1da6.pth
+`-- work_dir
+```
+
+### Backend Inference
+
+:exclamation: 需要强调的一点是,这个接口不是为部署设计的,而是对推理后端接口的封装,用来检验转换后的模型是否可以正常推理。
+
+#### ONNXRuntime
+
+Python 代码
+
+```python
+from mmdeploy.apis import inference_model
+
+model_cfg = 'mmclassification/configs/resnet/resnet18_8xb32_in1k.py'
+deploy_cfg = 'mmdeploy/configs/mmcls/classification_onnxruntime_dynamic.py'
+backend_files = ['work_dir/onnx/resnet/end2end.onnx']
+img = 'mmclassification/demo/demo.JPEG'
+device = 'cpu'
+result = inference_model(model_cfg, deploy_cfg, backend_files, img, device)
+```
+
+#### TensorRT
+
+Python 代码
+
+```python
+from mmdeploy.apis import inference_model
+
+model_cfg = 'mmclassification/configs/resnet/resnet18_8xb32_in1k.py'
+deploy_cfg = 'mmdeploy/configs/mmcls/classification_tensorrt_static-224x224.py'
+backend_files = ['work_dir/trt/resnet/end2end.engine']
+img = 'mmclassification/demo/demo.JPEG'
+device = 'cuda'
+result = inference_model(model_cfg, deploy_cfg, backend_files, img, device)
+```
+
+### Python SDK
+
+这里介绍如何使用SDK的Python API进行推理
+
+#### ONNXRuntime
+
+推理代码
+
+```bash
+python .\mmdeploy\demo\python\image_classification.py cpu .\work_dir\onnx\resnet\ .\mmclassification\demo\demo.JPEG
+```
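+
+这个 demo 脚本内部大致等价于下面的 Python 片段。这里给出一个最小示意(假设 `mmdeploy_python` 的 `Classifier` 接口与随包 demo 一致,具体以所装版本的 demo 源码为准):
+
+```python
+import cv2
+from mmdeploy_python import Classifier
+
+# 模型目录指向模型转换时的 work_dir,其中需包含 pipeline.json 等 SDK 文件
+classifier = Classifier('work_dir/onnx/resnet', 'cpu', 0)
+
+img = cv2.imread('mmclassification/demo/demo.JPEG')
+# 推理结果为 (label_id, score) 组成的列表
+for label_id, score in classifier(img):
+    print(label_id, score)
+```
+
+若使用 TensorRT 预编译包,把模型目录换成 `work_dir/trt/resnet`、设备名换成 `cuda` 即可(见下一小节的命令)。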
+
+#### TensorRT
+
+推理代码
+
+```bash
+python .\mmdeploy\demo\python\image_classification.py cuda .\work_dir\trt\resnet\ .\mmclassification\demo\demo.JPEG
+```
+
+### C SDK
+
+这里介绍如何使用SDK的C API进行推理
+
+#### ONNXRuntime
+
+1. 编译 examples
+
+   在`mmdeploy-0.7.0-windows-amd64-onnxruntime1.8.1\sdk\example`目录下
+
+   ```
+   # 部分路径根据实际位置进行修改
+   mkdir build
+   cd build
+   cmake .. -A x64 -T v142 `
+     -DOpenCV_DIR=C:\Deps\opencv\build\x64\vc15\lib `
+     -DMMDeploy_DIR=C:\workspace\mmdeploy-0.7.0-windows-amd64-onnxruntime1.8.1\sdk\lib\cmake\MMDeploy `
+     -DONNXRUNTIME_DIR=C:\Deps\onnxruntime\onnxruntime-win-gpu-x64-1.8.1
+
+   cmake --build . --config Release
+   ```
+
+2. 添加环境变量或拷贝动态库到exe同级目录
+
+   :point_right: 目的是使exe运行时可以正确找到相关dll
+
+   若选择添加环境变量,则将`mmdeploy`的运行时库路径(`mmdeploy-0.7.0-windows-amd64-onnxruntime1.8.1\sdk\bin`)添加到PATH,可参考onnxruntime的添加过程。
+
+   若选择拷贝动态库,则将bin目录中的dll拷贝到刚才编译出的exe(build/Release)的同级目录下。
+
+3. 推理:
+
+   这里建议使用cmd,这样如果exe运行时找不到相关的dll的话会有弹窗
+
+   在mmdeploy-0.7.0-windows-amd64-onnxruntime1.8.1\\sdk\\example\\build\\Release目录下:
+
+   ```
+   .\image_classification.exe cpu C:\workspace\work_dir\onnx\resnet\ C:\workspace\mmclassification\demo\demo.JPEG
+   ```
+
+#### TensorRT
+
+1. 编译 examples
+
+   在mmdeploy-0.7.0-windows-amd64-cuda11.1-tensorrt8.2.3.0\\sdk\\example目录下
+
+   ```
+   # 部分路径根据所在硬盘的位置进行修改
+   mkdir build
+   cd build
+   cmake .. -A x64 -T v142 `
+     -DOpenCV_DIR=C:\Deps\opencv\build\x64\vc15\lib `
+     -DMMDeploy_DIR=C:\workspace\mmdeploy-0.7.0-windows-amd64-cuda11.1-tensorrt8.2.3.0\sdk\lib\cmake\MMDeploy `
+     -DTENSORRT_DIR=C:\Deps\tensorrt\TensorRT-8.2.3.0 `
+     -DCUDNN_DIR=C:\Deps\cudnn\8.2.1
+   cmake --build . --config Release
+   ```
+
+2. 添加环境变量或拷贝动态库到exe同级目录
+
+   :point_right: 目的是使exe运行时可以正确找到相关dll
+
+   若选择添加环境变量,则将`mmdeploy`的运行时库路径(`mmdeploy-0.7.0-windows-amd64-cuda11.1-tensorrt8.2.3.0\sdk\bin`)添加到PATH,可参考onnxruntime的添加过程。
+
+   若选择拷贝动态库,则将bin目录中的dll拷贝到刚才编译出的exe(build/Release)的同级目录下。
+
+3. 推理
+
+   这里建议使用cmd,这样如果exe运行时找不到相关的dll的话会有弹窗
+
+   在mmdeploy-0.7.0-windows-amd64-cuda11.1-tensorrt8.2.3.0\\sdk\\example\\build\\Release目录下:
+
+   ```
+   .\image_classification.exe cuda C:\workspace\work_dir\trt\resnet C:\workspace\mmclassification\demo\demo.JPEG
+   ```
+
+## 可能遇到的问题
+
+如遇到问题,可参考[FAQ](../faq.md)
diff --git a/docs/zh_cn/02-how-to-run/quantize_model.md b/docs/zh_cn/02-how-to-run/quantize_model.md
index bd290c700..d3c339f24 100644
--- a/docs/zh_cn/02-how-to-run/quantize_model.md
+++ b/docs/zh_cn/02-how-to-run/quantize_model.md
@@ -14,7 +14,7 @@
 
 以 ncnn backend 为例,完整的工作流如下:
 
-```mermaid
+```{mermaid}
 flowchart TD;
      torch模型-->非标准onnx;
      非标准onnx-->ncnn-fp32;
diff --git a/docs/zh_cn/03-benchmark/benchmark.md b/docs/zh_cn/03-benchmark/benchmark.md
index f0d2c8768..b97777afa 100644
--- a/docs/zh_cn/03-benchmark/benchmark.md
+++ b/docs/zh_cn/03-benchmark/benchmark.md
@@ -556,6 +556,27 @@ GPU: ncnn, TensorRT, PPLNN
    <td align="center">89.85</td>
    <td align="center">90.41</td>
  </tr>
+  <tr>
+    <td align="center" rowspan="2">Vision Transformer</td>
+    <td align="center">top-1</td>
+    <td align="center">85.43</td>
+    <td align="center">85.43</td>
+    <td align="center">-</td>
+    <td align="center">85.43</td>
+    <td align="center">85.42</td>
+    <td align="center">-</td>
+    <td align="center">-</td>
+  </tr>
+  <tr>
+    <td align="center">top-5</td>
+    <td align="center">97.77</td>
+    <td align="center">97.77</td>
+    <td align="center">-</td>
+    <td align="center">97.77</td>
+    <td align="center">97.76</td>
+    <td align="center">-</td>
+    <td align="center">-</td>
+  </tr>
 
 
 
@@ -751,6 +772,29 @@ GPU: ncnn, TensorRT, PPLNN
    <td align="center">-</td>
    <td align="center">-</td>
  </tr>
+  <tr>
+    <td align="center" rowspan="2">Swin-Transformer</td>
+    <td align="center" rowspan="2">Instance Segmentation</td>
+    <td align="center" rowspan="2">COCO2017</td>
+    <td align="center">box AP</td>
+    <td align="center">42.7</td>
+    <td align="center">-</td>
+    <td align="center">42.7</td>
+    <td align="center">42.5</td>
+    <td align="center">37.7</td>
+    <td align="center">-</td>
+    <td align="center">-</td>
+  </tr>
+  <tr>
+    <td align="center">mask AP</td>
+    <td align="center">39.3</td>
+    <td align="center">-</td>
+    <td align="center">39.3</td>
+    <td align="center">39.3</td>
+    <td align="center">35.4</td>
+    <td align="center">-</td>
+    <td align="center">-</td>
+  </tr>
 
 
 
@@ -1438,6 +1482,18 @@ GPU: ncnn, TensorRT, PPLNN
    <td align="center">-</td>
    <td align="center">-</td>
  </tr>
+  <tr>
+    <td align="center">Segmenter</td>
+    <td align="center">ADE20K</td>
+    <td align="center">mIoU</td>
+    <td align="center">44.32</td>
+    <td align="center">44.29</td>
+    <td align="center">44.29</td>
+    <td align="center">44.29</td>
+    <td align="center">43.34</td>
+    <td align="center">43.35</td>
+    <td align="center">-</td>
+  </tr>
 
 
 
@@ -1577,8 +1633,32 @@ GPU: ncnn, TensorRT, PPLNN
    <td align="center">mAP</td>
    <td align="center">0.756</td>
    <td align="center">0.756</td>
+    <td align="center">0.758</td>
+    <td align="center">0.730</td>
    <td align="center">-</td>
    <td align="center">-</td>
+  </tr>
+  <tr>
+    <td align="center">GlidingVertex</td>
+    <td align="center">Rotated Detection</td>
+    <td align="center">DOTA-v1.0</td>
+    <td align="center">mAP</td>
+    <td align="center">0.732</td>
+    <td align="center">-</td>
+    <td align="center">0.733</td>
+    <td align="center">0.731</td>
+    <td align="center">-</td>
+    <td align="center">-</td>
+  </tr>
+  <tr>
+    <td align="center">RoI Transformer</td>
+    <td align="center">Rotated Detection</td>
+    <td align="center">DOTA-v1.0</td>
+    <td align="center">mAP</td>
+    <td align="center">0.761</td>
+    <td align="center">-</td>
+    <td align="center">0.758</td>
+    <td align="center">-</td>
    <td align="center">-</td>
    <td align="center">-</td>
  </tr>
diff --git a/docs/zh_cn/03-benchmark/benchmark_edge.md b/docs/zh_cn/03-benchmark/benchmark_edge.md
new file mode 100644
index 000000000..d320c3fb7
--- /dev/null
+++ b/docs/zh_cn/03-benchmark/benchmark_edge.md
@@ -0,0 +1,58 @@
+# 边、端设备测试结果
+
+这里给出我们边、端设备的测试结论,用户可以直接通过 [model profiling](../02-how-to-run/profile_model.md) 获得自己环境的结果。
+
+## 软硬件环境
+
+- host OS ubuntu 18.04
+- backend SNPE-1.59
+- device Mi11 (qcom 888)
+
+## mmcls 模型
+
+|                                                              model                                                               |   dataset   | spatial | fp32 top-1 (%) | snpe gpu hybrid fp32 top-1 (%) | latency (ms) |
+| :------------------------------------------------------------------------------------------------------------------------------: | :---------: | :-----: | :------------: | :----------------------------: | :----------: |
+| [ShuffleNetV2](https://github.com/open-mmlab/mmclassification/blob/master/configs/shufflenet_v2/shufflenet-v2-1x_16xb64_in1k.py) | ImageNet-1k | 224x224 |     69.55      |            69.83\*             |     20±7     |
+|    [MobilenetV2](https://github.com/open-mmlab/mmclassification/blob/master/configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py)     | ImageNet-1k | 224x224 |     71.86      |            72.14\*             |     15±6     |
+
+tips:
+
+1. ImageNet-1k 数据集较大,仅使用一部分测试(8000/50000)
+2. 边、端设备发热会降频,因此耗时实际上会波动。这里给出运行一段时间后、稳定的数值。这个结果更贴近实际需求
+
+## mmocr 检测
+
+|                                                       model                                                       |  dataset  | spatial  | fp32 hmean | snpe gpu hybrid hmean | latency(ms) |
+| :---------------------------------------------------------------------------------------------------------------: | :-------: | :------: | :--------: | :-------------------: | :---------: |
+| [PANet](https://github.com/open-mmlab/mmocr/blob/main/configs/textdet/panet/panet_r18_fpem_ffm_600e_icdar2015.py) | ICDAR2015 | 1312x736 |   0.795    |    0.785 @thr=0.9     |  3100±100   |
+
+## mmpose 模型
+
+|                                                                               model                                                                               |  dataset   | spatial | snpe hybrid AR@IoU=0.50 | snpe hybrid AP@IoU=0.50 | latency(ms) |
+| :---------------------------------------------------------------------------------------------------------------------------------------------------------------: | :--------: | :-----: | :---------------------: | :---------------------: | :---------: |
+| [pose_hrnet_w32](https://github.com/open-mmlab/mmpose/blob/master/configs/animal/2d_kpt_sview_rgb_img/topdown_heatmap/animalpose/hrnet_w32_animalpose_256x256.py) | Animalpose | 256x256 |          0.997          |          0.989          |   630±50    |
+
+tips:
+
+- 测试 pose_hrnet 用的是 AnimalPose 的 test dataset,而非 val dataset
+
+## mmseg
+
+|                                                       model                                                       |  dataset   | spatial  | mIoU  | latency(ms) |
+| :---------------------------------------------------------------------------------------------------------------: | :--------: | :------: | :---: | :---------: |
+| [fcn](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r18-d8_512x1024_80k_cityscapes.py) | Cityscapes | 512x1024 | 71.11 |  4915±500   |
+
+tips:
+
+- fcn 用 512x1024 尺寸运行正常。Cityscapes 数据集 1024x2048 分辨率会导致设备重启
+
+## 其他模型
+
+- mmdet 需要手动把模型拆成两部分。因为
+  - snpe 源码中 `onnx_to_ir.py` 仅能解析输入,`ir_to_dlc.py` 还不支持 topk
+  - UDO (用户自定义算子)无法和 `snpe-onnx-to-dlc` 配合使用
+- mmedit 模型
+  - srcnn 需要 cubic resize,snpe 不支持
+  - esrgan 可正常转换,但加载模型会导致设备重启
+- mmrotate 依赖 [e2cnn](https://pypi.org/project/e2cnn/) ,需要手动安装 [其 Python3.6
+  兼容分支](https://github.com/QUVA-Lab/e2cnn)
diff --git a/docs/zh_cn/03-benchmark/supported_models.md b/docs/zh_cn/03-benchmark/supported_models.md
index 69f5b48ca..256f96f1a 100644
--- a/docs/zh_cn/03-benchmark/supported_models.md
+++ b/docs/zh_cn/03-benchmark/supported_models.md
@@ -2,73 +2,77 @@
 
 自测完成的 model-backend 组合:
 
-| Model                      | Codebase         | TorchScript | OnnxRuntime | TensorRT | ncnn | PPLNN | OpenVINO |                                          Model config                                           |
-| :------------------------- | :--------------- | :---------: | :---------: | :------: | :--: | :---: | :------: | :---------------------------------------------------------------------------------------------: |
-| RetinaNet                  | MMDetection      |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |        [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet)        |
-| Faster R-CNN               | MMDetection      |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |       [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn)       |
-| YOLOv3                     | MMDetection      |      Y      |      Y      |    Y     |  Y   |   N   |    Y     |          [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolo)           |
-| YOLOX                      | MMDetection      |      Y      |      Y      |    Y     |  Y   |   N   |    Y     |          [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolox)          |
-| FCOS                       | MMDetection      |      Y      |      Y      |    Y     |  Y   |   N   |    Y     |          [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos)           |
-| FSAF                       | MMDetection      |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |          [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fsaf)           |
-| Mask R-CNN                 | MMDetection      |      Y      |      Y      |    Y     |  N   |   N   |    Y     |        [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn)        |
-| SSD[\*](#note)             | MMDetection      |      Y      |      Y      |    Y     |  Y   |   N   |    Y     |           [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ssd)           |
-| FoveaBox                   | MMDetection      |      Y      |      Y      |    N     |  N   |   N   |    Y     |        [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/foveabox)         |
-| ATSS                       | MMDetection      |      N      |      Y      |    Y     |  N   |   N   |    Y     |          [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/atss)           |
-| GFL                        | MMDetection      |      N      |      Y      |    Y     |  N   |   ?   |    Y     |           [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gfl)           |
-| Cascade R-CNN              | MMDetection      |      N      |      Y      |    Y     |  N   |   Y   |    Y     |      [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn)       |
-| Cascade Mask R-CNN         | MMDetection      |      N      |      Y      |    Y     |  N   |   N   |    Y     |      [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn)       |
-| VFNet                      | MMDetection      |      N      |      N      |    N     |  N   |   N   |    Y     |          [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/vfnet)          |
-| RepPoints                  | MMDetection      |      N      |      N      |    Y     |  N   |   ?   |    Y     |        [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/reppoints)        |
-| ResNet                     | MMClassification |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |       [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/resnet)       |
-| ResNeXt                    | MMClassification |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |      [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/resnext)       |
-| SE-ResNet                  | MMClassification |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |      [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/seresnet)      |
-| MobileNetV2                | MMClassification |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |    [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/mobilenet_v2)    |
-| ShuffleNetV1               | MMClassification |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |   [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/shufflenet_v1)    |
-| ShuffleNetV2               | MMClassification |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |   [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/shufflenet_v2)    |
-| VisionTransformer          | MMClassification |      Y      |      Y      |    ?     |  Y   |   ?   |    ?     | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/vision_transformer) |
-| FCN                        | MMSegmentation   |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |         [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/fcn)          |
-| PSPNet[\*static](#note)    | MMSegmentation   |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |        [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/pspnet)        |
-| DeepLabV3                  | MMSegmentation   |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |      [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/deeplabv3)       |
-| DeepLabV3+                 | MMSegmentation   |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |    [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/deeplabv3plus)     |
-| Fast-SCNN[\*static](#note) | MMSegmentation   |      Y      |      Y      |    Y     |  N   |   Y   |    Y     |       [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/fastscnn)       |
-| UNet                       | MMSegmentation   |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |         [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/unet)         |
-| ANN[\*](#note)             | MMSegmentation   |      ?      |      Y      |    Y     |  N   |   N   |    N     |         [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/ann)          |
-| APCNet                     | MMSegmentation   |      ?      |      Y      |    Y     |  Y   |   N   |    N     |        [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/apcnet)        |
-| BiSeNetV1                  | MMSegmentation   |      ?      |      Y      |    Y     |  Y   |   N   |    Y     |      [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/bisenetv1)       |
-| BiSeNetV2                  | MMSegmentation   |      ?      |      Y      |    Y     |  Y   |   N   |    Y     |      [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/bisenetv2)       |
-| CGNet                      | MMSegmentation   |      ?      |      Y      |    Y     |  Y   |   N   |    Y     |        [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/cgnet)         |
-| DMNet                      | MMSegmentation   |      ?      |      Y      |    N     |  N   |   N   |    N     |        [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/dmnet)         |
-| DNLNet                     | MMSegmentation   |      ?      |      Y      |    Y     |  Y   |   N   |    Y     |        [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/dnlnet)        |
-| EMANet                     | MMSegmentation   |      ?      |      Y      |    Y     |  N   |   N   |    Y     |        [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/emanet)        |
-| EncNet                     | MMSegmentation   |      ?      |      Y      |    Y     |  N   |   N   |    Y     |        [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/encnet)        |
-| ERFNet                     | MMSegmentation   |      ?      |      Y      |    Y     |  Y   |   N   |    Y     |        [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/erfnet)        |
-| FastFCN                    | MMSegmentation   |      ?      |      Y      |    Y     |  Y   |   N   |    Y     |       [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/fastfcn)        |
-| GCNet                      | MMSegmentation   |      ?      |      Y      |    Y     |  N   |   N   |    N     |        [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/gcnet)         |
-| ICNet[\*](#note)           | MMSegmentation   |      ?      |      Y      |    Y     |  N   |   N   |    Y     |        [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/icnet)         |
-| ISANet                     | MMSegmentation   |      ?      |      Y      |    Y     |  N   |   N   |    Y     |        [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/isanet)        |
-| NonLocal Net               | MMSegmentation   |      ?      |      Y      |    Y     |  Y   |   N   |    Y     |     [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/nonlocal_net)     |
-| OCRNet                     | MMSegmentation   |      ?      |      Y      |    Y     |  Y   |   N   |    Y     |        [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/ocrnet)        |
-| PointRend                  | MMSegmentation   |      ?      |      Y      |    Y     |  N   |   N   |    Y     |      [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/point_rend)      |
-| Semantic FPN               | MMSegmentation   |      ?      |      Y      |    Y     |  Y   |   N   |    Y     |       [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/sem_fpn)        |
-| STDC                       | MMSegmentation   |      ?      |      Y      |    Y     |  Y   |   N   |    Y     |         [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/stdc)         |
-| UPerNet[\*](#note)         | MMSegmentation   |      ?      |      Y      |    Y     |  N   |   N   |    N     |       [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/upernet)        |
-| DANet                      | MMSegmentation   |      ?      |      Y      |    Y     |  N   |   N   |    N     |        [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/danet)         |
-| SRCNN                      | MMEditing        |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |      [config](https://github.com/open-mmlab/mmediting/tree/master/configs/restorers/srcnn)      |
-| ESRGAN                     | MMEditing        |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |     [config](https://github.com/open-mmlab/mmediting/tree/master/configs/restorers/esrgan)      |
-| SRGAN                      | MMEditing        |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     | [config](https://github.com/open-mmlab/mmediting/tree/master/configs/restorers/srresnet_srgan)  |
-| SRResNet                   | MMEditing        |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     | [config](https://github.com/open-mmlab/mmediting/tree/master/configs/restorers/srresnet_srgan)  |
-| Real-ESRGAN                | MMEditing        |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |   [config](https://github.com/open-mmlab/mmediting/tree/master/configs/restorers/real_esrgan)   |
-| EDSR                       | MMEditing        |      Y      |      Y      |    Y     |  Y   |   N   |    Y     |      [config](https://github.com/open-mmlab/mmediting/tree/master/configs/restorers/edsr)       |
-| RDN                        | MMEditing        |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |       [config](https://github.com/open-mmlab/mmediting/tree/master/configs/restorers/rdn)       |
-| DBNet                      | MMOCR            |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |          [config](https://github.com/open-mmlab/mmocr/tree/main/configs/textdet/dbnet)          |
-| CRNN                       | MMOCR            |      Y      |      Y      |    Y     |  Y   |   Y   |    N     |         [config](https://github.com/open-mmlab/mmocr/tree/main/configs/textrecog/crnn)          |
-| SAR                        | MMOCR            |      N      |      Y      |    N     |  N   |   N   |    N     |          [config](https://github.com/open-mmlab/mmocr/tree/main/configs/textrecog/sar)          |
-| HRNet                      | MMPose           |      N      |      Y      |    Y     |  Y   |   N   |    Y     |     [config](https://mmpose.readthedocs.io/en/latest/papers/backbones.html#hrnet-cvpr-2019)     |
-| MSPN                       | MMPose           |      N      |      Y      |    Y     |  Y   |   N   |    Y     |     [config](https://mmpose.readthedocs.io/en/latest/papers/backbones.html#mspn-arxiv-2019)     |
-| LiteHRNet                  | MMPose           |      N      |      Y      |    Y     |  N   |   N   |    Y     |   [config](https://mmpose.readthedocs.io/en/latest/papers/backbones.html#litehrnet-cvpr-2021)   |
-| PointPillars               | MMDetection3d    |      ?      |      Y      |    Y     |  N   |   N   |    Y     |     [config](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/pointpillars)      |
-| CenterPoint (pillar)       | MMDetection3d    |      ?      |      Y      |    Y     |  N   |   N   |    Y     |      [config](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/centerpoint)      |
-| RotatedRetinaNet           | RotatedDetection |      N      |      Y      |    Y     |  N   |   N   |    N     | [config](https://github.com/open-mmlab/mmrotate/blob/main/configs/rotated_retinanet/README.md)  |
+| Model                       | Codebase         | TorchScript | OnnxRuntime | TensorRT | ncnn | PPLNN | OpenVINO |                                          Model config                                           |
+| :-------------------------- | :--------------- | :---------: | :---------: | :------: | :--: | :---: | :------: | :---------------------------------------------------------------------------------------------: |
+| RetinaNet                   | MMDetection      |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |        [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet)        |
+| Faster R-CNN                | MMDetection      |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |       [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn)       |
+| YOLOv3                      | MMDetection      |      Y      |      Y      |    Y     |  Y   |   N   |    Y     |          [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolo)           |
+| YOLOX                       | MMDetection      |      Y      |      Y      |    Y     |  Y   |   N   |    Y     |          [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolox)          |
+| FCOS                        | MMDetection      |      Y      |      Y      |    Y     |  Y   |   N   |    Y     |          [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos)           |
+| FSAF                        | MMDetection      |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |          [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fsaf)           |
+| Mask R-CNN                  | MMDetection      |      Y      |      Y      |    Y     |  N   |   N   |    Y     |        [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn)        |
+| SSD[\*](#note)              | MMDetection      |      Y      |      Y      |    Y     |  Y   |   N   |    Y     |           [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ssd)           |
+| FoveaBox                    | MMDetection      |      Y      |      Y      |    N     |  N   |   N   |    Y     |        [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/foveabox)         |
+| ATSS                        | MMDetection      |      N      |      Y      |    Y     |  N   |   N   |    Y     |          [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/atss)           |
+| GFL                         | MMDetection      |      N      |      Y      |    Y     |  N   |   ?   |    Y     |           [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gfl)           |
+| Cascade R-CNN               | MMDetection      |      N      |      Y      |    Y     |  N   |   Y   |    Y     |      [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn)       |
+| Cascade Mask R-CNN          | MMDetection      |      N      |      Y      |    Y     |  N   |   N   |    Y     |      [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn)       |
+| Swin Transformer[\*](#note) | MMDetection      |      N      |      Y      |    Y     |  N   |   N   |    N     |          [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/swin)           |
+| VFNet                       | MMDetection      |      N      |      N      |    N     |  N   |   N   |    Y     |          [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/vfnet)          |
+| RepPoints                   | MMDetection      |      N      |      N      |    Y     |  N   |   ?   |    Y     |        [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/reppoints)        |
+| ResNet                      | MMClassification |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |       [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/resnet)       |
+| ResNeXt                     | MMClassification |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |      [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/resnext)       |
+| SE-ResNet                   | MMClassification |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |      [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/seresnet)      |
+| MobileNetV2                 | MMClassification |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |    [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/mobilenet_v2)    |
+| ShuffleNetV1                | MMClassification |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |   [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/shufflenet_v1)    |
+| ShuffleNetV2                | MMClassification |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |   [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/shufflenet_v2)    |
+| VisionTransformer           | MMClassification |      Y      |      Y      |    Y     |  Y   |   ?   |    Y     | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/vision_transformer) |
+| FCN                         | MMSegmentation   |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |         [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/fcn)          |
+| PSPNet[\*static](#note)     | MMSegmentation   |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |        [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/pspnet)        |
+| DeepLabV3                   | MMSegmentation   |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |      [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/deeplabv3)       |
+| DeepLabV3+                  | MMSegmentation   |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |    [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/deeplabv3plus)     |
+| Fast-SCNN[\*static](#note)  | MMSegmentation   |      Y      |      Y      |    Y     |  N   |   Y   |    Y     |       [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/fastscnn)       |
+| UNet                        | MMSegmentation   |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |         [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/unet)         |
+| ANN[\*](#note)              | MMSegmentation   |      ?      |      Y      |    Y     |  N   |   N   |    N     |         [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/ann)          |
+| APCNet                      | MMSegmentation   |      ?      |      Y      |    Y     |  Y   |   N   |    N     |        [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/apcnet)        |
+| BiSeNetV1                   | MMSegmentation   |      ?      |      Y      |    Y     |  Y   |   N   |    Y     |      [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/bisenetv1)       |
+| BiSeNetV2                   | MMSegmentation   |      ?      |      Y      |    Y     |  Y   |   N   |    Y     |      [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/bisenetv2)       |
+| CGNet                       | MMSegmentation   |      ?      |      Y      |    Y     |  Y   |   N   |    Y     |        [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/cgnet)         |
+| DMNet                       | MMSegmentation   |      ?      |      Y      |    N     |  N   |   N   |    N     |        [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/dmnet)         |
+| DNLNet                      | MMSegmentation   |      ?      |      Y      |    Y     |  Y   |   N   |    Y     |        [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/dnlnet)        |
+| EMANet                      | MMSegmentation   |      ?      |      Y      |    Y     |  N   |   N   |    Y     |        [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/emanet)        |
+| EncNet                      | MMSegmentation   |      ?      |      Y      |    Y     |  N   |   N   |    Y     |        [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/encnet)        |
+| ERFNet                      | MMSegmentation   |      ?      |      Y      |    Y     |  Y   |   N   |    Y     |        [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/erfnet)        |
+| FastFCN                     | MMSegmentation   |      ?      |      Y      |    Y     |  Y   |   N   |    Y     |       [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/fastfcn)        |
+| GCNet                       | MMSegmentation   |      ?      |      Y      |    Y     |  N   |   N   |    N     |        [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/gcnet)         |
+| ICNet[\*](#note)            | MMSegmentation   |      ?      |      Y      |    Y     |  N   |   N   |    Y     |        [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/icnet)         |
+| ISANet                      | MMSegmentation   |      ?      |      Y      |    Y     |  N   |   N   |    Y     |        [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/isanet)        |
+| NonLocal Net                | MMSegmentation   |      ?      |      Y      |    Y     |  Y   |   N   |    Y     |     [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/nonlocal_net)     |
+| OCRNet                      | MMSegmentation   |      ?      |      Y      |    Y     |  Y   |   N   |    Y     |        [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/ocrnet)        |
+| PointRend                   | MMSegmentation   |      ?      |      Y      |    Y     |  N   |   N   |    Y     |      [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/point_rend)      |
+| Semantic FPN                | MMSegmentation   |      ?      |      Y      |    Y     |  Y   |   N   |    Y     |       [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/sem_fpn)        |
+| STDC                        | MMSegmentation   |      ?      |      Y      |    Y     |  Y   |   N   |    Y     |         [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/stdc)         |
+| UPerNet[\*](#note)          | MMSegmentation   |      ?      |      Y      |    Y     |  N   |   N   |    N     |       [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/upernet)        |
+| DANet                       | MMSegmentation   |      ?      |      Y      |    Y     |  N   |   N   |    N     |        [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/danet)         |
+| Segmenter                   | MMSegmentation   |      Y      |      Y      |    Y     |  Y   |   N   |    Y     |      [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/segmenter)       |
+| SRCNN                       | MMEditing        |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |      [config](https://github.com/open-mmlab/mmediting/tree/master/configs/restorers/srcnn)      |
+| ESRGAN                      | MMEditing        |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |     [config](https://github.com/open-mmlab/mmediting/tree/master/configs/restorers/esrgan)      |
+| SRGAN                       | MMEditing        |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     | [config](https://github.com/open-mmlab/mmediting/tree/master/configs/restorers/srresnet_srgan)  |
+| SRResNet                    | MMEditing        |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     | [config](https://github.com/open-mmlab/mmediting/tree/master/configs/restorers/srresnet_srgan)  |
+| Real-ESRGAN                 | MMEditing        |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |   [config](https://github.com/open-mmlab/mmediting/tree/master/configs/restorers/real_esrgan)   |
+| EDSR                        | MMEditing        |      Y      |      Y      |    Y     |  Y   |   N   |    Y     |      [config](https://github.com/open-mmlab/mmediting/tree/master/configs/restorers/edsr)       |
+| RDN                         | MMEditing        |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |       [config](https://github.com/open-mmlab/mmediting/tree/master/configs/restorers/rdn)       |
+| DBNet                       | MMOCR            |      Y      |      Y      |    Y     |  Y   |   Y   |    Y     |          [config](https://github.com/open-mmlab/mmocr/tree/main/configs/textdet/dbnet)          |
+| CRNN                        | MMOCR            |      Y      |      Y      |    Y     |  Y   |   Y   |    N     |         [config](https://github.com/open-mmlab/mmocr/tree/main/configs/textrecog/crnn)          |
+| SAR                         | MMOCR            |      N      |      Y      |    N     |  N   |   N   |    N     |          [config](https://github.com/open-mmlab/mmocr/tree/main/configs/textrecog/sar)          |
+| HRNet                       | MMPose           |      N      |      Y      |    Y     |  Y   |   N   |    Y     |     [config](https://mmpose.readthedocs.io/en/latest/papers/backbones.html#hrnet-cvpr-2019)     |
+| MSPN                        | MMPose           |      N      |      Y      |    Y     |  Y   |   N   |    Y     |     [config](https://mmpose.readthedocs.io/en/latest/papers/backbones.html#mspn-arxiv-2019)     |
+| LiteHRNet                   | MMPose           |      N      |      Y      |    Y     |  N   |   N   |    Y     |   [config](https://mmpose.readthedocs.io/en/latest/papers/backbones.html#litehrnet-cvpr-2021)   |
+| PointPillars                | MMDetection3d    |      ?      |      Y      |    Y     |  N   |   N   |    Y     |     [config](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/pointpillars)      |
+| CenterPoint (pillar)        | MMDetection3d    |      ?      |      Y      |    Y     |  N   |   N   |    Y     |      [config](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/centerpoint)      |
+| RotatedRetinaNet            | RotatedDetection |      N      |      Y      |    Y     |  N   |   N   |    N     | [config](https://github.com/open-mmlab/mmrotate/blob/main/configs/rotated_retinanet/README.md)  |
+| Oriented RCNN               | RotatedDetection |      N      |      Y      |    Y     |  N   |   N   |    N     |   [config](https://github.com/open-mmlab/mmrotate/blob/main/configs/oriented_rcnn/README.md)    |
+| Gliding Vertex              | RotatedDetection |      N      |      N      |    Y     |  N   |   N   |    N     |   [config](https://github.com/open-mmlab/mmrotate/blob/main/configs/gliding_vertex/README.md)   |
 
 ## Note
 
@@ -76,3 +80,4 @@
  - static: This model only supports static export. Please use a `static` deploy config, just like $MMDEPLOY_DIR/configs/mmseg/segmentation_tensorrt_static-1024x2048.py.
 - SSD: When you convert SSD model, you need to use min shape deploy config just like 300x300-512x512 rather than 320x320-1344x1344, for example $MMDEPLOY_DIR/configs/mmdet/detection/detection_tensorrt_dynamic-300x300-512x512.py.
 - YOLOX: YOLOX with ncnn only supports static shape.
+- Swin Transformer: For TensorRT, only version 8.4+ is supported.
diff --git a/docs/zh_cn/04-developer-guide/do_regression_test.md b/docs/zh_cn/04-developer-guide/do_regression_test.md
index 45cd5d32d..d1b33d007 100644
--- a/docs/zh_cn/04-developer-guide/do_regression_test.md
+++ b/docs/zh_cn/04-developer-guide/do_regression_test.md
@@ -257,6 +257,7 @@ models:
 - [x] ncnn
 - [x] OpenVINO
 - [x] TorchScript
+- [x] SNPE
 - [x] MMDeploy SDK
 
 ## 6. 支持的Codebase及其Metric
diff --git a/docs/zh_cn/04-developer-guide/partition_model.md b/docs/zh_cn/04-developer-guide/partition_model.md
new file mode 100644
index 000000000..f1d6280fa
--- /dev/null
+++ b/docs/zh_cn/04-developer-guide/partition_model.md
@@ -0,0 +1,85 @@
+# How to get partitioned ONNX models
+
+MMDeploy 支持将PyTorch模型导出为onnx模型并进行拆分,得到多个onnx模型文件。用户可以自由地对模型图节点进行标记,并根据这些标记的节点定制任意的onnx模型拆分策略。在这个教程中,我们将通过具体例子来展示如何进行onnx模型拆分。在这个例子中,我们的目标是将YOLOV3模型拆分成两个部分:保留不带后处理的onnx模型,丢弃包含Anchor生成、NMS的后处理部分。
+
+## 步骤 1: 添加模型标记点
+
+为了进行图拆分,我们定义了`Mark`类型op,标记模型导出的边界。在实现方法上,采用`mark`装饰器对函数的输入、输出`Tensor`打标记。需要注意的是,我们的标记函数需要在某个重写函数中执行才能生效。
+
+为了对YOLOV3进行拆分,首先我们需要标记模型的输入。这里为了通用性,我们标记检测器父类`BaseDetector`的`forward`方法中的`img` `Tensor`,同时为了支持其他拆分方案,也对`forward`函数的输出进行了标记,分别是`dets`、`labels`和`masks`。下面的代码截取自[mmdeploy/codebase/mmdet/models/detectors/base.py](https://github.com/open-mmlab/mmdeploy/blob/86a50e343a3a45d7bc2ba3256100accc4973e71d/mmdeploy/codebase/mmdet/models/detectors/base.py),可以看出我们使用`mark`装饰器标记了`__forward_impl`函数的输入输出,并在重写函数`base_detector__forward`中进行了调用,从而完成了对检测器输入的标记。
+
+```python
+from mmdeploy.core import FUNCTION_REWRITER, mark
+
+@mark(
+    'detector_forward', inputs=['input'], outputs=['dets', 'labels', 'masks'])
+def __forward_impl(ctx, self, img, img_metas=None, **kwargs):
+    ...
+
+
+@FUNCTION_REWRITER.register_rewriter(
+    'mmdet.models.detectors.base.BaseDetector.forward')
+def base_detector__forward(ctx, self, img, img_metas=None, **kwargs):
+    ...
+    # call the mark function
+    return __forward_impl(...)
+```
+
+接下来,我们只需要对`YOLOV3Head`中最后一层输出特征`Tensor`进行标记,就可以将整个`YOLOV3`模型拆分成两部分。通过查看`mmdet`源码我们可以知道,`YOLOV3Head`的`get_bboxes`方法中的输入参数`pred_maps`就是我们想要的拆分点,因此可以在重写函数[`yolov3_head__get_bboxes`](https://github.com/open-mmlab/mmdeploy/blob/86a50e343a3a45d7bc2ba3256100accc4973e71d/mmdeploy/codebase/mmdet/models/dense_heads/yolo_head.py#L14)中添加内部函数对`pred_maps`进行标记,具体参考如下示例代码。值得注意的是,输入参数`pred_maps`是由三个`Tensor`组成的列表,所以我们在onnx模型中添加了三个`Mark`标记节点。
+
+```python
+from mmdeploy.core import FUNCTION_REWRITER, mark
+
+@FUNCTION_REWRITER.register_rewriter(
+    func_name='mmdet.models.dense_heads.YOLOV3Head.get_bboxes')
+def yolov3_head__get_bboxes(ctx,
+                            self,
+                            pred_maps,
+                            img_metas,
+                            cfg=None,
+                            rescale=False,
+                            with_nms=True):
+    # mark pred_maps
+    @mark('yolo_head', inputs=['pred_maps'])
+    def __mark_pred_maps(pred_maps):
+        return pred_maps
+    pred_maps = __mark_pred_maps(pred_maps)
+    ...
+```
+
+## 步骤 2: 添加部署配置文件
+
+在完成模型中节点标记之后,我们需要创建部署配置文件。我们假设部署后端是`onnxruntime`,并且模型输入是固定尺寸`608x608`,因此添加文件`configs/mmdet/detection/yolov3_partition_onnxruntime_static.py`。我们需要在配置文件中添加基本的配置信息如`onnx_config`,如果你还不熟悉如何添加配置文件,可以参考[write_config.md](../02-how-to-run/write_config.md)。
+
+在这个部署配置文件中,我们需要添加一个特殊的模型分段配置字段`partition_config`。在模型分段配置中,我们可以给分段策略添加一个类型名称如`yolov3_partition`,设定`apply_marks=True`。在分段方式`partition_cfg`中,我们需要指定每段模型的分割起始点`start`、终止点`end`以及保存分段onnx的文件名。需要提醒的是,各段模型的起始点`start`和终止点`end`是由多个标记节点`Mark`组成的,例如`'detector_forward:input'`代表`detector_forward`标记处输入所产生的标记节点。配置文件具体内容参考如下代码:
+
+```python
+_base_ = ['./detection_onnxruntime_static.py']
+
+onnx_config = dict(input_shape=[608, 608])
+partition_config = dict(
+    type='yolov3_partition', # the partition policy name
+    apply_marks=True, # should always be set to True
+    partition_cfg=[
+        dict(
+            save_file='yolov3.onnx', # filename to save the partitioned onnx model
+            start=['detector_forward:input'], # [mark_name:input/output, ...]
+            end=['yolo_head:input'])  # [mark_name:input/output, ...]
+    ])
+
+```
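+
+可以先确认该分段配置能被 mmdeploy 正确读取。下面是一个最小示意(假设 `mmdeploy.utils` 中的 `load_config`、`get_partition_config` 与当前版本一致):
+
+```python
+from mmdeploy.utils import get_partition_config, load_config
+
+deploy_cfg = load_config(
+    'configs/mmdet/detection/yolov3_partition_onnxruntime_static.py')[0]
+# 返回上面定义的 partition_config 字典;未定义分段时返回 None
+print(get_partition_config(deploy_cfg))
+```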
+
+## 步骤 3: 拆分onnx模型
+
+添加好节点标记和部署配置文件之后,我们可以使用`tools/torch2onnx.py`工具导出带有`Mark`标记的完整onnx模型,并根据分段策略提取分段的onnx模型文件。我们可以执行如下脚本,得到不带后处理的`YOLOV3` onnx模型文件`yolov3.onnx`,同时输出文件中也包含了添加`Mark`标记的完整模型文件`end2end.onnx`。此外,用户可以使用网页版模型可视化工具[netron](https://netron.app/)来查看和验证输出onnx模型的结构是否正确。
+
+```shell
+python tools/torch2onnx.py \
+configs/mmdet/detection/yolov3_partition_onnxruntime_static.py \
+../mmdetection/configs/yolo/yolov3_d53_mstrain-608_273e_coco.py \
+https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_mstrain-608_273e_coco/yolov3_d53_mstrain-608_273e_coco_20210518_115020-a2c3acb8.pth \
+../mmdetection/demo/demo.jpg \
+--work-dir ./work-dirs/mmdet/yolov3/ort/partition
+```
+
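+If you prefer a scripted check over netron, the following sketch (paths assume the work directory used above) loads each exported file, validates it with the onnx checker, and prints the graph inputs and outputs:
+
+```python
+import onnx
+
+for name in ['end2end.onnx', 'yolov3.onnx']:
+    model = onnx.load(f'./work-dirs/mmdet/yolov3/ort/partition/{name}')
+    onnx.checker.check_model(model)  # raises if the model is malformed
+    print(name,
+          [i.name for i in model.graph.input],
+          [o.name for o in model.graph.output])
+```
+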
+Once the partitioned ONNX models are available, you can continue the deployment with other tools shipped with mmdeploy, such as `mmdeploy_onnx2ncnn` and `onnx2tensorrt`.
diff --git a/docs/zh_cn/04-developer-guide/support_new_backend.md b/docs/zh_cn/04-developer-guide/support_new_backend.md
index 223271ecc..50ae93739 100644
--- a/docs/zh_cn/04-developer-guide/support_new_backend.md
+++ b/docs/zh_cn/04-developer-guide/support_new_backend.md
@@ -1,8 +1,8 @@
-## How to support new backends
+# How to support new backends
 
 MMDeploy supports a number of backend inference engines, and we warmly welcome contributions of new ones. This tutorial describes the general procedure for supporting a new backend in MMDeploy.
 
-### Prerequisites
+## Prerequisites
 
 Before adding a new backend engine to MMDeploy, check that the backend to be supported meets a few requirements:
 
@@ -10,7 +10,7 @@ MMDeploy 支持了许多后端推理引擎,但我们依然非常欢迎新后
 - If the backend requires model files or weight files other than the ".onnx" file, a conversion tool that turns the ".onnx" file into the required format must be added; the tool can be a Python API, a script, or an executable program.
 - It is strongly recommended that the new backend provide a Python interface for loading backend files and running inference, so that results can be verified.
 
-### Support backend conversion
+## Support backend conversion
 
 Backends in MMDeploy must support ONNX, so a backend either loads the ".onnx" file directly or converts it into its own format with a conversion tool. This section describes the steps for supporting backend conversion.
 
@@ -142,7 +142,7 @@ MMDeploy 中的后端必须支持 ONNX,因此后端能直接加载“.onnx”
            backend_files = []
            for onnx_path in onnx_files:
                create_process(
-                   f'onnx2ncnn with {onnx_path}',
+                   f'mmdeploy_onnx2ncnn with {onnx_path}',
                    target=onnx2ncnn,
                    args=(onnx_path, args.work_dir),
                    kwargs=dict(),
@@ -155,7 +155,7 @@ MMDeploy 中的后端必须支持 ONNX,因此后端能直接加载“.onnx”
 
 7. Add comments and unit tests for the new backend engine code :).
 
-### Support backend inference
+## Support backend inference
 
 Although a backend engine is usually implemented in C/C++, testing and debugging are much easier if the backend offers a Python inference interface. We encourage contributors to support inference for the new backend in MMDeploy's Python interface. This section describes the steps for supporting backend inference.
 
@@ -204,7 +204,7 @@ MMDeploy 中的后端必须支持 ONNX,因此后端能直接加载“.onnx”
    		# Postprocess data
            # ...
 
-       @TimeCounter.count_time()
+       @TimeCounter.count_time('onnxruntime')
        def __ort_execute(self, io_binding: ort.IOBinding):
    		# Only do the inference
            self.sess.run_with_iobinding(io_binding)
@@ -230,7 +230,7 @@ MMDeploy 中的后端必须支持 ONNX,因此后端能直接加载“.onnx”
 
 5. Add comments and unit tests for the new backend engine code :).
 
-### Add a new backend when using MMDeploy as a third-party package
+## Add a new backend when using MMDeploy as a third-party package
 
 The previous sections show how to add a new backend inside MMDeploy, which requires changing its source code. If we treat MMDeploy as a third-party package, however, the approach above no longer works. In that case, adding a new backend requires installing an extra package named `aenum` beforehand, via `pip install aenum`.
 
diff --git a/docs/zh_cn/04-developer-guide/support_new_model.md b/docs/zh_cn/04-developer-guide/support_new_model.md
index 3ee4c8420..47ab46d4e 100644
--- a/docs/zh_cn/04-developer-guide/support_new_model.md
+++ b/docs/zh_cn/04-developer-guide/support_new_model.md
@@ -1,8 +1,8 @@
-## How to support new models
+# How to support new models
 
 We provide several tools to support model conversion.
 
-### Function rewriter
+## Function rewriter
 
 PyTorch neural networks are written in Python, which simplifies algorithm development, but Python control flow and third-party libraries can make exporting the network to an intermediate representation difficult. We therefore provide a "monkey patch" tool that rewrites unsupported functionality into a form that can be exported to the intermediate representation. Here is a concrete example:
 
@@ -26,7 +26,7 @@ def repeat_static(ctx, input, *size):
 
 See [these sample codes](https://github.com/open-mmlab/mmdeploy/blob/master/mmdeploy/codebase/mmcls/models/backbones/shufflenet_v2.py) for reference.
 
-### Module rewriter
+## Module rewriter
 
 If you want to replace an entire module with another one, we also provide a module rewriter, as shown below:
 
@@ -61,7 +61,7 @@ class SRCNNWrapper(nn.Module):
 
 All instances of the module in the network will be replaced with instances of this new class. The original module and the deploy config are passed in as the first two arguments.
 
-### Symbolic function rewriter
+## Symbolic function rewriter
 
 The mapping between PyTorch and ONNX is defined through symbolic functions in PyTorch. Custom symbolic functions help us bypass ONNX nodes that some inference engines do not support.
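 
 Below is a minimal sketch of such a custom symbolic function; the operator and decorator arguments are illustrative and assume the `SYMBOLIC_REWRITER` registry exported by `mmdeploy.core`:
 
 ```python
 from mmdeploy.core import SYMBOLIC_REWRITER
 
 
 @SYMBOLIC_REWRITER.register_symbolic('hardsigmoid', is_pytorch=True)
 def hardsigmoid__default(ctx, g, self):
     """Map torch hardsigmoid to the ONNX HardSigmoid node.
 
     torch uses alpha = 1/6 and beta = 0.5; beta matches the ONNX default.
     """
     return g.op('HardSigmoid', self, alpha_f=1. / 6., beta_f=0.5)
 ```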
 
diff --git a/docs/zh_cn/appendix/cross_build_snpe_service.md b/docs/zh_cn/appendix/cross_build_snpe_service.md
new file mode 100644
index 000000000..bb1ea4d40
--- /dev/null
+++ b/docs/zh_cn/appendix/cross_build_snpe_service.md
@@ -0,0 +1,170 @@
+# Cross-compiling the NDK snpe inference server on Ubuntu 18.04
+
+mmdeploy already provides prebuilt packages. Refer to this document if you want to build the server yourself or need to modify the .proto interface.
+
+Note that the official gRPC documentation does not fully support the NDK.
+
+## 1. Environment
+
+| Item     | Version        | Remark                                                    |
+| -------- | -------------- | --------------------------------------------------------- |
+| snpe     | 1.59           | 1.60 uses clang-8.0, which may cause compatibility issues |
+| host OS  | ubuntu18.04    | version required by snpe 1.59                              |
+| NDK      | r17c           | version required by snpe 1.59                              |
+| gRPC     | commit 6f698b5 | -                                                          |
+| device   | qcom888        | a qcom chip is required                                    |
+
+## 2. Cross-compile gRPC with the NDK
+
+1. Pull the gRPC repo and build `protoc` and `grpc_cpp_plugin` on the host
+
+```bash
+# install dependencies
+$ apt-get update && apt-get install -y libssl-dev
+# build
+$ git clone https://github.com/grpc/grpc --recursive --depth=1
+$ cd grpc
+$ mkdir -p cmake/build
+$ pushd cmake/build
+
+$ cmake \
+  -DCMAKE_BUILD_TYPE=Release \
+  -DgRPC_INSTALL=ON \
+  -DgRPC_BUILD_TESTS=OFF \
+  -DgRPC_SSL_PROVIDER=package \
+  ../..
+# install into the host environment
+$ make -j
+$ sudo make install
+```
+
+2. Download the NDK and cross-compile the static libraries needed for android aarch64
+
+```bash
+$ wget https://dl.google.com/android/repository/android-ndk-r17c-linux-x86_64.zip
+$ unzip android-ndk-r17c-linux-x86_64.zip
+
+# set environment variables
+$ export ANDROID_NDK=/path/to/android-ndk-r17c
+
+# build
+$ cd /path/to/grpc
+$ mkdir -p cmake/build_aarch64  && pushd cmake/build_aarch64
+
+$ cmake ../.. \
+ -DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK}/build/cmake/android.toolchain.cmake \
+ -DANDROID_ABI=arm64-v8a \
+ -DANDROID_PLATFORM=android-26 \
+ -DANDROID_TOOLCHAIN=clang \
+ -DANDROID_STL=c++_shared \
+ -DCMAKE_BUILD_TYPE=Release \
+ -DCMAKE_INSTALL_PREFIX=/tmp/android_grpc_install_shared
+
+$ make -j
+$ make install
+```
+
+3. At this point `/tmp/android_grpc_install_shared` should contain the complete installation
+
+```bash
+$ cd /tmp/android_grpc_install_shared
+$ tree -L 1
+.
+├── bin
+├── include
+├── lib
+└── share
+```
+
+## 3. (Optional) Verify that NDK gRPC works
+
+1. Build the helloworld example that ships with gRPC
+
+```bash
+$ cd /path/to/grpc/examples/cpp/helloworld/
+$ mkdir cmake/build_aarch64 -p && pushd cmake/build_aarch64
+
+$ cmake ../.. \
+ -DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK}/build/cmake/android.toolchain.cmake \
+ -DANDROID_ABI=arm64-v8a \
+ -DANDROID_PLATFORM=android-26 \
+ -DANDROID_STL=c++_shared \
+ -DANDROID_TOOLCHAIN=clang \
+ -DCMAKE_BUILD_TYPE=Release \
+ -Dabsl_DIR=/tmp/android_grpc_install_shared/lib/cmake/absl \
+ -DProtobuf_DIR=/tmp/android_grpc_install_shared/lib/cmake/protobuf \
+ -DgRPC_DIR=/tmp/android_grpc_install_shared/lib/cmake/grpc
+
+$ make -j
+$ ls greeter*
+greeter_async_client   greeter_async_server     greeter_callback_server  greeter_server
+greeter_async_client2  greeter_callback_client  greeter_client
+```
+
+2. Enable debug mode on the phone and push the build outputs to the `/data/local/tmp` directory
+
+Tip: on some phones, tapping the build number 7 times under Settings - About enables developer mode, after which USB debugging can be turned on
+
+```bash
+$ adb push greeter* /data/local/tmp
+```
+
+3. Enter the phone with `adb shell` and run the client/server
+
+```bash
+/data/local/tmp $ ./greeter_client
+Greeter received: Hello world
+```
+
+## 4. Cross-compile the snpe inference server
+
+1. Open the [snpe tools website](https://developer.qualcomm.com/software/qualcomm-neural-processing-sdk/tools), download version 1.59, unzip it, and set the environment variable
+
+**Note: snpe >= 1.60 switches to `clang-8.0`, which may be incompatible with `libc++_shared.so` on older devices.**
+
+```bash
+$ export SNPE_ROOT=/path/to/snpe-1.59.0.3230
+```
+
+2. Open the mmdeploy snpe server directory and build with the same options used when cross-compiling gRPC
+
+```bash
+$ cd /path/to/mmdeploy
+$ cd service/snpe/server
+
+$ mkdir -p build && cd build
+$ export ANDROID_NDK=/path/to/android-ndk-r17c
+$ cmake .. \
+ -DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK}/build/cmake/android.toolchain.cmake \
+ -DANDROID_ABI=arm64-v8a \
+ -DANDROID_PLATFORM=android-26 \
+ -DANDROID_STL=c++_shared \
+ -DANDROID_TOOLCHAIN=clang \
+ -DCMAKE_BUILD_TYPE=Release \
+ -Dabsl_DIR=/tmp/android_grpc_install_shared/lib/cmake/absl \
+ -DProtobuf_DIR=/tmp/android_grpc_install_shared/lib/cmake/protobuf \
+ -DgRPC_DIR=/tmp/android_grpc_install_shared/lib/cmake/grpc
+
+$ make -j
+$ file inference_server
+inference_server: ELF 64-bit LSB shared object, ARM aarch64, version 1 (SYSV), dynamically linked, interpreter /system/bin/linker64, BuildID[sha1]=252aa04e2b982681603dacb74b571be2851176d2, with debug_info, not stripped
+```
+
+The build produces `inference_server`; `adb push` it to the device and it is ready to run.
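+
+A sketch of deploying and starting it over adb (the target directory and the listening port are illustrative; use whatever your client expects):
+
+```bash
+$ adb push inference_server /data/local/tmp
+$ adb shell
+/data/local/tmp $ chmod +x inference_server && ./inference_server 60000
+```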
+
+## 5. Regenerate the proto interface
+
+If you have modified `inference.proto`, the .cpp and .py communication interfaces need to be regenerated
+
+```bash
+$ python3 -m pip install grpcio-tools --user
+$ python3 -m grpc_tools.protoc -I./ --python_out=./client/ --grpc_python_out=./client/ inference.proto
+
+$ ln -s `which protoc-gen-grpc`
+$ protoc --cpp_out=./ --grpc_out=./  --plugin=protoc-gen-grpc=grpc_cpp_plugin  inference.proto
+```
+
+## References
+
+- snpe tutorial: https://developer.qualcomm.com/sites/default/files/docs/snpe/cplus_plus_tutorial.html
+- gRPC cross-build script: https://raw.githubusercontent.com/grpc/grpc/master/test/distrib/cpp/run_distrib_test_cmake_aarch64_cross.sh
+- stackoverflow: https://stackoverflow.com/questions/54052229/build-grpc-c-for-android-using-ndk-arm-linux-androideabi-clang-compiler
diff --git a/docs/zh_cn/conf.py b/docs/zh_cn/conf.py
index de962265f..c060165f5 100644
--- a/docs/zh_cn/conf.py
+++ b/docs/zh_cn/conf.py
@@ -56,6 +56,7 @@ extensions = [
     'sphinx_markdown_tables',
     'myst_parser',
     'sphinx_copybutton',
+    'sphinxcontrib.mermaid'
 ]  # yapf: disable
 
 autodoc_mock_imports = ['tensorrt']
diff --git a/docs/zh_cn/faq.md b/docs/zh_cn/faq.md
index 8b227cc9f..67615b1d1 100644
--- a/docs/zh_cn/faq.md
+++ b/docs/zh_cn/faq.md
@@ -50,6 +50,40 @@
   print(torch.__file__)
   ```
 
+- enable_language(CUDA) reports an error at compile time
+
+  ```
+  -- Selecting Windows SDK version 10.0.19041.0 to target Windows 10.0.19044.
+  -- Found CUDA: C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.1 (found version "11.1")
+  CMake Error at C:/Software/cmake/cmake-3.23.1-windows-x86_64/share/cmake-3.23/Modules/CMakeDetermineCompilerId.cmake:491 (message):
+    No CUDA toolset found.
+  Call Stack (most recent call first):
+    C:/Software/cmake/cmake-3.23.1-windows-x86_64/share/cmake-3.23/Modules/CMakeDetermineCompilerId.cmake:6 (CMAKE_DETERMINE_COMPILER_ID_BUILD)
+    C:/Software/cmake/cmake-3.23.1-windows-x86_64/share/cmake-3.23/Modules/CMakeDetermineCompilerId.cmake:59 (__determine_compiler_id_test)
+    C:/Software/cmake/cmake-3.23.1-windows-x86_64/share/cmake-3.23/Modules/CMakeDetermineCUDACompiler.cmake:339 (CMAKE_DETERMINE_COMPILER_ID)
+    C:/workspace/mmdeploy-0.6.0-windows-amd64-cuda11.1-tensorrt8.2.3.0/sdk/lib/cmake/MMDeploy/MMDeployConfig.cmake:27 (enable_language)
+    CMakeLists.txt:5 (find_package)
+  ```
+
+  **Cause:** CUDA Toolkit 11.1 was installed before Visual Studio, so the VS plugin was not installed. Alternatively, the VS version is too new, so the CUDA Toolkit installer skipped installing the VS plugin.
+
+  **Solution:** The problem can be fixed by copying the plugin files manually, e.g. copy the four files in `C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.1\extras\visual_studio_integration\MSBuildExtensions` into the `C:\Software\Microsoft Visual Studio\2022\Community\Msbuild\Microsoft\VC\v170\BuildCustomizations` directory. Adjust the exact paths to your installation.
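+
+  For example, from an administrator command prompt (the paths are the ones from this entry; adjust them to your system):
+
+  ```
+  xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.1\extras\visual_studio_integration\MSBuildExtensions\*" "C:\Software\Microsoft Visual Studio\2022\Community\Msbuild\Microsoft\VC\v170\BuildCustomizations" /Y
+  ```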
+
+### ONNX Runtime
+
+- On Windows, during model conversion visualization or SDK inference, you encounter
+  ```
+  onnxruntime.capi.onnxruntime_pybind11_state.Fail: [ONNXRuntimeError] : 1 : FAIL : Failed to load library, error code: 193
+  ```
+  **Cause:** On newer Windows systems there are two copies of `onnxruntime.dll` on the system path, and they are loaded first, causing a conflict.
+  ```
+  C:\Windows\SysWOW64\onnxruntime.dll
+  C:\Windows\System32\onnxruntime.dll
+  ```
+  **Solution:** Choose either of the following
+  1. Copy the dll from the lib directory of the downloaded onnxruntime package next to mmdeploy_onnxruntime_ops.dll (the Everything tool is recommended for locating it)
+  2. Rename the two dlls on the system path so they cannot be loaded; this may require changing file permissions
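+
+  To see which copies of the dll shadow each other on the search path, you can run the standard Windows command:
+
+  ```
+  where onnxruntime.dll
+  ```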
+
 ### Pip
 
 - pip installed a package, but it cannot be `import`ed.
diff --git a/docs/zh_cn/get_started.md b/docs/zh_cn/get_started.md
index b698dd6b9..c0e2e84d8 100644
--- a/docs/zh_cn/get_started.md
+++ b/docs/zh_cn/get_started.md
@@ -4,8 +4,6 @@ MMDeploy 提供了一系列工具,帮助您更轻松的将 OpenMMLab 下的算
 
 You can use our pre-designed pipeline to get there in one step, or customize your own conversion workflow.
 
-In the coming chapters, we will describe MMDeploy's model deployment approach and demonstrate its basic usage on NVIDIA devices, taking the [MMDetection](https://github.com/open-mmlab/mmdetection) Faster R-CNN model as an example.
-
 ## Introduction to the pipeline
 
 The model deployment pipeline defined by MMDeploy is shown in the figure below:
@@ -19,7 +17,7 @@ MMDeploy 定义的模型部署流程,如下图所示:
 
 ### MMDeploy Model
 
-The collection of model conversion results. It contains not only the backend model but also the model's meta information, which will be used by the inference SDK.
+Also known as an SDK Model. It is the collection of model conversion results, containing not only the backend model but also the model's meta information, which will be used by the inference SDK.
 
 ### Inference SDK
 
@@ -27,208 +25,239 @@ MMDeploy 定义的模型部署流程,如下图所示:
 
 ## Prerequisites
 
-For end-to-end model conversion and inference, MMDeploy requires Python 3.6+ and PyTorch 1.5+.
+For end-to-end model conversion and inference, MMDeploy requires Python 3.6+ and PyTorch 1.8+.
 
 **Step 1.** Download and install Miniconda from the [official website](https://docs.conda.io/en/latest/miniconda.html)
 
 **Step 2.** Create and activate a conda environment
 
 ```shell
-export PYTHON_VERSION=3.7
-conda create --name mmdeploy python=${PYTHON_VERSION} -y
+conda create --name mmdeploy python=3.8 -y
 conda activate mmdeploy
 ```
 
 **Step 3.** Install PyTorch following the [official instructions](https://pytorch.org/get-started/locally/)
 
-The torch2onnx feature of the Model Converter depends on it.
-
-On GPU hosts (here we assume Ubuntu 18.04 with CUDA 11.1), you can install PyTorch 1.8 as follows:
+On GPU hosts:
 
 ```shell
-export PYTHON_VERSION=3.7
-export PYTORCH_VERSION=1.8.0
-export TORCHVISION_VERSION=0.9.0
-export CUDA_VERSION=11.1
-
-conda create -n mmdeploy python=${PYTHON_VERSION} -y
-conda activate mmdeploy
-
-conda install pytorch==${PYTORCH_VERSION} torchvision==${TORCHVISION_VERSION} cudatoolkit=${CUDA_VERSION} -c pytorch -c conda-forge
+conda install pytorch=={pytorch_version} torchvision=={torchvision_version} cudatoolkit={cudatoolkit_version} -c pytorch -c conda-forge
 ```
 
-On CPU hosts, you can run:
+On CPU hosts:
 
 ```shell
-export PYTORCH_VERSION=1.8.0
-export TORCHVISION_VERSION=0.9.0
-conda install pytorch==${PYTORCH_VERSION} torchvision==${TORCHVISION_VERSION} cpuonly -c pytorch
+conda install pytorch=={pytorch_version} torchvision=={torchvision_version} cpuonly -c pytorch
+```
+
+```{note}
+On GPU hosts, make sure that {cudatoolkit_version} matches the CUDA Toolkit version installed on the machine, to avoid possible version conflicts when using TensorRT.
 ```
 
 ## Install MMDeploy
 
-**Step 1.** Install mmcv-full
+**Step 1.** Install [MMCV](https://github.com/open-mmlab/mmcv) via [MIM](https://github.com/open-mmlab/mim)
 
 ```shell
-export MMCV_VERSION=1.5.0
-export CUDA_STRING="${CUDA_VERSION/./""}"
-
-python -m pip install mmcv-full==${MMCV_VERSION} -f https://download.openmmlab.com/mmcv/dist/cu${CUDA_STRING}/torch${PYTORCH_VERSION}/index.html
+pip install -U openmim
+mim install mmcv-full
 ```
 
-**Step 2.** Install MMDeploy
+**Step 2.** Install MMDeploy and the inference engine
 
-Starting from v0.5.0, MMDeploy provides prebuilt packages. You can select and download one for your target platform from [here](https://github.com/open-mmlab/mmdeploy/releases).
+We recommend installing and trying out MMDeploy with the prebuilt packages. Please pick the latest release matching your target platform from [here](https://github.com/open-mmlab/mmdeploy/releases), then download and install it.
 
-On NVIDIA devices, we recommend the MMDeploy-TensorRT prebuilt package:
+The platform and device matrix currently covered by the MMDeploy prebuilt packages is as follows:
+
+| OS-Arch        | Device | ONNX Runtime | TensorRT |
+| -------------- | ------ | ------------ | -------- |
+| Linux-x86_64   | CPU    | Y            | N/A      |
+| Linux-x86_64   | CUDA   | N            | Y        |
+| Windows-x86_64 | CPU    | Y            | N/A      |
+| Windows-x86_64 | CUDA   | N            | Y        |
+
+**Note: for platforms and devices not listed in the table above, please follow the [build-from-source guide](./01-how-to-build/build_from_source.md) to install and configure MMDeploy.**
+
+Taking the latest prebuilt packages as an example, you can install them with the commands below:
+
+<details>
+<summary><b>Linux-x86_64, CPU, ONNX Runtime 1.8.1</b></summary>
+
 ```shell
-export MMDEPLOY_VERSION=0.5.0
-export TENSORRT_VERSION=8.2.3.0
-export PYTHON_VERSION=3.7
-export PYTHON_STRING="${PYTHON_VERSION/./""}"
-
-wget https://github.com/open-mmlab/mmdeploy/releases/download/v${MMDEPLOY_VERSION}/mmdeploy-${MMDEPLOY_VERSION}-linux-x86_64-cuda${CUDA_VERSION}-tensorrt${TENSORRT_VERSION}.tar.gz
-tar -zxvf mmdeploy-${MMDEPLOY_VERSION}-linux-x86_64-cuda${CUDA_VERSION}-tensorrt${TENSORRT_VERSION}.tar.gz
-cd mmdeploy-${MMDEPLOY_VERSION}-linux-x86_64-cuda${CUDA_VERSION}-tensorrt${TENSORRT_VERSION}
-python -m pip install dist/mmdeploy-*-py${PYTHON_STRING}*.whl
-python -m pip install sdk/python/mmdeploy_python-*-cp${PYTHON_STRING}*.whl
-export LD_LIBRARY_PATH=$(pwd)/sdk/lib:$LD_LIBRARY_PATH
+# install the MMDeploy ONNX Runtime custom-op library and the inference SDK
+wget https://github.com/open-mmlab/mmdeploy/releases/download/v0.7.0/mmdeploy-0.7.0-linux-x86_64-onnxruntime1.8.1.tar.gz
+tar -zxvf mmdeploy-0.7.0-linux-x86_64-onnxruntime1.8.1.tar.gz
+cd mmdeploy-0.7.0-linux-x86_64-onnxruntime1.8.1
+pip install dist/mmdeploy-0.7.0-py3-none-linux_x86_64.whl
+pip install sdk/python/mmdeploy_python-0.7.0-cp38-none-linux_x86_64.whl
 cd ..
+# install the inference engine, ONNX Runtime
+pip install onnxruntime==1.8.1
+wget https://github.com/microsoft/onnxruntime/releases/download/v1.8.1/onnxruntime-linux-x64-1.8.1.tgz
+tar -zxvf onnxruntime-linux-x64-1.8.1.tgz
+export ONNXRUNTIME_DIR=$(pwd)/onnxruntime-linux-x64-1.8.1
+export LD_LIBRARY_PATH=$ONNXRUNTIME_DIR/lib:$LD_LIBRARY_PATH
 ```
-```{note}
-If MMDeploy has no prebuilt package for your target platform, please follow the build-from-source documentation to install and configure it
-```
+</details>
-**Step 3.** Install the inference backends required by the prebuilt package
-
-In this example we need to install the TensorRT (with cuDNN) inference engine. Because downloads from the NVIDIA website require a signed-in account, please log in beforehand and download the required [TensorRT](https://docs.nvidia.com/deeplearning/tensorrt/install-guide/index.html#installing-tar) and [cuDNN](https://developer.nvidia.com/cudnn) packages. **Note: the TensorRT and cuDNN versions must match your CUDA version**
-
-After downloading, you can install them as follows, here taking TensorRT 8.2.3.0 and cuDNN 8.2 as an example:
+<details>
+<summary><b>Linux-x86_64, CUDA 11.x, TensorRT 8.2.3.0</b></summary>
+
 ```shell
-export TENSORRT_VERSION=8.2.3.0
-CUDA_MAJOR="${CUDA_VERSION/\.*/""}"
-
-# !!! download a tensorrt matching the cuda toolkit from the NVIDIA website into the current working directory
-tar -zxvf TensorRT-${TENSORRT_VERSION}*cuda-${CUDA_MAJOR}*.tar.gz
-python -m pip install TensorRT-${TENSORRT_VERSION}/python/tensorrt-*-cp${PYTHON_STRING}*.whl
-python -m pip install pycuda
-export TENSORRT_DIR=$(pwd)/TensorRT-${TENSORRT_VERSION}
+# install the MMDeploy TensorRT custom-op library and the inference SDK
+wget https://github.com/open-mmlab/mmdeploy/releases/download/v0.7.0/mmdeploy-0.7.0-linux-x86_64-cuda11.1-tensorrt8.2.3.0.tar.gz
+tar -zxvf mmdeploy-0.7.0-linux-x86_64-cuda11.1-tensorrt8.2.3.0.tar.gz
+cd mmdeploy-0.7.0-linux-x86_64-cuda11.1-tensorrt8.2.3.0
+pip install dist/mmdeploy-0.7.0-py3-none-linux_x86_64.whl
+pip install sdk/python/mmdeploy_python-0.7.0-cp38-none-linux_x86_64.whl
+cd ..
+# install the inference engine, TensorRT
+# !!! download the TensorRT-8.2.3.0 CUDA 11.x package from the NVIDIA website and unpack it into the current directory
+pip install TensorRT-8.2.3.0/python/tensorrt-8.2.3.0-cp38-none-linux_x86_64.whl
+pip install pycuda
+export TENSORRT_DIR=$(pwd)/TensorRT-8.2.3.0
 export LD_LIBRARY_PATH=${TENSORRT_DIR}/lib:$LD_LIBRARY_PATH
-
-
-# !!! download a cudnn matching the cuda toolkit and tensorrt from the NVIDIA website into the current working directory
-tar -zxvf cudnn-${CUDA_MAJOR}.*-linux-x64*.tgz
+# !!! download the cuDNN 8.2.1 CUDA 11.x package from the NVIDIA website and unpack it into the current directory
 export CUDNN_DIR=$(pwd)/cuda
 export LD_LIBRARY_PATH=$CUDNN_DIR/lib64:$LD_LIBRARY_PATH
 ```
-In the coming chapters, we will demonstrate MMDeploy's features against this environment.
+</details>
-At present, you can consult the following documents for how to install the various inference backends supported by MMDeploy:
+<details>
+<summary><b>Windows-x86_64</b></summary>
+
-- [ONNX Runtime](../en/05-supported-backends/onnxruntime.md) -- [TensorRT](../en/05-supported-backends/tensorrt.md) -- [PPL.NN](../en/05-supported-backends/pplnn.md) -- [ncnn](../en/05-supported-backends/ncnn.md) -- [OpenVINO](../en/05-supported-backends/openvino.md) -- [LibTorch](../en/05-supported-backends/torchscript.md) +请阅读 [这里](./02-how-to-run/prebuilt_package_windows.md),了解 MMDeploy 预编译包在 Windows 平台下的使用方法。 ## 模型转换 -在准备工作就绪后,我们可以使用 MMDeploy 中的工具 `deploy.py`,将 OpenMMLab 的 PyTorch 模型转换成推理后端支持的格式。 +在准备工作就绪后,我们可以使用 MMDeploy 中的工具 `tools/deploy.py`,将 OpenMMLab 的 PyTorch 模型转换成推理后端支持的格式。 +对于`tools/deploy.py` 的使用细节,请参考 [如何转换模型](./02-how-to-run/convert_model.md)。 -以 [MMDetection](https://github.com/open-mmlab/mmdetection) 中的 `Faster R-CNN` 为例,我们可以使用如下命令,将 PyTorch 模型转换成可部署在 NVIDIA GPU 上的 TenorRT 模型: +以 [MMDetection](https://github.com/open-mmlab/mmdetection) 中的 `Faster R-CNN` 为例,我们可以使用如下命令,将 PyTorch 模型转换为 TenorRT 模型,从而部署到 NVIDIA GPU 上. ```shell # 克隆 mmdeploy 仓库。转换时,需要使用 mmdeploy 仓库中的配置文件,建立转换流水线 git clone --recursive https://github.com/open-mmlab/mmdeploy.git -python -m pip install -r mmdeploy/requirements/runtime.txt -export MMDEPLOY_DIR=$(pwd)/mmdeploy -# 克隆 mmdetection 仓库。转换时,需要使用 mmdetection 仓库中的模型配置文件,构建 PyTorch nn module -python -m pip install mmdet==2.24.0 +# 安装 mmdetection。转换时,需要使用 mmdetection 仓库中的模型配置文件,构建 PyTorch nn module git clone https://github.com/open-mmlab/mmdetection.git -export MMDET_DIR=$(pwd)/mmdetection +cd mmdetection +pip install -v -e . +cd .. # 下载 Faster R-CNN 模型权重 -export CHECKPOINT_DIR=$(pwd)/checkpoints -wget -P ${CHECKPOINT_DIR} https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth - -# 设置工作路径 -export WORK_DIR=$(pwd)/mmdeploy_models/faster-rcnn +wget -P checkpoints https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth # 执行转换命令,实现端到端的转换 -python ${MMDEPLOY_DIR}/tools/deploy.py \ - ${MMDEPLOY_DIR}/configs/mmdet/detection/detection_tensorrt_dynamic-320x320-1344x1344.py \ - ${MMDET_DIR}/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \ - ${CHECKPOINT_DIR}/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ - ${MMDET_DIR}/demo/demo.jpg \ - --work-dir ${WORK_DIR} \ - --device cuda:0 \ +python mmdeploy/tools/deploy.py \ + mmdeploy/configs/mmdet/detection/detection_tensorrt_dynamic-320x320-1344x1344.py \ + mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \ + checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ + mmdetection/demo/demo.jpg \ + --work-dir mmdeploy_model/faster-rcnn \ + --device cuda \ --dump-info ``` -`${MMDEPLOY_DIR}/tools/deploy.py` 是一个方便模型转换的工具。您可以阅读 [如何转换模型](./02-how-to-run/convert_model.md) 了解更多细节。 +转换结果被保存在 `--work-dir` 指向的文件夹中。**该文件夹中不仅包含推理后端模型,还包括推理元信息。这些内容的整体被定义为 SDK Model。推理 SDK 将用它进行模型推理。** -`detection_tensorrt_dynamic-320x320-1344x1344.py` 是一个参数配置文件。该文件的命名遵循如下规则: - -```bash -<任务名>_<推理后端>-[后端特性]_<动态模型支持>.py +```{tip} +在安装了 MMDeploy-ONNXRuntime 预编译包后,把上述转换命令中的detection_tensorrt_dynamic-320x320-1344x1344.py 换成 detection_onnxruntime_dynamic.py,并修改 --device 为 cpu, +即可以转出 onnx 模型,并用 ONNXRuntime 进行推理 ``` -可以很容易的通过文件名来确定最适合的那个配置文件。如果您希望定制自己的转换配置,可以参考[如何编写配置文件](./02-how-to-run/write_config.md)修改参数。 - ## 模型推理 -在转换完成后,您既可以使用 Model Converter 进行推理,也可以使用 Inference SDK。前者使用 Python 开发,后者主要使用 C/C++ 开发。 +在转换完成后,你既可以使用 Model Converter 进行推理,也可以使用 Inference SDK。 ### 使用 Model Converter 的推理 API Model Converter 屏蔽了推理后端接口的差异,对其推理 API 进行了统一封装,接口名称为 `inference_model`。 -以上文中 Faster 
R-CNN 的 TensorRT 模型为例,您可以使用如下方式进行模型推理工作: +以上文中 Faster R-CNN 的 TensorRT 模型为例,你可以使用如下方式进行模型推理工作: ```python from mmdeploy.apis import inference_model -import os - -model_cfg = os.getenv('MMDET_DIR') + '/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' -deploy_cfg = os.getenv('MMDEPLOY_DIR') + '/configs/mmdet/detection/detection_tensorrt_dynamic-320x320-1344x1344.py' -backend_files = os.getenv('WORK_DIR') + '/end2end.engine' - -result = inference_model(model_cfg, deploy_cfg, backend_files, img=img, device=device) +result = inference_model( + model_cfg='mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py', + deploy_cfg='mmdeploy/configs/mmdet/detection/detection_tensorrt_dynamic-320x320-1344x1344.py', + backend_files=['mmdeploy_model/faster-rcnn/end2end.engine'], + img='mmdetection/demo/demo.jpg', + device='cuda:0') ``` -`inference_model`会创建一个对后端模型的封装,通过该封装进行推理。推理的结果会保持与 OpenMMLab 中原模型同样的格式。 - ```{note} -MMDeploy 转出的后端模型,您可以直接使用后端 API 进行推理。不过,因为 MMDeploy 拥有 TensorRT、ONNX Runtime 等自定义算子, -您需要先加载对应的自定义算子库,然后再使用后端 API。 +接口中的 model_path 指的是推理引擎文件的路径,比如例子当中end2end.engine文件的路径。路径必须放在 list 中,因为有的推理引擎模型结构和权重是分开存储的。 ``` ### 使用推理 SDK -您也可以使用 MMDeploy SDK 进行推理。以上文中转出的 Faster R-CNN TensorRT 模型为例,接下来的章节将介绍如何使用 SDK 的 FFI 进行模型推理。 +你可以直接运行预编译包中的 demo 程序,输入 SDK Model 和图像,进行推理,并查看推理结果。 + +```shell +cd mmdeploy-0.7.0-linux-x86_64-cuda11.1-tensorrt8.2.3.0 +# 运行 python demo +python sdk/example/python/object_detection.py cuda ../mmdeploy_model/faster-rcnn ../mmdetection/demo/demo.jpg +# 运行 C/C++ demo +export LD_LIBRARY_PATH=$(pwd)/sdk/lib:$LD_LIBRARY_PATH +./sdk/bin/object_detection cuda ../mmdeploy_model/faster-rcnn ../mmdetection/demo/demo.jpg +``` + +```{note} +以上述命令中,输入模型是 SDK Model 的路径(也就是 Model Converter 中 --work-dir 参数),而不是推理引擎文件的路径。 +因为 SDK 不仅要获取推理引擎文件,还需要推理元信息(deploy.json, pipeline.json)。它们合在一起,构成 SDK Model,存储在 --work-dir 下 +``` + +除了 demo 程序,预编译包还提供了 SDK 多语言接口。你可以根据自己的项目需求,选择合适的语言接口, +把 MMDeploy SDK 集成到自己的项目中,进行二次开发。 #### Python API +对于检测功能,你也可以参考如下代码,集成 MMDeploy SDK Python API 到自己的项目中: + ```python from mmdeploy_python import Detector -import os import cv2 -# 获取转换后的 mmdeploy model 路径 -model_path = os.getenv('WORK_DIR') -# 从 mmdetection repo 中,获取 demo.jpg 路径 -image_path = '/'.join((os.getenv('MMDET_DIR'), 'demo/demo.jpg')) - -img = cv2.imread(image_path) -detector = Detector(model_path, 'cuda', 0) -bboxes, labels, _ = detector([img])[0] +# 读取图片 +img = cv2.imread('mmdetection/demo/demo.jpg') +# 创建检测器 +detector = Detector(model_path='mmdeploy_models/faster-rcnn', device_name='cuda', device_id=0) +# 执行推理 +bboxes, labels, _ = detector(img) +# 使用阈值过滤推理结果,并绘制到原图中 indices = [i for i in range(len(bboxes))] for index, bbox, label_id in zip(indices, bboxes, labels): [left, top, right, bottom], score = bbox[0:4].astype(int), bbox[4] @@ -237,78 +266,50 @@ for index, bbox, label_id in zip(indices, bboxes, labels): cv2.rectangle(img, (left, top), (right, bottom), (0, 255, 0)) cv2.imwrite('output_detection.png', img) - ``` -更多模型的 SDK Python API 应用样例,请查阅[这里](https://github.com/open-mmlab/mmdeploy/tree/master/demo/python)。 +更多示例,请查阅[这里](https://github.com/open-mmlab/mmdeploy/tree/master/demo/python)。 -```{note} -如果您使用源码安装方式, 请把 ${MMDEPLOY_DIR}/build/lib 加入到环境变量 PYTHONPATH 中。 -否则会遇到错误’ModuleNotFoundError: No module named 'mmdeploy_python' -``` +#### C++ API -#### C API - -使用 C API 进行模型推理的流程符合下面的模式: - -```mermaid -graph LR - A[创建推理句柄] --> B(读取图像) - B --> C(应用句柄进行推理) - C --> D[处理推理结果] - D -->E[销毁结果] - E -->F[销毁推理句柄] -``` +使用 C++ API 进行模型推理的流程符合下面的模式: 
+![image](https://user-images.githubusercontent.com/4560679/182554486-2bf0ff80-9e82-4a0f-bccc-5e1860444302.png) 以下是这个流程的具体应用过程: ```C++ #include #include -#include "detector.h" +#include "mmdeploy/detector.hpp" int main() { const char* device_name = "cuda"; int device_id = 0; - // 获取转换后的 mmdeploy model 路径 - std::string model_path = std::getenv("WORK_DIR"); - // 从 mmdetection repo 中,获取 demo.jpg 路径 - std::string image_path = std::getenv("MMDET_DIR") + "/demo/demo.jpg"; + // mmdeploy SDK model,以上文中转出的 faster r-cnn 模型为例 + std::string model_path = "mmdeploy_model/faster-rcnn"; + std::string image_path = "mmdetection/demo/demo.jpg"; - // 创建推理句柄 - mm_handle_t detector{}; - int status{}; - status = mmdeploy_detector_create_by_path(model_path, device_name, device_id, &detector); - assert(status == MM_SUCCESS); - - // 读取图像 + // 1. 读取模型 + mmdeploy::Model model(model_path); + // 2. 创建预测器 + mmdeploy::Detector detector(model, mmdeploy::Device{device_name, device_id}); + // 3. 读取图像 cv::Mat img = cv::imread(image_path); - assert(img.data); - - // 应用句柄进行推理 - mm_mat_t mat{img.data, img.rows, img.cols, 3, MM_BGR, MM_INT8}; - mm_detect_t *bboxes{}; - int *res_count{}; - status = mmdeploy_detector_apply(detector, &mat, 1, &bboxes, &res_count); - assert (status == MM_SUCCESS); - - // 处理推理结果: 此处我们选择可视化推理结果 - for (int i = 0; i < *res_count; ++i) { - const auto &box = bboxes[i].bbox; + // 4. 应用预测器推理 + auto dets = detector.Apply(img); + // 5. 处理推理结果: 此处我们选择可视化推理结果 + for (int i = 0; i < dets.size(); ++i) { + const auto& box = dets[i].bbox; + fprintf(stdout, "box %d, left=%.2f, top=%.2f, right=%.2f, bottom=%.2f, label=%d, score=%.4f\n", + i, box.left, box.top, box.right, box.bottom, dets[i].label_id, dets[i].score); if (bboxes[i].score < 0.3) { continue; } cv::rectangle(img, cv::Point{(int)box.left, (int)box.top}, cv::Point{(int)box.right, (int)box.bottom}, cv::Scalar{0, 255, 0}); } - - cv::imwrite('output_detection.png', img); - - // 销毁结果 - mmdeploy_detector_release_result(bboxes, res_count, 1); - // 销毁推理句柄 - mmdeploy_detector_destroy(detector); + cv::imwrite("output_detection.png", img); return 0; } ``` @@ -317,27 +318,24 @@ int main() { ```Makefile find_package(MMDeploy REQUIRED) -mmdeploy_load_static(${YOUR_AWESOME_TARGET} MMDeployStaticModules) -mmdeploy_load_dynamic(${YOUR_AWESOME_TARGET} MMDeployDynamicModules) -target_link_libraries(${YOUR_AWESOME_TARGET} PRIVATE MMDeployLibs) +target_link_libraries(${name} PRIVATE mmdeploy ${OpenCV_LIBS}) ``` 编译时,使用 -DMMDeploy_DIR,传入MMDeloyConfig.cmake所在的路径。它在预编译包中的sdk/lib/cmake/MMDeloy下。 -更多模型的 SDK C API 应用样例,请查阅[此处](https://github.com/open-mmlab/mmdeploy/tree/master/demo/csrc)。 +更多示例,请查阅[此处](https://github.com/open-mmlab/mmdeploy/tree/master/demo/csrc)。 -#### C# API - -因篇幅所限,本文不展示具体案例。请参考[这里](https://github.com/open-mmlab/mmdeploy/tree/master/demo/csharp),了解 SDK C# API 的用法。 +对于 C API、C# API、Java API 的使用方法,请分别阅读代码[C demos](https://github.com/open-mmlab/mmdeploy/tree/master/demo/csrc), [C# demos](https://github.com/open-mmlab/mmdeploy/tree/master/demo/csharp) 和 [Java demos](https://github.com/open-mmlab/mmdeploy/tree/master/demo/java)。 +我们将在后续版本中详细讲述它们的用法。 ## 模型精度评估 为了测试部署模型的精度,推理效率,我们提供了 `tools/test.py` 来帮助完成相关工作。以上文中的部署模型为例: ```bash -python ${MMDEPLOY_DIR}/tools/test.py \ - ${MMDEPLOY_DIR}/configs/detection/detection_tensorrt_dynamic-320x320-1344x1344.py \ - ${MMDET_DIR}/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \ - --model ${BACKEND_MODEL_FILES} \ +python mmdeploy/tools/test.py \ + mmdeploy/configs/detection/detection_tensorrt_dynamic-320x320-1344x1344.py \ + 
mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \ + --model mmdeploy_model/faster-rcnn/end2end.engine \ --metrics ${METRICS} \ --device cuda:0 ``` diff --git a/docs/zh_cn/index.rst b/docs/zh_cn/index.rst index 4c2d629df..b53cbf68e 100644 --- a/docs/zh_cn/index.rst +++ b/docs/zh_cn/index.rst @@ -40,6 +40,7 @@ 04-developer-guide/support_new_model.md 04-developer-guide/support_new_backend.md 04-developer-guide/do_regression_test.md + 04-developer-guide/partition_model.md .. toctree:: :maxdepth: 1 @@ -51,6 +52,12 @@ 05-tutorial/04_onnx_custom_op.md 05-tutorial/05_onnx_model_editing.md +.. toctree:: + :maxdepth: 1 + :caption: 附录 + + appendix/cross_build_snpe_service.md + .. toctree:: :maxdepth: 1 :caption: 常见问题 diff --git a/mmdeploy/apis/calibration.py b/mmdeploy/apis/calibration.py index eda807200..6c216c842 100644 --- a/mmdeploy/apis/calibration.py +++ b/mmdeploy/apis/calibration.py @@ -5,8 +5,8 @@ from typing import Optional, Union from mmengine import Config from mmdeploy.core import patch_model -from mmdeploy.utils import cfg_apply_marks, load_config -from mmdeploy.utils.config_utils import get_backend +from mmdeploy.utils import (IR, cfg_apply_marks, get_backend, get_ir_config, + load_config) from .core import PIPELINE_MANAGER, no_mp from .utils import create_calib_input_data as create_calib_input_data_impl @@ -60,8 +60,10 @@ def create_calib_input_data(calib_file: str, dataloader = task_processor.build_dataloader(calib_dataloader) # patch model + backend = get_backend(deploy_cfg).value + ir = IR.get(get_ir_config(deploy_cfg)['type']) patched_model = patch_model( - model, cfg=deploy_cfg, backend=get_backend(deploy_cfg).value) + model, cfg=deploy_cfg, backend=backend, ir=ir) def get_tensor_func(input_data): input_data = model.data_preprocessor(input_data) diff --git a/mmdeploy/apis/core/pipeline_manager.py b/mmdeploy/apis/core/pipeline_manager.py index 490302c08..0cd80f62e 100644 --- a/mmdeploy/apis/core/pipeline_manager.py +++ b/mmdeploy/apis/core/pipeline_manager.py @@ -78,8 +78,8 @@ class PipelineCaller: call_id = self._call_id if call_id is None else call_id if call_id not in self._mp_dict: get_root_logger().error( - f'`{self._func_name}` with Call id: {call_id} failed.') - exit() + f'`{self._func_name}` with Call id: {call_id} failed. 
exit.') + exit(1) ret = self._mp_dict[call_id] self._mp_dict.pop(call_id) return ret diff --git a/mmdeploy/apis/onnx/export.py b/mmdeploy/apis/onnx/export.py index e33938875..37a7333e1 100644 --- a/mmdeploy/apis/onnx/export.py +++ b/mmdeploy/apis/onnx/export.py @@ -7,7 +7,7 @@ import torch from mmdeploy.apis.core import PIPELINE_MANAGER from mmdeploy.core import RewriterContext, patch_model -from mmdeploy.utils import Backend, get_root_logger +from mmdeploy.utils import IR, Backend, get_ir_config, get_root_logger from .optimizer import * # noqa from .passes import optimize_onnx @@ -92,21 +92,21 @@ def export(model: torch.nn.Module, verbose=verbose, keep_initializers_as_inputs=keep_initializers_as_inputs) _add_or_update(deploy_cfg, 'ir_config', ir_config) - + ir = IR.get(get_ir_config(deploy_cfg)['type']) if isinstance(backend, Backend): backend = backend.value backend_config = dict(type=backend) _add_or_update(deploy_cfg, 'backend_config', backend_config) context_info['cfg'] = deploy_cfg + context_info['ir'] = ir if 'backend' not in context_info: context_info['backend'] = backend if 'opset' not in context_info: context_info['opset'] = opset_version # patch model - patched_model = patch_model( - model, cfg=deploy_cfg, backend=backend, **patch_metas) + patched_model = patch_model(model, cfg=deploy_cfg, backend=backend, ir=ir) if 'onnx_custom_passes' not in context_info: onnx_custom_passes = optimize_onnx if optimize else None diff --git a/mmdeploy/apis/onnx/optimizer.py b/mmdeploy/apis/onnx/optimizer.py index 612e9d8ea..b9d2ead0c 100644 --- a/mmdeploy/apis/onnx/optimizer.py +++ b/mmdeploy/apis/onnx/optimizer.py @@ -15,7 +15,18 @@ def model_to_graph__custom_optimizer(ctx, *args, **kwargs): assert isinstance( custom_passes, Callable ), f'Expect a callable onnx_custom_passes, get {type(custom_passes)}.' - graph, params_dict, torch_out = custom_passes(graph, params_dict, + graph, params_dict, torch_out = custom_passes(ctx, graph, params_dict, torch_out) return graph, params_dict, torch_out + + +@FUNCTION_REWRITER.register_rewriter( + 'torch._C._jit_pass_onnx_deduplicate_initializers', backend='tensorrt') +def jit_pass_onnx_deduplicate_initializers__disable(ctx, graph, param_dict, + arg2): + """This pass will disable TensorRT topk export. + + disable for TensorRT. 
+ """ + return param_dict diff --git a/mmdeploy/apis/onnx/passes/optimize_onnx.py b/mmdeploy/apis/onnx/passes/optimize_onnx.py index d413a513e..19e14bc29 100644 --- a/mmdeploy/apis/onnx/passes/optimize_onnx.py +++ b/mmdeploy/apis/onnx/passes/optimize_onnx.py @@ -2,7 +2,8 @@ from mmdeploy.utils import get_root_logger -def optimize_onnx(graph, params_dict, torch_out): +def optimize_onnx(ctx, graph, params_dict, torch_out): + """The optimize callback of the onnx model.""" logger = get_root_logger() logger.info('Execute onnx optimize passes.') try: @@ -10,7 +11,13 @@ def optimize_onnx(graph, params_dict, torch_out): ts_optimizer.onnx._jit_pass_merge_shape_concate(graph) ts_optimizer.onnx._jit_pass_onnx_peephole(graph) ts_optimizer.onnx._jit_pass_flatten_cls_head(graph) - except Exception: - pass - + ts_optimizer.onnx._jit_pass_fuse_select_assign(graph, params_dict) + ts_optimizer.onnx._jit_pass_common_subgraph_elimination( + graph, params_dict) + except ImportError: + logger.warning( + 'Can not optimize model, please build torchscipt extension.\n' + 'More details: ' + 'https://github.com/open-mmlab/mmdeploy/blob/master/docs/en/experimental/onnx_optimizer.md' # noqa + ) return graph, params_dict, torch_out diff --git a/mmdeploy/apis/pytorch2onnx.py b/mmdeploy/apis/pytorch2onnx.py index 68a7df3a7..812135372 100644 --- a/mmdeploy/apis/pytorch2onnx.py +++ b/mmdeploy/apis/pytorch2onnx.py @@ -88,6 +88,7 @@ def torch2onnx(img: Any, 'verbose', False) keep_initializers_as_inputs = onnx_cfg.get('keep_initializers_as_inputs', True) + optimize = onnx_cfg.get('optimize', False) with no_mp(): export( torch_model, @@ -102,4 +103,5 @@ def torch2onnx(img: Any, dynamic_axes=dynamic_axes, verbose=verbose, keep_initializers_as_inputs=keep_initializers_as_inputs, - patch_metas=patch_metas) + patch_metas=patch_metas, + optimize=optimize) diff --git a/mmdeploy/apis/snpe/__init__.py b/mmdeploy/apis/snpe/__init__.py new file mode 100644 index 000000000..6f8febaec --- /dev/null +++ b/mmdeploy/apis/snpe/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from mmdeploy.backend.snpe import from_onnx as _from_onnx +from mmdeploy.backend.snpe import is_available +from ..core import PIPELINE_MANAGER + +from_onnx = PIPELINE_MANAGER.register_pipeline()(_from_onnx) + +__all__ = ['is_available', 'from_onnx'] + +if is_available(): + try: + from mmdeploy.backend.snpe.onnx2dlc import (get_env_key, + get_output_model_file) + __all__ += ['get_output_model_file', 'get_env_key'] + except Exception: + pass diff --git a/mmdeploy/apis/torch_jit/trace.py b/mmdeploy/apis/torch_jit/trace.py index 901a22913..e833381d2 100644 --- a/mmdeploy/apis/torch_jit/trace.py +++ b/mmdeploy/apis/torch_jit/trace.py @@ -6,7 +6,7 @@ import torch from packaging.version import parse as version_parse from mmdeploy.core import RewriterContext, patch_model -from mmdeploy.utils import IR, Backend, get_root_logger +from mmdeploy.utils import IR, Backend, get_ir_config, get_root_logger from ..core import PIPELINE_MANAGER @@ -87,7 +87,8 @@ def trace(func: torch.nn.Module, # patch model if isinstance(func, torch.nn.Module): - func = patch_model(func, cfg=deploy_cfg, backend=backend) + ir = IR.get(get_ir_config(deploy_cfg)['type']) + func = patch_model(func, cfg=deploy_cfg, backend=backend, ir=ir) with RewriterContext(**context_info), torch.no_grad(): # for exporting models with weight that depends on inputs diff --git a/mmdeploy/apis/visualize.py b/mmdeploy/apis/visualize.py index 2bce15952..95cd9d9e9 100644 --- a/mmdeploy/apis/visualize.py +++ b/mmdeploy/apis/visualize.py @@ -16,7 +16,8 @@ def visualize_model(model_cfg: Union[str, mmcv.Config], device: str, backend: Optional[Backend] = None, output_file: Optional[str] = None, - show_result: bool = False): + show_result: bool = False, + **kwargs): """Run inference with PyTorch or backend model and show results. Examples: diff --git a/mmdeploy/backend/ncnn/init_plugins.py b/mmdeploy/backend/ncnn/init_plugins.py index e04ca294c..e721a3f11 100644 --- a/mmdeploy/backend/ncnn/init_plugins.py +++ b/mmdeploy/backend/ncnn/init_plugins.py @@ -18,13 +18,22 @@ def get_ops_path() -> str: def get_onnx2ncnn_path() -> str: - """Get onnx2ncnn path. + """Get mmdeploy_onnx2ncnn path. Returns: - str: A path of onnx2ncnn tool. + str: A path of mmdeploy_onnx2ncnn tool. 
""" - candidates = ['./onnx2ncnn', './onnx2ncnn.exe'] - return get_file_path(os.path.dirname(__file__), candidates) + candidates = ['./mmdeploy_onnx2ncnn', './mmdeploy_onnx2ncnn.exe'] + onnx2ncnn_path = get_file_path(os.path.dirname(__file__), candidates) + + if onnx2ncnn_path is None or not os.path.exists(onnx2ncnn_path): + onnx2ncnn_path = get_file_path('', candidates) + + if onnx2ncnn_path is None or not os.path.exists(onnx2ncnn_path): + onnx2ncnn_path = shutil.which('mmdeploy_onnx2ncnn') + onnx2ncnn_path = '' if onnx2ncnn_path is None else onnx2ncnn_path + + return onnx2ncnn_path def get_ncnn2int8_path() -> str: diff --git a/mmdeploy/backend/ncnn/onnx2ncnn.py b/mmdeploy/backend/ncnn/onnx2ncnn.py index fd5c0251c..2c63aebe6 100644 --- a/mmdeploy/backend/ncnn/onnx2ncnn.py +++ b/mmdeploy/backend/ncnn/onnx2ncnn.py @@ -3,7 +3,7 @@ import os import os.path as osp import tempfile from subprocess import call -from typing import List, Union +from typing import List, Optional, Union import onnx @@ -17,17 +17,21 @@ def mkdir_or_exist(dir_name, mode=0o777): os.makedirs(dir_name, mode=mode, exist_ok=True) -def get_output_model_file(onnx_path: str, work_dir: str) -> List[str]: +def get_output_model_file(onnx_path: str, + work_dir: Optional[str] = None) -> List[str]: """Returns the path to the .param, .bin file with export result. Args: onnx_path (str): The path to the onnx model. - work_dir (str): The path to the directory for saving the results. + work_dir (str|None): The path to the directory for saving the results. + Defaults to `None`, which means use the directory of onnx_path. Returns: List[str]: The path to the files where the export result will be located. """ + if work_dir is None: + work_dir = osp.dirname(onnx_path) mkdir_or_exist(osp.abspath(work_dir)) file_name = osp.splitext(osp.split(onnx_path)[1])[0] save_param = osp.join(work_dir, file_name + '.param') @@ -64,4 +68,5 @@ def from_onnx(onnx_model: Union[onnx.ModelProto, str], save_bin = output_file_prefix + '.bin' onnx2ncnn_path = get_onnx2ncnn_path() - call([onnx2ncnn_path, onnx_path, save_param, save_bin]) + ret_code = call([onnx2ncnn_path, onnx_path, save_param, save_bin]) + assert ret_code == 0, 'onnx2ncnn failed' diff --git a/mmdeploy/backend/ncnn/wrapper.py b/mmdeploy/backend/ncnn/wrapper.py index 29b8c5113..adc9dccdf 100644 --- a/mmdeploy/backend/ncnn/wrapper.py +++ b/mmdeploy/backend/ncnn/wrapper.py @@ -125,7 +125,7 @@ class NCNNWrapper(BaseWrapper): return outputs - @TimeCounter.count_time() + @TimeCounter.count_time(Backend.NCNN.value) def __ncnn_execute(self, extractor: ncnn.Extractor, output_names: Sequence[str]) -> Dict[str, ncnn.Mat]: """Run inference with ncnn. diff --git a/mmdeploy/backend/onnxruntime/wrapper.py b/mmdeploy/backend/onnxruntime/wrapper.py index daac6bf51..65b4c7311 100644 --- a/mmdeploy/backend/onnxruntime/wrapper.py +++ b/mmdeploy/backend/onnxruntime/wrapper.py @@ -100,7 +100,7 @@ class ORTWrapper(BaseWrapper): return outputs - @TimeCounter.count_time() + @TimeCounter.count_time(Backend.ONNXRUNTIME.value) def __ort_execute(self, io_binding: ort.IOBinding): """Run inference with ONNXRuntime session. 
diff --git a/mmdeploy/backend/openvino/wrapper.py b/mmdeploy/backend/openvino/wrapper.py index 7a41db24a..ab91f8331 100644 --- a/mmdeploy/backend/openvino/wrapper.py +++ b/mmdeploy/backend/openvino/wrapper.py @@ -136,7 +136,7 @@ class OpenVINOWrapper(BaseWrapper): outputs = self.__process_outputs(outputs) return outputs - @TimeCounter.count_time() + @TimeCounter.count_time(Backend.OPENVINO.value) def __openvino_execute( self, inputs: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]: """Run inference with OpenVINO IE. diff --git a/mmdeploy/backend/pplnn/onnx2pplnn.py b/mmdeploy/backend/pplnn/onnx2pplnn.py index 878238d04..a292dc8de 100644 --- a/mmdeploy/backend/pplnn/onnx2pplnn.py +++ b/mmdeploy/backend/pplnn/onnx2pplnn.py @@ -1,10 +1,8 @@ # Copyright (c) OpenMMLab. All rights reserved. from typing import Optional, Sequence -from pyppl import nn as pplnn - from mmdeploy.utils.device import parse_cuda_device_id -from .utils import register_engines +from .utils import create_runtime, register_engines def from_onnx(onnx_model: str, @@ -52,10 +50,7 @@ def from_onnx(onnx_model: str, quick_select=False, export_algo_file=algo_file, input_shapes=input_shapes) - runtime_builder = pplnn.OnnxRuntimeBuilderFactory.CreateFromFile( - onnx_model, engines) - assert runtime_builder is not None, 'Failed to create '\ - 'OnnxRuntimeBuilder.' + _ = create_runtime(onnx_model, engines) # side effect: export algorithms import shutil if onnx_output_path != onnx_model: shutil.copy2(onnx_model, onnx_output_path) diff --git a/mmdeploy/backend/pplnn/utils.py b/mmdeploy/backend/pplnn/utils.py index f18d45330..f64e5c557 100644 --- a/mmdeploy/backend/pplnn/utils.py +++ b/mmdeploy/backend/pplnn/utils.py @@ -8,6 +8,41 @@ import pyppl.nn as pplnn from mmdeploy.utils import get_root_logger +def create_runtime(onnx_file: str, + engines: List[pplnn.Engine]) -> pplnn.Runtime: + """Create runtime object for pplnn. + + Args: + onnx_file (str): path to onnx model + engines (List[pplnn.Engine]): engines used to create the runtime + object + + Returns: + pplnn.Runtime: created runtime object + """ + runtime_builder = pplnn.onnx.RuntimeBuilderFactory.Create() + assert runtime_builder is not None, 'Failed to create '\ + 'onnx.RuntimeBuilder.' + + status = runtime_builder.LoadModelFromFile(onnx_file) + assert status == pplcommon.RC_SUCCESS, 'Failed to load ONNX model.' + + resources = pplnn.onnx.RuntimeBuilderResources() + resources.engines = engines + + status = runtime_builder.SetResources(resources) + assert status == pplcommon.RC_SUCCESS, 'runtime_builder.SetResources() ' \ + 'Failed.' + + status = runtime_builder.Preprocess() + assert status == pplcommon.RC_SUCCESS, 'runtime_builder.Preprocess() ' \ + 'Failed.' 
+ + runtime = runtime_builder.CreateRuntime() + assert runtime is not None, 'Failed to create onnx.Runtime' + return runtime + + def register_engines(device_id: int, disable_avx512: bool = False, quick_select: bool = False, @@ -33,65 +68,66 @@ def register_engines(device_id: int, engines = [] logger = get_root_logger() if device_id == -1: - x86_options = pplnn.X86EngineOptions() - x86_engine = pplnn.X86EngineFactory.Create(x86_options) + x86_options = pplnn.x86.EngineOptions() + x86_engine = pplnn.x86.EngineFactory.Create(x86_options) if not x86_engine: logger.error('Failed to create x86 engine') - sys.exit(-1) + sys.exit(1) if disable_avx512: - status = x86_engine.Configure(pplnn.X86_CONF_DISABLE_AVX512) + status = x86_engine.Configure(pplnn.x86.ENGINE_CONF_DISABLE_AVX512) if status != pplcommon.RC_SUCCESS: logger.error('x86 engine Configure() failed: ' + pplcommon.GetRetCodeStr(status)) - sys.exit(-1) + sys.exit(1) - engines.append(pplnn.Engine(x86_engine)) + engines.append(x86_engine) else: - cuda_options = pplnn.CudaEngineOptions() + cuda_options = pplnn.cuda.EngineOptions() cuda_options.device_id = device_id + cuda_options.mm_policy = pplnn.cuda.MM_BEST_FIT - cuda_engine = pplnn.CudaEngineFactory.Create(cuda_options) + cuda_engine = pplnn.cuda.EngineFactory.Create(cuda_options) if not cuda_engine: logger.error('Failed to create cuda engine.') - sys.exit(-1) + sys.exit(1) if quick_select: status = cuda_engine.Configure( - pplnn.CUDA_CONF_USE_DEFAULT_ALGORITHMS) + pplnn.cuda.ENGINE_CONF_USE_DEFAULT_ALGORITHMS) if status != pplcommon.RC_SUCCESS: logger.error('cuda engine Configure() failed: ' + pplcommon.GetRetCodeStr(status)) - sys.exit(-1) + sys.exit(1) if input_shapes is not None: - status = cuda_engine.Configure(pplnn.CUDA_CONF_SET_INPUT_DIMS, - input_shapes) + status = cuda_engine.Configure( + pplnn.cuda.ENGINE_CONF_SET_INPUT_DIMS, input_shapes) if status != pplcommon.RC_SUCCESS: logger.error( - 'cuda engine Configure(CUDA_CONF_SET_INPUT_DIMS) failed: ' - + pplcommon.GetRetCodeStr(status)) - sys.exit(-1) + 'cuda engine Configure(ENGINE_CONF_SET_INPUT_DIMS) ' + 'failed: ' + pplcommon.GetRetCodeStr(status)) + sys.exit(1) if export_algo_file is not None: - status = cuda_engine.Configure(pplnn.CUDA_CONF_EXPORT_ALGORITHMS, - export_algo_file) + status = cuda_engine.Configure( + pplnn.cuda.ENGINE_CONF_EXPORT_ALGORITHMS, export_algo_file) if status != pplcommon.RC_SUCCESS: logger.error( - 'cuda engine Configure(CUDA_CONF_EXPORT_ALGORITHMS) ' + 'cuda engine Configure(ENGINE_CONF_EXPORT_ALGORITHMS) ' 'failed: ' + pplcommon.GetRetCodeStr(status)) - sys.exit(-1) + sys.exit(1) if import_algo_file is not None: - status = cuda_engine.Configure(pplnn.CUDA_CONF_IMPORT_ALGORITHMS, - import_algo_file) + status = cuda_engine.Configure( + pplnn.cuda.ENGINE_CONF_IMPORT_ALGORITHMS, import_algo_file) if status != pplcommon.RC_SUCCESS: logger.error( - 'cuda engine Configure(CUDA_CONF_IMPORT_ALGORITHMS) ' + 'cuda engine Configure(ENGINE_CONF_IMPORT_ALGORITHMS) ' 'failed: ' + pplcommon.GetRetCodeStr(status)) - sys.exit(-1) + sys.exit(1) - engines.append(pplnn.Engine(cuda_engine)) + engines.append(cuda_engine) return engines diff --git a/mmdeploy/backend/pplnn/wrapper.py b/mmdeploy/backend/pplnn/wrapper.py index aaba37f5d..869d43079 100644 --- a/mmdeploy/backend/pplnn/wrapper.py +++ b/mmdeploy/backend/pplnn/wrapper.py @@ -4,13 +4,12 @@ from typing import Dict, Optional, Sequence import numpy as np import onnx import pyppl.common as pplcommon -import pyppl.nn as pplnn import torch from mmdeploy.utils import 
Backend, parse_device_id from mmdeploy.utils.timer import TimeCounter from ..base import BACKEND_WRAPPER, BaseWrapper -from .utils import register_engines +from .utils import create_runtime, register_engines @BACKEND_WRAPPER.register_module(Backend.PPLNN.value) @@ -51,17 +50,12 @@ class PPLNNWrapper(BaseWrapper): engines = register_engines( device_id, disable_avx512=False, - quick_select=False, + quick_select=algo_file is None, import_algo_file=algo_file) - runtime_builder = pplnn.OnnxRuntimeBuilderFactory.CreateFromFile( - onnx_file, engines) - assert runtime_builder is not None, 'Failed to create '\ - 'OnnxRuntimeBuilder.' - - runtime = runtime_builder.CreateRuntime() - assert runtime is not None, 'Failed to create the instance of Runtime.' + runtime = create_runtime(onnx_file, engines) self.runtime = runtime + self.inputs = { runtime.GetInputTensor(i).GetName(): runtime.GetInputTensor(i) for i in range(runtime.GetInputCount()) @@ -100,7 +94,7 @@ class PPLNNWrapper(BaseWrapper): outputs[name] = torch.from_numpy(outputs[name]) return outputs - @TimeCounter.count_time() + @TimeCounter.count_time(Backend.PPLNN.value) def __pplnn_execute(self): """Run inference with PPLNN.""" status = self.runtime.Run() diff --git a/mmdeploy/backend/sdk/export_info.py b/mmdeploy/backend/sdk/export_info.py index 459e82b18..8e68f413f 100644 --- a/mmdeploy/backend/sdk/export_info.py +++ b/mmdeploy/backend/sdk/export_info.py @@ -127,6 +127,8 @@ def get_models(deploy_cfg: Union[str, mmcv.Config], weights = replace_suffix(ir_name, '.bin') if 'precision' in deploy_cfg['backend_config']: precision = deploy_cfg['backend_config']['precision'] + elif backend == Backend.SNPE: + net = replace_suffix(ir_name, '.dlc') elif backend in [Backend.ONNXRUNTIME, Backend.TORCHSCRIPT]: pass else: diff --git a/mmdeploy/backend/sdk/wrapper.py b/mmdeploy/backend/sdk/wrapper.py index 338e86641..7fa709221 100644 --- a/mmdeploy/backend/sdk/wrapper.py +++ b/mmdeploy/backend/sdk/wrapper.py @@ -15,7 +15,7 @@ class SDKWrapper(BaseWrapper): # TODO: get device id somewhere self.handle = creator(model_file, device, 0) - @TimeCounter.count_time() + @TimeCounter.count_time(Backend.SDK.value) def invoke(self, imgs): return self.handle(imgs) diff --git a/mmdeploy/backend/snpe/__init__.py b/mmdeploy/backend/snpe/__init__.py new file mode 100644 index 000000000..961b75dc7 --- /dev/null +++ b/mmdeploy/backend/snpe/__init__.py @@ -0,0 +1,30 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp + +from .init_plugins import get_onnx2dlc_path +from .onnx2dlc import from_onnx + + +def is_available(): + """Check whether ncnn and snpe-onnx-to-dlc tool are installed. + + Returns: + bool: True if snpe-onnx-to-dlc tool are installed. + """ + + onnx2dlc = get_onnx2dlc_path() + if onnx2dlc is None: + return False + return osp.exists(onnx2dlc) + + +__all__ = ['from_onnx'] + +if is_available(): + try: + from .wrapper import SNPEWrapper + + __all__ += ['SNPEWrapper'] + except Exception as e: + print(e) + pass diff --git a/mmdeploy/backend/snpe/init_plugins.py b/mmdeploy/backend/snpe/init_plugins.py new file mode 100644 index 000000000..7f4c35394 --- /dev/null +++ b/mmdeploy/backend/snpe/init_plugins.py @@ -0,0 +1,11 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import shutil + + +def get_onnx2dlc_path() -> str: + """Get snpe-onnx-to-dlc path. + + Returns: + str: A path of snpe-onnx-to-dlc tool. 
+ """ + return shutil.which('snpe-onnx-to-dlc') diff --git a/mmdeploy/backend/snpe/onnx2dlc.py b/mmdeploy/backend/snpe/onnx2dlc.py new file mode 100644 index 000000000..45e727e45 --- /dev/null +++ b/mmdeploy/backend/snpe/onnx2dlc.py @@ -0,0 +1,78 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os +import os.path as osp +import tempfile +from subprocess import call +from typing import List, Optional, Union + +import onnx + +from .init_plugins import get_onnx2dlc_path + + +def mkdir_or_exist(dir_name, mode=0o777): + if dir_name == '': + return + dir_name = osp.expanduser(dir_name) + os.makedirs(dir_name, mode=mode, exist_ok=True) + + +def get_env_key() -> str: + """Return environment key str. + + Returns: + str: The string to find SNPE service URI + """ + return '__MMDEPLOY_SNPE_URI' + + +def get_output_model_file(onnx_path: str, + work_dir: Optional[str] = None) -> List[str]: + """Returns the path to the .dlc file with export result. + + Args: + onnx_path (str): The path to the onnx model. + work_dir (str|None): The path to the directory for saving the results. + Defaults to `None`, which means use the directory of onnx_path. + + Returns: + List[str]: The path to the files where the export result will be + located. + """ + if work_dir is None: + work_dir = osp.dirname(onnx_path) + mkdir_or_exist(osp.abspath(work_dir)) + file_name = osp.splitext(osp.split(onnx_path)[1])[0] + save_dlc = osp.join(work_dir, file_name + '.dlc') + return save_dlc + + +def from_onnx(onnx_model: Union[onnx.ModelProto, str], + output_file_prefix: str): + """Convert ONNX to dlc. + + We need to use a executable program to convert the `.onnx` file to a `.dlc` + + Example: + >>> from mmdeploy.apis.snpe import from_onnx + >>> onnx_path = 'work_dir/end2end.onnx' + >>> output_file_prefix = 'work_dir/end2end' + >>> from_onnx(onnx_path, output_file_prefix) + + Args: + onnx_path (ModelProto|str): The path of the onnx model. + output_file_prefix (str): The path to save the output .dlc file. + """ + + if not isinstance(onnx_model, str): + onnx_path = tempfile.NamedTemporaryFile(suffix='.onnx').name + onnx.save(onnx_model, onnx_path) + else: + onnx_path = onnx_model + + save_dlc = output_file_prefix + '.dlc' + + onnx2dlc = get_onnx2dlc_path() + ret_code = call( + [onnx2dlc, '--input_network', onnx_path, '--output', save_dlc]) + assert ret_code == 0, 'onnx2dlc failed' diff --git a/mmdeploy/backend/snpe/wrapper.py b/mmdeploy/backend/snpe/wrapper.py new file mode 100644 index 000000000..f16d6a554 --- /dev/null +++ b/mmdeploy/backend/snpe/wrapper.py @@ -0,0 +1,250 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import abc +import os +import time +from random import randint +from typing import Dict, Optional, Sequence, Tuple + +import grpc +import inference_pb2 +import inference_pb2_grpc +import numpy as np +import torch + +from mmdeploy.backend.snpe.onnx2dlc import get_env_key +from mmdeploy.utils import Backend, get_root_logger +from mmdeploy.utils.timer import TimeCounter +from ..base import BACKEND_WRAPPER, BaseWrapper + + +# add interceptor to sleep and retry request +# https://github.com/grpc/grpc/issues/19514 +class SleepingPolicy(abc.ABC): + + @abc.abstractmethod + def sleep(self, try_i: int): + """How long to sleep in milliseconds. 
+ + :param try_i: the number of retry (starting from zero) + """ + assert try_i >= 0 + + +class ExponentialBackoff(SleepingPolicy): + + def __init__(self, *, init_backoff_ms: int, max_backoff_ms: int, + multiplier: int): + self.init_backoff = randint(0, init_backoff_ms) + self.max_backoff = max_backoff_ms + self.multiplier = multiplier + + def sleep(self, try_i: int): + sleep_range = min(self.init_backoff * self.multiplier**try_i, + self.max_backoff) + sleep_ms = randint(0, sleep_range) + logger = get_root_logger() + logger.debug(f'Sleeping for {sleep_ms}') + time.sleep(sleep_ms / 1000) + + +class RetryOnRpcErrorClientInterceptor(grpc.UnaryUnaryClientInterceptor, + grpc.StreamUnaryClientInterceptor): + + def __init__( + self, + *, + max_attempts: int, + sleeping_policy: SleepingPolicy, + status_for_retry: Optional[Tuple[grpc.StatusCode]] = None, + ): + self.max_attempts = max_attempts + self.sleeping_policy = sleeping_policy + self.status_for_retry = status_for_retry + + def _intercept_call(self, continuation, client_call_details, + request_or_iterator): + + for try_i in range(self.max_attempts): + response = continuation(client_call_details, request_or_iterator) + + if isinstance(response, grpc.RpcError): + + # Return if it was last attempt + if try_i == (self.max_attempts - 1): + return response + + # If status code is not in retryable status codes + if (self.status_for_retry + and response.code() not in self.status_for_retry): + return response + + self.sleeping_policy.sleep(try_i) + else: + return response + + def intercept_unary_unary(self, continuation, client_call_details, + request): + return self._intercept_call(continuation, client_call_details, request) + + def intercept_stream_unary(self, continuation, client_call_details, + request_iterator): + return self._intercept_call(continuation, client_call_details, + request_iterator) + + +@BACKEND_WRAPPER.register_module(Backend.SNPE.value) +class SNPEWrapper(BaseWrapper): + """snpe wrapper class for inference. + + Args: + dlc_file (str): Path of a weight file. + output_names (Sequence[str] | None): Names of model outputs in order. + Defaults to `None` and the wrapper will load the output names from + snpe model. 
+ + Examples: + >>> from mmdeploy.backend.snpe import SNPEWrapper + >>> import torch + >>> + >>> snple_file = 'alexnet.dlc' + >>> model = SNPEWrapper(snpe_file) + >>> inputs = dict(input=torch.randn(1, 3, 224, 224)) + >>> outputs = model(inputs) + >>> print(outputs) + """ + + def __init__(self, + dlc_file: str, + uri: str, + output_names: Optional[Sequence[str]] = None, + **kwargs): + + logger = get_root_logger() + + interceptors = (RetryOnRpcErrorClientInterceptor( + max_attempts=4, + sleeping_policy=ExponentialBackoff( + init_backoff_ms=100, max_backoff_ms=1600, multiplier=2), + status_for_retry=(grpc.StatusCode.UNAVAILABLE, ), + ), ) + + if uri is None and get_env_key() in os.environ: + logger.warn( + 'snpe remote service URI not set, search from environment') + uri = os.environ[get_env_key()] + + if uri is None: + logger.error('URI not set') + + weights = bytes() + filesize = os.stat(dlc_file).st_size + + logger.info(f'reading local model file {dlc_file}') + with open(dlc_file, 'rb') as f: + weights = f.read(filesize) + + self.stub = inference_pb2_grpc.InferenceStub( + grpc.intercept_channel(grpc.insecure_channel(uri), *interceptors)) + + logger.info('init remote SNPE engine with RPC, please wait...') + model = inference_pb2.Model(name=dlc_file, weights=weights, device=1) + resp = self.stub.Init(model) + + if resp.status != 0: + logger.error(f'init SNPE model failed {resp.info}') + return + + output = self.stub.OutputNames(inference_pb2.Empty()) + output_names = output.names + + super().__init__(output_names) + logger.info(f'init success, outputs {output_names}') + + def forward(self, inputs: Dict[str, + torch.Tensor]) -> Dict[str, torch.Tensor]: + """Run forward inference. + + Args: + inputs (Dict[str, torch.Tensor]): Key-value pairs of model inputs. + + Returns: + Dict[str, torch.Tensor]: Key-value pairs of model outputs. + """ + + def get_shape(shape): + if len(shape) == 4: + return (0, 2, 3, 1) + elif len(shape) == 3: + return (0, 1, 2) + elif len(shape) == 2: + return (0, 1) + return (0) + + input_list = list(inputs.values()) + device_type = input_list[0].device.type + + logger = get_root_logger() + + # build `list` inputs for remote snpe engine + snpe_inputs = [] + for name, input_tensor in inputs.items(): + data = input_tensor.contiguous().detach() + # snpe input layout is NHWC + data = data.permute(get_shape(data.shape)) + data = data.cpu().numpy() + + if data.dtype != np.float32: + logger.error('SNPE now only support fp32 input') + data = data.astype(dtype=np.float32) + tensor = inference_pb2.Tensor( + data=data.tobytes(), + name=name, + dtype='float32', + shape=list(data.shape)) + + snpe_inputs.append(tensor) + + return self.__snpe_execute( + tensorList=inference_pb2.TensorList(data=snpe_inputs), + device=device_type) + + @TimeCounter.count_time(Backend.SNPE.value) + def __snpe_execute(self, tensorList: inference_pb2.TensorList, + device: str) -> Dict[str, torch.tensor]: + """Run inference with snpe remote inference engine. + + Args: + tensorList (inference_pb2.TensorList): snpe input tensor. + + Returns: + dict[str, torch.tensor]: Inference results of snpe model. 
+ """ + resp = self.stub.Inference(tensorList) + + def get_shape(shape): + if len(shape) == 4: + if shape[0] == 1 and shape[ + 1] == 1 and shape[2] > 1 and shape[3] > 1: + # snpe NHWC layout works except for segmentation task + return (0, 1, 2, 3) + return (0, 3, 1, 2) + elif len(shape) == 3: + return (0, 1, 2) + elif len(shape) == 2: + return (0, 1) + return (0) + + result = dict() + if resp.status == 0: + for tensor in resp.data: + ndarray = np.frombuffer(tensor.data, dtype=np.float32) + shape = tuple(tensor.shape) + data = torch.from_numpy( + ndarray.reshape(shape).copy()).to(device) + data = data.permute(get_shape(data.shape)) + result[tensor.name] = data + else: + logger = get_root_logger() + logger.error(f'snpe inference failed {resp.info}') + + return result diff --git a/mmdeploy/backend/tensorrt/init_plugins.py b/mmdeploy/backend/tensorrt/init_plugins.py index d1dcc9d8b..cc75a80e2 100644 --- a/mmdeploy/backend/tensorrt/init_plugins.py +++ b/mmdeploy/backend/tensorrt/init_plugins.py @@ -13,7 +13,9 @@ def get_ops_path() -> str: """ candidates = [ '../../lib/libmmdeploy_tensorrt_ops.so', - '../../lib/mmdeploy_tensorrt_ops.dll' + '../../lib/mmdeploy_tensorrt_ops.dll', + '../../../build/lib/libmmdeploy_tensorrt_ops.so', + '../../../build/bin/*/mmdeploy_tensorrt_ops.dll' ] return get_file_path(os.path.dirname(__file__), candidates) diff --git a/mmdeploy/backend/tensorrt/wrapper.py b/mmdeploy/backend/tensorrt/wrapper.py index 888c3cae2..e4bfd22a6 100644 --- a/mmdeploy/backend/tensorrt/wrapper.py +++ b/mmdeploy/backend/tensorrt/wrapper.py @@ -166,7 +166,7 @@ class TRTWrapper(BaseWrapper): return outputs - @TimeCounter.count_time() + @TimeCounter.count_time(Backend.TENSORRT.value) def __trt_execute(self, bindings: Sequence[int]): """Run inference with TensorRT. diff --git a/mmdeploy/backend/torchscript/wrapper.py b/mmdeploy/backend/torchscript/wrapper.py index 668ab23aa..5d8c79177 100644 --- a/mmdeploy/backend/torchscript/wrapper.py +++ b/mmdeploy/backend/torchscript/wrapper.py @@ -93,7 +93,7 @@ class TorchscriptWrapper(BaseWrapper): outputs = dict(zip(self._output_names, outputs)) return outputs - @TimeCounter.count_time() + @TimeCounter.count_time(Backend.TORCHSCRIPT.value) def __torchscript_execute( self, inputs: Sequence[torch.Tensor]) -> Sequence[torch.Tensor]: """Run inference with TorchScript. 
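+
+        Passing ``Backend.TORCHSCRIPT.value`` to ``TimeCounter.count_time``
+        groups profiling statistics under the backend name. A sketch of the
+        assumed profiling entry point (exact signature may differ across
+        mmdeploy versions)::
+
+            >>> from mmdeploy.utils.timer import TimeCounter
+            >>> with TimeCounter.activate(log_interval=10):
+            ...     outputs = wrapper(inputs)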
diff --git a/mmdeploy/codebase/base/backend_model.py b/mmdeploy/codebase/base/backend_model.py
index 91ad5c182..8505cee79 100644
--- a/mmdeploy/codebase/base/backend_model.py
+++ b/mmdeploy/codebase/base/backend_model.py
@@ -71,7 +71,7 @@ class BaseBackendModel(BaseModel, metaclass=ABCMeta):
             from mmdeploy.backend.pplnn import PPLNNWrapper
             return PPLNNWrapper(
                 onnx_file=backend_files[0],
-                algo_file=backend_files[1],
+                algo_file=backend_files[1] if len(backend_files) > 1 else None,
                 device=device,
                 output_names=output_names)
         elif backend == Backend.NCNN:
@@ -108,5 +108,12 @@ class BaseBackendModel(BaseModel, metaclass=ABCMeta):
                 model=backend_files[0],
                 input_names=input_names,
                 output_names=output_names)
+        elif backend == Backend.SNPE:
+            from mmdeploy.backend.snpe import SNPEWrapper
+            uri = None
+            if 'uri' in kwargs:
+                uri = kwargs['uri']
+            return SNPEWrapper(
+                dlc_file=backend_files[0], uri=uri, output_names=output_names)
         else:
             raise NotImplementedError(f'Unknown backend type: {backend.value}')
diff --git a/mmdeploy/codebase/mmcls/deploy/classification.py b/mmdeploy/codebase/mmcls/deploy/classification.py
index 016c4a1ae..9384b610f 100644
--- a/mmdeploy/codebase/mmcls/deploy/classification.py
+++ b/mmdeploy/codebase/mmcls/deploy/classification.py
@@ -169,7 +169,7 @@ class Classification(BaseTask):
         model_cfg = self.model_cfg
         assert 'test_pipeline' in model_cfg, \
             f'test_pipeline not found in {model_cfg}.'
-        from mmcls.datasets.pipelines import Compose
+        from mmengine.dataset import Compose
         pipeline = deepcopy(model_cfg.test_pipeline)
 
         if isinstance(imgs, str):
@@ -255,8 +255,14 @@ class Classification(BaseTask):
             dict: Composed of the postprocess information.
         """
         postprocess = self.model_cfg.model.head
-        assert 'topk' in postprocess, 'model config lack topk'
-        postprocess.topk = max(postprocess.topk)
+        if 'topk' not in postprocess:
+            topk = (1, )
+            logger = get_root_logger()
+            logger.warning(
+                'no topk in postprocess config, using default topk value.')
+        else:
+            topk = postprocess.topk
+        postprocess.topk = max(topk)
         return postprocess
 
     def get_model_name(self) -> str:
diff --git a/mmdeploy/codebase/mmcls/deploy/classification_model.py b/mmdeploy/codebase/mmcls/deploy/classification_model.py
index f9bcac523..19644908e 100644
--- a/mmdeploy/codebase/mmcls/deploy/classification_model.py
+++ b/mmdeploy/codebase/mmcls/deploy/classification_model.py
@@ -43,7 +43,7 @@ class End2EndModel(BaseBackendModel):
         self.device = device
 
     def _init_wrapper(self, backend: Backend, backend_files: Sequence[str],
-                      device: str):
+                      device: str, **kwargs):
         output_names = self.output_names
         self.wrapper = BaseBackendModel._build_wrapper(
             backend=backend,
             backend_files=backend_files,
             device=device,
             input_names=[self.input_name],
             output_names=output_names,
-            deploy_cfg=self.deploy_cfg)
+            deploy_cfg=self.deploy_cfg,
+            **kwargs)
 
     def forward(self,
                 batch_inputs: torch.Tensor,
@@ -101,8 +102,7 @@ class SDKEnd2EndModel(End2EndModel):
             list: A list of predictions.
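+
+        Note:
+            The SDK ``invoke`` call below now takes a single HWC numpy
+            array instead of a one-element list, matching the API change
+            applied across the SDK models in this patch.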
""" - pred = self.wrapper.invoke( - [img[0].contiguous().detach().cpu().numpy()])[0] + pred = self.wrapper.invoke(img[0].contiguous().detach().cpu().numpy()) pred = np.array(pred, dtype=np.float32) return pred[np.argsort(pred[:, 0])][np.newaxis, :, 1] diff --git a/mmdeploy/codebase/mmcls/models/backbones/__init__.py b/mmdeploy/codebase/mmcls/models/backbones/__init__.py index 7e6c954ea..52e4af6bf 100644 --- a/mmdeploy/codebase/mmcls/models/backbones/__init__.py +++ b/mmdeploy/codebase/mmcls/models/backbones/__init__.py @@ -1,8 +1,8 @@ # Copyright (c) OpenMMLab. All rights reserved. -from .shufflenet_v2 import shufflenetv2_backbone__forward__ncnn +from .shufflenet_v2 import shufflenetv2_backbone__forward__default from .vision_transformer import visiontransformer__forward__ncnn __all__ = [ - 'shufflenetv2_backbone__forward__ncnn', + 'shufflenetv2_backbone__forward__default', 'visiontransformer__forward__ncnn', ] diff --git a/mmdeploy/codebase/mmcls/models/backbones/shufflenet_v2.py b/mmdeploy/codebase/mmcls/models/backbones/shufflenet_v2.py index 2d26318af..fe3a73d0b 100644 --- a/mmdeploy/codebase/mmcls/models/backbones/shufflenet_v2.py +++ b/mmdeploy/codebase/mmcls/models/backbones/shufflenet_v2.py @@ -1,25 +1,17 @@ # Copyright (c) OpenMMLab. All rights reserved. import torch -from mmcls.models.utils import channel_shuffle from mmdeploy.core import FUNCTION_REWRITER -from mmdeploy.utils import Backend -# torch.chunk will export dynamic shape slice, which will lead integer input -# on ncnn backend. So the model needs to rewrite. @FUNCTION_REWRITER.register_rewriter( - func_name='mmcls.models.backbones.shufflenet_v2.InvertedResidual.forward', - backend=Backend.NCNN.value) -@FUNCTION_REWRITER.register_rewriter( - func_name='mmcls.models.backbones.shufflenet_v2.InvertedResidual.forward', - backend=Backend.TORCHSCRIPT.value) -def shufflenetv2_backbone__forward__ncnn(ctx, self, x): - """Rewrite `forward` of InvertedResidual used in shufflenet_v2 for ncnn - backend. + func_name='mmcls.models.backbones.shufflenet_v2.InvertedResidual.forward') +def shufflenetv2_backbone__forward__default(ctx, self, x): + """Rewrite `forward` of InvertedResidual used in shufflenet_v2. The chunk in original InvertedResidual.forward will convert to dynamic - `Slice` operator in ONNX, which will raise error in ncnn. + `Slice` operator in ONNX, which will raise error in ncnn, torchscript + and tensorrt. Here we replace `chunk` with `split`. Args: ctx (ContextCaller): The context with additional information. @@ -29,6 +21,7 @@ def shufflenetv2_backbone__forward__ncnn(ctx, self, x): out (Tensor): A feature map output from InvertedResidual. The tensor shape (N, Cout, H, W). """ + from mmcls.models.utils import channel_shuffle if self.stride > 1: out = torch.cat((self.branch1(x), self.branch2(x)), dim=1) else: diff --git a/mmdeploy/codebase/mmcls/models/backbones/vision_transformer.py b/mmdeploy/codebase/mmcls/models/backbones/vision_transformer.py index 21d99aa27..a31853912 100644 --- a/mmdeploy/codebase/mmcls/models/backbones/vision_transformer.py +++ b/mmdeploy/codebase/mmcls/models/backbones/vision_transformer.py @@ -1,6 +1,5 @@ # Copyright (c) OpenMMLab. All rights reserved. import torch -from mmcls.models.utils import resize_pos_embed from mmdeploy.core import FUNCTION_REWRITER from mmdeploy.utils import Backend @@ -25,6 +24,7 @@ def visiontransformer__forward__ncnn(ctx, self, x): out (Tensor): A feature map output from InvertedResidual. The tensor shape (N, Cout, H, W). 
""" + from mmcls.models.utils import resize_pos_embed B = x.shape[0] x, patch_resolution = self.patch_embed(x) diff --git a/mmdeploy/codebase/mmcls/models/utils/attention.py b/mmdeploy/codebase/mmcls/models/utils/attention.py index 882f47343..2088af1f5 100644 --- a/mmdeploy/codebase/mmcls/models/utils/attention.py +++ b/mmdeploy/codebase/mmcls/models/utils/attention.py @@ -1,59 +1,10 @@ # Copyright (c) OpenMMLab. All rights reserved. -import torch -from torch import Tensor - from mmdeploy.core import FUNCTION_REWRITER +from mmdeploy.mmcv.cnn import MultiHeadAttentionop from mmdeploy.utils import Backend -class MultiHeadAttentionop(torch.autograd.Function): - """Create onnx::MultiHeadAttention op.""" - - @staticmethod - def forward(ctx, q: Tensor, k: Tensor, v: Tensor, q_weight: Tensor, - q_bias: Tensor, k_weight: Tensor, k_bias: Tensor, - v_weight: Tensor, v_bias: Tensor, o_weight: Tensor, - o_bias: Tensor, embed_dims: int, num_heads: int) -> Tensor: - return torch.rand_like(q) - - @staticmethod - def symbolic(g, q: torch._C.Value, k: torch._C.Value, v: torch._C.Value, - q_weight: torch._C.Value, q_bias: torch._C.Value, - k_weight: torch._C.Value, k_bias: torch._C.Value, - v_weight: torch._C.Value, v_bias: torch._C.Value, - o_weight: torch._C.Value, o_bias: torch._C.Value, - embed_dims: int, num_heads: int): - - q_weight.setDebugName('q_weight') - q_bias.setDebugName('q_bias') - - k_weight.setDebugName('k_weight') - k_bias.setDebugName('k_bias') - - v_weight.setDebugName('v_weight') - v_bias.setDebugName('v_bias') - - o_weight.setDebugName('o_weight') - o_bias.setDebugName('o_bias') - - return g.op( - 'mmdeploy::MultiHeadAttention', - q, - k, - v, - q_weight, - q_bias, - k_weight, - k_bias, - v_weight, - v_bias, - o_weight, - o_bias, - embed_dim_i=embed_dims, - num_heads_i=num_heads) - - @FUNCTION_REWRITER.register_rewriter( func_name='mmcls.models.utils.attention.MultiheadAttention.forward', backend=Backend.NCNN.value) diff --git a/mmdeploy/codebase/mmdet/core/__init__.py b/mmdeploy/codebase/mmdet/core/__init__.py index bf32fab5f..ccf057bf7 100644 --- a/mmdeploy/codebase/mmdet/core/__init__.py +++ b/mmdeploy/codebase/mmdet/core/__init__.py @@ -1,4 +1,6 @@ # Copyright (c) OpenMMLab. All rights reserved. +from .anchor import * # noqa: F401,F403 from .bbox import * # noqa: F401,F403 from .ops import * # noqa: F401,F403 +from .point_generator import * # noqa: F401,F403 from .post_processing import * # noqa: F401,F403 diff --git a/mmdeploy/codebase/mmdet/core/anchor.py b/mmdeploy/codebase/mmdet/core/anchor.py new file mode 100644 index 000000000..4b8166f86 --- /dev/null +++ b/mmdeploy/codebase/mmdet/core/anchor.py @@ -0,0 +1,98 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import Tuple
+
+import torch
+from torch.onnx import symbolic_helper
+
+from mmdeploy.core import FUNCTION_REWRITER
+
+
+class GridPriorsTRTOp(torch.autograd.Function):
+
+    @staticmethod
+    def forward(ctx, base_anchors, feat_h, feat_w, stride_h: int,
+                stride_w: int):
+        device = base_anchors.device
+        dtype = base_anchors.dtype
+        shift_x = torch.arange(0, feat_w, device=device).to(dtype) * stride_w
+        shift_y = torch.arange(0, feat_h, device=device).to(dtype) * stride_h
+
+        def _meshgrid(x, y, row_major=True):
+            # use shape instead of len to keep tracing while exporting to onnx
+            xx = x.repeat(y.shape[0])
+            yy = y.view(-1, 1).repeat(1, x.shape[0]).view(-1)
+            if row_major:
+                return xx, yy
+            else:
+                return yy, xx
+
+        shift_xx, shift_yy = _meshgrid(shift_x, shift_y)
+        shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1)
+
+        all_anchors = base_anchors[None, :, :] + shifts[:, None, :]
+        all_anchors = all_anchors.view(-1, 4)
+        # the first A rows correspond to the A anchors of location (0, 0) in
+        # the feature map, then (0, 1), (0, 2), ...
+        return all_anchors
+
+    @staticmethod
+    @symbolic_helper.parse_args('v', 'v', 'v', 'i', 'i')
+    def symbolic(g, base_anchors, feat_h, feat_w, stride_h: int,
+                 stride_w: int):
+        # zero_h and zero_w are used to provide the shape to GridPriorsTRT
+        feat_h = symbolic_helper._unsqueeze_helper(g, feat_h, [0])
+        feat_w = symbolic_helper._unsqueeze_helper(g, feat_w, [0])
+        zero_h = g.op(
+            'ConstantOfShape',
+            feat_h,
+            value_t=torch.tensor([0], dtype=torch.long),
+        )
+        zero_w = g.op(
+            'ConstantOfShape',
+            feat_w,
+            value_t=torch.tensor([0], dtype=torch.long),
+        )
+        return g.op(
+            'mmdeploy::GridPriorsTRT',
+            base_anchors,
+            zero_h,
+            zero_w,
+            stride_h_i=stride_h,
+            stride_w_i=stride_w)
+
+
+grid_priors_trt = GridPriorsTRTOp.apply
+
+
+@FUNCTION_REWRITER.register_rewriter(
+    func_name='mmdet.core.anchor.anchor_generator.'
+    'AnchorGenerator.single_level_grid_priors',
+    backend='tensorrt')
+def anchorgenerator__single_level_grid_priors__trt(
+        ctx,
+        self,
+        featmap_size: Tuple[int],
+        level_idx: int,
+        dtype: torch.dtype = torch.float32,
+        device: str = 'cuda') -> torch.Tensor:
+    """This is a rewrite to replace the ONNX anchor generator with a TensorRT
+    custom op.
+
+    Args:
+        ctx : The rewriter context
+        featmap_size (tuple[int]): Size of the feature maps.
+        level_idx (int): The index of corresponding feature map level.
+        dtype (obj:`torch.dtype`): Data type of points. Defaults to
+            ``torch.float32``.
+        device (str, optional): The device the tensor will be put on.
+            Defaults to 'cuda'.
+
+    Returns:
+        torch.Tensor: Anchors in the overall feature maps.
+    """
+    feat_h, feat_w = featmap_size
+    if isinstance(feat_h, int) and isinstance(feat_w, int):
+        return ctx.origin_func(self, featmap_size, level_idx, dtype,
+                               device).data
+    base_anchors = self.base_anchors[level_idx].to(device).to(dtype)
+    stride_w, stride_h = self.strides[level_idx]
+    return grid_priors_trt(base_anchors, feat_h, feat_w, stride_h, stride_w)
diff --git a/mmdeploy/codebase/mmdet/core/bbox/__init__.py b/mmdeploy/codebase/mmdet/core/bbox/__init__.py
index eaa8961dd..79b4917a0 100644
--- a/mmdeploy/codebase/mmdet/core/bbox/__init__.py
+++ b/mmdeploy/codebase/mmdet/core/bbox/__init__.py
@@ -1,4 +1,5 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 from .delta_xywh_bbox_coder import *  # noqa: F401,F403
+from .distance_point_bbox_coder import *  # noqa: F401,F403
 from .tblr_bbox_coder import *  # noqa: F401,F403
 from .transforms import *  # noqa: F401,F403
diff --git a/mmdeploy/codebase/mmdet/core/bbox/distance_point_bbox_coder.py b/mmdeploy/codebase/mmdet/core/bbox/distance_point_bbox_coder.py
new file mode 100644
index 000000000..82b8606ad
--- /dev/null
+++ b/mmdeploy/codebase/mmdet/core/bbox/distance_point_bbox_coder.py
@@ -0,0 +1,44 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import mmdet.core.bbox.transforms
+
+from mmdeploy.core import FUNCTION_REWRITER
+
+
+@FUNCTION_REWRITER.register_rewriter(
+    func_name='mmdet.core.bbox.coder.DistancePointBBoxCoder.decode',
+    backend='default')
+def distancepointbboxcoder__decode(ctx,
+                                   self,
+                                   points,
+                                   pred_bboxes,
+                                   max_shape=None):
+    """Rewrite `mmdet.core.bbox.coder.DistancePointBBoxCoder.decode`
+
+    Decode distance prediction to bounding box.
+
+    Args:
+        ctx (ContextCaller): The context with additional information.
+        self (DistancePointBBoxCoder): The instance of the class
+            DistancePointBBoxCoder.
+        points (Tensor): Shape (B, N, 2) or (N, 2).
+        pred_bboxes (Tensor): Distance from the given point to 4
+            boundaries (left, top, right, bottom). Shape (B, N, 4)
+            or (N, 4)
+        max_shape (Sequence[int] or torch.Tensor or Sequence[
+            Sequence[int]], optional): Maximum bounds for boxes, specifies
+            (H, W, C) or (H, W). If priors shape is (B, N, 4), then
+            the max_shape should be a Sequence[Sequence[int]],
+            and the length of max_shape should also be B.
+            Default None.
+    Returns:
+        Tensor: Boxes with shape (N, 4) or (B, N, 4)
+    """
+    assert points.size(0) == pred_bboxes.size(0)
+    assert points.size(-1) == 2
+    assert pred_bboxes.size(-1) == 4
+    if self.clip_border is False:
+        max_shape = None
+    # Call `distance2bbox` through its module path so that the rewriter
+    # registered for it is applied; calling an imported symbol directly
+    # would bypass the rewrite.
+    return mmdet.core.bbox.transforms.distance2bbox(points, pred_bboxes,
+                                                    max_shape)
diff --git a/mmdeploy/codebase/mmdet/core/bbox/transforms.py b/mmdeploy/codebase/mmdet/core/bbox/transforms.py
index 37e8017b7..1e4d60ba3 100644
--- a/mmdeploy/codebase/mmdet/core/bbox/transforms.py
+++ b/mmdeploy/codebase/mmdet/core/bbox/transforms.py
@@ -2,14 +2,19 @@
 import torch
 
 from mmdeploy.codebase.mmdet.deploy import clip_bboxes
+from mmdeploy.core import FUNCTION_REWRITER
 
 
-def distance2bbox(points, distance, max_shape=None):
+@FUNCTION_REWRITER.register_rewriter(
+    func_name='mmdet.core.bbox.transforms.distance2bbox'  # noqa
+)
+def distance2bbox__default(ctx, points, distance, max_shape=None):
     """Rewrite `mmdet.core.bbox.transforms.distance2bbox`
 
     Decode distance prediction to bounding box.
 
     Args:
+        ctx (ContextCaller): The context with additional information.
         points (Tensor): Shape (B, N, 2) or (N, 2).
         distance (Tensor): Distance from the given point to 4
             boundaries (left, top, right, bottom). Shape (B, N, 4) or (N, 4)
diff --git a/mmdeploy/codebase/mmdet/core/point_generator.py b/mmdeploy/codebase/mmdet/core/point_generator.py
new file mode 100644
index 000000000..72fbf229e
--- /dev/null
+++ b/mmdeploy/codebase/mmdet/core/point_generator.py
@@ -0,0 +1,63 @@
+# Copyright (c) OpenMMLab. All rights reserved.
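+# Rewrites `MlvlPointGenerator.single_level_grid_priors` for TensorRT, where
+# some versions fail shape inference on the original export of YOLOX (see
+# the docstring below).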
+import torch
+
+from mmdeploy.core import FUNCTION_REWRITER
+from mmdeploy.utils.constants import Backend
+
+
+@FUNCTION_REWRITER.register_rewriter(
+    func_name='mmdet.core.anchor.MlvlPointGenerator.single_level_grid_priors',
+    backend=Backend.TENSORRT.value)
+def mlvl_point_generator__single_level_grid_priors__tensorrt(
+        ctx,
+        self,
+        featmap_size,
+        level_idx,
+        dtype=torch.float32,
+        device='cuda',
+        with_stride=False):
+    """Rewrite `single_level_grid_priors` of `MlvlPointGenerator`, because
+    onnx2tensorrt raises a shape-inference error for YOLOX with some versions
+    of TensorRT.
+
+    Args:
+        featmap_size (tuple[int]): Size of the feature maps, arrange as
+            (h, w).
+        level_idx (int): The index of corresponding feature map level.
+        dtype (:obj:`dtype`): Dtype of priors. Default: torch.float32.
+        device (str, optional): The device the tensor will be put on.
+            Defaults to 'cuda'.
+        with_stride (bool): Concatenate the stride to the last dimension
+            of points.
+
+    Returns:
+        Tensor: Points of single feature levels.
+        The shape of tensor should be (N, 2) when ``with_stride`` is
+        ``False``, where N = width * height, width and height
+        are the sizes of the corresponding feature level,
+        and the last dimension 2 represent (coord_x, coord_y),
+        otherwise the shape should be (N, 4),
+        and the last dimension 4 represent
+        (coord_x, coord_y, stride_w, stride_h).
+    """
+    feat_h, feat_w = featmap_size
+    stride_w, stride_h = self.strides[level_idx]
+    shift_x = (torch.arange(0, feat_w, device=device) + self.offset) * stride_w
+    # keep featmap_size as Tensor instead of int, so that we
+    # can convert to ONNX correctly
+    shift_x = shift_x.to(dtype)
+
+    shift_y = (torch.arange(0, feat_h, device=device) + self.offset) * stride_h
+    # keep featmap_size as Tensor instead of int, so that we
+    # can convert to ONNX correctly
+    shift_y = shift_y.to(dtype)
+    shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
+    if not with_stride:
+        shifts = torch.stack([shift_xx, shift_yy], dim=-1)
+    else:
+        # use `feat_w * feat_h` instead of `shift_xx.shape[0]` for TensorRT
+        stride_w = shift_xx.new_full((feat_w * feat_h, ), stride_w).to(dtype)
+        stride_h = shift_xx.new_full((feat_w * feat_h, ), stride_h).to(dtype)
+        shifts = torch.stack([shift_xx, shift_yy, stride_w, stride_h], dim=-1)
+    all_points = shifts.to(device)
+    return all_points
diff --git a/mmdeploy/codebase/mmdet/deploy/object_detection_model.py b/mmdeploy/codebase/mmdet/deploy/object_detection_model.py
index 05368659f..80a5162b0 100644
--- a/mmdeploy/codebase/mmdet/deploy/object_detection_model.py
+++ b/mmdeploy/codebase/mmdet/deploy/object_detection_model.py
@@ -606,7 +606,7 @@ class SDKEnd2EndModel(End2EndModel):
         """
         from mmdet.core import bbox2result
         dets, labels, masks = self.wrapper.invoke(
-            [img[0].contiguous().detach().cpu().numpy()])[0]
+            img[0].contiguous().detach().cpu().numpy())
         det_results = bbox2result(dets[np.newaxis, ...],
                                   labels[np.newaxis, ...],
                                   len(self.CLASSES))
diff --git a/mmdeploy/codebase/mmdet/models/__init__.py b/mmdeploy/codebase/mmdet/models/__init__.py
index 19abd95e0..f67fee3e4 100644
--- a/mmdeploy/codebase/mmdet/models/__init__.py
+++ b/mmdeploy/codebase/mmdet/models/__init__.py
@@ -4,3 +4,4 @@
 from .dense_heads import *  # noqa: F401,F403
 from .detectors import *  # noqa: F401,F403
 from .necks import *  # noqa: F401,F403
 from .roi_heads import *  # noqa: F401,F403
+from .transformer import *  # noqa: F401,F403
diff --git a/mmdeploy/codebase/mmdet/models/backbones.py b/mmdeploy/codebase/mmdet/models/backbones.py
index 65b672785..daa8b2a59
100644
--- a/mmdeploy/codebase/mmdet/models/backbones.py
+++ b/mmdeploy/codebase/mmdet/models/backbones.py
@@ -2,6 +2,26 @@
 import torch
 
 from mmdeploy.core import FUNCTION_REWRITER
+from mmdeploy.utils import get_common_config
+
+
+@FUNCTION_REWRITER.register_rewriter(
+    func_name='mmdet.models.backbones.csp_darknet.Focus.forward')
+def focus__forward__default(ctx, self, x):
+    """Rewrite forward function of Focus class.
+
+    Replace the slicing operations with reshape and permute.
+    """
+    # shape of x: (B, C, H, W) -> y: (B, 4C, H/2, W/2)
+    B, C, H, W = x.shape
+    x = x.reshape(B, C, -1, 2, W)
+    x = x.reshape(B, C, x.shape[2], 2, -1, 2)
+    half_H = x.shape[2]
+    half_W = x.shape[4]
+    x = x.permute(0, 5, 3, 1, 2, 4)
+    x = x.reshape(B, C * 4, half_H, half_W)
+
+    return self.conv(x)
 
 
 @FUNCTION_REWRITER.register_rewriter(
@@ -44,3 +64,202 @@ def focus__forward__ncnn(ctx, self, x):
     x = x.reshape(_b, c * 4, h // 2, w // 2)
 
     return self.conv(x)
+
+
+@FUNCTION_REWRITER.register_rewriter(
+    func_name='mmdet.models.backbones.swin.WindowMSA.forward',
+    backend='tensorrt')
+def windowmsa__forward__tensorrt(ctx, self, x, mask=None):
+    """Rewrite forward function of WindowMSA class for TensorRT.
+
+    1. replace Gather operation of qkv with split.
+    2. replace SoftMax operation with a workaround done by PyTorch.
+
+    Args:
+        x (tensor): input features with shape of (num_windows*B, N, C)
+        mask (tensor | None, Optional): mask with shape of (num_windows,
+            Wh*Ww, Wh*Ww), value should be between (-inf, 0].
+    """
+    B, N, C = x.shape
+    qkv = self.qkv(x).reshape(B, N, 3, self.num_heads,
+                              -1).permute(2, 0, 3, 1, 4).contiguous()
+
+    # replace the gather operation with the split
+    q, k, v = [i.squeeze(0) for i in torch.split(qkv, 1, 0)]
+
+    q = q * self.scale
+
+    attn = (q @ k.transpose(-2, -1))
+
+    relative_position_bias = self.relative_position_bias_table[
+        self.relative_position_index.view(-1)].view(
+            self.window_size[0] * self.window_size[1],
+            self.window_size[0] * self.window_size[1], -1)  # Wh*Ww,Wh*Ww,nH
+    relative_position_bias = relative_position_bias.permute(
+        2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
+    attn = attn + relative_position_bias.unsqueeze(0)
+
+    if mask is not None:
+        nW = mask.shape[0]
+        attn = attn.view(-1, nW, self.num_heads, N,
+                         N) + mask.unsqueeze(1).unsqueeze(0)
+        attn = attn.view(-1, self.num_heads, N, N)
+
+    # replace softmax with a workaround for a TensorRT bug: softmax fails
+    # here in fp32 mode. It does work in fp16, but softmax fp16 performance
+    # is not as good as exp and log_softmax, and only the unit tests of the
+    # exp and log_softmax workaround passed.
+    fp16_mode = get_common_config(ctx.cfg).get('fp16_mode', False)
+    if fp16_mode:
+        attn = torch.exp(torch.log_softmax(attn, dim=self.softmax.dim))
+    else:
+        # torch.mean returns a tensor, not a (values, indices) tuple, so no
+        # `[0]` indexing is needed; subtracting the per-row mean keeps the
+        # exp numerically stable while leaving the softmax result unchanged.
+        means = torch.mean(attn, self.softmax.dim, keepdim=True)
+        attn_exp = torch.exp(attn - means)
+        attn_exp_sum = torch.sum(attn_exp, self.softmax.dim, keepdim=True)
+        attn = attn_exp / attn_exp_sum
+
+    attn = self.attn_drop(attn)
+
+    x = (attn @ v).transpose(1, 2).contiguous().reshape(B, N, C)
+    x = self.proj(x)
+    x = self.proj_drop(x)
+    return x
+
+
+@FUNCTION_REWRITER.register_rewriter(
+    func_name='mmdet.models.backbones.swin.ShiftWindowMSA.window_reverse',
+    backend='tensorrt')
+def shift_window_msa__window_reverse__tensorrt(ctx, self, windows, H, W):
+    """Rewrite window_reverse function of ShiftWindowMSA class for TensorRT.
+
+    For TensorRT, it seems radical shape transformations are not allowed;
+    replace them with milder ones.
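+
+    For example (illustrative shapes): with ``window_size=7``, ``H=W=14``
+    and ``B=1``, ``windows`` of shape ``(4, 7, 7, C)`` is reassembled into
+    ``(1, 14, 14, C)`` using only ``view``/``permute`` with at most one
+    inferred dimension per reshape.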
+
+    Args:
+        windows: (num_windows*B, window_size, window_size, C)
+        H (int): Height of image
+        W (int): Width of image
+    Returns:
+        x: (B, H, W, C)
+    """
+    window_size = self.window_size
+    B = int(windows.shape[0] / (H * W / window_size / window_size))
+
+    # x = windows.view(B, H // window_size, W // window_size, window_size,
+    #                  window_size, -1)
+    x = windows.view(B, -1, W, window_size, windows.shape[-1])
+    x = x.view(B, x.shape[1], -1, window_size, window_size, x.shape[-1])
+    x = x.permute(0, 1, 3, 2, 4, 5).reshape(B, H, W, x.shape[-1])
+    return x
+
+
+@FUNCTION_REWRITER.register_rewriter(
+    func_name='mmdet.models.backbones.swin.ShiftWindowMSA.window_partition',
+    backend='tensorrt')
+def shift_window_msa__window_partition__tensorrt(ctx, self, x):
+    """Rewrite window_partition function of ShiftWindowMSA class for TensorRT.
+
+    For TensorRT, it seems radical shape transformations are not allowed;
+    replace them with milder ones.
+
+    Args:
+        x: (B, H, W, C)
+    Returns:
+        windows: (num_windows*B, window_size, window_size, C)
+    """
+    B, H, W, C = x.shape
+    window_size = self.window_size
+    x = x.view(B, H, -1, window_size, C)
+    x = x.view(B, -1, window_size, x.shape[-3], window_size, C)
+    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous()
+    windows = windows.view(-1, window_size, window_size, C)
+    return windows
+
+
+@FUNCTION_REWRITER.register_rewriter(
+    func_name='mmdet.models.backbones.swin.ShiftWindowMSA.forward',
+    backend='tensorrt')
+def shift_window_msa__forward__tensorrt(ctx, self, query, hw_shape):
+    """Rewrite forward function of ShiftWindowMSA class for TensorRT.
+
+    1. replace dynamic padding with static padding and dynamic slice.
+    2. always do slice `x = x[:, :H, :W, :].contiguous()` for stability.
+    """
+    B, L, C = query.shape
+    H, W = hw_shape
+    assert L == H * W, 'input feature has wrong size'
+    query = query.view(B, H, W, C)
+
+    # pad feature maps to multiples of window size
+    query = query.permute(0, 3, 1, 2).contiguous()
+    # query = torch.nn.ZeroPad2d([0, self.window_size, 0, self.window_size])(
+    #     query)
+    query = torch.cat(
+        [query, query.new_zeros(B, C, H, self.window_size)], dim=-1)
+    query = torch.cat(
+        [query,
+         query.new_zeros(B, C, self.window_size, query.shape[-1])],
+        dim=-2)
+    slice_h = (H + self.window_size - 1) // self.window_size * self.window_size
+    slice_w = (W + self.window_size - 1) // self.window_size * self.window_size
+    query = query[:, :, :slice_h, :slice_w]
+    query = query.permute(0, 2, 3, 1).contiguous()
+    H_pad, W_pad = query.shape[1], query.shape[2]
+
+    # cyclic shift
+    if self.shift_size > 0:
+        shifted_query = torch.roll(
+            query, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
+
+        # calculate attention mask for SW-MSA
+        w_mask = torch.cat([
+            shifted_query.new_zeros(W_pad - self.window_size),
+            shifted_query.new_full((self.window_size - self.shift_size, ), 1),
+            shifted_query.new_full((self.shift_size, ), 2)
+        ])
+        h_mask = torch.cat([
+            shifted_query.new_zeros(H_pad - self.window_size),
+            shifted_query.new_full((self.window_size - self.shift_size, ), 3),
+            shifted_query.new_full((self.shift_size, ), 6)
+        ])
+
+        img_mask = w_mask.unsqueeze(0) + h_mask.unsqueeze(1)
+        img_mask = img_mask.unsqueeze(0)
+        img_mask = img_mask.unsqueeze(-1)
+
+        # nW, window_size, window_size, 1
+        mask_windows = self.window_partition(img_mask)
+        mask_windows = mask_windows.view(-1,
+                                         self.window_size * self.window_size)
+        attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
+        attn_mask = attn_mask.masked_fill(attn_mask != 0,
float(-100.0)).masked_fill(
+                                              attn_mask == 0, float(0.0))
+    else:
+        shifted_query = query
+        attn_mask = None
+
+    # nW*B, window_size, window_size, C
+    query_windows = self.window_partition(shifted_query)
+    # nW*B, window_size*window_size, C
+    query_windows = query_windows.view(-1, self.window_size**2, C)
+
+    # W-MSA/SW-MSA (nW*B, window_size*window_size, C)
+    attn_windows = self.w_msa(query_windows, mask=attn_mask)
+
+    # merge windows
+    attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
+
+    # B H' W' C
+    shifted_x = self.window_reverse(attn_windows, H_pad, W_pad)
+    # reverse cyclic shift
+    if self.shift_size > 0:
+        x = torch.roll(
+            shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
+    else:
+        x = shifted_x
+
+    x = x[:, :H, :W, :].contiguous()
+
+    x = x.view(B, H * W, C)
+
+    x = self.drop(x)
+    return x
diff --git a/mmdeploy/codebase/mmdet/models/dense_heads/yolo_head.py b/mmdeploy/codebase/mmdet/models/dense_heads/yolo_head.py
index 11acb61f6..e6125470d 100644
--- a/mmdeploy/codebase/mmdet/models/dense_heads/yolo_head.py
+++ b/mmdeploy/codebase/mmdet/models/dense_heads/yolo_head.py
@@ -5,7 +5,7 @@ import torch
 from mmdeploy.codebase.mmdet import (get_post_processing_params,
                                      multiclass_nms,
                                      pad_with_value_if_necessary)
-from mmdeploy.core import FUNCTION_REWRITER
+from mmdeploy.core import FUNCTION_REWRITER, mark
 from mmdeploy.utils import Backend, is_dynamic_shape
 
 
@@ -45,6 +45,13 @@ def yolov3_head__get_bboxes(ctx,
         Else:
             tuple[Tensor, Tensor, Tensor]: batch_mlvl_bboxes, batch_mlvl_scores
     """
+    # mark pred_maps
+    @mark('yolo_head', inputs=['pred_maps'])
+    def __mark_pred_maps(pred_maps):
+        return pred_maps
+
+    pred_maps = __mark_pred_maps(pred_maps)
+
     is_dynamic_flag = is_dynamic_shape(ctx.cfg)
     num_levels = len(pred_maps)
     pred_maps_list = [pred_maps[i].detach() for i in range(num_levels)]
diff --git a/mmdeploy/codebase/mmdet/models/detectors/base.py b/mmdeploy/codebase/mmdet/models/detectors/base.py
index f443eaeea..71eb85337 100644
--- a/mmdeploy/codebase/mmdet/models/detectors/base.py
+++ b/mmdeploy/codebase/mmdet/models/detectors/base.py
@@ -7,14 +7,13 @@ from mmdeploy.utils import is_dynamic_shape
 @mark(
     'detector_forward', inputs=['input'], outputs=['dets', 'labels', 'masks'])
-def __forward_impl(ctx, self, img, img_metas=None, **kwargs):
+def __forward_impl(ctx, self, img, img_metas, **kwargs):
     """Rewrite `forward`, adding a mark.
 
     Encapsulate this function for rewriting `forward` of BaseDetector.
     1. Add mark for BaseDetector.
     2. Support both dynamic and static export to onnx.
     """
-    assert isinstance(img_metas, dict)
     assert isinstance(img, torch.Tensor)
 
     deploy_cfg = ctx.cfg
@@ -23,14 +22,18 @@ def __forward_impl(ctx, self, img, img_metas, **kwargs):
     img_shape = torch._shape_as_tensor(img)[2:]
     if not is_dynamic_flag:
         img_shape = [int(val) for val in img_shape]
-    img_metas['img_shape'] = img_shape
-    img_metas = [img_metas]
+    img_metas[0]['img_shape'] = img_shape
     return self.simple_test(img, img_metas, **kwargs)
 
 
 @FUNCTION_REWRITER.register_rewriter(
     'mmdet.models.detectors.base.BaseDetector.forward')
-def base_detector__forward(ctx, self, img, img_metas=None, **kwargs):
+def base_detector__forward(ctx,
+                           self,
+                           img,
+                           img_metas=None,
+                           return_loss=False,
+                           **kwargs):
     """Rewrite `forward` of BaseDetector for default backend.
 
     Rewrite this function to:
@@ -56,14 +59,12 @@ def base_detector__forward(ctx, self, img, img_metas=None, **kwargs):
             corresponds to each class.
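+
+        Note:
+            Only a single-image batch of metas is supported; nested
+            ``img_metas`` from ``aug_test`` are rejected by the assert
+            below.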
""" if img_metas is None: - img_metas = {} - - while isinstance(img_metas, list): + img_metas = [{}] + else: + assert len(img_metas) == 1, 'do not support aug_test' img_metas = img_metas[0] if isinstance(img, list): - img = torch.cat(img, 0) + img = img[0] - if 'return_loss' in kwargs: - kwargs.pop('return_loss') return __forward_impl(ctx, self, img, img_metas=img_metas, **kwargs) diff --git a/mmdeploy/codebase/mmdet/models/roi_heads/cascade_roi_head.py b/mmdeploy/codebase/mmdet/models/roi_heads/cascade_roi_head.py index 107439ac6..179643abb 100644 --- a/mmdeploy/codebase/mmdet/models/roi_heads/cascade_roi_head.py +++ b/mmdeploy/codebase/mmdet/models/roi_heads/cascade_roi_head.py @@ -83,9 +83,10 @@ def cascade_roi_head__simple_test(ctx, self, x, proposals, img_metas, if not self.with_mask: return det_bboxes, det_labels else: - batch_index = torch.arange( - det_bboxes.size(0), - device=det_bboxes.device).float().view(-1, 1, 1) + batch_index = torch.arange(det_bboxes.size(0), + device=det_bboxes.device). \ + float().view(-1, 1, 1).expand( + det_bboxes.size(0), det_bboxes.size(1), 1) rois = det_bboxes[..., :4] mask_rois = torch.cat([batch_index, rois], dim=-1) mask_rois = mask_rois.view(-1, 5) diff --git a/mmdeploy/codebase/mmdet/models/roi_heads/single_level_roi_extractor.py b/mmdeploy/codebase/mmdet/models/roi_heads/single_level_roi_extractor.py index f91ca48ad..321522e7c 100644 --- a/mmdeploy/codebase/mmdet/models/roi_heads/single_level_roi_extractor.py +++ b/mmdeploy/codebase/mmdet/models/roi_heads/single_level_roi_extractor.py @@ -127,6 +127,8 @@ def single_roi_extractor__forward(ctx, roi_feats = feats[0].new_zeros(rois.shape[0], self.out_channels, *out_size) if num_levels == 1: assert len(rois) > 0, 'The number of rois should be positive' + if backend == Backend.TORCHSCRIPT: + self.roi_layers[0].use_torchvision = True return self.roi_layers[0](feats[0], rois) target_lvls = self.map_roi_levels(rois, num_levels) diff --git a/mmdeploy/codebase/mmdet/models/transformer.py b/mmdeploy/codebase/mmdet/models/transformer.py new file mode 100644 index 000000000..6bbb938de --- /dev/null +++ b/mmdeploy/codebase/mmdet/models/transformer.py @@ -0,0 +1,50 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from mmdeploy.core import FUNCTION_REWRITER + + +@FUNCTION_REWRITER.register_rewriter( + func_name='mmdet.models.utils.transformer.PatchMerging.forward', + backend='tensorrt') +def patch_merging__forward__tensorrt(ctx, self, x, input_size): + """Rewrite forward function of PatchMerging class for TensorRT. + In original implementation, mmdet applies nn.unfold to accelerate the + inferece. However, the onnx graph of it can not be parsed correctly by + TensorRT. In mmdeploy, it is replaced. + Args: + x (Tensor): Has shape (B, H*W, C_in). + input_size (tuple[int]): The spatial shape of x, arrange as (H, W). + Default: None. + Returns: + tuple: Contains merged results and its spatial shape. + - x (Tensor): Has shape (B, Merged_H * Merged_W, C_out) + - out_size (tuple[int]): Spatial shape of x, arrange as + (Merged_H, Merged_W). + """ + H, W = input_size + B, L, C = x.shape + assert L == H * W, 'input feature has wrong size' + assert H % 2 == 0 and W % 2 == 0, f'x size ({H}*{W}) are not even.' 
+
+    x = x.view(B, H, W, C)
+
+    x0 = x[:, 0::2, 0::2, :]  # B H/2 W/2 C
+    x1 = x[:, 0::2, 1::2, :]  # B H/2 W/2 C
+    x2 = x[:, 1::2, 0::2, :]  # B H/2 W/2 C
+    x3 = x[:, 1::2, 1::2, :]  # B H/2 W/2 C
+    x = torch.cat([x0, x1, x2, x3], -1)  # B H/2 W/2 4*C
+    x = x.view(B, -1, 4 * C)  # B H/2*W/2 4*C
+    x = x.view(x.shape[0], x.shape[1], 4,
+               -1).permute(0, 1, 3, 2).reshape(x.shape[0], x.shape[1], -1)
+    x = self.norm(x) if self.norm else x
+    x = self.reduction(x)
+    out_h = (H + 2 * self.sampler.padding[0] - self.sampler.dilation[0] *
+             (self.sampler.kernel_size[0] - 1) -
+             1) // self.sampler.stride[0] + 1
+    out_w = (W + 2 * self.sampler.padding[1] - self.sampler.dilation[1] *
+             (self.sampler.kernel_size[1] - 1) -
+             1) // self.sampler.stride[1] + 1
+
+    output_size = (out_h, out_w)
+    return x, output_size
diff --git a/mmdeploy/codebase/mmdet3d/models/base.py b/mmdeploy/codebase/mmdet3d/models/base.py
index e8d7000e4..61fddae8e 100644
--- a/mmdeploy/codebase/mmdet3d/models/base.py
+++ b/mmdeploy/codebase/mmdet3d/models/base.py
@@ -18,6 +18,6 @@ def base3ddetector__forward_test(ctx,
 
 @FUNCTION_REWRITER.register_rewriter(
     'mmdet3d.models.detectors.base.Base3DDetector.forward')
-def base3ddetector__forward(ctx, self, *args):
+def base3ddetector__forward(ctx, self, *args, **kwargs):
     """Rewrite this function to run the model directly."""
     return self.forward_test(*args)
diff --git a/mmdeploy/codebase/mmedit/deploy/super_resolution.py b/mmdeploy/codebase/mmedit/deploy/super_resolution.py
index e7938c280..59e2cb48e 100644
--- a/mmdeploy/codebase/mmedit/deploy/super_resolution.py
+++ b/mmdeploy/codebase/mmedit/deploy/super_resolution.py
@@ -89,7 +89,11 @@ class SuperResolution(BaseTask):
         """
         from .super_resolution_model import build_super_resolution_model
         model = build_super_resolution_model(
-            model_files, self.model_cfg, self.deploy_cfg, device=self.device)
+            model_files,
+            self.model_cfg,
+            self.deploy_cfg,
+            device=self.device,
+            **kwargs)
         return model
 
     def build_pytorch_model(self,
diff --git a/mmdeploy/codebase/mmedit/deploy/super_resolution_model.py b/mmdeploy/codebase/mmedit/deploy/super_resolution_model.py
index 454de8b95..85d44cf99 100644
--- a/mmdeploy/codebase/mmedit/deploy/super_resolution_model.py
+++ b/mmdeploy/codebase/mmedit/deploy/super_resolution_model.py
@@ -40,16 +40,20 @@ class End2EndModel(BaseBackendModel):
                  backend_files: Sequence[str],
                  device: str,
                  model_cfg: mmcv.Config,
-                 deploy_cfg: Union[str, mmcv.Config] = None):
+                 deploy_cfg: Union[str, mmcv.Config] = None,
+                 **kwargs):
         super().__init__(deploy_cfg=deploy_cfg)
         self.deploy_cfg = deploy_cfg
         self.test_cfg = model_cfg.test_cfg
         self.allowed_metrics = {'PSNR': psnr, 'SSIM': ssim}
         self._init_wrapper(
-            backend=backend, backend_files=backend_files, device=device)
+            backend=backend,
+            backend_files=backend_files,
+            device=device,
+            **kwargs)
 
     def _init_wrapper(self, backend: Backend, backend_files: Sequence[str],
-                      device: str):
+                      device: str, **kwargs):
         output_names = self.output_names
         self.wrapper = BaseBackendModel._build_wrapper(
             backend=backend,
@@ -57,7 +61,8 @@ class End2EndModel(BaseBackendModel):
             device=device,
             input_names=[self.input_name],
             output_names=output_names,
-            deploy_cfg=self.deploy_cfg)
+            deploy_cfg=self.deploy_cfg,
+            **kwargs)
 
     def forward(self,
                 lq: torch.Tensor,
@@ -218,7 +223,7 @@ class SDKEnd2EndModel(End2EndModel):
            list | dict: High-resolution image or evaluation results.
""" img = tensor2img(lq) - output = self.wrapper.invoke([img])[0] + output = self.wrapper.invoke(img) if test_mode: output = torch.from_numpy(output) output = output.permute(2, 0, 1) @@ -231,8 +236,8 @@ class SDKEnd2EndModel(End2EndModel): def build_super_resolution_model(model_files: Sequence[str], model_cfg: Union[str, mmcv.Config], - deploy_cfg: Union[str, - mmcv.Config], device: str): + deploy_cfg: Union[str, mmcv.Config], + device: str, **kwargs): model_cfg = load_config(model_cfg)[0] deploy_cfg = load_config(deploy_cfg)[0] @@ -245,6 +250,7 @@ def build_super_resolution_model(model_files: Sequence[str], backend_files=model_files, device=device, model_cfg=model_cfg, - deploy_cfg=deploy_cfg) + deploy_cfg=deploy_cfg, + **kwargs) return backend_model diff --git a/mmdeploy/codebase/mmocr/deploy/text_detection.py b/mmdeploy/codebase/mmocr/deploy/text_detection.py index b858694c8..7fec8a010 100644 --- a/mmdeploy/codebase/mmocr/deploy/text_detection.py +++ b/mmdeploy/codebase/mmocr/deploy/text_detection.py @@ -76,7 +76,11 @@ class TextDetection(BaseTask): """ from .text_detection_model import build_text_detection_model model = build_text_detection_model( - model_files, self.model_cfg, self.deploy_cfg, device=self.device) + model_files, + self.model_cfg, + self.deploy_cfg, + device=self.device, + **kwargs) return model.eval() def build_pytorch_model(self, diff --git a/mmdeploy/codebase/mmocr/deploy/text_detection_model.py b/mmdeploy/codebase/mmocr/deploy/text_detection_model.py index d6917161d..561acef17 100644 --- a/mmdeploy/codebase/mmocr/deploy/text_detection_model.py +++ b/mmdeploy/codebase/mmocr/deploy/text_detection_model.py @@ -43,6 +43,7 @@ class End2EndModel(BaseBackendModel): device: str, deploy_cfg: Union[str, mmcv.Config] = None, model_cfg: Union[str, mmcv.Config] = None, + **kwargs, ): super(End2EndModel, self).__init__(deploy_cfg=deploy_cfg) model_cfg, deploy_cfg = load_config(model_cfg, deploy_cfg) @@ -50,10 +51,13 @@ class End2EndModel(BaseBackendModel): self.show_score = False self.bbox_head = build_head(model_cfg.model.bbox_head) self._init_wrapper( - backend=backend, backend_files=backend_files, device=device) + backend=backend, + backend_files=backend_files, + device=device, + **kwargs) def _init_wrapper(self, backend: Backend, backend_files: Sequence[str], - device: str): + device: str, **kwargs): """Initialize the wrapper of backends. Args: @@ -69,7 +73,8 @@ class End2EndModel(BaseBackendModel): device=device, input_names=[self.input_name], output_names=output_names, - deploy_cfg=self.deploy_cfg) + deploy_cfg=self.deploy_cfg, + **kwargs) def forward(self, img: Sequence[torch.Tensor], img_metas: Sequence[Sequence[dict]], *args, **kwargs) -> list: @@ -164,7 +169,7 @@ class SDKEnd2EndModel(End2EndModel): list: A list contains predictions. """ boundaries = self.wrapper.invoke( - [img[0].contiguous().detach().cpu().numpy()])[0] + img[0].contiguous().detach().cpu().numpy()) boundaries = [list(x) for x in boundaries] return [ dict( diff --git a/mmdeploy/codebase/mmocr/deploy/text_recognition_model.py b/mmdeploy/codebase/mmocr/deploy/text_recognition_model.py index e5f1fb896..a8eca743c 100644 --- a/mmdeploy/codebase/mmocr/deploy/text_recognition_model.py +++ b/mmdeploy/codebase/mmocr/deploy/text_recognition_model.py @@ -181,9 +181,9 @@ class SDKEnd2EndModel(End2EndModel): Returns: list[str]: Text label result of each image. 
""" - results = self.wrapper.invoke( - [img[0].contiguous().detach().cpu().numpy()]) - results = [dict(text=text, score=score) for text, score in results] + text, score = self.wrapper.invoke( + img[0].contiguous().detach().cpu().numpy()) + results = [dict(text=text, score=score)] return results diff --git a/mmdeploy/codebase/mmpose/deploy/pose_detection.py b/mmdeploy/codebase/mmpose/deploy/pose_detection.py index 60d5e4770..7c94fc35d 100644 --- a/mmdeploy/codebase/mmpose/deploy/pose_detection.py +++ b/mmdeploy/codebase/mmpose/deploy/pose_detection.py @@ -38,6 +38,8 @@ def process_model_config( sdk_pipeline = [] color_type = 'color' channel_order = 'rgb' + if input_shape is None: + input_shape = np.array(cfg.data_cfg['image_size']) idx = 0 while idx < len(test_pipeline): @@ -99,7 +101,11 @@ class PoseDetection(BaseTask): """ from .pose_detection_model import build_pose_detection_model model = build_pose_detection_model( - model_files, self.model_cfg, self.deploy_cfg, device=self.device) + model_files, + self.model_cfg, + self.deploy_cfg, + device=self.device, + **kwargs) return model.eval() def build_pytorch_model(self, diff --git a/mmdeploy/codebase/mmpose/deploy/pose_detection_model.py b/mmdeploy/codebase/mmpose/deploy/pose_detection_model.py index ddfb462d1..8d6e28eb2 100644 --- a/mmdeploy/codebase/mmpose/deploy/pose_detection_model.py +++ b/mmdeploy/codebase/mmpose/deploy/pose_detection_model.py @@ -47,13 +47,16 @@ class End2EndModel(BaseBackendModel): self.deploy_cfg = deploy_cfg self.model_cfg = model_cfg self._init_wrapper( - backend=backend, backend_files=backend_files, device=device) + backend=backend, + backend_files=backend_files, + device=device, + **kwargs) # create base_head for decoding heatmap base_head = builder.build_head(model_cfg.model.keypoint_head) base_head.test_cfg = model_cfg.model.test_cfg self.base_head = base_head - def _init_wrapper(self, backend, backend_files, device): + def _init_wrapper(self, backend, backend_files, device, **kwargs): """Initialize backend wrapper. Args: @@ -69,7 +72,8 @@ class End2EndModel(BaseBackendModel): device=device, input_names=[self.input_name], output_names=output_names, - deploy_cfg=self.deploy_cfg) + deploy_cfg=self.deploy_cfg, + **kwargs) def forward(self, img: torch.Tensor, img_metas: Sequence[Sequence[dict]], *args, **kwargs): @@ -214,8 +218,8 @@ class SDKEnd2EndModel(End2EndModel): image_paths.append(img_meta['image_file']) bbox_ids.append(img_meta['bbox_id']) - pred = self.wrapper.handle( - [img[0].contiguous().detach().cpu().numpy()], [sdk_boxes])[0] + pred = self.wrapper.handle(img[0].contiguous().detach().cpu().numpy(), + sdk_boxes) result = dict( preds=pred, @@ -254,6 +258,7 @@ def build_pose_detection_model(model_files: Sequence[str], backend_files=model_files, device=device, model_cfg=model_cfg, - deploy_cfg=deploy_cfg) + deploy_cfg=deploy_cfg, + **kwargs) return backend_pose_model diff --git a/mmdeploy/codebase/mmrotate/core/bbox/__init__.py b/mmdeploy/codebase/mmrotate/core/bbox/__init__.py index 2933ca8be..d7f70b075 100644 --- a/mmdeploy/codebase/mmrotate/core/bbox/__init__.py +++ b/mmdeploy/codebase/mmrotate/core/bbox/__init__.py @@ -1,3 +1,5 @@ # Copyright (c) OpenMMLab. All rights reserved. 
 from .delta_midpointoffset_rbbox_coder import *  # noqa: F401,F403
 from .delta_xywha_rbbox_coder import *  # noqa: F401,F403
+from .gliding_vertex_coder import *  # noqa: F401,F403
+from .transforms import *  # noqa: F401,F403
diff --git a/mmdeploy/codebase/mmrotate/core/bbox/gliding_vertex_coder.py b/mmdeploy/codebase/mmrotate/core/bbox/gliding_vertex_coder.py
new file mode 100644
index 000000000..3e7c07955
--- /dev/null
+++ b/mmdeploy/codebase/mmrotate/core/bbox/gliding_vertex_coder.py
@@ -0,0 +1,31 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+
+from mmdeploy.core import FUNCTION_REWRITER
+
+
+@FUNCTION_REWRITER.register_rewriter(
+    'mmrotate.core.bbox.coder.gliding_vertex_coder'
+    '.GVFixCoder.decode')
+def gvfixcoder__decode(ctx, self, hbboxes, fix_deltas):
+    """Rewriter for GVFixCoder decode, supporting inputs with more
+    dimensions."""
+
+    from mmrotate.core.bbox.transforms import poly2obb
+    x1 = hbboxes[..., 0::4]
+    y1 = hbboxes[..., 1::4]
+    x2 = hbboxes[..., 2::4]
+    y2 = hbboxes[..., 3::4]
+    w = hbboxes[..., 2::4] - hbboxes[..., 0::4]
+    h = hbboxes[..., 3::4] - hbboxes[..., 1::4]
+
+    pred_t_x = x1 + w * fix_deltas[..., 0::4]
+    pred_r_y = y1 + h * fix_deltas[..., 1::4]
+    pred_d_x = x2 - w * fix_deltas[..., 2::4]
+    pred_l_y = y2 - h * fix_deltas[..., 3::4]
+
+    polys = torch.stack(
+        [pred_t_x, y1, x2, pred_r_y, pred_d_x, y2, x1, pred_l_y], dim=-1)
+    polys = polys.flatten(2)
+    rbboxes = poly2obb(polys, self.version)
+
+    return rbboxes
diff --git a/mmdeploy/codebase/mmrotate/core/bbox/transforms.py b/mmdeploy/codebase/mmrotate/core/bbox/transforms.py
new file mode 100644
index 000000000..c02a130ca
--- /dev/null
+++ b/mmdeploy/codebase/mmrotate/core/bbox/transforms.py
@@ -0,0 +1,102 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from mmrotate.core.bbox.transforms import norm_angle
+
+from mmdeploy.core import FUNCTION_REWRITER
+
+
+@FUNCTION_REWRITER.register_rewriter(
+    func_name='mmrotate.core.bbox.transforms.poly2obb_le90',
+    backend='tensorrt')
+def poly2obb_le90__tensorrt(ctx, polys: torch.Tensor) -> torch.Tensor:
+    """This is a rewrite for poly2obb to remove NonZero ops.
+
+    Args:
+        ctx : context of the rewriter.
+        polys (torch.Tensor): [x0,y0,x1,y1,x2,y2,x3,y3]
+
+    Returns:
+        torch.Tensor: [x_ctr,y_ctr,w,h,angle]
+    """
+    polys = torch.reshape(polys, [-1, 8])
+    pt1, pt2, pt3, pt4 = polys[..., :8].chunk(4, 1)
+    edge1 = torch.sqrt(
+        torch.pow(pt1[..., 0] - pt2[..., 0], 2) +
+        torch.pow(pt1[..., 1] - pt2[..., 1], 2))
+    edge2 = torch.sqrt(
+        torch.pow(pt2[..., 0] - pt3[..., 0], 2) +
+        torch.pow(pt2[..., 1] - pt3[..., 1], 2))
+    angles1 = torch.atan2((pt2[..., 1] - pt1[..., 1]),
+                          (pt2[..., 0] - pt1[..., 0]))
+    angles2 = torch.atan2((pt4[..., 1] - pt1[..., 1]),
+                          (pt4[..., 0] - pt1[..., 0]))
+    angles = torch.where(edge1 > edge2, angles1, angles2)
+    angles = norm_angle(angles, 'le90')
+    x_ctr = (pt1[..., 0] + pt3[..., 0]) / 2.0
+    y_ctr = (pt1[..., 1] + pt3[..., 1]) / 2.0
+    edges = torch.stack([edge1, edge2], dim=1)
+    width, _ = torch.max(edges, 1)
+    height, _ = torch.min(edges, 1)
+    return torch.stack([x_ctr, y_ctr, width, height, angles], 1)
+
+
+@FUNCTION_REWRITER.register_rewriter(
+    func_name='mmrotate.core.bbox.transforms.poly2obb_le135')
+def poly2obb_le135__default(ctx, polys):
+    """This is a rewrite for poly2obb to remove NonZero ops.
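+
+    (`NonZero` typically enters the exported graph through data-dependent
+    boolean indexing in the original implementation; TensorRT cannot infer
+    static shapes through it, hence this rewrite.)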
+
+    Args:
+        polys (torch.Tensor): [x0,y0,x1,y1,x2,y2,x3,y3]
+
+    Returns:
+        obbs (torch.Tensor): [x_ctr,y_ctr,w,h,angle]
+    """
+    polys = torch.reshape(polys, [-1, 8])
+    pt1, pt2, pt3, pt4 = polys[..., :8].chunk(4, 1)
+    edge1 = torch.sqrt(
+        torch.pow(pt1[..., 0] - pt2[..., 0], 2) +
+        torch.pow(pt1[..., 1] - pt2[..., 1], 2))
+    edge2 = torch.sqrt(
+        torch.pow(pt2[..., 0] - pt3[..., 0], 2) +
+        torch.pow(pt2[..., 1] - pt3[..., 1], 2))
+    angles1 = torch.atan2((pt2[..., 1] - pt1[..., 1]),
+                          (pt2[..., 0] - pt1[..., 0]))
+    angles2 = torch.atan2((pt4[..., 1] - pt1[..., 1]),
+                          (pt4[..., 0] - pt1[..., 0]))
+    angles = torch.where(edge1 > edge2, angles1, angles2)
+    angles = norm_angle(angles, 'le135')
+    x_ctr = (pt1[..., 0] + pt3[..., 0]) / 2.0
+    y_ctr = (pt1[..., 1] + pt3[..., 1]) / 2.0
+    edges = torch.stack([edge1, edge2], dim=1)
+    width, _ = torch.max(edges, 1)
+    height, _ = torch.min(edges, 1)
+    return torch.stack([x_ctr, y_ctr, width, height, angles], 1)
+
+
+@FUNCTION_REWRITER.register_rewriter(
+    func_name='mmrotate.core.bbox.transforms.obb2poly_le135')
+def obb2poly_le135__default(ctx, rboxes):
+    """Support batched input.
+
+    Args:
+        ctx : context of rewriter
+        rboxes (torch.Tensor): [x_ctr,y_ctr,w,h,angle]
+
+    Returns:
+        polys (torch.Tensor): [x0,y0,x1,y1,x2,y2,x3,y3]
+    """
+    B, N = rboxes.shape[:2]
+    x_ctr, y_ctr, width, height, angle = rboxes[..., 0], rboxes[
+        ..., 1], rboxes[..., 2], rboxes[..., 3], rboxes[..., 4]
+    tl_x, tl_y, br_x, br_y = \
+        -width * 0.5, -height * 0.5, \
+        width * 0.5, height * 0.5
+    rects = torch.stack([tl_x, br_x, br_x, tl_x, tl_y, tl_y, br_y, br_y],
+                        dim=-1).reshape(B, N, 2, 4)
+    sin, cos = torch.sin(angle), torch.cos(angle)
+    M = torch.stack([cos, -sin, sin, cos], dim=-1).reshape(B, N, 2, 2)
+    polys = M.matmul(rects).permute(0, 1, 3, 2)
+    xy_ctr = torch.stack([x_ctr, y_ctr], dim=-1).unsqueeze(-2)
+    polys += xy_ctr
+    polys = polys.reshape(B, N, 8)
+    return polys.contiguous()
diff --git a/mmdeploy/codebase/mmrotate/core/post_processing/bbox_nms.py b/mmdeploy/codebase/mmrotate/core/post_processing/bbox_nms.py
index 7c884937a..4a7b8375b 100644
--- a/mmdeploy/codebase/mmrotate/core/post_processing/bbox_nms.py
+++ b/mmdeploy/codebase/mmrotate/core/post_processing/bbox_nms.py
@@ -5,7 +5,7 @@ from torch import Tensor
 
 import mmdeploy
 from mmdeploy.core import FUNCTION_REWRITER, mark
-from mmdeploy.mmcv.ops import (ONNXNMSop, ONNXNMSRotatedOp,
+from mmdeploy.mmcv.ops import (ONNXNMSop, ONNXNMSRotatedOp, TRTBatchedNMSop,
                                TRTBatchedRotatedNMSop)
 
 
@@ -77,6 +77,7 @@ def select_rnms_index(scores: torch.Tensor,
 
 def _multiclass_nms_rotated(boxes: Tensor,
                             scores: Tensor,
+                            max_output_boxes_per_class: int = 1000,
                             iou_threshold: float = 0.1,
                             score_threshold: float = 0.05,
                             pre_top_k: int = -1,
@@ -127,14 +128,14 @@ def _multiclass_nms_rotated(boxes: Tensor,
     func_name='mmdeploy.codebase.mmrotate.core.post_processing.bbox_nms.'
     '_multiclass_nms_rotated',
     backend='tensorrt')
-def multiclass_nms_rotated_static(ctx,
-                                  boxes: Tensor,
-                                  scores: Tensor,
-                                  max_output_boxes_per_class: int = 1000,
-                                  iou_threshold: float = 0.5,
-                                  score_threshold: float = 0.05,
-                                  pre_top_k: int = -1,
-                                  keep_top_k: int = -1):
+def multiclass_nms_rotated__tensorrt(ctx,
+                                     boxes: Tensor,
+                                     scores: Tensor,
+                                     max_output_boxes_per_class: int = 1000,
+                                     iou_threshold: float = 0.5,
+                                     score_threshold: float = 0.05,
+                                     pre_top_k: int = -1,
+                                     keep_top_k: int = -1):
     """Wrapper for `multiclass_nms_rotated` with TensorRT.
    Args:
@@ -178,18 +179,14 @@ def multiclass_nms_rotated(*args, **kwargs):
         _multiclass_nms_rotated(*args, **kwargs)
 
 
-@mark(
-    'fake_multiclass_nms_rotated',
-    inputs=['boxes', 'scores'],
-    outputs=['dets', 'labels'])
-def fake_multiclass_nms_rotated(boxes: Tensor,
-                                scores: Tensor,
-                                max_output_boxes_per_class: int = 1000,
-                                iou_threshold: float = 0.5,
-                                score_threshold: float = 0.0,
-                                pre_top_k: int = -1,
-                                keep_top_k: int = -1,
-                                version: str = 'le90'):
+def _fake_multiclass_nms_rotated(boxes: Tensor,
+                                 scores: Tensor,
+                                 max_output_boxes_per_class: int = 1000,
+                                 iou_threshold: float = 0.5,
+                                 score_threshold: float = 0.0,
+                                 pre_top_k: int = -1,
+                                 keep_top_k: int = -1,
+                                 version: str = 'le90'):
     """Fake NMSRotated for multi-class bboxes which use horizontal bboxes
     for NMS, but return the rotated bboxes result.
 
@@ -220,3 +217,70 @@ def _fake_multiclass_nms_rotated(boxes: Tensor,
         scores, boxes, selected_indices, batch_size, keep_top_k=keep_top_k)
 
     return dets, labels
+
+
+@mark(
+    'fake_multiclass_nms_rotated',
+    inputs=['boxes', 'scores'],
+    outputs=['dets', 'labels'])
+def fake_multiclass_nms_rotated(*args, **kwargs):
+    """Wrapper function for `_fake_multiclass_nms_rotated`."""
+    return mmdeploy.codebase.mmrotate.core.post_processing.bbox_nms.\
+        _fake_multiclass_nms_rotated(*args, **kwargs)
+
+
+@FUNCTION_REWRITER.register_rewriter(
+    func_name='mmdeploy.codebase.mmrotate.core.post_processing.bbox_nms.'
+    '_fake_multiclass_nms_rotated',
+    backend='tensorrt')
+def _fake_multiclass_nms_rotated__tensorrt(
+        ctx,
+        boxes: Tensor,
+        scores: Tensor,
+        max_output_boxes_per_class: int = 1000,
+        iou_threshold: float = 0.5,
+        score_threshold: float = 0.0,
+        pre_top_k: int = -1,
+        keep_top_k: int = -1,
+        version: str = 'le90'):
+    """Wrapper for `fake_multiclass_nms_rotated` with TensorRT.
+
+    Args:
+        ctx (ContextCaller): The context with additional information.
+        boxes (Tensor): The bounding boxes of shape [N, num_boxes, 5].
+        scores (Tensor): The detection scores of shape
+            [N, num_boxes, num_classes].
+        max_output_boxes_per_class (int): Maximum number of output
+            boxes per class of nms. Defaults to 1000.
+        iou_threshold (float): IOU threshold of nms. Defaults to 0.5.
+        score_threshold (float): score threshold of nms.
+            Defaults to 0.0.
+        pre_top_k (int): Number of top K boxes to keep before nms.
+            Defaults to -1.
+        keep_top_k (int): Number of top K boxes to keep after nms.
+            Defaults to -1.
+        version (str): Angle representation of the rotated boxes.
+            Defaults to 'le90'.
+
+    Returns:
+        tuple[Tensor, Tensor]: (dets, labels), `dets` of shape [N, num_det, 6]
+        and `labels` of shape [N, num_det].
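+
+    Note:
+        "Fake" means NMS runs on the horizontal envelopes of the rotated
+        boxes (via `obb2xyxy`) while the returned `dets` keep the rotated
+        box parametrization.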
+    """
+    batch_size = boxes.size(0)
+    device = boxes.device
+    hboxes = obb2xyxy(boxes, version)
+    hboxes = hboxes if hboxes.dim() == 4 else hboxes.unsqueeze(2)
+    keep_top_k = max_output_boxes_per_class if keep_top_k < 0 else min(
+        max_output_boxes_per_class, keep_top_k)
+    if pre_top_k > 512 * 10 or pre_top_k < 0:
+        pre_top_k = 512 * 10
+
+    dets, labels, index = TRTBatchedNMSop.apply(hboxes, scores,
+                                                int(scores.shape[-1]),
+                                                pre_top_k, keep_top_k,
+                                                iou_threshold, score_threshold,
+                                                -1, True)
+    dets = torch.cat([boxes, scores], dim=-1)
+    dets = torch.cat([dets, dets[:, :1, :] * 0], dim=1)
+    batch_inds = torch.arange(batch_size, device=device).view(-1, 1)
+    dets = dets[batch_inds, index, :]
+
+    return dets, labels
diff --git a/mmdeploy/codebase/mmrotate/deploy/rotated_detection_model.py b/mmdeploy/codebase/mmrotate/deploy/rotated_detection_model.py
index dd73665f4..0d25261a6 100644
--- a/mmdeploy/codebase/mmrotate/deploy/rotated_detection_model.py
+++ b/mmdeploy/codebase/mmrotate/deploy/rotated_detection_model.py
@@ -75,6 +75,33 @@ class End2EndModel(BaseBackendModel):
                 output_names=output_names,
                 deploy_cfg=self.deploy_cfg)
 
+    @staticmethod
+    def __clear_outputs(
+        test_outputs: List[Union[torch.Tensor, np.ndarray]]
+    ) -> List[Union[List[torch.Tensor], List[np.ndarray]]]:
+        """Removes additional outputs and detections with zero or negative
+        scores.
+
+        Args:
+            test_outputs (List[Union[torch.Tensor, np.ndarray]]):
+                outputs of forward_test.
+
+        Returns:
+            List[Union[List[torch.Tensor], List[np.ndarray]]]:
+                outputs without zero-score objects.
+        """
+        batch_size = len(test_outputs[0])
+
+        num_outputs = len(test_outputs)
+        outputs = [[None for _ in range(batch_size)]
+                   for _ in range(num_outputs)]
+
+        for i in range(batch_size):
+            inds = test_outputs[0][i, :, -1] > 0.0
+            for output_id in range(num_outputs):
+                outputs[output_id][i] = test_outputs[output_id][i, inds, ...]
+        return outputs
+
     def forward(self, img: Sequence[torch.Tensor],
                 img_metas: Sequence[Sequence[dict]], *args, **kwargs) -> list:
         """Run forward inference.
@@ -91,6 +118,7 @@ class End2EndModel(BaseBackendModel):
         input_img = img[0].contiguous()
         img_metas = img_metas[0]
         outputs = self.forward_test(input_img, img_metas, *args, **kwargs)
+        outputs = End2EndModel.__clear_outputs(outputs)
         batch_dets, batch_labels = outputs[:2]
         batch_size = input_img.shape[0]
         rescale = kwargs.get('rescale', False)
@@ -184,7 +212,7 @@ class SDKEnd2EndModel(End2EndModel):
         """
         results = []
         dets, labels = self.wrapper.invoke(
-            [img[0].contiguous().detach().cpu().numpy()])[0]
+            img[0].contiguous().detach().cpu().numpy())
         dets_results = [dets[labels == i, :] for i in range(len(self.CLASSES))]
         results.append(dets_results)
 
diff --git a/mmdeploy/codebase/mmrotate/models/__init__.py b/mmdeploy/codebase/mmrotate/models/__init__.py
index 32a7d21e7..65edb9dba 100644
--- a/mmdeploy/codebase/mmrotate/models/__init__.py
+++ b/mmdeploy/codebase/mmrotate/models/__init__.py
@@ -1,17 +1,4 @@
 # Copyright (c) OpenMMLab. All rights reserved.
diff --git a/mmdeploy/codebase/mmrotate/models/__init__.py b/mmdeploy/codebase/mmrotate/models/__init__.py
index 32a7d21e7..65edb9dba 100644
--- a/mmdeploy/codebase/mmrotate/models/__init__.py
+++ b/mmdeploy/codebase/mmrotate/models/__init__.py
@@ -1,17 +1,4 @@
 # Copyright (c) OpenMMLab. All rights reserved.
-from .oriented_standard_roi_head import (
-    oriented_standard_roi_head__simple_test,
-    oriented_standard_roi_head__simple_test_bboxes)
-from .rotated_anchor_head import rotated_anchor_head__get_bbox
-from .rotated_bbox_head import rotated_bbox_head__get_bboxes
-from .rotated_rpn_head import rotated_rpn_head__get_bboxes
-from .single_stage_rotated_detector import \
-    single_stage_rotated_detector__simple_test
-
-__all__ = [
-    'single_stage_rotated_detector__simple_test',
-    'rotated_anchor_head__get_bbox', 'rotated_rpn_head__get_bboxes',
-    'oriented_standard_roi_head__simple_test',
-    'oriented_standard_roi_head__simple_test_bboxes',
-    'rotated_bbox_head__get_bboxes'
-]
+from .dense_heads import *  # noqa: F401,F403
+from .roi_heads import *  # noqa: F401,F403
+from .single_stage_rotated_detector import *  # noqa: F401,F403
diff --git a/mmdeploy/codebase/mmrotate/models/dense_heads/__init__.py b/mmdeploy/codebase/mmrotate/models/dense_heads/__init__.py
new file mode 100644
index 000000000..90163f835
--- /dev/null
+++ b/mmdeploy/codebase/mmrotate/models/dense_heads/__init__.py
@@ -0,0 +1,9 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from .oriented_rpn_head import oriented_rpn_head__get_bboxes
+from .rotated_anchor_head import rotated_anchor_head__get_bbox
+from .rotated_rpn_head import rotated_rpn_head__get_bboxes
+
+__all__ = [
+    'oriented_rpn_head__get_bboxes', 'rotated_anchor_head__get_bbox',
+    'rotated_rpn_head__get_bboxes'
+]
diff --git a/mmdeploy/codebase/mmrotate/models/dense_heads/oriented_rpn_head.py b/mmdeploy/codebase/mmrotate/models/dense_heads/oriented_rpn_head.py
new file mode 100644
index 000000000..9b2adfc2f
--- /dev/null
+++ b/mmdeploy/codebase/mmrotate/models/dense_heads/oriented_rpn_head.py
@@ -0,0 +1,141 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+
+from mmdeploy.codebase.mmdet import (get_post_processing_params,
+                                     pad_with_value_if_necessary)
+from mmdeploy.codebase.mmrotate.core.post_processing import \
+    fake_multiclass_nms_rotated
+from mmdeploy.core import FUNCTION_REWRITER
+from mmdeploy.utils import is_dynamic_shape
+
+
+@FUNCTION_REWRITER.register_rewriter(
+    'mmrotate.models.dense_heads.OrientedRPNHead.get_bboxes')
+def oriented_rpn_head__get_bboxes(ctx,
+                                  self,
+                                  cls_scores,
+                                  bbox_preds,
+                                  score_factors=None,
+                                  img_metas=None,
+                                  cfg=None,
+                                  rescale=False,
+                                  with_nms=True,
+                                  **kwargs):
+    """Rewrite `get_bboxes` of `OrientedRPNHead` for default backend.
+
+    Rewrite this function for model deployment: it transforms the network
+    output for a batch into bbox predictions.
+
+    Args:
+        ctx (ContextCaller): The context with additional information.
+        self (OrientedRPNHead): The instance of the class OrientedRPNHead.
+        cls_scores (list[Tensor]): Box scores for each scale level
+            with shape (N, num_anchors * num_classes, H, W).
+        bbox_preds (list[Tensor]): Box energies / deltas for each scale
+            level with shape (N, num_anchors * 4, H, W).
+        score_factors (list[Tensor], Optional): Score factors for
+            all scale levels, each is a 4D-tensor, has shape
+            (batch_size, num_priors * 1, H, W). Default None.
+        img_metas (list[dict]): Meta information of the image, e.g.,
+            image size, scaling factor, etc.
+        cfg (mmcv.Config | None): Test / postprocessing configuration,
+            if None, test_cfg would be used. Default: None.
+        rescale (bool): If True, return boxes in original image space.
+            Default False.
+        with_nms (bool): If True, do nms before return boxes.
+            Default: True.
+    Returns:
+        If with_nms == True:
+            tuple[Tensor, Tensor]: (dets, labels),
+            `dets` of shape [N, num_det, 6] and `labels` of shape
+            [N, num_det].
+        Else:
+            tuple[Tensor, Tensor]: batch_mlvl_bboxes, batch_mlvl_scores
+    """
+    assert len(cls_scores) == len(bbox_preds)
+    deploy_cfg = ctx.cfg
+    is_dynamic_flag = is_dynamic_shape(deploy_cfg)
+    num_levels = len(cls_scores)
+
+    device = cls_scores[0].device
+    featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)]
+    mlvl_anchors = self.anchor_generator.grid_anchors(
+        featmap_sizes, device=device)
+
+    mlvl_cls_scores = [cls_scores[i].detach() for i in range(num_levels)]
+    mlvl_bbox_preds = [bbox_preds[i].detach() for i in range(num_levels)]
+    assert len(mlvl_cls_scores) == len(mlvl_bbox_preds) == len(mlvl_anchors)
+
+    cfg = self.test_cfg if cfg is None else cfg
+    batch_size = mlvl_cls_scores[0].shape[0]
+    pre_topk = cfg.get('nms_pre', -1)
+
+    # loop over features, decode boxes
+    mlvl_valid_bboxes = []
+    mlvl_scores = []
+    mlvl_valid_anchors = []
+    for level_id, cls_score, bbox_pred, anchors in zip(
+            range(num_levels), mlvl_cls_scores, mlvl_bbox_preds, mlvl_anchors):
+        assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
+        cls_score = cls_score.permute(0, 2, 3, 1)
+        if self.use_sigmoid_cls:
+            cls_score = cls_score.reshape(batch_size, -1)
+            scores = cls_score.sigmoid()
+        else:
+            cls_score = cls_score.reshape(batch_size, -1, 2)
+            # We set FG labels to [0, num_class-1] and BG label to
+            # num_class in RPN head since mmdet v2.5, which is unified to
+            # be consistent with other head since mmdet v2.0. In mmdet v2.0
+            # to v2.4 we keep BG label as 0 and FG label as 1 in rpn head.
+            scores = cls_score.softmax(-1)[..., 0]
+        scores = scores.reshape(batch_size, -1, 1)
+        bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(batch_size, -1, 6)
+
+        # use static anchor if input shape is static
+        if not is_dynamic_flag:
+            anchors = anchors.data
+
+        anchors = anchors.unsqueeze(0)
+
+        # topk in tensorrt does not support shape<k
+        # concatenate zeros to enable topk
+        scores = pad_with_value_if_necessary(scores, 1, pre_topk, 0.)
+        bbox_pred = pad_with_value_if_necessary(bbox_pred, 1, pre_topk)
+        anchors = pad_with_value_if_necessary(anchors, 1, pre_topk)
+
+        if pre_topk > 0:
+            _, topk_inds = scores.squeeze(2).topk(pre_topk)
+            batch_inds = torch.arange(batch_size, device=device).unsqueeze(-1)
+            prior_inds = topk_inds.new_zeros((1, 1))
+            anchors = anchors[prior_inds, topk_inds, :]
+            bbox_pred = bbox_pred[batch_inds, topk_inds, :]
+            scores = scores[batch_inds, topk_inds, :]
+        mlvl_valid_bboxes.append(bbox_pred)
+        mlvl_scores.append(scores)
+        mlvl_valid_anchors.append(anchors)
+
+    batch_mlvl_bboxes = torch.cat(mlvl_valid_bboxes, dim=1)
+    batch_mlvl_scores = torch.cat(mlvl_scores, dim=1)
+    batch_mlvl_anchors = torch.cat(mlvl_valid_anchors, dim=1)
+    batch_mlvl_bboxes = self.bbox_coder.decode(
+        batch_mlvl_anchors,
+        batch_mlvl_bboxes,
+        max_shape=img_metas[0]['img_shape'])
+    # ignore background class
+    if not self.use_sigmoid_cls:
+        batch_mlvl_scores = batch_mlvl_scores[..., :self.num_classes]
+    if not with_nms:
+        return batch_mlvl_bboxes, batch_mlvl_scores
+
+    post_params = get_post_processing_params(deploy_cfg)
+    iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold)
+    keep_top_k = cfg.get('max_per_img', post_params.keep_top_k)
+    # only one class in rpn
+    max_output_boxes_per_class = keep_top_k
+    return fake_multiclass_nms_rotated(
+        batch_mlvl_bboxes,
+        batch_mlvl_scores,
+        max_output_boxes_per_class,
+        iou_threshold=iou_threshold,
+        keep_top_k=keep_top_k,
+        version=self.version)
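Both RPN rewrites in this patch replace the old `expand`-then-gather on anchors with a broadcasted gather: because the (padded) anchors are identical for every image in the batch, a single zero index (`prior_inds`) can stand in for the batch index. A rough equivalence check under that assumption:

    import torch

    batch_size, num_anchors, pre_topk = 2, 8, 3
    anchors = torch.rand(1, num_anchors, 4)   # shared across the batch
    scores = torch.rand(batch_size, num_anchors)

    _, topk_inds = scores.topk(pre_topk)                 # (B, K)
    batch_inds = torch.arange(batch_size).unsqueeze(-1)  # (B, 1)
    prior_inds = topk_inds.new_zeros((1, 1))             # always row 0

    gathered = anchors[prior_inds, topk_inds, :]         # (B, K, 4)

    # Old formulation: materialize per-image anchors first, then gather.
    reference = anchors.expand(batch_size, -1, -1)[batch_inds, topk_inds, :]
    assert torch.equal(gathered, reference)

Skipping the `expand` keeps one fewer broadcast op in the exported graph.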
diff --git a/mmdeploy/codebase/mmrotate/models/rotated_anchor_head.py b/mmdeploy/codebase/mmrotate/models/dense_heads/rotated_anchor_head.py
similarity index 100%
rename from mmdeploy/codebase/mmrotate/models/rotated_anchor_head.py
rename to mmdeploy/codebase/mmrotate/models/dense_heads/rotated_anchor_head.py
diff --git a/mmdeploy/codebase/mmrotate/models/rotated_rpn_head.py b/mmdeploy/codebase/mmrotate/models/dense_heads/rotated_rpn_head.py
similarity index 91%
rename from mmdeploy/codebase/mmrotate/models/rotated_rpn_head.py
rename to mmdeploy/codebase/mmrotate/models/dense_heads/rotated_rpn_head.py
index 389c67215..586bec822 100644
--- a/mmdeploy/codebase/mmrotate/models/rotated_rpn_head.py
+++ b/mmdeploy/codebase/mmrotate/models/dense_heads/rotated_rpn_head.py
@@ -3,8 +3,7 @@
 import torch
 
 from mmdeploy.codebase.mmdet import (get_post_processing_params,
                                      pad_with_value_if_necessary)
-from mmdeploy.codebase.mmrotate.core.post_processing import \
-    fake_multiclass_nms_rotated
+from mmdeploy.codebase.mmdet.core.post_processing import multiclass_nms
 from mmdeploy.core import FUNCTION_REWRITER
 from mmdeploy.utils import is_dynamic_shape
@@ -89,14 +88,13 @@ def rotated_rpn_head__get_bboxes(ctx,
             # to v2.4 we keep BG label as 0 and FG label as 1 in rpn head.
             scores = cls_score.softmax(-1)[..., 0]
         scores = scores.reshape(batch_size, -1, 1)
-        bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(batch_size, -1, 6)
+        bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(batch_size, -1, 4)
 
         # use static anchor if input shape is static
         if not is_dynamic_flag:
             anchors = anchors.data
 
-        # anchors = anchors.expand_as(bbox_pred)
-        anchors = anchors.expand(batch_size, -1, anchors.size(-1))
+        anchors = anchors.unsqueeze(0)
 
         # topk in tensorrt does not support shape<k
         # concatenate zeros to enable topk
         scores = pad_with_value_if_necessary(scores, 1, pre_topk, 0.)
         bbox_pred = pad_with_value_if_necessary(bbox_pred, 1, pre_topk)
         anchors = pad_with_value_if_necessary(anchors, 1, pre_topk)
 
         if pre_topk > 0:
             _, topk_inds = scores.squeeze(2).topk(pre_topk)
-            batch_inds = torch.arange(
-                batch_size, device=device).view(-1, 1).expand_as(topk_inds)
-            anchors = anchors[batch_inds, topk_inds, :]
+            batch_inds = torch.arange(batch_size, device=device).unsqueeze(-1)
+            prior_inds = topk_inds.new_zeros((1, 1))
+            anchors = anchors[prior_inds, topk_inds, :]
             bbox_pred = bbox_pred[batch_inds, topk_inds, :]
             scores = scores[batch_inds, topk_inds, :]
         mlvl_valid_bboxes.append(bbox_pred)
@@ -130,13 +128,16 @@ def rotated_rpn_head__get_bboxes(ctx,
 
     post_params = get_post_processing_params(deploy_cfg)
     iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold)
+    score_threshold = cfg.get('score_thr', post_params.score_threshold)
+    pre_top_k = post_params.pre_top_k
     keep_top_k = cfg.get('max_per_img', post_params.keep_top_k)
     # only one class in rpn
     max_output_boxes_per_class = keep_top_k
-    return fake_multiclass_nms_rotated(
+    return multiclass_nms(
         batch_mlvl_bboxes,
         batch_mlvl_scores,
         max_output_boxes_per_class,
         iou_threshold=iou_threshold,
-        keep_top_k=keep_top_k,
-        version=self.version)
+        score_threshold=score_threshold,
+        pre_top_k=pre_top_k,
+        keep_top_k=keep_top_k)
diff --git a/mmdeploy/codebase/mmrotate/models/roi_heads/__init__.py b/mmdeploy/codebase/mmrotate/models/roi_heads/__init__.py
new file mode 100644
index 000000000..709df00e1
--- /dev/null
+++ b/mmdeploy/codebase/mmrotate/models/roi_heads/__init__.py
@@ -0,0 +1,16 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from .gv_bbox_head import gv_bbox_head__get_bboxes +from .gv_ratio_roi_head import gv_ratio_roi_head__simple_test_bboxes +from .oriented_standard_roi_head import \ + oriented_standard_roi_head__simple_test_bboxes +from .roi_extractors import rotated_single_roi_extractor__forward__tensorrt +from .roi_trans_roi_head import roi_trans_roi_head__simple_test +from .rotated_bbox_head import rotated_bbox_head__get_bboxes + +__all__ = [ + 'gv_bbox_head__get_bboxes', 'gv_ratio_roi_head__simple_test_bboxes', + 'oriented_standard_roi_head__simple_test_bboxes', + 'roi_trans_roi_head__simple_test', + 'rotated_single_roi_extractor__forward__tensorrt', + 'rotated_bbox_head__get_bboxes' +] diff --git a/mmdeploy/codebase/mmrotate/models/roi_heads/gv_bbox_head.py b/mmdeploy/codebase/mmrotate/models/roi_heads/gv_bbox_head.py new file mode 100644 index 000000000..3d777218d --- /dev/null +++ b/mmdeploy/codebase/mmrotate/models/roi_heads/gv_bbox_head.py @@ -0,0 +1,90 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn.functional as F + +from mmdeploy.codebase.mmdet import get_post_processing_params +from mmdeploy.codebase.mmrotate.core.post_processing import \ + multiclass_nms_rotated +from mmdeploy.core import FUNCTION_REWRITER + + +@FUNCTION_REWRITER.register_rewriter( + 'mmrotate.models.roi_heads.bbox_heads.GVBBoxHead.get_bboxes') +def gv_bbox_head__get_bboxes(ctx, + self, + rois, + cls_score, + bbox_pred, + fix_pred, + ratio_pred, + img_shape, + scale_factor, + rescale=False, + cfg=None): + """Transform network output for a batch into bbox predictions. + + Args: + rois (torch.Tensor): Boxes to be transformed. Has shape + (num_boxes, 6). last dimension 5 arrange as + (batch_index, x, y, w, h, theta). + cls_score (torch.Tensor): Box scores, has shape + (num_boxes, num_classes + 1). + bbox_pred (Tensor, optional): Box energies / deltas. + has shape (num_boxes, num_classes * 6). + img_shape (Sequence[int], optional): Maximum bounds for boxes, + specifies (H, W, C) or (H, W). + scale_factor (ndarray): Scale factor of the + image arrange as (w_scale, h_scale, w_scale, h_scale). + rescale (bool): If True, return boxes in original image space. + Default: False. + cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head. Default: None + + Returns: + tuple[Tensor, Tensor]: + First tensor is `det_bboxes`, has the shape + (num_boxes, 6) and last + dimension 6 represent (cx, cy, w, h, theta, score). + Second tensor is the labels with shape (num_boxes, ). + """ + assert rois.ndim == 3, 'Only support export two stage ' \ + 'model to ONNX ' \ + 'with batch dimension. 
' + + if self.custom_cls_channels: + scores = self.loss_cls.get_activation(cls_score) + else: + scores = F.softmax( + cls_score, dim=-1) if cls_score is not None else None + + assert bbox_pred is not None + bboxes = self.bbox_coder.decode( + rois[..., 1:], bbox_pred, max_shape=img_shape) + + rbboxes = self.fix_coder.decode(bboxes, fix_pred) + + bboxes = bboxes.view(*ratio_pred.size(), 4) + rbboxes = rbboxes.view(*ratio_pred.size(), 5) + + from mmrotate.core import hbb2obb + rbboxes = rbboxes.where( + ratio_pred.unsqueeze(-1) < self.ratio_thr, + hbb2obb(bboxes, self.version)) + rbboxes = rbboxes.squeeze(2) + + # ignore background class + scores = scores[..., :self.num_classes] + + post_params = get_post_processing_params(ctx.cfg) + max_output_boxes_per_class = post_params.max_output_boxes_per_class + iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold) + score_threshold = cfg.get('score_thr', post_params.score_threshold) + pre_top_k = post_params.pre_top_k + keep_top_k = cfg.get('max_per_img', post_params.keep_top_k) + + return multiclass_nms_rotated( + rbboxes, + scores, + max_output_boxes_per_class, + iou_threshold=iou_threshold, + score_threshold=score_threshold, + pre_top_k=pre_top_k, + keep_top_k=keep_top_k) diff --git a/mmdeploy/codebase/mmrotate/models/roi_heads/gv_ratio_roi_head.py b/mmdeploy/codebase/mmrotate/models/roi_heads/gv_ratio_roi_head.py new file mode 100644 index 000000000..6582d3fbd --- /dev/null +++ b/mmdeploy/codebase/mmrotate/models/roi_heads/gv_ratio_roi_head.py @@ -0,0 +1,73 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from mmdeploy.core import FUNCTION_REWRITER + + +@FUNCTION_REWRITER.register_rewriter( + 'mmrotate.models.roi_heads.gv_ratio_roi_head' + '.GVRatioRoIHead.simple_test_bboxes') +def gv_ratio_roi_head__simple_test_bboxes(ctx, + self, + x, + img_metas, + proposals, + rcnn_test_cfg, + rescale=False): + """Test only det bboxes without augmentation. + + Args: + x (tuple[Tensor]): Feature maps of all scale level. + img_metas (list[dict]): Image meta info. + proposals (List[Tensor]): Region proposals. + rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN. + rescale (bool): If True, return boxes in original image space. + Default: False. + + Returns: + tuple[list[Tensor], list[Tensor]]: The first list contains \ + the boxes of the corresponding image in a batch, each \ + tensor has the shape (num_boxes, 6) and last dimension \ + 6 represent (x, y, w, h, theta, score). Each Tensor \ + in the second list is the labels with shape (num_boxes, ). \ + The length of both lists should be equal to batch_size. 
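The `GVBBoxHead` rewrite above selects, per RoI, between the decoded rotated box and a horizontal-to-oriented fallback using `Tensor.where` on `ratio_pred`, so no Python branch ends up in the traced graph. A toy sketch of the same pattern; the threshold and shapes are made-up stand-ins for `self.ratio_thr` and the real head outputs:

    import torch

    num_boxes, ratio_thr = 4, 0.8
    rbboxes = torch.rand(num_boxes, 5)    # decoded rotated boxes
    fallback = torch.rand(num_boxes, 5)   # e.g. hbb2obb(horizontal boxes)
    ratio_pred = torch.rand(num_boxes)

    # Keep the rotated box where the ratio is below threshold, else fallback.
    out = rbboxes.where(ratio_pred.unsqueeze(-1) < ratio_thr, fallback)
    print(out.shape)  # torch.Size([4, 5])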
+ """ + + rois, labels = proposals + batch_index = torch.arange( + rois.shape[0], device=rois.device).float().view(-1, 1, 1).expand( + rois.size(0), rois.size(1), 1) + rois = torch.cat([batch_index, rois[..., :4]], dim=-1) + batch_size = rois.shape[0] + num_proposals_per_img = rois.shape[1] + + # Eliminate the batch dimension + rois = rois.view(-1, 5) + bbox_results = self._bbox_forward(x, rois) + cls_score = bbox_results['cls_score'] + bbox_pred = bbox_results['bbox_pred'] + fix_pred = bbox_results['fix_pred'] + ratio_pred = bbox_results['ratio_pred'] + + # Recover the batch dimension + rois = rois.reshape(batch_size, num_proposals_per_img, rois.size(-1)) + cls_score = cls_score.reshape(batch_size, num_proposals_per_img, + cls_score.size(-1)) + + bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img, + bbox_pred.size(-1)) + fix_pred = fix_pred.reshape(batch_size, num_proposals_per_img, + fix_pred.size(-1)) + ratio_pred = ratio_pred.reshape(batch_size, num_proposals_per_img, + ratio_pred.size(-1)) + det_bboxes, det_labels = self.bbox_head.get_bboxes( + rois, + cls_score, + bbox_pred, + fix_pred, + ratio_pred, + img_metas[0]['img_shape'], + None, + rescale=rescale, + cfg=self.test_cfg) + return det_bboxes, det_labels diff --git a/mmdeploy/codebase/mmrotate/models/oriented_standard_roi_head.py b/mmdeploy/codebase/mmrotate/models/roi_heads/oriented_standard_roi_head.py similarity index 93% rename from mmdeploy/codebase/mmrotate/models/oriented_standard_roi_head.py rename to mmdeploy/codebase/mmrotate/models/roi_heads/oriented_standard_roi_head.py index f977e20d6..119ec11d6 100644 --- a/mmdeploy/codebase/mmrotate/models/oriented_standard_roi_head.py +++ b/mmdeploy/codebase/mmrotate/models/roi_heads/oriented_standard_roi_head.py @@ -5,10 +5,10 @@ from mmdeploy.core import FUNCTION_REWRITER @FUNCTION_REWRITER.register_rewriter( - 'mmrotate.models.roi_heads.oriented_standard_roi_head' - '.OrientedStandardRoIHead.simple_test') -def oriented_standard_roi_head__simple_test(ctx, self, x, proposals, img_metas, - **kwargs): + 'mmrotate.models.roi_heads.rotate_standard_roi_head' + '.RotatedStandardRoIHead.simple_test') +def rotate_standard_roi_head__simple_test(ctx, self, x, proposals, img_metas, + **kwargs): """Rewrite `simple_test` of `StandardRoIHead` for default backend. This function returns detection result as Tensor instead of numpy diff --git a/mmdeploy/codebase/mmrotate/models/roi_heads/roi_extractors.py b/mmdeploy/codebase/mmrotate/models/roi_heads/roi_extractors.py new file mode 100644 index 000000000..f48e0dcf3 --- /dev/null +++ b/mmdeploy/codebase/mmrotate/models/roi_heads/roi_extractors.py @@ -0,0 +1,97 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.ops import RoIAlignRotated +from torch.autograd import Function + +from mmdeploy.core.optimizers import mark +from mmdeploy.core.rewriters import FUNCTION_REWRITER + + +class MultiLevelRotatedRoiAlign(Function): + """Create MMCVMultiLevelRotatedRoiAlign op. + + This class is used to create a MultiLevelRotatedRoiAlign in ONNX for the + TensorRT backend. 
+ """ + + def __init__(self) -> None: + super().__init__() + + @staticmethod + def symbolic(g, *args): + """Symbolic function for creating onnx op.""" + aligned = args[-1] + featmap_strides = args[-2] + finest_scale = args[-3] + roi_scale_factor = args[-4] + sampling_ratio = args[-5] + clockwise = args[-6] + output_size = args[-7] + inputs = args[:len(featmap_strides)] + rois = args[len(featmap_strides)] + return g.op( + 'mmdeploy::MMCVMultiLevelRotatedRoiAlign', + rois, + *inputs, + output_height_i=output_size[1], + output_width_i=output_size[0], + clockwise_i=clockwise, + sampling_ratio_i=sampling_ratio, + roi_scale_factor_f=roi_scale_factor, + finest_scale_i=finest_scale, + featmap_strides_f=featmap_strides, + aligned_i=aligned) + + @staticmethod + def forward(g, *args): + """Run forward.""" + # aligned = args[-1] + featmap_strides = args[-2] + # finest_scale = args[-3] + # roi_scale_factor = args[-4] + # sampling_ratio = args[-5] + output_size = args[-7] + inputs = args[:len(featmap_strides)] + rois = args[len(featmap_strides)] + + num_proposals = rois.shape[0] + channel = inputs[0].shape[1] + + return rois.new_zeros( + (num_proposals, channel, output_size[1], output_size[0])) + + +@FUNCTION_REWRITER.register_rewriter( + 'mmrotate.models.roi_heads.roi_extractors.' + 'rotate_single_level_roi_extractor.RotatedSingleRoIExtractor.forward', + backend='tensorrt') +@mark( + 'rotated_roi_extractor', inputs=['feats', 'rois'], outputs=['bbox_feats']) +def rotated_single_roi_extractor__forward__tensorrt(ctx, + self, + feats, + rois, + roi_scale_factor=None): + """Rewrite `forward` of `RotatedSingleRoIExtractor` for TensorRT backend. + + This function uses MMCVMultiLevelRoiAlign op for TensorRT deployment. + """ + featmap_strides = self.featmap_strides + finest_scale = self.finest_scale + + for roi_layer in self.roi_layers: + assert isinstance(roi_layer, RoIAlignRotated + ), f'{type(roi_layer)} is not supported in TensorRT.' + + roi_layer = self.roi_layers[0] + out_size = roi_layer.output_size + sampling_ratio = roi_layer.sampling_ratio + clockwise = roi_layer.clockwise + aligned = roi_layer.aligned + if roi_scale_factor is None: + roi_scale_factor = 1.0 + + featmap_strides = [float(s) for s in featmap_strides] + return MultiLevelRotatedRoiAlign.apply(*feats, rois, out_size, clockwise, + sampling_ratio, roi_scale_factor, + finest_scale, featmap_strides, + aligned) diff --git a/mmdeploy/codebase/mmrotate/models/roi_heads/roi_trans_roi_head.py b/mmdeploy/codebase/mmrotate/models/roi_heads/roi_trans_roi_head.py new file mode 100644 index 000000000..6fcd5db18 --- /dev/null +++ b/mmdeploy/codebase/mmrotate/models/roi_heads/roi_trans_roi_head.py @@ -0,0 +1,85 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from mmdeploy.core import FUNCTION_REWRITER + + +@FUNCTION_REWRITER.register_rewriter( + 'mmrotate.models.roi_heads.roi_trans_roi_head' + '.RoITransRoIHead.simple_test') +def roi_trans_roi_head__simple_test(ctx, self, x, proposal_list, img_metas, + **kwargs): + """Rewrite `simple_test` of `RoITransRoIHead` for default backend. + + This function returns detection result as Tensor instead of numpy + array. + + Args: + ctx (ContextCaller): The context with additional information. + self: The instance of the original class. + x (tuple[Tensor]): Features from upstream network. Each + has shape (batch_size, c, h, w). + proposals (list(Tensor)): Proposals from rpn head. + Each has shape (num_proposals, 6), last dimension + 6 represent (x, y, w, h, theta, score). 
+ img_metas (list[dict]): Meta information of images. + Returns: + tuple[Tensor, Tensor]: (det_bboxes, det_labels), + `det_bboxes` of shape [N, num_det, 6] and `det_labels` + of shape [N, num_det]. + """ + assert self.with_bbox, 'Bbox head must be implemented.' + + rois, labels = proposal_list + assert rois.shape[0] == 1, ('Only support one input image ' + 'while in exporting to ONNX') + # Remove the scores + rois = rois[..., :-1] + batch_size = rois.shape[0] + num_proposals_per_img = rois.shape[1] + # Eliminate the batch dimension + # Note that first RoIs in RoITransformer are horizontal bounding boxes. + rois = rois.view(-1, 4) + + # Add dummy batch index + rois = torch.cat([rois.new_zeros(rois.shape[0], 1), rois], dim=-1) + + max_shape = img_metas[0]['img_shape'] + ms_scores = [] + rcnn_test_cfg = self.test_cfg + + for i in range(self.num_stages): + bbox_results = self._bbox_forward(i, x, rois) + + # split batch bbox prediction back to each image + cls_score = bbox_results['cls_score'] + bbox_pred = bbox_results['bbox_pred'] + + # Recover the batch dimension + rois = rois.reshape(batch_size, num_proposals_per_img, rois.size(-1)) + cls_score = cls_score.reshape(batch_size, num_proposals_per_img, + cls_score.size(-1)) + bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img, + bbox_pred.size(-1)) + + ms_scores.append(cls_score) + + if i < self.num_stages - 1: + assert self.bbox_head[i].reg_class_agnostic + new_rois = self.bbox_head[i].bbox_coder.decode( + rois[..., 1:], bbox_pred, max_shape=max_shape) + rois = new_rois.reshape(-1, new_rois.shape[-1]) + # Add dummy batch index + rois = torch.cat([rois.new_zeros(rois.shape[0], 1), rois], dim=-1) + + # average scores of each image by stages + cls_score = sum(ms_scores) / float(len(ms_scores)) + bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img, + bbox_pred.size(-1)) + rois = rois.reshape(batch_size, num_proposals_per_img, -1) + + scale_factor = img_metas[0].get('scale_factor', None) + det_bboxes, det_labels = self.bbox_head[-1].get_bboxes( + rois, cls_score, bbox_pred, max_shape, scale_factor, cfg=rcnn_test_cfg) + + return det_bboxes, det_labels diff --git a/mmdeploy/codebase/mmrotate/models/rotated_bbox_head.py b/mmdeploy/codebase/mmrotate/models/roi_heads/rotated_bbox_head.py similarity index 87% rename from mmdeploy/codebase/mmrotate/models/rotated_bbox_head.py rename to mmdeploy/codebase/mmrotate/models/roi_heads/rotated_bbox_head.py index b7cfe91a2..103ebf2f4 100644 --- a/mmdeploy/codebase/mmrotate/models/rotated_bbox_head.py +++ b/mmdeploy/codebase/mmrotate/models/roi_heads/rotated_bbox_head.py @@ -1,4 +1,5 @@ # Copyright (c) OpenMMLab. All rights reserved. 
+import torch import torch.nn.functional as F from mmdeploy.codebase.mmdet import get_post_processing_params @@ -57,8 +58,16 @@ def rotated_bbox_head__get_bboxes(ctx, bboxes = self.bbox_coder.decode( rois[..., 1:], bbox_pred, max_shape=img_shape) + batch_size = scores.shape[0] + device = scores.device # ignore background class scores = scores[..., :self.num_classes] + if not self.reg_class_agnostic: + # only keep boxes with the max scores + max_inds = scores.reshape(-1, self.num_classes).argmax(1, keepdim=True) + bboxes = bboxes.reshape(-1, self.num_classes, 5) + dim0_inds = torch.arange(bboxes.shape[0], device=device).unsqueeze(-1) + bboxes = bboxes[dim0_inds, max_inds].reshape(batch_size, -1, 5) post_params = get_post_processing_params(ctx.cfg) iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold) diff --git a/mmdeploy/codebase/mmseg/deploy/segmentation.py b/mmdeploy/codebase/mmseg/deploy/segmentation.py index 4c672afaa..26d70fc86 100644 --- a/mmdeploy/codebase/mmseg/deploy/segmentation.py +++ b/mmdeploy/codebase/mmseg/deploy/segmentation.py @@ -70,7 +70,11 @@ class Segmentation(BaseTask): """ from .segmentation_model import build_segmentation_model model = build_segmentation_model( - model_files, self.model_cfg, self.deploy_cfg, device=self.device) + model_files, + self.model_cfg, + self.deploy_cfg, + device=self.device, + **kwargs) return model.eval() def build_pytorch_model(self, diff --git a/mmdeploy/codebase/mmseg/deploy/segmentation_model.py b/mmdeploy/codebase/mmseg/deploy/segmentation_model.py index a57cb9a70..78e8d69b6 100644 --- a/mmdeploy/codebase/mmseg/deploy/segmentation_model.py +++ b/mmdeploy/codebase/mmseg/deploy/segmentation_model.py @@ -37,23 +37,25 @@ class End2EndModel(BaseBackendModel): object. """ - def __init__( - self, - backend: Backend, - backend_files: Sequence[str], - device: str, - class_names: Sequence[str], - palette: np.ndarray, - deploy_cfg: Union[str, mmcv.Config] = None, - ): + def __init__(self, + backend: Backend, + backend_files: Sequence[str], + device: str, + class_names: Sequence[str], + palette: np.ndarray, + deploy_cfg: Union[str, mmcv.Config] = None, + **kwargs): super(End2EndModel, self).__init__(deploy_cfg=deploy_cfg) self.CLASSES = class_names self.PALETTE = palette self.deploy_cfg = deploy_cfg self._init_wrapper( - backend=backend, backend_files=backend_files, device=device) + backend=backend, + backend_files=backend_files, + device=device, + **kwargs) - def _init_wrapper(self, backend, backend_files, device): + def _init_wrapper(self, backend, backend_files, device, **kwargs): output_names = self.output_names self.wrapper = BaseBackendModel._build_wrapper( backend=backend, @@ -61,7 +63,8 @@ class End2EndModel(BaseBackendModel): device=device, input_names=[self.input_name], output_names=output_names, - deploy_cfg=self.deploy_cfg) + deploy_cfg=self.deploy_cfg, + **kwargs) def forward(self, img: Sequence[torch.Tensor], img_metas: Sequence[Sequence[dict]], *args, **kwargs): @@ -163,8 +166,7 @@ class SDKEnd2EndModel(End2EndModel): Returns: list: A list contains predictions. 
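The `rotated_bbox_head` hunk above handles class-specific regression at export time: instead of keeping `num_classes` boxes per RoI, it gathers only the box of each RoI's highest-scoring class via argmax plus advanced indexing. The same pattern in isolation, with assumed shapes:

    import torch

    batch_size, num_rois, num_classes = 1, 4, 3
    scores = torch.rand(batch_size, num_rois, num_classes)
    bboxes = torch.rand(batch_size, num_rois, num_classes, 5)

    max_inds = scores.reshape(-1, num_classes).argmax(1, keepdim=True)
    flat = bboxes.reshape(-1, num_classes, 5)
    dim0_inds = torch.arange(flat.shape[0]).unsqueeze(-1)
    picked = flat[dim0_inds, max_inds].reshape(batch_size, -1, 5)
    print(picked.shape)  # torch.Size([1, 4, 5])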
""" - masks = self.wrapper.invoke( - [img[0].contiguous().detach().cpu().numpy()])[0] + masks = self.wrapper.invoke(img[0].contiguous().detach().cpu().numpy()) return masks diff --git a/mmdeploy/codebase/mmseg/models/segmentors/base.py b/mmdeploy/codebase/mmseg/models/segmentors/base.py index 4399c9e26..f920342a8 100644 --- a/mmdeploy/codebase/mmseg/models/segmentors/base.py +++ b/mmdeploy/codebase/mmseg/models/segmentors/base.py @@ -23,12 +23,12 @@ def base_segmentor__forward(ctx, self, img, img_metas=None, **kwargs): torch.Tensor: Output segmentation map pf shape [N, 1, H, W]. """ if img_metas is None: - img_metas = {} - while isinstance(img_metas, list): + img_metas = [{}] + else: + assert len(img_metas) == 1, 'do not support aug_test' img_metas = img_metas[0] - if isinstance(img, list): - img = torch.cat(img, 0) + img = img[0] assert isinstance(img, torch.Tensor) deploy_cfg = ctx.cfg @@ -37,5 +37,5 @@ def base_segmentor__forward(ctx, self, img, img_metas=None, **kwargs): img_shape = img.shape[2:] if not is_dynamic_flag: img_shape = [int(val) for val in img_shape] - img_metas['img_shape'] = img_shape + img_metas[0]['img_shape'] = img_shape return self.simple_test(img, img_metas, **kwargs) diff --git a/mmdeploy/core/rewriters/rewriter_manager.py b/mmdeploy/core/rewriters/rewriter_manager.py index de3acaffd..5e84d723d 100644 --- a/mmdeploy/core/rewriters/rewriter_manager.py +++ b/mmdeploy/core/rewriters/rewriter_manager.py @@ -48,7 +48,11 @@ def patch_model(model: nn.Module, Examples: >>> from mmdeploy.core import patch_model - >>> patched_model = patch_model(model, cfg=deploy_cfg, backend=backend) + >>> from mmdeploy.utils import Backend, IR + >>> deploy_cfg = {} + >>> backend = Backend.DEFAULT.value + >>> ir = IR.ONNX + >>> patched_model = patch_model(model, deploy_cfg, backend, ir) """ return MODULE_REWRITER.patch_model(model, cfg, backend, ir, recursive, **kwargs) diff --git a/mmdeploy/core/rewriters/rewriter_utils.py b/mmdeploy/core/rewriters/rewriter_utils.py index 5d8fd8a83..a7c247ced 100644 --- a/mmdeploy/core/rewriters/rewriter_utils.py +++ b/mmdeploy/core/rewriters/rewriter_utils.py @@ -2,6 +2,7 @@ import inspect import warnings from abc import ABCMeta, abstractmethod +from functools import wraps from typing import Any, Callable, Dict, List, Optional, Tuple, Union import mmdeploy @@ -352,6 +353,13 @@ class ContextCaller: self.func = func self.origin_func = origin_func self.cfg = cfg + # PyTorch will do annotation check on symbolic function + # Update the annotation so ContextCaller can pass the check. + if origin_func is not None: + wraps(origin_func)(self) + else: + self.__annotations__ = getattr(func, '__annotations__', {}) + for k, v in kwargs.items(): setattr(self, k, v) diff --git a/mmdeploy/mmcv/__init__.py b/mmdeploy/mmcv/__init__.py index bb464f0e9..a5896f0c3 100644 --- a/mmdeploy/mmcv/__init__.py +++ b/mmdeploy/mmcv/__init__.py @@ -1,2 +1,3 @@ # Copyright (c) OpenMMLab. All rights reserved. +from .cnn import * # noqa: F401,F403 from .ops import * # noqa: F401,F403 diff --git a/mmdeploy/mmcv/cnn/__init__.py b/mmdeploy/mmcv/cnn/__init__.py new file mode 100644 index 000000000..f9dafc2d9 --- /dev/null +++ b/mmdeploy/mmcv/cnn/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .transformer import (MultiHeadAttentionop, + multiheadattention__forward__ncnn) + +__all__ = ['multiheadattention__forward__ncnn', 'MultiHeadAttentionop'] diff --git a/mmdeploy/mmcv/cnn/transformer.py b/mmdeploy/mmcv/cnn/transformer.py new file mode 100644 index 000000000..58f79657c --- /dev/null +++ b/mmdeploy/mmcv/cnn/transformer.py @@ -0,0 +1,146 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +import torch +from torch import Tensor + +from mmdeploy.core import FUNCTION_REWRITER +from mmdeploy.utils import Backend + + +class MultiHeadAttentionop(torch.autograd.Function): + """Create onnx::MultiHeadAttention op.""" + + @staticmethod + def forward(ctx, q: Tensor, k: Tensor, v: Tensor, q_weight: Tensor, + q_bias: Tensor, k_weight: Tensor, k_bias: Tensor, + v_weight: Tensor, v_bias: Tensor, o_weight: Tensor, + o_bias: Tensor, embed_dims: int, num_heads: int) -> Tensor: + return torch.rand_like(q) + + @staticmethod + def symbolic(g, q: torch._C.Value, k: torch._C.Value, v: torch._C.Value, + q_weight: torch._C.Value, q_bias: torch._C.Value, + k_weight: torch._C.Value, k_bias: torch._C.Value, + v_weight: torch._C.Value, v_bias: torch._C.Value, + o_weight: torch._C.Value, o_bias: torch._C.Value, + embed_dims: int, num_heads: int): + + q_weight.setDebugName('q_weight') + q_bias.setDebugName('q_bias') + + k_weight.setDebugName('k_weight') + k_bias.setDebugName('k_bias') + + v_weight.setDebugName('v_weight') + v_bias.setDebugName('v_bias') + + o_weight.setDebugName('o_weight') + o_bias.setDebugName('o_bias') + + return g.op( + 'mmdeploy::MultiHeadAttention', + q, + k, + v, + q_weight, + q_bias, + k_weight, + k_bias, + v_weight, + v_bias, + o_weight, + o_bias, + embed_dim_i=embed_dims, + num_heads_i=num_heads) + + +@FUNCTION_REWRITER.register_rewriter( + func_name='mmcv.cnn.bricks.transformer.MultiheadAttention.forward', + backend=Backend.NCNN.value) +def multiheadattention__forward__ncnn(ctx, + self, + query, + key=None, + value=None, + identity=None, + query_pos=None, + key_pos=None, + attn_mask=None, + key_padding_mask=None, + **kwargs): + """Rewrite `forward` of MultiheadAttention used in vision_transformer for + ncnn backend. + + Args: + query (Tensor): The input query with shape [num_queries, bs, + embed_dims] if self.batch_first is False, else + [bs, num_queries embed_dims]. + key (Tensor): The key tensor with shape [num_keys, bs, + embed_dims] if self.batch_first is False, else + [bs, num_keys, embed_dims] . + If None, the ``query`` will be used. Defaults to None. + value (Tensor): The value tensor with same shape as `key`. + Same in `nn.MultiheadAttention.forward`. Defaults to None. + If None, the `key` will be used. + identity (Tensor): This tensor, with the same shape as x, + will be used for the identity link. + If None, `x` will be used. Defaults to None. + query_pos (Tensor): The positional encoding for query, with + the same shape as `x`. If not None, it will + be added to `x` before forward function. Defaults to None. + key_pos (Tensor): The positional encoding for `key`, with the + same shape as `key`. Defaults to None. If not None, it will + be added to `key` before forward function. If None, and + `query_pos` has the same shape as `key`, then `query_pos` + will be used for `key_pos`. Defaults to None. + attn_mask (Tensor): ByteTensor mask with shape [num_queries, + num_keys]. Same in `nn.MultiheadAttention.forward`. + Defaults to None. + key_padding_mask (Tensor): ByteTensor with shape [bs, num_keys]. + Defaults to None. 
+ Returns: + Tensor: forwarded results with shape + [bs, num_queries embed_dims]. + """ + if key is None: + key = query + if value is None: + value = key + if identity is None: + identity = query + if key_pos is None: + if query_pos is not None: + # use query_pos if key_pos is not available + if query_pos.shape == key.shape: + key_pos = query_pos + if query_pos is not None: + query = query + query_pos + if key_pos is not None: + key = key + key_pos + + assert query is key and key is value, 'only support query==key==value' + assert self.batch_first, 'only support batch on first dim' + assert attn_mask is None + assert key_padding_mask is None + + # split qkv weight and bias + qkv_weight = self.attn.in_proj_weight.data.reshape(3, -1, self.embed_dims) + + q_weight = qkv_weight[0] + k_weight = qkv_weight[1] + v_weight = qkv_weight[2] + + qkv_bias = self.attn.in_proj_bias.data.reshape(3, self.embed_dims) + q_bias = qkv_bias[0] + k_bias = qkv_bias[1] + v_bias = qkv_bias[2] + + # out weight and bias + o_weight = self.attn.out_proj.weight.data + o_bias = self.attn.out_proj.bias.data + # export to MultiHeadAttention in ncnn + out = MultiHeadAttentionop.apply(query, key, value, q_weight, q_bias, + k_weight, k_bias, v_weight, v_bias, + o_weight, o_bias, self.embed_dims, + self.num_heads) + return identity + self.dropout_layer(self.proj_drop(out)) diff --git a/mmdeploy/mmcv/ops/nms.py b/mmdeploy/mmcv/ops/nms.py index 2071240d9..1ab303bba 100644 --- a/mmdeploy/mmcv/ops/nms.py +++ b/mmdeploy/mmcv/ops/nms.py @@ -147,7 +147,8 @@ class TRTBatchedNMSop(torch.autograd.Function): after_topk: int, iou_threshold: float, score_threshold: float, - background_label_id: int = -1): + background_label_id: int = -1, + return_index: bool = False): """Forward of batched nms. Args: @@ -175,10 +176,13 @@ class TRTBatchedNMSop(torch.autograd.Function): batch_size, num_boxes, num_classes = scores.shape out_boxes = min(num_boxes, after_topk) - return torch.rand(batch_size, out_boxes, - 5).to(scores.device), torch.randint( - 0, num_classes, - (batch_size, out_boxes)).to(scores.device) + ret = (torch.rand(batch_size, out_boxes, 5).to(scores.device), + torch.randint(0, num_classes, + (batch_size, out_boxes)).to(scores.device)) + if return_index: + ret = ret + (torch.randint( + 0, out_boxes, (batch_size, out_boxes)).to(scores.device), ) + return ret @staticmethod def symbolic(g, @@ -189,7 +193,8 @@ class TRTBatchedNMSop(torch.autograd.Function): after_topk: int, iou_threshold: float, score_threshold: float, - background_label_id: int = -1): + background_label_id: int = -1, + return_index: bool = False): """Symbolic function for mmdeploy::TRTBatchedNMS.""" return g.op( 'mmdeploy::TRTBatchedNMS', @@ -203,4 +208,5 @@ class TRTBatchedNMSop(torch.autograd.Function): keep_topk_i=after_topk, is_normalized_i=False, clip_boxes_i=False, - outputs=2) + return_index_i=return_index, + outputs=3 if return_index else 2) diff --git a/mmdeploy/pytorch/functions/__init__.py b/mmdeploy/pytorch/functions/__init__.py index fae3a6698..420194247 100644 --- a/mmdeploy/pytorch/functions/__init__.py +++ b/mmdeploy/pytorch/functions/__init__.py @@ -1,18 +1,24 @@ # Copyright (c) OpenMMLab. All rights reserved. 
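The ncnn MultiheadAttention rewrite above unpacks `nn.MultiheadAttention`'s fused `in_proj_weight`/`in_proj_bias` into separate q/k/v tensors before handing them to the custom op. The layout assumption (q, k and v stacked along dim 0) can be checked directly against PyTorch:

    import torch
    import torch.nn.functional as F

    embed_dims, num_heads = 8, 2
    attn = torch.nn.MultiheadAttention(embed_dims, num_heads, batch_first=True)

    qkv_weight = attn.in_proj_weight.data.reshape(3, -1, embed_dims)
    qkv_bias = attn.in_proj_bias.data.reshape(3, embed_dims)

    x = torch.rand(1, 4, embed_dims)
    # The first third of the fused weight is exactly the q projection.
    q = F.linear(x, qkv_weight[0], qkv_bias[0])
    q_ref = F.linear(x, attn.in_proj_weight[:embed_dims],
                     attn.in_proj_bias[:embed_dims])
    assert torch.allclose(q, q_ref)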
from .atan2 import atan2__default -from .chunk import chunk__ncnn +from .chunk import chunk__ncnn, chunk__torchscript +from .expand import expand__ncnn from .getattribute import tensor__getattribute__ncnn from .group_norm import group_norm__ncnn from .interpolate import interpolate__ncnn, interpolate__tensorrt from .linear import linear__ncnn +from .masked_fill import masked_fill__onnxruntime +from .normalize import normalize__ncnn from .repeat import tensor__repeat__tensorrt from .size import tensor__size__ncnn +from .tensor_setitem import tensor__setitem__default from .topk import topk__dynamic, topk__tensorrt -from .triu import triu +from .triu import triu__default __all__ = [ 'tensor__getattribute__ncnn', 'group_norm__ncnn', 'interpolate__ncnn', 'interpolate__tensorrt', 'linear__ncnn', 'tensor__repeat__tensorrt', 'tensor__size__ncnn', 'topk__dynamic', 'topk__tensorrt', 'chunk__ncnn', - 'triu', 'atan2__default' + 'triu__default', 'atan2__default', 'normalize__ncnn', 'expand__ncnn', + 'chunk__torchscript', 'masked_fill__onnxruntime', + 'tensor__setitem__default' ] diff --git a/mmdeploy/pytorch/functions/chunk.py b/mmdeploy/pytorch/functions/chunk.py index ec0d40b59..98ad1b2ef 100644 --- a/mmdeploy/pytorch/functions/chunk.py +++ b/mmdeploy/pytorch/functions/chunk.py @@ -2,6 +2,7 @@ import torch from mmdeploy.core import FUNCTION_REWRITER +from mmdeploy.utils import IR @FUNCTION_REWRITER.register_rewriter( @@ -31,3 +32,19 @@ def chunk__ncnn(ctx, self, num_chunks: int, dim: int = 0) -> torch.Tensor: ] return output + + +@FUNCTION_REWRITER.register_rewriter( + func_name='torch.Tensor.chunk', ir=IR.TORCHSCRIPT) +def chunk__torchscript(ctx, + self, + num_chunks: int, + dim: int = 0) -> torch.Tensor: + """Rewrite `chunk` for Torchscript. + + Replace chunk op with split op + """ + dim_size = self.shape[dim] + assert dim_size % num_chunks == 0, 'cannot split to equal sizes' + output = self.split(dim_size // num_chunks, dim=dim) + return output diff --git a/mmdeploy/pytorch/functions/expand.py b/mmdeploy/pytorch/functions/expand.py new file mode 100644 index 000000000..0ae90f8a4 --- /dev/null +++ b/mmdeploy/pytorch/functions/expand.py @@ -0,0 +1,16 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from mmdeploy.core import FUNCTION_REWRITER + + +@FUNCTION_REWRITER.register_rewriter( + func_name='torch.Tensor.expand', backend='ncnn') +def expand__ncnn(ctx, self, *sizes) -> torch.Tensor: + """Rewrite `expand` for NCNN backend. 
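`chunk__torchscript` above reroutes `Tensor.chunk` through `Tensor.split`, which the TorchScript path handles more gracefully; the two agree whenever the dimension divides evenly, hence the assert in the rewrite. A quick sanity check of that equivalence:

    import torch

    x = torch.arange(12).reshape(6, 2)
    num_chunks, dim = 3, 0

    by_chunk = x.chunk(num_chunks, dim=dim)
    by_split = x.split(x.shape[dim] // num_chunks, dim=dim)
    assert all(torch.equal(a, b) for a, b in zip(by_chunk, by_split))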
+ + Do not expand on batch dim for tensor with ndim >= 3 + """ + if self.ndim < 3 or sizes[0] not in [1, -1]: + return ctx.origin_func(*sizes) + return self diff --git a/mmdeploy/pytorch/functions/linear.py b/mmdeploy/pytorch/functions/linear.py index d919cc803..7cfb4735a 100644 --- a/mmdeploy/pytorch/functions/linear.py +++ b/mmdeploy/pytorch/functions/linear.py @@ -6,6 +6,27 @@ import torch from mmdeploy.core import FUNCTION_REWRITER +class GemmOp(torch.autograd.Function): + """Create onnx::Gemm op.""" + + @staticmethod + def forward(ctx, input, weight, bias=None): + out = input @ weight.transpose(0, 1) + if bias is not None: + out += bias + return out + + @staticmethod + def symbolic(g, input, weight, bias=None): + input.setDebugName('A') + weight.setDebugName('B') + args = ['Gemm', input, weight] + if bias is not None: + bias.setDebugName('C') + args.append(bias) + return g.op(*args, alpha_f=1.0, beta_f=1.0, transA_i=0, transB_i=1) + + @FUNCTION_REWRITER.register_rewriter( func_name='torch.nn.functional.linear', backend='ncnn') def linear__ncnn( @@ -20,13 +41,12 @@ def linear__ncnn( add extra reshape and transpose to support linear operation of different input shape. """ - origin_func = ctx.origin_func - dim = input.dim() if dim == 2 or dim == 3 and input.shape[0] == 1: - return origin_func(input, weight, bias) + # export nn.linear to Gemm op in onnx + return GemmOp.apply(input, weight, bias) else: out = origin_func(input, weight) @@ -40,12 +60,12 @@ def linear__ncnn( out = out.reshape([batch_size, broad_cast_size, -1, 1]) # add bias - bias = bias.view([1, -1, 1, 1]) - out = out + bias + if bias is not None: + bias = bias.view([1, -1, 1, 1]) + out = out + bias # permute back # the last dim should be -1 to support dynamic shape out = out.reshape(out_shape[:-1] + (-1, )) out = out.transpose(1, dim - 1) - return out diff --git a/mmdeploy/pytorch/functions/masked_fill.py b/mmdeploy/pytorch/functions/masked_fill.py new file mode 100644 index 000000000..5e4f67b45 --- /dev/null +++ b/mmdeploy/pytorch/functions/masked_fill.py @@ -0,0 +1,25 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Union + +import torch +from torch.types import Number + +from mmdeploy.core import FUNCTION_REWRITER +from mmdeploy.utils.constants import Backend + + +@FUNCTION_REWRITER.register_rewriter( + func_name='torch.masked_fill', backend=Backend.ONNXRUNTIME.value) +@FUNCTION_REWRITER.register_rewriter( + func_name='torch.Tensor.masked_fill', backend=Backend.ONNXRUNTIME.value) +def masked_fill__onnxruntime( + ctx, input, mask: torch.Tensor, value: Union[torch.Tensor, + Number]) -> torch.Tensor: + """Rewrite `masked_fill` for onnxruntime backend. + + SATRN model as example, when value is set to `float('-inf')`, the results + of ORT inferencing turns out to be NAN. + """ + if value == float('-inf'): + value = -1e34 # hard coding number + return ctx.origin_func(input, mask, value) diff --git a/mmdeploy/pytorch/functions/normalize.py b/mmdeploy/pytorch/functions/normalize.py new file mode 100644 index 000000000..a676439cd --- /dev/null +++ b/mmdeploy/pytorch/functions/normalize.py @@ -0,0 +1,41 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +import torch + +from mmdeploy.core import FUNCTION_REWRITER + + +@FUNCTION_REWRITER.register_rewriter( + func_name='torch.nn.functional.normalize', backend='ncnn') +def normalize__ncnn(ctx, + input: torch.Tensor, + p: int = 2, + dim: int = 1, + eps: float = 1e-12, + *args, + **kwargs): + """Rewrite `normalize` for ncnn backend. 
+ + Make sure L2 norm on channel dim and be exported to ncnn correctly. + """ + if dim < 0: + dim += input.ndim + assert dim != 0, 'Should not normalize on batch index' + origin_func = ctx.origin_func + assert p == 2, 'only support L2 norm' + assert input.ndim in [3, 4] + assert input.shape[0] == 1, \ + f'only support batch size 1, but given {input.shape[0]}' + if input.ndim == 3: + output = origin_func( + input.transpose(1, dim).unsqueeze(2), p=p, dim=1, + eps=eps).squeeze(2).transpose(1, dim) + else: + # input.ndim == 4: + if dim == 1: + output = origin_func(input, p=p, dim=dim, eps=eps) + else: + output = origin_func( + input.transpose(1, dim), p=p, dim=1, + eps=eps).transpose(1, dim) + return output diff --git a/mmdeploy/pytorch/functions/tensor_setitem.py b/mmdeploy/pytorch/functions/tensor_setitem.py new file mode 100644 index 000000000..70ebda68d --- /dev/null +++ b/mmdeploy/pytorch/functions/tensor_setitem.py @@ -0,0 +1,57 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Sequence + +import torch +from packaging.version import parse + +from mmdeploy.core import FUNCTION_REWRITER + + +@FUNCTION_REWRITER.register_rewriter(func_name='torch.Tensor.__setitem__') +def tensor__setitem__default(ctx, self, key, value): + """Rewrite `setitem` to ease the index put.""" + + # only support torch>=1.9.0 + if parse(torch.__version__) < parse('1.9.0'): + return ctx.origin_func(self, key, value) + + if isinstance(key, slice): + key = (key, ) + + if not isinstance(key, Sequence): + return ctx.origin_func(self, key, value) + + for k in key: + if not isinstance(k, slice) or k.step is not None: + return ctx.origin_func(self, key, value) + + out = value + for i, k in enumerate(key): + if k == slice(None): + continue + + cat_list = [] + + # slice self start + if k.start is not None: + self_slice_start = (slice(None), ) * i + (slice( + 0, k.start), ) + key[i + 1:] + self_start = self[self_slice_start] + cat_list.append(self_start) + + # add value + cat_list.append(out) + + # slice self end + if k.stop is not None: + self_slice_end = (slice(None), ) * i + (slice( + k.stop, None), ) + key[i + 1:] + self_end = self[self_slice_end] + cat_list.append(self_end) + + # concate + out = torch.cat(cat_list, dim=i) + + # self assign + # Note that set item does not return any value + self[...] 
= out diff --git a/mmdeploy/pytorch/functions/triu.py b/mmdeploy/pytorch/functions/triu.py index 0291b58a1..025b2029f 100644 --- a/mmdeploy/pytorch/functions/triu.py +++ b/mmdeploy/pytorch/functions/triu.py @@ -5,17 +5,15 @@ from mmdeploy.core import FUNCTION_REWRITER @FUNCTION_REWRITER.register_rewriter(func_name='torch.triu') -def triu(ctx, - input: torch.Tensor, - diagonal: int = 0, - *args, - **kwargs) -> torch.Tensor: +def triu__default(ctx, + input: torch.Tensor, + diagonal: int = 0, + *args, + **kwargs) -> torch.Tensor: """Rewrite `triu` for exporting model to ONNX.""" assert len(input.shape) >= 2 height, width = input.shape[-2:] - arange = torch.arange(width, device=input.device) - mask = arange.expand(height, width) arange0 = torch.arange(width, device=input.device).unsqueeze(0) arange1 = torch.arange(height, device=input.device).unsqueeze(-1) - mask = arange0 >= arange1 + mask = arange0 >= torch.add(arange1, diagonal) return input * mask diff --git a/mmdeploy/pytorch/ops/__init__.py b/mmdeploy/pytorch/ops/__init__.py index 77da00926..173ef2751 100644 --- a/mmdeploy/pytorch/ops/__init__.py +++ b/mmdeploy/pytorch/ops/__init__.py @@ -10,6 +10,8 @@ from .instance_norm import instance_norm__tensorrt from .layer_norm import layer_norm__ncnn from .linear import linear__ncnn from .lstm import generic_rnn__ncnn +from .pad import _prepare_onnx_paddings__tensorrt +from .roll import roll_default from .squeeze import squeeze__default __all__ = [ @@ -17,5 +19,6 @@ __all__ = [ 'adaptive_avg_pool3d__default', 'grid_sampler__default', 'hardsigmoid__default', 'instance_norm__tensorrt', 'generic_rnn__ncnn', 'squeeze__default', 'adaptive_avg_pool2d__ncnn', 'gelu__ncnn', - 'layer_norm__ncnn', 'linear__ncnn' + 'layer_norm__ncnn', 'linear__ncnn', '_prepare_onnx_paddings__tensorrt', + 'roll_default' ] diff --git a/mmdeploy/pytorch/ops/pad.py b/mmdeploy/pytorch/ops/pad.py new file mode 100644 index 000000000..26b6f4a99 --- /dev/null +++ b/mmdeploy/pytorch/ops/pad.py @@ -0,0 +1,77 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.onnx.symbolic_helper as sym_help +from packaging.version import parse as version_parse + +from mmdeploy.core import FUNCTION_REWRITER + + +# modified from +# https://github.com/pytorch/pytorch/blob/65a37923f9b14c7c9e80535d771ef9e4e92d0502/torch/onnx/symbolic_opset11.py +@FUNCTION_REWRITER.register_rewriter( + func_name='torch.onnx.symbolic_opset11._prepare_onnx_paddings', + backend='tensorrt') +def _prepare_onnx_paddings__tensorrt(ctx, g, input, pad): + """Rewrite `_prepare_onnx_paddings` for TensorRT backend. + + For codes like `x = torch.nn.ZeroPad2d((0, a, 0, b))(x)`, where a and b are + variables of torch.tensor, onnx2tensorrt raises errors like + `INVALID_NODE: Invalid Node - Pad_`. + + Generate paddings in ONNX order based on pad in pytorch. + Args: + input: the input tensor. + pad: the paddings in pytorch. + The order is dim_n_begin, dim_n_end, dim_n-1_begin, dim_n-1_end, + ..., dim_m_begin, dim_m_end, + where m is in range [0, n]. + """ + torch_version = version_parse(torch.__version__) + if torch_version.minor < 10: + return ctx.origin_func(g, input, pad) + # The desired order of paddings is + # dim_0_begin, dim_1_begin, ... , dim_0_end, ..., dim_n_end. + # n is the dimension of input. 
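The `triu__default` rewrite above now honors `diagonal` by comparing two broadcast `arange`s (the old version dropped the offset and built an unused mask). The mask construction can be verified against `torch.triu` directly:

    import torch

    height, width, diagonal = 4, 5, 1
    x = torch.rand(height, width)

    arange0 = torch.arange(width).unsqueeze(0)    # (1, W) column ids
    arange1 = torch.arange(height).unsqueeze(-1)  # (H, 1) row ids
    mask = arange0 >= torch.add(arange1, diagonal)
    assert torch.equal(x * mask, torch.triu(x, diagonal=diagonal))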
+ # Assume zero-dimensions in the beginning, pad the "pad" sequence with + # zeros in the beginning + pad_len = torch.onnx.symbolic_opset9.size( + g, pad, g.op('Constant', value_t=torch.tensor([0]))) + # Set extension = [0] * (dim * 2 - len(pad)) + rank = sym_help._get_tensor_rank(input) + if rank is None: + rank = g.op('Size', g.op('Shape', input)) + else: + rank = g.op('Constant', value_t=torch.tensor(rank, dtype=torch.int64)) + extension = g.op( + 'Sub', + g.op('Mul', rank, + g.op('Constant', value_t=torch.tensor(2, dtype=torch.int64))), + pad_len) + # Concat pad with extension: paddings = [dim_n_begin, dim_n_end, + # dim_n-1_begin, dim_n-1_end, 0, 0, ... ] + # Currently ONNX only supports int64 type for Pad + pad = g.op('Cast', pad, to_i=sym_help.cast_pytorch_to_onnx['Long']) + paddings = g.op( + 'Concat', + pad, + g.op( + 'ConstantOfShape', + extension, + value_t=torch.tensor([0], dtype=torch.int64)), + axis_i=0) + # Reshape and reverse order and collate first beginnings and then ends + # paddings = [[..., 0, dim_n-1_begin, dim_n_begin], + # [..., 0, dim_n-1_end, dim_n_end]] + # Reshape back to 1-D paddings = [..., 0, dim_n - 1_begin, dim_n_begin, + # ..., 0, dim_n - 1_end, dim_n_end] + + # replace original Constant-Transpose-Constant with Slices and Concat. + paddings = torch.onnx.symbolic_opset10.flip(g, paddings, [0]) + begins = sym_help._slice_helper( + g, paddings, axes=[0], starts=[1], ends=[0xffff], steps=[2]) + ends = sym_help._slice_helper( + g, paddings, axes=[0], starts=[0], ends=[0xffff], steps=[2]) + paddings = g.op('Concat', begins, ends, axis_i=0) + padding_c = g.op( + 'Cast', paddings, to_i=sym_help.cast_pytorch_to_onnx['Long']) + return padding_c diff --git a/mmdeploy/pytorch/ops/roll.py b/mmdeploy/pytorch/ops/roll.py new file mode 100644 index 000000000..34b892045 --- /dev/null +++ b/mmdeploy/pytorch/ops/roll.py @@ -0,0 +1,33 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# modified from +# https://github.com/pytorch/pytorch/blob/master/torch/onnx/symbolic_opset9.py +import sys + +from torch.onnx.symbolic_helper import _slice_helper, parse_args + +from mmdeploy.core import SYMBOLIC_REWRITER + + +@parse_args('v', 'is', 'is') +def roll(g, self, shifts, dims): + """Symbolic function for `roll`.""" + assert len(shifts) == len(dims) + + result = self + for i in range(len(shifts)): + shapes = [] + shape = _slice_helper( + g, result, axes=[dims[i]], starts=[-shifts[i]], ends=[sys.maxsize]) + shapes.append(shape) + shape = _slice_helper( + g, result, axes=[dims[i]], starts=[0], ends=[-shifts[i]]) + shapes.append(shape) + result = g.op('Concat', *shapes, axis_i=dims[i]) + + return result + + +@SYMBOLIC_REWRITER.register_symbolic('roll', is_pytorch=True) +def roll_default(ctx, g, self, shifts, dims): + """Support export roll to ONNX with PyTorch version 1.10-.""" + return roll(g, self, shifts, dims) diff --git a/mmdeploy/utils/config_utils.py b/mmdeploy/utils/config_utils.py index e1fe0313d..cd0f79d4f 100644 --- a/mmdeploy/utils/config_utils.py +++ b/mmdeploy/utils/config_utils.py @@ -15,7 +15,7 @@ def load_config(*args) -> List[mmcv.Config]: args (str | Sequence[str]): The path to the config file(s). Returns: - List[mmcv.Config]: The content of config. + List[mmcv.Config | dict]: The content of config. 
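The `roll` symbolic above re-expresses `torch.roll` as two slices plus a concat per rolled dimension, which is all older opsets can represent. The same decomposition in eager tensor ops, for a single positive shift:

    import torch

    x = torch.arange(10)
    shift, dim = 3, 0

    tail = x.narrow(dim, x.shape[dim] - shift, shift)  # last `shift` items
    head = x.narrow(dim, 0, x.shape[dim] - shift)      # everything before
    rolled = torch.cat([tail, head], dim=dim)
    assert torch.equal(rolled, torch.roll(x, shifts=shift, dims=dim))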
""" def _load_config(cfg): diff --git a/mmdeploy/utils/constants.py b/mmdeploy/utils/constants.py index 370da96e4..56ba0859c 100644 --- a/mmdeploy/utils/constants.py +++ b/mmdeploy/utils/constants.py @@ -55,6 +55,7 @@ class Backend(AdvancedEnum): ONNXRUNTIME = 'onnxruntime' PPLNN = 'pplnn' NCNN = 'ncnn' + SNPE = 'snpe' OPENVINO = 'openvino' SDK = 'sdk' TORCHSCRIPT = 'torchscript' diff --git a/mmdeploy/utils/test.py b/mmdeploy/utils/test.py index 1c5e16d1e..571e3481a 100644 --- a/mmdeploy/utils/test.py +++ b/mmdeploy/utils/test.py @@ -502,11 +502,9 @@ def get_backend_outputs(ir_file_path: str, if not (ncnn_apis.is_available() and ncnn_apis.is_custom_ops_available()): return None - work_dir = tempfile.TemporaryDirectory().name - param_path, bin_path = ncnn_apis.get_output_model_file( - ir_file_path, work_dir) - ir_file_name = osp.splitext(osp.split(ir_file_path)[1])[0] - ncnn_apis.from_onnx(ir_file_path, osp.join(work_dir, ir_file_name)) + param_path, bin_path = ncnn_apis.get_output_model_file(ir_file_path) + ncnn_files_prefix = osp.splitext(ir_file_path)[0] + ncnn_apis.from_onnx(ir_file_path, ncnn_files_prefix) backend_files = [param_path, bin_path] backend_feats = flatten_model_inputs device = 'cpu' diff --git a/mmdeploy/utils/timer.py b/mmdeploy/utils/timer.py index 3c7c71716..850c7d763 100644 --- a/mmdeploy/utils/timer.py +++ b/mmdeploy/utils/timer.py @@ -5,6 +5,7 @@ from contextlib import contextmanager from logging import Logger from typing import Optional +import numpy as np import torch from mmdeploy.utils.logging import get_logger @@ -17,12 +18,14 @@ class TimeCounter: # Avoid instantiating every time @classmethod def count_time(cls, + name: str, warmup: int = 1, log_interval: int = 1, with_sync: bool = False): """Proceed time counting. Args: + name (str): Name of this timer. warmup (int): The warm up steps, default 1. log_interval (int): Interval between each log, default 1. with_sync (bool): Whether use cuda synchronize for time counting, @@ -31,28 +34,28 @@ class TimeCounter: def _register(func): assert warmup >= 1 - assert func.__name__ not in cls.names,\ + assert name not in cls.names,\ 'The registered function name cannot be repeated!' 
# When adding on multiple functions, we need to ensure that the # data does not interfere with each other - cls.names[func.__name__] = dict( + cls.names[name] = dict( count=0, - execute_time=0, + execute_time=[], log_interval=log_interval, warmup=warmup, with_sync=with_sync, enable=False) def fun(*args, **kwargs): - count = cls.names[func.__name__]['count'] - execute_time = cls.names[func.__name__]['execute_time'] - log_interval = cls.names[func.__name__]['log_interval'] - warmup = cls.names[func.__name__]['warmup'] - with_sync = cls.names[func.__name__]['with_sync'] - enable = cls.names[func.__name__]['enable'] + count = cls.names[name]['count'] + execute_time = cls.names[name]['execute_time'] + log_interval = cls.names[name]['log_interval'] + warmup = cls.names[name]['warmup'] + with_sync = cls.names[name]['with_sync'] + enable = cls.names[name]['enable'] count += 1 - cls.names[func.__name__]['count'] = count + cls.names[name]['count'] = count if enable: if with_sync and torch.cuda.is_available(): @@ -67,15 +70,14 @@ class TimeCounter: elapsed = time.perf_counter() - start_time if enable and count > warmup: - execute_time += elapsed - cls.names[func.__name__]['execute_time'] = execute_time + execute_time.append(elapsed) if (count - warmup) % log_interval == 0: - times_per_count = 1000 * execute_time / ( - count - warmup) - msg = f'[{func.__name__}]-{count} times per count: '\ + times_per_count = 1000 * float(np.mean(execute_time)) + fps = 1000 / times_per_count + msg = f'[{name}]-{count} times per count: '\ f'{times_per_count:.2f} ms, '\ - f'{1000/times_per_count:.2f} FPS' + f'{fps:.2f} FPS' cls.logger.info(msg) return result @@ -130,3 +132,32 @@ class TimeCounter: else: for name in cls.names: cls.names[name]['enable'] = False + + @classmethod + def print_stats(cls, name: str): + """print statistics results of timer. + + Args: + name (str): The name registered with `count_time`. + """ + from prettytable import PrettyTable + + assert name in cls.names + stats = cls.names[name] + execute_time = stats['execute_time'] + latency_mean = 1000 * float(np.mean(execute_time)) + latency_median = 1000 * float(np.median(execute_time)) + latency_min = 1000 * float(np.min(execute_time)) + latency_max = 1000 * float(np.max(execute_time)) + fps_mean, fps_median = 1000 / latency_mean, 1000 / latency_median + fps_min, fps_max = 1000 / latency_min, 1000 / latency_max + results = PrettyTable() + results.field_names = ['Stats', 'Latency/ms', 'FPS'] + results.add_rows([ + ['Mean', latency_mean, fps_mean], + ['Median', latency_median, fps_median], + ['Min', latency_min, fps_min], + ['Max', latency_max, fps_max], + ]) + results.float_format = '.3' + print(results) diff --git a/mmdeploy/version.py b/mmdeploy/version.py index 75268d665..db8cd827e 100644 --- a/mmdeploy/version.py +++ b/mmdeploy/version.py @@ -1,7 +1,7 @@ # Copyright (c) OpenMMLab. All rights reserved. 
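With the timer changes above, registration is keyed by an explicit `name` and per-call latencies are kept in a list, which is what the new `print_stats` aggregates (via `prettytable`). A plausible usage sketch; enabling is done here by flipping the `enable` flag the hunk shows, standing in for the class's own enable helpers, which are not part of this hunk:

    import time

    from mmdeploy.utils.timer import TimeCounter


    @TimeCounter.count_time('my_inference', warmup=2, log_interval=50)
    def my_inference():
        time.sleep(0.001)  # stand-in for a backend call


    TimeCounter.names['my_inference']['enable'] = True
    for _ in range(100):
        my_inference()
    TimeCounter.print_stats('my_inference')  # latency/FPS summary table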
diff --git a/mmdeploy/utils/timer.py b/mmdeploy/utils/timer.py
index 3c7c71716..850c7d763 100644
--- a/mmdeploy/utils/timer.py
+++ b/mmdeploy/utils/timer.py
@@ -5,6 +5,7 @@ from contextlib import contextmanager
 from logging import Logger
 from typing import Optional

+import numpy as np
 import torch

 from mmdeploy.utils.logging import get_logger
@@ -17,12 +18,14 @@ class TimeCounter:
     # Avoid instantiating every time
     @classmethod
     def count_time(cls,
+                   name: str,
                    warmup: int = 1,
                    log_interval: int = 1,
                    with_sync: bool = False):
         """Proceed time counting.

         Args:
+            name (str): Name of this timer.
             warmup (int): The warm up steps, default 1.
             log_interval (int): Interval between each log, default 1.
             with_sync (bool): Whether use cuda synchronize for time counting,
@@ -31,28 +34,28 @@ class TimeCounter:

         def _register(func):
             assert warmup >= 1
-            assert func.__name__ not in cls.names,\
+            assert name not in cls.names,\
                 'The registered function name cannot be repeated!'

             # When adding on multiple functions, we need to ensure that the
             # data does not interfere with each other
-            cls.names[func.__name__] = dict(
+            cls.names[name] = dict(
                 count=0,
-                execute_time=0,
+                execute_time=[],
                 log_interval=log_interval,
                 warmup=warmup,
                 with_sync=with_sync,
                 enable=False)

             def fun(*args, **kwargs):
-                count = cls.names[func.__name__]['count']
-                execute_time = cls.names[func.__name__]['execute_time']
-                log_interval = cls.names[func.__name__]['log_interval']
-                warmup = cls.names[func.__name__]['warmup']
-                with_sync = cls.names[func.__name__]['with_sync']
-                enable = cls.names[func.__name__]['enable']
+                count = cls.names[name]['count']
+                execute_time = cls.names[name]['execute_time']
+                log_interval = cls.names[name]['log_interval']
+                warmup = cls.names[name]['warmup']
+                with_sync = cls.names[name]['with_sync']
+                enable = cls.names[name]['enable']
                 count += 1
-                cls.names[func.__name__]['count'] = count
+                cls.names[name]['count'] = count

                 if enable:
                     if with_sync and torch.cuda.is_available():
@@ -67,15 +70,14 @@ class TimeCounter:
                 elapsed = time.perf_counter() - start_time

                 if enable and count > warmup:
-                    execute_time += elapsed
-                    cls.names[func.__name__]['execute_time'] = execute_time
+                    execute_time.append(elapsed)

                     if (count - warmup) % log_interval == 0:
-                        times_per_count = 1000 * execute_time / (
-                            count - warmup)
-                        msg = f'[{func.__name__}]-{count} times per count: '\
+                        times_per_count = 1000 * float(np.mean(execute_time))
+                        fps = 1000 / times_per_count
+                        msg = f'[{name}]-{count} times per count: '\
                             f'{times_per_count:.2f} ms, '\
-                            f'{1000/times_per_count:.2f} FPS'
+                            f'{fps:.2f} FPS'
                         cls.logger.info(msg)

                 return result
@@ -130,3 +132,32 @@ class TimeCounter:
         else:
             for name in cls.names:
                 cls.names[name]['enable'] = False
+
+    @classmethod
+    def print_stats(cls, name: str):
+        """print statistics results of timer.
+
+        Args:
+            name (str): The name registered with `count_time`.
+        """
+        from prettytable import PrettyTable
+
+        assert name in cls.names
+        stats = cls.names[name]
+        execute_time = stats['execute_time']
+        latency_mean = 1000 * float(np.mean(execute_time))
+        latency_median = 1000 * float(np.median(execute_time))
+        latency_min = 1000 * float(np.min(execute_time))
+        latency_max = 1000 * float(np.max(execute_time))
+        fps_mean, fps_median = 1000 / latency_mean, 1000 / latency_median
+        fps_min, fps_max = 1000 / latency_min, 1000 / latency_max
+        results = PrettyTable()
+        results.field_names = ['Stats', 'Latency/ms', 'FPS']
+        results.add_rows([
+            ['Mean', latency_mean, fps_mean],
+            ['Median', latency_median, fps_median],
+            ['Min', latency_min, fps_min],
+            ['Max', latency_max, fps_max],
+        ])
+        results.float_format = '.3'
+        print(results)
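The substantive change in timer.py is that `execute_time` now holds a list of per-call latencies rather than a running sum, which is what makes the mean/median/min/max in `print_stats` computable. A self-contained sketch of that bookkeeping pattern (illustrative only, not the TimeCounter API itself):

    # Per-call latency list, as introduced above (names here are illustrative).
    import time

    import numpy as np

    execute_time = []     # one entry per timed call, as in cls.names[name]
    warmup, count = 1, 0

    def timed_call():
        time.sleep(0.01)  # stand-in for the decorated function

    for _ in range(10):
        count += 1
        start = time.perf_counter()
        timed_call()
        elapsed = time.perf_counter() - start
        if count > warmup:  # warm-up calls are excluded from the stats
            execute_time.append(elapsed)

    # Unlike a running sum, a list also yields median/min/max:
    latency = 1000 * np.asarray(execute_time)
    print(f'mean {latency.mean():.2f} ms, median {np.median(latency):.2f} ms, '
          f'min {latency.min():.2f} ms, max {latency.max():.2f} ms')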
diff --git a/mmdeploy/version.py b/mmdeploy/version.py
index 75268d665..db8cd827e 100644
--- a/mmdeploy/version.py
+++ b/mmdeploy/version.py
@@ -1,7 +1,7 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 from typing import Tuple

-__version__ = '0.5.0'
+__version__ = '0.7.0'
 short_version = __version__

diff --git a/requirements/codebases.txt b/requirements/codebases.txt
new file mode 100644
index 000000000..a0dfdace6
--- /dev/null
+++ b/requirements/codebases.txt
@@ -0,0 +1,7 @@
+mmcls>=0.21.0,<=0.22.1
+mmdet>=2.19.0,<=2.20.0
+mmedit
+mmocr>=0.3.0,<=0.4.1
+mmpose>=0.24.0,<=0.25.1
+mmrazor>=0.3.0
+mmsegmentation
diff --git a/requirements/docs.txt b/requirements/docs.txt
index 0464fbec1..bdffca961 100644
--- a/requirements/docs.txt
+++ b/requirements/docs.txt
@@ -1,9 +1,11 @@
 docutils==0.16.0
 m2r==0.2.1
+markdown>=3.4.0
 mistune==0.8.4
 myst-parser
 -e git+https://github.com/open-mmlab/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme
 recommonmark
 sphinx==4.0.2
 sphinx-copybutton
-sphinx_markdown_tables
+sphinx_markdown_tables>=0.0.16
+sphinxcontrib-mermaid
diff --git a/requirements/optional.txt b/requirements/optional.txt
index 5f5251130..9a077ff56 100644
--- a/requirements/optional.txt
+++ b/requirements/optional.txt
@@ -1,4 +1,4 @@
-mmcls>=0.21.0,<=0.22.1
+mmcls>=0.21.0,<=0.23.0
 mmdet>=2.19.0,<=2.20.0
 mmedit
 mmocr>=0.3.0,<=0.4.1
diff --git a/requirements/readthedocs.txt b/requirements/readthedocs.txt
index 3f71e5da8..593ae5077 100644
--- a/requirements/readthedocs.txt
+++ b/requirements/readthedocs.txt
@@ -2,4 +2,5 @@ h5py
 mmcv
 onnx>=1.8.0
 opencv-python==4.5.4.60
+sphinxcontrib-mermaid
 torch
diff --git a/requirements/runtime.txt b/requirements/runtime.txt
index 341865f46..706ce39a8 100644
--- a/requirements/runtime.txt
+++ b/requirements/runtime.txt
@@ -1,3 +1,4 @@
+grpcio
 h5py
 matplotlib
 multiprocess
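The pin changes above (for example, the mmcls upper bound in optional.txt relaxed from 0.22.1 to 0.23.0) can be sanity-checked with the `packaging` library; a small sketch:

    # Sketch: which mmcls versions satisfy the relaxed optional.txt pin.
    from packaging.specifiers import SpecifierSet

    spec = SpecifierSet('>=0.21.0,<=0.23.0')
    assert '0.23.0' in spec       # newly allowed
    assert '0.24.0' not in spec   # still excluded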
zZ5X}H6~XiCJ>z@fT_=ZVd8}!V%SAd+1HS+681_PVSj zB@>4P50vyiyRYM@N-hgxOj%x!^tKqGwv!*|f1-|N3U7mA7j`q%(Nj_lew)#j1+shR z$1jr$H`xaD|5fUEO%EKG?9vvovuDv|1Zi{Z%DsB7!R6d(t{*49$<>Bm^7(-tnA0zA zX+;B!$CDH`HW^RzOghQKa&_Q!mna{gE{rrYru1IRnemIEbjZsTv-_!YNd*n=oK@M0 z^gS=PqLPT1u45jX!Xni{1bT{^vFH`jl(%ZX#7ER^^#|eRLVDm*K7dTihs4y3nH>M; z&LK{Hduz0^yLzgY$;oDqzmRoF+KTJObX@Sg5I`{XJKw0z29AUio!huW>6%A@qRHUe zPhq5d0SH%V18_5|*gKhBg>tCIj;R%dT_C$l%#1(S;(ee{^_i5)nXR_BCPYp{fIMnl z%Nmxkqse?{8&g_SyF>3}i0OE=tD}$Bw$!BzqFDe@*{hoqV?DhAvZ0k<14e;D}Vs{?O5mJ?kEVpQtTC0)X1HUN;1?>Nsuf8s@d zboN+xG`H{(Q&yC$HS{#0_B^B+^i-DR=53CM<{ylT3ofXv2oBq!hT$TTwv|o6^|_5b zqm`hIxwKh-PA_V;zY+54z@|;~`-|!xECW71D0_8)M0aIbqRh;HL6`soaTZ z9{(N>Ah!&9Ekv|8l<;QdiwACf7Q9IwLV{k?yq>tZcriAiV?%C926OZoEaKfxKAqJ9 zyL2=cCd1bhcNg6{#C&OrsDbt04w(Bjhixsq_WDk!#9S2j8;Mr*lM)-|h6r+SshI#b zOt|+tCFJgv{`7u%7ZV;A7fc&w)DvY!{tlPi6v=$yE)UR+J_Z1WKSU12!~NtT0%(b+ z*A@Qh@_SwMPQrfSvZc&V|2e;Ym*GYI-gmlYF>_?LO@AI^*``nL)z2q>BicXza>Ks@ zR26H(&?xWXk&c1#0}d}30Dc!bU)j`pH|d><`Mwj;v>sqQ|0}`c#8$P>bWVXD$jAO_ z3o)hE*G$7T*#K;^i7>9QHxvFgrwt|}{#RI@WT<%DcS?exa#SGf^%LneR4=+r z*uRmT?D}t6a>sCt*{R>OhBIoo^6#VaqtZ|-YyK7kb&@C7vkm+XJ+_K{twhpC_jX^H znEJFR!HA8)3OT`OU!-Q z_Td&R;C3^bJW`M6KamR<;Ul$6xpftxGR1TJ?bYS-Y_oM~M>F#nF84L3-JEMs{C9qi z?AGbbzD>jd>r*`RS_#%u-s+sA_f6+w`xf(VosRz;RI78#YDJ9l^z-R~bn4V^r7fn@ z-nKT>2nE4Tq@lb3m#8YXNf^U{+Xf(lWB8OW3lzkJu4!;+ za;^;+fqV3C_EE4uTyxgE}F z`jp`iWIXJ1?@b5Z1E#hiI4?&WWL}yxUB&=ic-oOT9(tk$vh#sKv2m1ZKW#|x08?*jW zVDJU!!k>baMu@Noh@#J*fBoU}CZapS!UAER|4Wh_qdJPcVZk3D`Y&>K4m=lw?Ilrv zykW2VbcLJjs)wb*s`5WFfP*qOYGGagyL;~c5K%*G!8aLgn7e5>NhqfOLuuFqjHT>= zGXNn&=)%ya?K4>O0|v@#k`GSri}L zYHn4vckYF0`aQ%7M~Y2`dH>hkOA`rmn$Bg}SS>Ca(rtNx0Kuk57&LVhO9JH8G!M>H;%rNNIl{h$YSGh5nC zMx9F@e?KVK0b4y*;jE}>Kb+=pdBC^A-Z%JTZ1<^ z-!D?J-L23O)x?Vg$F1+Egg!1BhK)kO_ZBN*GW+HH5xvQEi66_A?3y7D=(^D8u=9Qh z{hct)>|a3f*47Wr*tq}**+Ldm3RUg47+*?1)O{W14>gd3J4`0-`O}BgntP2? 
zkHG!NU-_%jRa^y|kltj3k*M1_l(=*g61{u$c=GS%#ad4L>8l()kvY8a&v1>*OMJRC zPgsVhP9tZt^GhGi#PXK`@*(mlE|OojFYj=(pX|0NEL$ZhjFSS0ndP-o(YbZ$ zM1Ir#nl|f)PM-nQv5MIeUEXXD5_5enlG)#mmP#Zz!nEcUQs^HkGlCN|NZdJJ1vYAd z-zdXYzF2S;=@Q+iJ366?W+0K!73o(^Sv^H0!Wlu{tfD$I3WMJIa~dRGoNn*9^i#wY z1NY28jL;DLl;;LY=xJ)NP@I9%LR|G>%$|CZk&#cH4OToJ=bWM@UWY=VWt9(hfrg2w zkGIn|vj|v1P#dXuj&)0`rNKUtJE-O6LJ)_V2X0hqqLT8_I##ob&fUkUAd*zz1}<%W zDfm}4g3}oUxJUQ5hR7#Cmk;Eh{tf<1@ySrl!+#U`>x+B;6mMIa{OCR{BPrtG58>m@ zA700YFl?Aonb@ike`(&BQ}-Eb)DzL7fkc7gd9s4pxQFfq{W`EpN<0d?l~b%s1L;q6 z-PX9+;PMPHDrgAA_a%hKvzW+y^50!og>dRHY&<4!?233uGQ-QAa_H8N-;$G^$ZG2S z3gZ8HiDwhxPq0b+9L=Nl0rWPZYW#et+aWDEp@v*~w6~iPtgPV%99#a5gUn8}2aQ0{ z#ly*oZKMV6Tvc#g8X%>jV<joF;cS|kDRbu5H_dnAAYzL zpUV@Ccoi;D&3j`R9GHNwnIC-iGMw|H^cD^F1jm2R0*KO#YNOc8{WiSL%$I!m=OlL3C78I)8Fh{QO?yU z;s6Hb-I}m#(Q#I?t2!0va^R0k6hS1Anq2Pj#t2RI;A|<_z!||*aWi#q9|7c%e*{I2 znu8c$%p$LadWD>hA8*O=P(!?l>mzLx^I)?6zGSY15QHh#%_7HynFlQ7blXHlacXh^ zvSud;kB!b3>HKO)F^-SB_A!}lt^du0FRZO>$|-8(s1U;P$9GN|0?Cl8EQ5!>bA zlfvg%>XGyL){h?=D`-u&l$GD$@|op>1-X->QHU*$MdkQS1!!xH zUt5Xj-aUAH7uDqOQ ze;@L5UV-KGxpk@{Atf6ia_T0(&plf3#E@;2!@p?qrgnmVS*hnn(OSdrEgY}N%v0-ez*Bz&+?XC<^;yv&$wqn|D{<=I3>L-J{9(!Bg`14;E0nw5Sn*{!I ztj#aMGTMIcXwLhfoWBlf!!~X_0k4^fUh&>GXH10n=)rk7z!sS&iXFDJnoV^Nel=x> zWh8SYcRIg1jiFm#a#);Z4vvbQ<{igM;3%-3p-m7GpnX6d|8bA?<`^OZ%>^32akynl zlTey-dZ+Ypk!uJoMFf2l#;6)TGOFKYBO^y}UvGsycr$oq%{)`8QZvoYORr9KR}#Xd z1^+#(OiOu01HxyDKz#JSksP9SOC@Mx$4jaLrX5Pd-g>dUjGKzSOlT^NT znAjL84wEEgWn)(TGpXrs`Rol!c!nQr4HSc{bXX* z`I@#0&u}!tG+zTt<|N^J>iZ~u@pg4}{kr_*`x^{E4S)LeJE>s{!#Xq1!LNu@9yu-h zS_>aWayyC8)>*Qe!%R>6khk^w}GIdTPqYd_# zd_vx}dOS`^UwWK!*{D9IJ^pk@SU*b-j{JfFC}h7_a}&$T3`cHU=j{$3HbawynPzd`&8UO`i5J^P5>}B1VBPQ*YCCkY< zIp$5>;&w4N2^zkaW2GhtNjB&v zovIrQtlaja3{zgW^DkZTbAv;V;>cCf4P;(ff&`k-G(QOvlCE|IOIt?fxV87HjWtxe zbueLl`pe!6=x_()!n-FK$v~k-SwOqMU-){AE;%$~QhUpyplwVLeAq-Sd}Kb{2P$vO zlOQ>mM3$g3s3WPTczvgNu?vz1%oBf#tzJUjNfNX0g5N9DW$1FVV2hmyT^fKk9(MmN zCt+)`NFuBrMBR`TBqYM{eip6_VtE?wTD<%e+Tr)V(kL&O7yN7eKXuSZYNP}WfaZVo zQf&PbstoOr{7;vH?d5;E|98cd2Ec-yg)P8htGcek|6%igO0EB;uLZm-ykc>%E+Ih- zy}0?P&{CI6JA@f!%)P6&-g$};^rC)sb!GVe3)2q9XmRGvN)zkX$?e++)DUTm4!fRqh|0_^K1~6j225>6$rbtIYT`OZiozz}Q zZ00Fr&N7#&?zr+^j_f7%eciR2Bs%U!4Q9=h4K{OgJ!qlaps&+_sF~NC6d3y%r4h_8 zefZNx!9%?b__$0h!v=(>D&cL)%ERIF+9q>2u-)>=cG?46!#|bsmSkl`Um|b5t?S5Q zQzF;xfV7|6V<7TKD8MC5>}NZ+#(w7y(Z~`V?%!2-TrHta-&XKZf5dbP+!}Jhf}RF{ ze<^Fg?1F$0FmtBcxUFSKY>{LEqqXhIicAfR46{Z;MHuQf{y63|8%Vn`@6kecqB*Jj)yKG^1P53HjF{B)4M$bjF{9(zd<|?o@%ycj1An0G` zSVjSJs@pi5K!8&XQRG)m4=)@M&t>1#a0D8Dp~T55+H?|g@|7Y&#Gl0->!j2a0F28^ zH(y=&C%NszZ@|FyE*Aun50Eb#>7Z(1O3fL@9Wwz0u%`=Z8u`S|d95lQv{xmxq?@W? 
z;7`l4FtnMuPrqyJi9z{vW80m&&1<%ceGko1KS#X4{v4EF*Gh<*d)&A!;C+n2eG$NT z&o|514Toy#c~ZWH2Lu=r$0yg-@PJFEg!dIBG5EBK$_F*JS%d!sOAk5u*yj2~XbYVCm~!!DtkK_$1KiNd;@xbUGm!_c~*=HS>8I6 zTrW=_W|Pa7%xaS77pNuWpbV$x4FoA2l!2J}D|{xdGxGSwfJXT5A7E~Uiy zi~{i+A{2Pv&0su~PRe9PKI)wWvY(tw=HAjd5$}*v@Sfq$Ss^2IUfqxpIRUB02EUbH zJD3yLzy+WC>$m4t)QDfB5iVsplCl6*)_)LbD<~(pxV}ll>5s2xddP%Z_ijx= zOA`)1>6rhLc9)KHMZBn6q39w#@wr`FP$FgU1I%WSi>d*49lo{BTs97V-OaQ{mlj6gtUlBajd z67ozIksa6foZi8KaUSJ@(;aEZ|5$5B$s?7GoW=1$o~tN}(qj$m`4r%C+EfPhFz--+ zxkpLmJ=*kf;aAyLaJSD!CayHzU$w0XIbY}Bf_JhvN(b96PBU8$h9lhgQ8wKr1#St1 zgq}u5FY1UGDQdsDIsl}UD4{kVrgGZ=Q*mN{@bf9wYMgy%v>F}x=nO4t#jFNF$3ZHV z4ZJ#k_CS(>3HGjYa={FJUL2I;tq6yteeJXl#=b08=G~CC&wwzH@P4VVXxii^H72QO)g8<}75Ucu56_b)8L-OF zSzgC7Z`zj$A#34sJ-x3HK-#Z4Y!~?Xw)v#R^Be_RLT>;OVM#ilvq^@OsBVV z>`gIEV^{$B1O=}nr8hToCt89B3j?*n4|jMlGebWqm`Zvjv1ybAW#MM9mZuR?WeB}z zpt+NHnw9uk5Oia=H|I-@sr_3*IO(bt#ZNgG@C9j<9P|1<#HeHsj?TZj z&2e36%nH6&ulH{1^=mCYvhO`sgM;%6bkpDYRl@rQqaAzCnQ%ICdwde*fs4VE-Z6rR=Q05Of=Sf)!|U*_u}F) zQwljm7<}kwV3q??Gwwku?|9nOYV7_?Bk;pMP(*pE`#q<@>M{Ly#^Fz9R0U6Ad>F2- z3`1uWPglIwC5gS)6KKB=5F)%H5U&3`aA#$#bVSFDTwCVL9^6-nszj2kiuC%S-sTF8 z0mtjFSH%c4sh2SCS^zKWJMIe(G!$ z*whOvgIg&aPe5qSkaiM#C!Q23rNYH8h@Z3xhJ(s)Lk)~g*^By{H7icedG!*-VPJW7 z8`op>oqas00E$4;l)e zdjlLEXet|Csxb|pDaK&nLqTj7?>_rPt29)zd=}a`A*HKKU~;VgHd0_>TkSE9!{e{xb`wt$CYy|5TP9jv{g`WEB{a!}g^pfP9+=j{~{82`)OKN|bk|eN*0x%DdIAO47VM1Nk@IAzGMpXk$OXR13YPt={p+_->3FaEyq&*N zaG4X=uiRZYvA$oi2F*D?YYKWIO?2g*{wB9?jg>{2Q|urf^_dn#f5Ok@`WH$TVPMMe zW#uP+6vM=p7I}^g08J60pwX$Ux6@4f@UwiPz;*q$+a^K8xRaf2B`wX1_#Nhfi(^0$l}S!v|eA@{zAhHh;h1 z;(uFs^&^OU{Lw5nC>N$!tgOaM=}Cl|^Uq@ab~53S|K%Q zt@t%Fb@Vn8kJvF>*bQ4Ak)oov>0WUi?7JS=vq6J|5NfRTI$eHcp{d7@wfJ{I{R=RP=79N7jsKZ0G$7HLbg=_tn@D1ww=l!FG6l^#G2tf+nox4UkTD z@fj9Ohq}E*6$6Es(AZnfBlcQd0jkz18U$X=h7&x|JpfXw6PLpkndy}If_Hqub^hs~ z9BYtSW={T*_O}G``9u&s& z`2kZqIsyd+0m?}mn%=yQOqBZ~TieA~-Q6z-Pi8=@^k*f%wHx^9N%1m10h_d0+%p!Y zfe67LfdXa6Ha#cH*T@JVrdg(L^jZ|7a*_%xU87|uR|fzE<2|+2*{=PZPs0bekQ*!< z)iEKa8iA>49ugzh;iS+i6Y*jSfGL`fuw}dUkt^7NQk7ERpNWYVG0$T@|1u495asGa z&saM2T5EoGUyDs^h9jN1$;kZpm;8CxwT{Hmt9P1JPVb3ttP2z8^1gpgo@_hm8m@Kj_A)t0E6I9ZAO zo2y^`*M5Y-7%{>sEbpBju%!8X~a&F*QFL96sAqD{g6DKWvsFEl)BldI0)d zPR`1q^H>HE$N`ZkqSG@t-iT;$v5Z%I^AmV^XBD$75^ivW453gyL=qdEMKJbjv{+Mzdhec2SXqELz1rrH+#$9HtRTF=&z>2i?$POOPeYjL zCg)~w-++Eth=?0a3;IB{%PFt~ivm;%tn7(p#x?EgK{bs4?{ z|3rR=wIY!OjBj2EPIm=j|5vPGi4g(;|GkbR@L}LIqA)BgWo%$wdxzgXh(e*FOx0h@ z{Wb}l^mPT_T@-+@)dby+ZY&=x=Q}>(D4z)G9yizf{~ZoTfn)$f!JUk4Addee_I9jJ zcUK;QgZ=-(0GspwUs^p|gO55akuJf(@&8}_m9X;5d4UbXsrQwzqTxejvfONg78=pb zd*UXy9_~o+=H{GwvhW`Ncg~ylrl`#wywXPI-k!>f-LbB zU;D- z>vgqRq@Z)1aKTm42o26+E zV=9|p9~&8po(7>!A`Ou!)unNuN=q5tORBbrYqsj7YP)LoxH-q(Tmq4b*!N)kVI zDR(Fe=rme34r#v#nw74{RXSQQuyctu7h~Y-ZVp11pUl zH=tL1cK)QjKg=^LQjgWA0?$q>hA~#;&?lM5xK(1#CM@)0r6g>oAhjpsvXE0U?0qWs zA%lDI&rOM_$uQIPh@t6`&&Ixt0KVk}|7olApg02;MT>t-Y_pNO7qy(#1?37?)v0UO z9(iA=2U?Wjz8Ltn{+{je)sO5j1wtB*!{MCi-5KTC%sTi}IOsnYSCV9ZGE=f0@nlaP;}{vEB8qZ`m{(?MBs2hgnf7R4gsz zhsd*y)nU9O28{&cP&4rrMulM$njFdQ}=GJ8UhxFI^%#hj>w_9 za-CNm;V24Z#38XEKuyrNb(t9LDacSlTBPG~a!kk-1(+rR+&QV3XU)As3~y_BPt9P zNJ@E5{_0x+zTaCyvbo56A#p4OVj&j5cY^id0tBsU9oQ)N3aRM>P!goAmJ^GB0sjJi zvF3btuZ7YTyt5ENN#a1KC4}q^ zc8j$CqjcX*s}q3?Ixf^~H$SI4evT3IF%=uf`7e^b;s`#`M1`~mFHvs|%f>9#hI3fjv- z^{zvSxTmex#=Fb8(4-K+_w}hht-DzVqLUm-q4x6n@aS-n+$sXqIe0oB_7Gi!7i4iy z%M$l?b3p!|I&46rS+$_hY9TArcYZa{euWx18?&Zpw_Gqee>}?ckb_Y`iCV$OBb$0l z|B=90_E0WrJ(d9pi{Il1ZOcMkM1fD?7Oi;C`YkU?w?%EWp%=Q`V8yEe z5C;Vze#de9VCM+ia_TVFQ?dPfW31$htwY$Ha@T=d_-d!$y>rmVpC z0c+obX%|>iMpqOXY5j=z2$U#@1gpGDSm$0)h&arTKfk@b6p5JB<`L)7Nd#t$^p1z@ zq;;ipv9%`lxyW1QKWH`uDP1UGX3*^ 
z3iaC37rI-|Z&Ex9by51xs)4Z#sqvN4p2jb}U4##m_nn8Q{LFu`$lz&hI>xP#pZ8^K z+Sna=Gi7XiG%KN&cRkGC(3;gGI>36>DE8;7=NI(FW5kUV;b-d&Us&t?_LFPO{+7%O zp0nM;Ew37p?4~ubv95sjlT4%b>4UkBY15LSWZMRd(-%iM8>h~woSub0YVrx5)qAtL zyoxLQf{D%DLIHFp7g;Y#q95s$mCg~LKmPWO<1n{e`O7~cJrnnzl4`R(eMU9$UzYiI zr3I^6cbFbqj)h98nNBhgoYYy`UC9LX@P6F4EKt)hqR0fEo-QsF~Rq{FGlzpr&N zvCZgW^n{;XlH!>_vSHR2{gb6>0Ah%_@*JyMC$3|X~HZ>r{43guSdi0xg zg;8H_bhFX^h2D$FHOBtkCNtlJn&Vr3c9EpE$kl%n+9@6Q`ct{b*eD1luonCETj<

$CrJ7T~6rxwB>5xF)*bE$Jx>)2f!~80a!HvHZ<5=FUlzJ8u~=smc4If3d^{(N!vz zRO=7AZPspmY8P$61(;Xp?rI+{AB&DZ$S8j^&wG<78LDH)FvqY{{-)91u0|dFe^K=w z&~QHAAFv=1B}5m5L|cR)h+Q?JM2ScAUZN#Bt9Q|Bbb>{qt<`&r-h1yY%C6NDEuzJ{ z^8Nka_q@kBYs@qAnS1ZtGCR-Ag-LMGhmVC7i_vE+C^i-r5*G{0ACrp$3vvfT0SHsL zt+{RTujaPND*!9nWU*Fnn?!>E(^Pyg=F-Z;0ilLz`Ah!5v$>bk?;wsGgI4eg;29k+ ziqB$zVq-c{_H9?3eV`*#Z+q$PBMo3czmh@jYE<~h-j>_AAh!Q{J%QZ8jEj{1*M|jD z!u5yA-L~+@Q1b5+GbpCwzfTNhkT}>a{r#8{rf2-^$p2yd_lc>%;Qg=q=`G%?f6#s5 zx5&3h|38fXLxNE({*Av8mWLA5as3T_iD6)jfurH(d!&2)vo@*jeuSGA?8h8tOv%W- zli8Ti71Oda?I6RNe#D$Y=k&^t>wLV2w-1*nT9&fh&YEM##F|r5R>rl0p`QDXjWq1* zlw!_EBpL-Mu%H1{>wkUgW8VHcx;jC#OS;e-jZf}6@kCgWxETnaV3?~GeC2b#`y&)A zD%N^W&u0no4p=Plwe&MI%C>E2y_9!jz)9z(2#JES$EpfjbCHB!PMeO3i zgYJ3%I(d_QpBVT0uRUcqJWC^LMKOAWSpYM={-gPD<(QTKsGoXTpd7W5AdCJtt?wRv zKWL8mykhE%kp_0byfPzwGqoGKdF&sZRw&b)Gvb_MUw+|R#;0hr2%`Vj^rL_n>hu2~ zVXxo4k^N~qx$JSP6?X(eJWm+@-ufZq2fEByZ& z`SWdC`Z+!SC&xB+NttD@pE<|nm;&xr(g0@G{f~boaNwNL#*%BLV_}W-03pu% zE)EzwXZi9TPHcxUBDC3?BI-kMV*hy>w~-Ltcs3oKm8V4$(rWP)|91w2r`E!!ZbJdpaDHdBooNL z8794A(fMuQSp`J8sox=sy;vLIut=+~=%b)#yNlB3)2sbON1Gm-Jjo5kMs^vCfh%Ay z4V|Bl7@8jP-%Z(&YA=Spe1=l3ygS>k3fq82)N)U$k!SV|ghvAYuvl`KI`GX|{o993 zZZ|s0DHeDJCIN!HL{;PIdk&BOZ3E@bdneM+fCR@+P?K^7b!K?Yj#;ss;5S&0lr2;h zuRPeqHgpZJDt12U(#5Z#3&rS~ z?*T|$c!wl_MGN8ZLQ+MTIBt&1zoBgFa`N3qQtX%*y3I25EFow6+6Ain^AIq`c=1@+7hMvf zxP&?&x>M2GoY7-t2GTFDt-$90*rIr`_{ULvf}59d%@Bc=#?RabETAWtFwjN;;Xc)c z^u1xvl?N1+7t7q_WCPlr`z=9mN4Y<{uB^NLMQ#dDZ^2{9X_!IEd$V)|@C~!#70=WZ zSbW~zJ=YQyo5sn(p^ORHl}1R>~ij> zKN9!dJ0Lyj02;@7vV&p;sTdu#MT9FjULYwX3Qc$&vW?<0Ej%Wfm9D$(MvmnLq8nHq zeAr?9B3zMr<)QEWqCXL{3R&yeOgM9qd8T+XfB0si@w3f6OaNT{0$b5k+$LVIDa)+W zMH@QlhAWaP@AemJcE?YShyy1uxSaAfA_lpI{1d5TrMuuEOs)i4`LxCN__M@eko+hb zr{d%gDexeV%`OsDGL*$ybhNpxSJ^fE<@li#CY&Nc@M+me<~q5zl_N^P>z~HL{=?*l z6mNl!29b%$*M4+KD4xp6qR0;U<%k*@;(XQfx^VEbi8!QLu+#AKXbu84}_K|ELzDE!4{pIrEPv%`R z5g$xv(M+6#ZhiQ9W^mbGyOWS0IK9jPn;TBE?xl?ION$DXswCIPzJ;D&kYWgCg4OZd_5Ge0%Fyw|_ zCuo~Nb-cBRFJ|sq!V6uw2p*AsZ#ihdV-ZI*X8GmMbcu{L5e+Xc4i0y9Jj2&Dw_le( z{S>G2B64kXe-`l?Z zw;0uv=bot#vOO`GTvp|FI84_=O!R$;cXDBB6~FLZUN&vsMJKLI<+po1Dhv4@5u2P@ z`(Y1qOG&2FfT?5ixuk(W5p1$Fyn6*;Dy$oauyU3Pxp1ia7XB(ctl1a@(`zM)+&pTs zK*4~s&5w94m6ug1RW#D4&xbz`ko!AfYqJ6sVX!!rX2B7TQ4FB!jPKkQY(fFPds?TJ zTwvE|;#~>rdZ)m|1@qjf@}-aOTsu)+tCW$n@6ID#u?s@R_Wggia!rN z5T&4DeRxD3M0--lBi7z;z}?c!6Qc{;SytD6DG|cq z&xg*nUx|{L{GMG+J<4X#Y6WMiJah#YrbL6=F`I}ZkoGrW3GM|XxAGh=xDl}6R2ma7LGnT>Ml49yx^&LZGMGH z&rl&_GFEN)SIE60YiaOwJb!$Ms04O}qD|Mb1sBa+>hN6iH*3h3MiN|_!iV&8`$+ujg2 z<}&CwEA+)nPM|W`x5sNxZLtC}?d&#*$;C^fEkQIvvb}g&yj7-#P+$ z>}(QDJzYhYF)2Aab8KdZ4|=W&6aB$7$#pNCf=$|eJy&g?X+3vOz10VFFKp-5e&WhA zlVZkRQ+a*#9&gx&IqUOCcxIY!WU6gjjtDyzGOx2N+V=S`=NUQxvYK%ACUp^``lXKR zeUWTWjM>c5&+zB<5%=93P7KZngFHsyBYvJL&Cw(BNhCXwo{OCpj${jD!f@@{l?RRr zChKO!Dal69ZFTNkVUlb(0l)a(i_!fY7SPVD8Ic!HUGzvk+_J3chh%gVtXMe48#s50 z!Nb6L6=TcXjtW7vr;g2Hp)f2oZngrYxaappSN5a(Q zj~pe1lbT9Q1>Cy&i%CI<+DXfTEuw^H6ECt-a;0vqy2>dYvo)Ywqqv?VU%wAC(=Yp# z?U@csusad5S?tUBT9Iu1%2yB8w9!B~@A;lOO2>f8Fed)IK}%Coox}maiV*?nbx%|n zh>jFH1K~@d4Y=sYmtatd(j-nxBLDTW7XI0f;})jzwXGjc&SubO*0IT7oP;TSABC~J zl;ZBvWQ}1&oD*Qw#tjV0aQ{jYUFrhN{-`KF-Ak0bu6Ghvml-?EzWIX~n?%oRL^PcZ zm6d1-s6H-qV&`_V-bFaK?(7NGyEsJ-^?KSBtC-#j@>hYA!qpJYsi;`78}PpL_3m%o zq0W)*cMLfu+o$a==GS{y2P=)8dXgGPmQZuAk&W$zcd#gs$?N5-`c?LML>Oyr%^oz& z5&NHEARA{c9GeHL6)Q0ZMc!B>b!`WQ(T()edVv!!O}hf3;o;R;+ez#VEiOL|46nYW zcmBz6xUBqit>hw~3!F5n2Y&?VBTP~(UP>6Muv$iL-nSfw_TgtvLL5i%#fKnw9h*H; z%zSDMX=c;;3$)C3GZtNcIEqqLAb-hYPgLF)_)sow2`%^xGQCHtTEwyQo)zx%BujBU zx7b)ha+ZG<_W{&=$nLL#AsBgUd}rYajB)AvZ5KApEONyK=&;#qG58{0O3?W)mK^8L 
zTb%NtYA&MGAnQQZ-}FUC&YWj5&T3qoK&HhRIc#8uW|@$#WYX91l6VyYBVWg5d;)ZC z)2a24KVn#LQi%T07Gz#;q-URMf{9w*TLLYEjz%tf85^7fLkJC8o#S^6n*qM%S_eVbIC#r&-OPcpq3lN8S)-<&cy0pTfmVv0+e zXr#>~U^Qvm|HU?D;Ose7XRv9S%n*dNC%Jo}xjas-qTB}$;||<;i_z0+Ulb&x+5-Vj3O24*KYQs=v$Oo)ro8WBR7eDt zu-tny=QjkI=S(ZA8-`H2@XxbdJJ;#d@-R38*_2_7&z;MhFz+f$Cf7?Ao~(f1QB@=o z`hRc)Fh{^h9>O`Ixwvywn(;Ccid&v!dm_Sx;m$g+2-kOPXMfHZ548IK=ui`SQaJ(N z4K`#JY0dlBOKkC62AJz2yoApl^}5@T--?e5xV&z0aVA+C8rtYAuNDnQ8E66Crs%C!I zS@C>>_IM}M#?V6p&`ua1WP`lWPbp!|4|PcXK}QWJ8i*U&u4}m&f?i}64tDlX6HW*$ zwO&r;Kk@<(d%nyC|GCru?7+$;kJN%IgX_Xc1`L; z4R-DS9T(LXAB?445sl0Ubs3HHR?WXVJN_u`uajHDOcj^j0Kap};4>8RVlX=%r&mwr zfI`P$XI2SRh=;KWO22~Bh_O}#yvT9X`0UQ1Xa0{_DfQWu zy^fLD(#rj?hEt1%C*+>VA*jLW>6U7m>i`?~qP`51qT|RVvS}R5lnU=odEB8By+W01 zC~sC!2T$u-?2i?6WSQ^jecSIHo9*z704qKJLEk?NZ+wKbjIA3wbxdO;>FVcx-*#`} zMX3l|L{Ajs5R=zFL+tVb-OB*li3po`?8k3nyPre%oEwW5{tazmwUAS=?gPovi0%Oezf=pSv$H#vF#wV}U_f z+E5@11Xw=$dHU(rX<{uo0$J{LP>nJWW^I8rvfaAm6yY}CeY-}(@?}O(9qMLgrTzGAOEo_&)ef0mSt~tG91f>nTEDXH(Fu(w3}Dl`K96b6S$+8H=CG% zvUp9p)OrU)7#0>f2#nct9l>|R#=^Xl$L!4dJH5u#J77!z3+r}ahK1$MhVFu3e#6Ky z{QmFBzYdrh%nr+c9sXaS+ZGu1|F8K!{NjL6(vfY`ZVpPdFS9%WW%kf3mmSgT-x`L3 zG3kD--X4!WDEfJ8X|bAIUDj=X{(;mucK4f(u`H#ABr(iJMsbUKoKi!M%V@PQsb3&8;Vt{bl;l($tUnqi_8(rId=pU1P&4ZoIqoPb5X;4d$!c zL{8yE@cUFv7}((nU&mY7uAr`mWSy;J#83GQ*Mn{(eoQ5Cc4~mB zMYv5DYGnE@HT4&!!&@&lGyLWpFZYKhjh21TjIPHk-*0O>+Ec8_j7l5+KI`}jTj&g* zJ8r!0-K+ZB=`-$EpLN2h8G4O(o_F)5bxXohH$ZcCXPXiDM_Ek^&_?41wtHHPY}QvF(tL%jYSIwRfYzj|SW=#e-e-r0d=1$PX*a%+ zxjTmIhCGmj2vV#p2I7q04Q&4#`grce3Y%Dz;QdRI;}idcYVVI#lJN{g_bwSX$IM#= z;Tc@XpAQ;vct@RJpB3?6L;B0^lnB1as&}q5?xgo#AAiv1SiRI$36zMfmk=*@dpC$O zBxK$s+*k~ro0yh~!lhol&q(;g^ln;gA7AkqV{SuvE4`0!G!+RyPo?nYjc&3f~+_gq&BNNmcXRI1S-to!(Th)qevYIBiRZ$g}l> z7I$UE?*LXkmfOEM3aIzLbmsQ}wYi`6_Px(^UpDsYtTV!#Xu_3KO0+g# zLGas`y@ToJuh|KsN7=llSA{;QCHr8*&ptF01~ZNooeUFWsC(Vp5IOu)Ft6WAoat#p z0CWcn5YVz;;eE_x>@YCbDq^}yao)sgeINdl9Tf1qG$w=o(?$;(yxx>5xfG>9P)DKs z4!Gi@5s8`61a#O|&0erm4O!g-xtjm>9ofFtu_x0fKVkPFdf4u0>arAM@vsiV(x3~F z-0;B}2i1l9c7$E5v^U4|k@d&>aAh*4O3i_OD4*^4C@>PZA-i2~v`MwO;YE8)nW(0S zk{*34;ATJ`@NJXo#&Bz%x3^`NgA z)#ou*Y4#C>xyzJVt(J#SrxiHsSfhdt&tAP*by(`lopjf_D=-WAtLbgbdrEv4OLtx% zck;2>(+WD3d#FMfK=8Onw=YsX^vK+noQaEsoLVfiseHA*0%B*?7%oOaJ+K|K%CZS_ z}e*4y6;1S zy!KzHye+fUCQ&rMu)zb{Ie+%zLpMJ@!?DyTpW!e>Oj#;rZtbnwyq|>Qn9OV3`E1a3*|q8(>1Vq94f@ezIdh$yxQ(<| z5Wyd#h-u79Kx#-ZK5X|;AiNwGq$$8ONCEp8V3fPfRuX;#Dwi^m=l0N9!fZ~9aCLYt z_Cg$%>ghks52Bn{)9|@;tOfRQQR=C8XJ0|~aSTi3x=y9lc^(RoIjp})GEaT^p2G2^ zd2soJfC~G}@xg|+9A{5;Ej}r6{`BP)^?sY<1{7wlB`8cD+fIl#fUrDgnze28V>Uud zLns_0nt*@Vy>)(&XF7-NV3hMI0cCPkl?;1}+D==^Bf#b9^kbSo|tk7(GY& zE^^-71S zFcT9i`NtOFAHNRNtcGu?Tn61gar7tOOaGiMW^Zh#yzy;n zG~6F*_23rhp>%ecnYuF7+{S=BOow8X(3Pp-Q&=rmf^82&osDo|J#f_*dk=*RZV$nQ zTXH*2Fht;?PHe&b_vulAVJ$MrQ9F+u;<4qPIkG(OLohFzSm&co59=r`NhG`)N}emv zM{=`BV*%x#I{PyR)DYYmn0TcEi2h~Uj5bLvdSi9~)&f2mEocQ+$RM6O@htag4d%1J zss$u6Z9fIwZ^M6S16KN=(pLFX+O5ymMHW5n|S&XIIcpjYyE06~soIe%hwS=bg!3}WJcq>wU! 
z!};_zxdxO|v1Ns{g(UICKbbu3wA7tO?P^-%!Gv|tEQBo~GIyB_`FW12n3D84pM;a~ zjZ35A-}0x}!t_!VSjzyfSFdqbap~eW6wD{`Yd)9_*WX6i0eKdrWl^d#Dwh*E-8f;pTyb_otPuc zo`%W6RUq(PZ>rK}Sd?&(V>^6Vhp1ku5+mw@vYrS$i)v~T|-CDUqG z$y#b7ZP(!&tZ`^&gEE}RP}+Mtp^TFH!~KPyuFya>h8t!tja`?z#iAk6zb{2yVl-Gu za@g}PQ9V?N9`U6rNHEXcU67sA6O>UwpA}NEkr?^WB zmStY*d0$(ReuAlTl)7-YL#`yT(A}#IA2X^HWEg&Gyh)rm={7v^*X?(8>X%dRShhfN z)nW42LsUg4X|?gxb-adP)}^uDXnKJ8@EAQ{X<`}$Nu0D9`#rqK*v0r}MaO_~=y|XF zp{0p~s&5O!4NewW8)EjyM|RNMuZwylR1dz*QFr3)N0Bjv@*UaNgw>At=^GvWM#>tM z@bskazFCQH)wZ4$y(R*SiCxqAyhiyk$~)O}3W;5w4=uw=?PqgG9&ft(omEh`S$=fr z5C*j&&^C=3%4sEK1du&<&dYwJf>&+gZ>#={3{U>iCe~hL+|DeE(BBQgJ1|4>zn6#( zyWutUdW0Ft@~GfFqATLr)}s90^JFaG9p=YhEwty3s`dqMtW!KTM?<;cPt~?d6D}Hyl|H<@68B1Noj8&RW!1RE;+`{F)qaM8G-UE9yOyKMW z0#nfFzXM{2#|T@tA^eV&OhlV~>2(4Qm12!YcYqufAcBRh zc&^Q$J)AI?0L9|PlH;cOsksI3^dd>2bk*#MKibzQ>!`Y{yL|#Opwu7CO*|Hpsh%D2 zNW}Y;Z%?gotZSr|1^-r5-eA&M7e4fq$oIt1vFyc0x&isNA_71Z2|okE-6KzuXVT`e zV3}*qYozr81`~uW*oX8|o%LKW=;p9zo@vSXJtsIx$Acaa3g^7;%v@pe7*lRA zI5<%|pw8U$<)J;gQPc^G74hR{ir+n+y~pHFo~RazPnx%!a0hNEefp<3cAOciuYybI zu--td;jn&;>=}bVcGRxUKKCY}ywNTip&gKg#K{8NB zdM#8bF>IMX0v_-jT&Y1ri-PWte?1b}9t-+0A+I=r3;PNyAHjF95}9_Cuxw|r6LOL3 z<4pST4j;7tmU4EStg~gtAo?L-ad4)~tjr0zijQoNEqMnZwx2>K`UK}>W_T4&Y^bie z!GWrfM)uC_;_2P`w!~T%;Ni8Ph}=T!B1sPXDLQPlx~$?F#c}?V-?|>1(&$9Qk&WaeVF;8M zMs z%2&sGs?2I1?i(L#^|2uOOCc{7IklD9{j?3U0d3a0#XbV{axVX zqZPHo>rd6cAW6oYg^M-*VLZfPe;8m(xjIhcaWWrk(q*2$d&13P%jcZUMoPKDO@9x= zNh?n@AIm|3_qb8r8bfDz5;+?37^M4iFnT5(fN%#nYnBwju%w!ugp;JYzaf=F60pDx z4b>UwXZLERlKFEU2YoKeCV!WwkcMp1a~>({CIfy+{<^FX0z0m0jW{i6KE-66-xzXO znE!=>oL24SwIg*F#&~c*LsgE0B=;{FKx@9U#b_EN5lk1oZ)X4zCnrxp`v|2Tn?}NqlGY&0 z1RhRwqari&VQ(9r8ik9p5av!#6e6kUq6_lhHwW~Dqn6h*q1hTcp&93}5j6iLYz}>* z5AH_4jRu>6uaCm(i%URT367G4q(iIH4RgqrE;jun||V{V_frCu6Ua;WYQJ9ErP@G@g9Mpq&d^~XF8kHWv~-h*`8$W+=13^ zTU3xlZp|Tn=`HqGhXH6m>x+pDnzMk;PJAp?l1BJDE$$1*0}2QOcz#TD-|c71=I}i{ zyO)0+fZrQO*z#~C&$O&;E}C?)a3Cd5sA(M5#HllEakdxNYoSn!v(1#i1P2|+V`6~s zt7X&YoU2}7l>*iQ9vsCBP1p}w><8)r8PRN*&HnaXSaE;ZGlWXaR^>A7@83kA^7@Mb zYMnc|hZ0ZmUeia_>#~E?{s%=%Y$Z<8Gu1eogE9@N zj7>oDBy`K1tzmjU;_M?u2EwU)ZTqC;hJ%eTW3lMH{SR*bB|0fR3!;}hrJ18TDu4k0Lc?eaW*$Ls^N>!i z5b|g6vq2}_K=MlYM<+}W*EJ^s6I-h*OiXt@z`WD%MXmNIupiVrbZk7heyq+JUS_`R z6f!C0z_}fbxCqhK!r$Z24dU)2h`|F!O1-jn-}^)^Iuybz0u$|9e^XSdfyZC!l+8yZ zO&+gOSfyuPiS$>!l}&(qRN|p15LvrM{*%tNM01wksRDie*j_Ydu9P2|)_9jF^PFe> zKKChDtMP0-pu05Vv2r61nWTkH&F%&@C214dUtkkD{0g6+)p_~YRsQEIZu};tvFYZQ zacFJbvX1^|dPEn%B0Pn5p*pZxxz4i8Q*E_i+CJ>}j}WySIwA?vB2C-rE5EfaaX6g= z!NmQy9xAlyEKA_wfYTD^hMx;asRSR)k;ATp z@-0stwN@HIf%YTAmr=_LLU!+2r%f6c-2G;X7*~31C(Jc@&pvEM{j}{K*tT?tAm6v4 z&2hdajQWY(EZn7gkz?F?8u^qH6r=O=TetnkW8ZAafVGn=k>kGM%kgMm8$vpc3fSMG zR`Fd_r1;I>>p$|rVe)JQSBhb!%SR5sT^&qN)x_{8(Z-fm5T zM!(TdKMLKHG>6O{>Gv)zS;r5rCd#@$exCWs!s8V--Iyr#!nT!&|6^lR8uN$e4|6^_ z1`&W`xIfbXL?BH7FMzy6w0~f`eE6PLiRU>%diJw$eyy9k;$QIrPQ$12rP`V%Q#*2z zd^w3zZ{&7io~g1S&pk!N6iedDR$sLB`&MI*m`3VA_+S{`Bw)Fnh?Rwr)TBwTO^AnI z9Mosw&EGypM3Jnu+@$ME?#gN?ttCX9C52@{fFGMLq%ox9c_WgHm3a&Hq~*(Ub=)}8 za9hG^Ch-45q@a9~HE6Nz(J5;a*8d>8_%7@Zj-tZn8844?SKg5l7IIWibAkl|Djr=> znG2nelUrQ~`^z9eX(d;n^^nv~0HDw8;Hud9(1^v}DBI5(J|XZ1%Yf%xpprZC5kLg* zBz_#;vO#yI4*nJ~aXxCk7P-r#C}(G7*M`ql<5aji=G*wqBQ$9eldSmgp-NNoy=R29 ztf>l&0|kBxsi%ns$`)^eA!&G0!_}lUs=V8eJ!DK+#>8{*d3Ji0PX!)clifE{{L>P+hFw;`-PO z+zshJ(eE58@6aq#9Y5-ISU=o!NW*1vCbv}THsJR(CNI&HTMH6orF4dQ{;EL?UutLt zxuz){%SmbiJnFzkA+*Ec{%&V~xafcjz7uUis6&Uxz@0oXjh*Ya>G(N>QGMkkOR^iR z`XwcGwtQKRL?8zH2OvHo1EaW(-OA_o8m9NNp8MGdry_*+ae=V~70NA{faz@4?`d2e z)AHZ9jRG)^SmX|APThSEEd8gmyquyh?~uxBO74^p)71I0J>V`U!Zw0oNeuHR6jBT& 
zPGSQj&o}u$uXSydteF+H2X>br%zwNw^-!DgN4N;}$!6dJ+WAH$d#B=+)jThqg4VC6 zgI-P8-AkW1nfy-q3~m&Z05{}pOltA~gc-1*Q9$k=oVM^8pEkt{RBpqo7|GLn-NjEH zKkcy3omcomC-?z5RQ!LOh5D0+@KPwv4#>rBlT=>|b|CE4W92629)9;-X_pqFZ$Hp2 zfO%GBOsIKUO!vFBpJM`|&=ITylcCLH@c7|r82q*L!#98^Zb)A=bBATMg;-SPB+ng< zNrhh&s1P3Jyuq;fh6+ZYtGLebKUs$QCv=e;#yME&KE`=!@TN0mk$0}mvVba@29I*{ zesskcI@i{NhE&cJXHd2r>S=4+Or!V;`E3@Bq`$Z$y~6rUslz2P;qVeBI19C1#c9KR zx7J=C{dQa1fo?fuGQa!QRk=59_FSJGAM<--!wLt4eNTcdwt@@R_6s>IxfqM5-N)Ne zW9}-Wh0bs?ByF3#tfaO%YOmJ=5MjR}u!Bue55S#L0uuDz3rZ{>Lf(HhB0~#w5qGqH z;Ne*=GZAX2!t8ohw4K-vIY7QgA?H za+Cucjf`4hf0V%2=+P|m!6>d58h(3|z(b4FyD4juZzE=hS1EEM13U@1s0qYT5sl9= z8F)I@#ZeUArvHhJr_oWODVim*sjKZ?f9gndZhB(E?+tob3MKd8Tf+^YlvcKJI*ovmQS$9B2deRfVszUlD1Pwa zKIyLCoA5$Y5Mi`woak~GZwhK}MEjNUoxU7c7$L!Q6eoq z8Xwy041J*Q4DY?{#P@hgyHB#iZEHpT@TtSM=oy?EmOWop*(BFYWyt#lb$h6<4y2h1 zfIswHR(&Pu{AoMEy2`N3)GGbKXKT#;blXmRwcZ_X*SX}kge^#9XI_%yRdGcY;tpcALQ@dm5L7#>GUs@##$N3BhNC%V|}^ws8@=tZJfcDpg_lxgF7LArzQ z{r&w%oOv20>;3a-_}K0g1N&GhTD^7S6nf zS=Xg~GfN)vwQ1(SC7qbm*^O(+{T;aq4>6tX?v>ONwhyE-fjstXA{RyCY5 z>Tqsqu?^?a$j?ODq`{2qMFf|I`#CDy{UHZY8PT0ez9An~X9R=SwtR z4>6PXJ~eZUaIY6cZYd#pnR;bK58{2tylfyO8q8XU*hg6TmHQ}Zql{{E$S>XXuTeop z$20`4EhU_SzYtB*%T=X>9sZG0ac8noGu^5M>;#34HS5`qJHKfGN?>Umk6Nxm7XDJ+EBDTc|E zh=DI9)>X_BG$B#xe@4IlnRjV}mKcdUx=rwTq;ziP+I=sm9GUdqEohBl-r)(z9XNJu zoM7}TMl|EZuN4z)zqeovSv#2e_o#}r!e?2ziCU=RqTACVB%9ZNjbZ8EliA|$U86AF z^@rx#?b@BY=`zPgs}DoI^LO5T0@`zG^fa+P+s3~;>dzYkY!m;H%!RX);LseXJfLPE zxgFDJA(GZMlkSB**DopXjm3jbvG}!s4PP%|e^~jZGirBu^R&_d2g_~q*SfNUkmH+q zuqgVr-z*b%C9~lZEUXs%ajsWG8xOE((6s+dX65<&5#`4_`bYTd`D?AY~i@-+bZM}<|vErBu?74db#QS!0%5$uIsiF$-iKjD`CGh z43G4&tpzmHJxA-)T;8 zvD${|;bBf{fA*16GnDBudJ2H;a3A7L^~yqD_a!Kj>uXuSKrPnBegnx|qz=s0c;fa@gfoBxk!*BLYof zI}9}Nj#R2&56T%9vkyk4*rimWNK@VN^TMtY^BU3j8d@n5;1)PW(uAbvd@hMBdC44p zM=2&pTk%JGIg1KpHA9Mn$jC$?#*BVjR_ZyBOVSDZ=stg5lev&y|Dtng?6Xw!9SPxC zVPR_gkzdrP-#-!O;HJx9&C5l4Xf>j0T1d@v{#j(j^>_#)iR!E`lzzx@v z4rfc(*QGy|>GB20f3yq7I=g*^ib>JTg<7SjO3g?zA47F|qPQClc14v+KE=H$jmL!~ zEh=0jlJINE2M)#erX->nmsR7RR3?t{aEyvKmgLTgrxxXEoo5u~_KNE>7^r6YoHXZQ zYAcG6(ZaKAZ_v8$(ZU9_5f%xPec1SVK;cfNtpsOikg0L?JJ+j5>-b!%IHEnFeoguA z!s+m^)dZPE3*$&Njb*|3j*!eUO$`mPmpZlClKqg9`rgQt*QuGyKhyI!hFNehDuUU3 zN;)+CZgzE_&8R(Zqt|!lPldq?-?tp25*OKrrZ?9}0n03JV#wW_FxXhb0m8{Z|9hq) zz--{nJ{3r;vnn3nR2q_h!CP=Q{G-Zr9Jr}e%Y^h~%sJo6h!d}v8YwWvI8&wLao)9H zMV~p@x{RjinL%)GDr2@!@cgp#fMn5^lVp#$&5@xhcHV;D6VE3^2C9~ttg7G1vI+M5 z;9_mgGtrVSJE|yRQ4!CDD#~XaeaaJ6i}xRDs>ew61LLyh#D^?i*V`GEXuK7h$0qx> zhHtXZ7f}&!DfGfur; z@YvSHCO9_1GCwm_2|II=`^+1bv1np9#7+sMEWE8gl~=3tG65flr|Z}bUsxNI%(TAy z8)D_tEgmVHft40%Jj|%d2u=b6k?(`Fzu* zN&m1ffvmIm8x+LrKZSjZ^GGXAVVOC*{;E@? 
z!SB@k6y8YJsJmayEpsfE(^R|54&g|#<&rq50jzTS5DfR^4e@7geFB!~BMAQUhiV_1 z4LMY!G`8nof^^GfGe3XLY8-E1T&Uy<`F!upxp7qs$V|B$oY$K>WpGvZbn4@ zJ0y#{vg(W*#=&9E*!fc?B*0uvy`n=5)jlD)1QBksK!6~bp(4dT1q2@?@1a>qMWV*T zH7Z{b`_ZtjvtrrDrD`J zs>~GF00;C%rkbjEDy+-J+I6>b70(?LfNn2n4;wh`oG7blTLV_Yay2C#qNiSLrv%&q* zGjvVkp8xzamS`1viXEFwBrdJT##GhvD83`p{TXFN`A5+TKZIj5tIu*64OAyRK4HEa zIgiH01XGMI@OuIAp*!DD5PJ7Uhi#VW=zJn6Q$`;VBH_L=(_Z6hsg4(~+{NIPMIsEj zvl=7SnV%cvLTdMyh)Dl8s2~JUE*f|Jxr7(N@?gXKX6?mYLPq7cM1}^(RRZxGqu~Zq zdqhMTF9J4aoUkX(Gclp+_QLurAcX1dpTKOEVaevl*(r0RU@!b5ok=D=otmDQ=U$oUZxYMx$g8 z6j~mUO@|1B>*R+f`vpZYL}GNC5>g@e(}D(R!Fo{s%Wt4!U7}KdFro(FPCjc-%scJM zPPWt2G_4y55a_on>MB3_dyeTDORnwG~;`Lw_6i%E5g|6S!A;;tY z6WwFTHJ4qx(tm&X!)7oV64}S`q^1+BSfzd~@;FR1Qfj#!s7{QlQjOaV;ovqOjJc>Y zwIaxqWkBY%%?V~J<#`w*YvT=3Ivk4qtOHpu-pDI+Jclb>BuNM>OsEbfW|fb!yCuRZ zNi-&`8S}jx#Ouv&2)=LM}vEo2gOpC}@Cov=RLeQPt+Yhgy1Ua@9gk`i6FROZD zln(svHe9X$A-&2uG~H#CU`wj&i7|$H^F%<$|5RiN`r2G?^j7=){v5y3&SRtiT(TmA zMo#Z%SUZI|-7a*3FJam@EX}9Y%zeb_zAud8U+C^oOsH<621-;9Lk9YX2)U(^=YRfS zyuBueqTRD?HD7Yfe-+`d$CUoYLKMip@H96RANWl_QNUkIJB z&dT}kDLnBF+H-Y85W4XEuK+`rew(I-jfQp-%^n{A&o^>1$A??&=*)gz9vJ0D9 zXf_YZBEeLTCVnWRcg4oM*!6v{c2b(`FC~0;Yw^is>>yOF zAR?2CO*#Lgm=S$)bDdvkxA)@}A@k@)`}ftfuYV*DcAHQtU6c}Kd!PuhFo30t7tL(_ zPF87_&0ij%Ly!3MitRHydY9_<@S1d6NIpP4jYc~0e)kYY%`P_pr=VTN0dzyH`8 ze(eX^b$ljqJ+^~ltFY+qRTtE@B4Yb_xJstf6mO= zd+oK?E@z*zueC1p&h)OG+r0;~vca)FctXcujQuGD+fl=e7=%P$c274q;PGQ_?9Xec zTK^cRMd&YvV0u3Pw{Kd7p_n}FZ!@JZ(DwB7dO%S?;{&s|zIY-`q#(v`99nGNdKT8? zn?yJD+ID~KcWK4Vxvy0?HyW9T3P{toNH!-~lrfwi!y*8#p6qpQTaJExc3$fGd(`Ia zx=6%1WfLvN&+sKB*mu!H1A7u_mLI*^UZB1@Z<18i-)-5W4 zuN(;Z`N;$(!)f&sHB*gPNIjE2UOTIzVG$bhA62vxoOq;x}kvX|AWD#)Q4IACF%`?mg3ZoZ#PmtCuQ+ z?l)JAKlIq)y@VNLr8#Yj<%)}8#g#63G-Xw164RtaEbyx8ClTQpF*5#ve9aIK3rcd5@%8iKaJaB{ z;|szif=t)zS@TGzKyPH2{Slvfk1KjqR^ka06&Y+EdDrxe@X<`Ve1<&p;}0>egue)p ziy$237Zt7T9El?r+IQvdXTs%tcU}^K9rfeIKVr?FLn0-CFH({D`tlE;vs4)VC}jfj zh>Zz&_!PZGig1KOXUV}g6D2=S}7T>U#YqX2#)KdV_WvU`+~Wy9PU=PKBNz2~ z#Zp2&MZdZkI+cS7bL&45wwr*YY?5@_cV{GmTfJDXo*vxT`VYeFZwGJzwZD26PD}W(b{uhMyPaob`P}SDd;L7h4YPh}uRDde@^*glWuuqe2{cYD@ zGaUVH6@Omzyo%}l<*35{Z}$;0iuL7B$~D68q)$J99$II@8OR9r{ncq51}A?IgYex- zRe_EMl5QNz{-@~gBI&Ddyu)67_}?hJ2c8}?|4%*tw^4uP{u3a=D7Hn?2!zcOFj)Ut zjFv&(4P4sV?*5!{s7Xl1mCSp5qnn#7Cql5Te7QFyayPfn>@4n=th8k&yk&tY@#E(Q z7a6|9U=7w)6J#~1=8~4Swy;yeJ=yBZC*BOOD(8#S7%vLU$Gax66Kcda#|Q0?YHcA+ zPdQD((d+X?OyY^SPl<4b?Ur}lgM))hzns`l?Q6~wTxzF#ja@=rwlZo6+t1PwX@c?Y z0OfXeTo{AHssvx8iz->3!fVizNc-aXmeK2ZZ<*aRC!fu$rhU4ToIlJdq44p!(xJJ; z4anLzmq7o^D;SxtR1z4LR)L9E0j{qA7sVE8`S5I|jD)S1-2p!X2%9lOBF$f{jn4|R zXUD%SsQRp@j2~h1UJgkidhK5CymQ=Dq6GIY?QKiD`z94cH8jMo#7JP4fP81#4bJNE$ zU)sQ$n=BAG&3X?ks-`XJ;XI#JOA)w1Yqnt(j8D*#IdqATQI~612-El}EHoV578|dU zG3&Z|fB9$Ss6A>{&th|e2wajewYzp<$;L3Y*6x915zw;s>A`X&YtTt-T+yT{P8iW# zxASG1ZO00NJ$>G)3!U^FcIhwc-8p3g{n_px-6ny#%IxUb8%XZ+CT2k;`PW*)GG9|; zGSCv)N!s*;#}b7c!IMSAGX=K>nYJ~1)J%ChqEX3pAYCz|DWXi1+(C{*e=)2uUcqhF zcl8#`Tx7k_4?~=x5clo&-C`)Or3y?hxhi2)r}J*@Yxpn=V4~$wW#;Z% z38KmB@;}@D!s||JKxJ12Gua()mA<;={}JbADEy4NMf!@;*;l)bOq$v(h1Je{Gb*r* z6Z5Y;=k(+XCS=9B;2}QQvE!R-(?BDN?U%{$Z4W}EpLbzCPXvtJcJ?lftZ)H1B3tHd{t-O+ZRegfs}FFO zb3w+?r?Bg~*F}mKbm)L)wBy5En)yUK$o?Q>`L9xtIg6`;PCd6HkY05ah9z3iW4*SF zkT!kNB&Rz|l4LT{&F!q^wH!^;q72nChGxe6D0^0zFQm4q6052RFog`9NYG&_gVAPU z1!gs!g>r=5Q@b!c7uY>b&Ww5%vW!1J=eu-XIKVtR09nIkeCp2IUH3kOq8+zZYoBMQ zYWzXny(KcdP|^O|)FE&lU+8Wmlb=VxDYAplW@)@q(iLy=bV4ikUdQqd990Ptu)tvp z2X-^Qmu*wx!Zbp>c{Tc3nS=IQX3zJR^cOo9KwGRaC8(tOTY8%^Q*tF?R!4LAR!0s5Ru@04ANsK;T(28YC z!Y%x0fzJ3og_ZAaOdiPUSHCDx@JZn+jzS`Jimjg_H(b1yg%6s)7Mn3qZSdtd;7Fyl zHUQOgB;&`Q46p$?Lxy1=@cz2$KnaF=Zc0E4+WaiO{za(nx{F<*3{oJ)5?V3xC#y$c 
z`}5UAbR*uVz-dDT|HPdlwe(SeuCgFYyf3ns5qm;9G9L+%uB~OyucBK{R494<_)QSf zH_}!Uy6P{xU29u0(g8>fN`+}qG5QSU4(_@TAvCtq9)fj9feEJtJwvXxFm99Exs>XO z*B2Vdb*m&>m>c4+TYCM;KtWjoLg8C)GB_Gf8T{|-V*_eAQgqYF9dd6QICF|bSr_K4 zc};Nuais+33dysn7hqGJ-=nMWXQW^jk6|Wc@7nR&*y?r-X%PAHu2ayS<6jp#uF0oV$LwvcO1QP*i>P@{~ihv3GIgLp7(9dDLT* z#7FNQ`>Y(tj1%r$eGasqFclv3AJBn1nm5H0cCR&WS}O#E6;7dkZ0){LaHuM;)A$%R zwj5c-wbHS{QC{$nfnF9Ay*mE+&ub4Y;K#~LG*WDJN816K5Im9$jYk3D%B2BcT;J+4-mZQprl z%lnSyl(f;iaIFt|#&vNE;xOgwRn=GTuhOMG!sM-2lkd1;DwziVs&co&v6mJRT~!?sxCGiHz zA;D0sPax4XX>WAzl1wnnMFJD3kH{^mvU}I$UUyi8(`eZAPH^Q@fswQh_~x%aduwW3 zP8|)fnHCt9YbBXj4L^_8A4$bQT8oReJDex3TwgFN^MPSA(@VYZYIZvLA4wJbnR+Le z1l?5o2(#eBysGN{%Z%)M`x}vAyL{o4)G!$W%*a^Ucudq)~}%Ut29nq0sV<_=$Vao z<2352Z$4;&2^+;eBtLZ{jYF1;WZ74C%p^)Y>(n!{#98d{N3>_)ZIo=lJ0@*VSFfD zL@OqRJPyiHuc*4izIH}|iTVgBP+jefq6C-zgbjZAWFn$-t2oz#5)ra1l!{9cLd#0{^m@)6a?-0c$=C*g zQgIVsBLx<>q*i$9wBecPvy!Ub(qObo^!qEi66;rClg}r+pWRhZ!0Tlhpz76HX5j}< z`qXW}g7y2IpQ(TY`XSlr;{Ge$w6HZb0VM@yjW_Z9kma4zI>qHN9MQwaF>jo@oyr`T z^{+iyZr#uW=C;y^5hN%!+WOcuRFeB1V7(LW67;wgU$FAj z!xl-sdVY!SdRK9PEND>mac4aZ4vd9nPUV?af3O}9f7elJ*c=}W&+io-8rb3y@3u^P zJ(62_e&*mjugYfgUB913q6P@KcS4h^Yfgem3^@6di9Au=({KDPrj$Z!9vx`6sp+YN zN`fnz62mNDAS+l$#3NoVN*&~5rl?Ut#=7`EtmP!=tlx+iMmOJq!;Z_@)6VND)5qLY ze`KOymN@Fh#*6Z0+DZ^Gr#53A7ZM07?ie_rhq7#zef)KJb)lL*1Z4VIkG1O7+6HlJ zY$(V@#Z%Fq~c^M4!Lx zirf=pb51UZf=Mfv0g@l#xQ_WKI+a)O-9YB%Ex>MjMih}A3iX`LJyWd|8MHVy)QG;j zSzqdPx0s>z%;CEsc92{D%T?IN{j=Nb?q_im=M7CXuu{31<>QwFWA{-RVRA2?B2EXO z#c~jlEfP!t&B}-t7x$Vzm(?GY&tZ2ME`o_^$k|@C$NYF1A!gM=byu2$i%9V?vMvy@0$Y7P2(#a;{JTRTP-h1dL5 zYaa|nKK1=QrqnX6(O@19Wt1!|>Lvjc=f|Q2*BOYwK{%p$JlV@&c=wzt5oWFx%bS3A zqJFw@IA~YvDMe1zDV_17Ne+IwGmzE%9<#D_yd@Nbd-OHZ5XDixG`mOy16ja?N|4wK zX2M0=8mXSJ^JeR)Y~#h(0%MVzDqqB%{8_%P-JRFl?W+4*S_lA@ELTYS|MZiavr56rnSS0Zk%sMlT}8RAr?I_4k|zMugc3 z!u4^tm>HIHq!ONh{t*nI0X7-=jtt&y{eLBi!G3#(S2N*R#4v)veTbBnFzvVDCgmitoCUpwjCtZ}9z(EZvF?m8WkoVNvfFCg=ONybmI3sV-f|C0xqK zYmsclrutul2FqrUiskuw#zP+iIOGTexAE7`zjdyuXxm!9kxTtzJs8M~Bw6{lQA99K zllmzq++ZZ+OO+Ba;9Mh+h^ZaT)b@5oBgm?cv{Mgy!y{C5BR)a_UkjM{mZ5F$^w-4T z@yxGwV;`!d*OKm3;Kyt?eZ-{I$sv=@pzo@#S~{6~Gc%zlat3~=Y=LqU3@Wpa?aW(IgqzhHq>Jl& z__7o7W_Z*M#zN4)nzuhHJo4;C-BoJY(D|zzaQS&4E1fgo@bFr;vJ*D>SNvhU)ACH; z(FS?4Vp4JrPHuuwY2|LoN!fP9;AP^foiD{`x2X>13o;gfpuh2FgLj+>9VFJjNe;%{ zi)SgzMBSY|Tn5)DCdJC+*-8sI8+8#REMPnSrf6 zWm&&h$17b?&I;Y1Wt#>ijwvrwtj6vOwSGW_w|REq6sInYNcoKnCRStGjwkwFh1W^ipct?UjA6VzH%XX9C6HA-kP}j62p=ed6w+716OAn|C20@ zoWWZlHnDOCOr4LEz^TDyWPxdQsngEaBVM~$nMK?_VKQ|6m0uNqlY(aO{w3Zo`u5AS z$&gmS!uby6YO`C-f%ErF!b zpT_&_gFtUDMC(d;#9I?4woQ)fk=RK8`1&H>_&5_54V{M&B zL@NhhrZ49)#98DwXr=QzzqN#ASXjUg_xqeow=D_VOwc<%crg!O5>&-h>7(=v_OCUT z;uDBu@65B>Q0S!DD*mJVEeFEx()))@Y0Ca|7BKcfDaD}<60eX}i%^%E;1<=zr2f7& zf^$K}Q#%S4cf!t;G?OgvS3Ysf_79aw_SGPJiYuXXwvh7>Xj2$WmA6BsiC`ZoPcSq_ zy}8KwcBmj3Ax6mtM>MHeDlTIlpKwLeD8d%Aq2A+g((1FmH8^*;eU{CNN5MDUcP}o@ z70*7CmeF&F;yBA7&-#K16K#jOr7MT!jlJ-5VH?U`y6Y4tvDeb6uhXRAS6{E2=J*D1 zG4oq3XRUkckS%;1X}lWw(liRiJM{_giA<608EcIQq*wG7_cp9=etgL$pP4KD_$2n@ zA8*&)Na^F`#+mn$JzcHnL-mGI?df^;fL)i3(1>s85l+h(2cmAmJ#zX2u==H$WZH^H z)s1v4EFY?+i!SdMA8seE@*++79ifqY!D8jzQ*ht~@*wA>smHMJlYs*4)r<$8aZ#ls z8>hE3u4osQ(d$m>Hga6vJ4UWg-Dak#YAq)ilXiN(bNJkVVp5!tOb{Fc{}KvyzU7~L zwb>JKbDm^3M90 zilU#J0R<*%G<@H`x{l&^+q*t>__Tp{W!)Yd^~-yW@OhLwtt7i(QwX(Oc6K`{R(FnF zkD_=6Y|3fr85a|(iVend?&&zcp(qqpNV=ePA0Gv$3z_Xa8qdy0P1m zh2)TyEcmZ1s zyJy_Rcidy|cX{&3%Z0-1v68|3$t1;ej!J3d^<$HTUM21Ny;!G~CWyfvd9Mqc*;>8y z{Nio>{+!?c2Md6~Kru1SQrm&*O%jvO2GMVMp(Fci3WvQV>`&I4Bxw?7 zl?1yJ&fgD0>mYSRP=O}|48qeJgX>_7CjqJP)Lq(^Q z2wz11CPJZW)&Hasg}_hn9ctV}M1r>fMl`H{w;!q!4(~a=lG6-w?ss}oEZk%Fqlhh 
zF_T9)4XlbIIU`jeN8F%VC{TmcQLc~u$*lS-1EKBuk*W?fpy(gOh z+e~Z9B7M^dnL!}}dC5^pX!8|Ra~*1RAk%N)=&(YgnD#&7gnzje2pj|d3%9~;aFEYb zM%2(`At4I?ZC)^YHBkM}|NFrJAGO;*@#u@| zxIYF^6h!0b{$V#*PQWq$Zwv}Sf4LI=p-PA&K!%{igd;&2A>rTS|Bpf8|7Bqy`iF~w zz@0z{fKNCQs^kIxuM7(RuMUElxBs38S$&^&Y1;Zzu~{e&buuo&yER>*vUdn7=lCWZ0@kSHkRv(;k1<$pG-l4nGEC=w=83_bsZwPq(AS zwKE5{wzLkkERm<~em9%y-gTc~`RoXGeTq7F!a=+aJ@(tjw0~aYHGG$fws1FZfg;mA z24JsUjI%en2yGBb%kr2N+nJs7ue9#(Rz5jxm%!mSXI?Dv{>>CE`u+VknmyZf>sT)_ z%USGIqr1wj<-fuI=hp?E&NT|QX_~#r1^zN1*d63I&icXpdz-#^sX_me3s92ltNOT= z0TCI5hJ2NWrX2CF{w1|YJHUJHXzl%+qe-4vMLAe)9!aGC% z&@E`(#@s)eGwl7#DDhtSwXg7w&OhcX522@NUMJE2(y?Cn=g&T2$clH}Sg7Kh))Iyb*?XipF>oK|l}2#$wVyR}v%zh6YCSn`o-49f zqqF(_(A0Vj-YU_U7k}dMG(GPXe){HE5oh&PENsQ-J0JZI(-AC|y(|*gvv>pGm#gl< zu)iMYDme5A*h6jO0S*qyYP99=D`%w!xPnpVt({5xKi^3QZcMt}c+>y>x!>=bzQGvU z)#6tjzox5G&X0e6VSLEgU33pI_x{+X%mMU>W&AQl3^RUochD64N8~nUV=W{O{xA?6 zkNyHwh5mvxWeF$X)_d3NU%!-W2)$S#ISaVk0q*+hBhm&d%&U7fPkA?UD9__A(ribu z!Cq+>;m&+IAb60Ja>&Pnv8Cv>lzRC5QkNU-11`!iE3LzAcc&37kto_raZ|P@k zOs}}1opHFaXLONmAY?BTT3s2B8;r=)+#EU7g*%C_rzhJ!OSb*}u(|wq0|_)z3&ain zW*qWNrWKDDQ^6~VMI0t(_uWmx*>*150HXJBBia!>7(uMMn!X!b)?=8E(b%^hSo&&V zJzJJ#YJj2k*(`#f<&Gwd(OoDN$h`o$l;OdsEgjgn=usB1{pHcPd6^h!wy^zTFa@`> z=B=NA=5pZE*5CJr)A8x+1-49=_E8{f)b5} zxW=rQdkA?gTpH`{KY1m2v&B4#zT&q?Ri3nyjrNRLhTpR!65i zqZR9)4ivIcAv`jx-X94$*X<}p$DwU`(-@;l`3$wguOB^z?P1qOy-nU{#=!@$#hIBv z;&k!NQ4WW3qnVfzNxE@1J2#ncm>uA5^;y7shUP+XI`5XW|4TCgCAeky0fYj zwQ>vFR^(u0n9JgFM*eFz&d;k?h2O4d6P~fRqi6n&o+Mbj4_IV7t^HRj(`3b z#_iLm z!0XZI&n3@b5;T520;~9u0Rx!e7Tzd_W!2LhM(5rg!VhJ&Y+&TWyuPkGakE}c;m)O~ zy27f|G=sZ<>AaBn^K!Akb;$XyfC72a?{y(?`vX(L5~Kgk;XDLiRbi`8@RCi$Apk`# zL5)Te@PGIbhOW{${K)s~`Xi<<9o{g3_rsx>Jimy>wOQ~(&z->oziqpn{zW!!XE{3L zOSB7yTa;Dj2j1g&j!UOrN($;C;06EkGYnYg1AD$)7fWmVr3oS!_kvm-0NAD;Gt{=R zuQC71ItRndG+-+z6wIr|?pHa>g6Vv#L!0qj;p!iz9UTk6oIrzQ+5Ac*aKxm@9sWRT z*^{vG*I0Ws@0HP(QIG+bdaH!By4 zd4tC<7uwo334+1lu7Vw~Vm$tQye|S1o49JU5s{U_)GM%6@3wkdRl9pnONIw?nsmry z3)Vs>z`c=s<-N5Q=|y;+DB}=3{!dS7qrX-|5$45!fF$(9uE8G&p+3Q>JcR|yV+B&s zTmh35C;0pkE%FHgHKgQQ+z|$|^W)+y((B#(%fY$H^O_iIVt+2-xJJ)jA~NKVEsx~~ zT(Y zf;d#K+%3Er0`ec7QDW6c{|UqW;Rdd_WB33f7AoCTbeNO|D&c2xm1uh@iJux*!|0CT zu+plBLpnT1Yb5Z^Ly5p^88}-63b`jgW|CCu0wb+-;Fqn}N%aj_l`mDNk6-hNtdRXt zOx)scRz5C-Fbx-MTC17oG3qQaFF(Qoe6MjoFu^#YZ5un-wPF7LwE-k2c`GyxI{Uni zRCt{-=LNZ(-m~%|BEyKor(%YX=F#7Jle)}5&)!Cee>6-3DIQl!0M~!nRZ<<=X;ES( z4C(y?49%}_auUbOqZd}D*hPKQaktQGjW6!s7s=+zcKw>+N(K9AB$M*1c|6ctX+i2U zGz|}Q>q`RE8@pEJQ&jtEzlR8|5o!sW9KMc$Hci z!rF+o)h#vD3BMCF1w?@m4ZQa-j)T8nbScHITQ&oqXlcJ$Bcm*N!n) zD!}|>ip;B#S0un3u&gCYKd-9KTwYyPpyx3(R(&a1oZdfe`+ZYEspTme zQjEgFDH|oiGr3>Z#^y&%ghq2@PP8FSjF1OXlZ&tDuKi~Sm`f`fI^@GchZ2;rt-W{J z#Fmbh0ksy}d<%~N>EC!TrjX=NB@@ozKO>QO|^Zf8cm}E}BtUd#pqT4KI}UqylW^EhejOIWlgG zFD*E5qxJyE0I7Te!Dq9XJ?(Gpor7M3<)W&TgRRT@i;#mn zCJ5sV{#7z<+v$jLmNOd~52&O7k~nr6RrHRXZb(pb;lY}K(hlUhAT zDy`wWD%;9D@hUPtG-!D`q!{g&2W`GzJ6}whRirFE5Pt%Kn6k;>VJ`z^(+oBiSVO^D zB~U67KS7k>Z$x_dydXk16<*&7O7UO3u911KkLpHiW{s@|KiPGs

wkHRc3hym&{xKYR29-@RE-=KytJ3{L)<=EBkJRH#e!u09BwH=p zi&dn-#MrpMWrBSjBxfN(vONU_?eJ2mo-iN;hsYFq6t_fQR+@}u+lp1h_n+=4z5r0y z;tFSh^o9Z{a7A13H*JTj7A`jk1g1DL1Lr@}m=&?3_oB_((V$}a&RRb1Z1UR0bw-YF zq*57Z6z+pFJ(=+QDP<&Z;um_UW`rPdsEB`e`(^n}4`i??w>%uX9 z6$o-2C~kYC_K(TYcfsH9n;%RRn7A=OT&CxF*rknZ{Y)XVhaose8)?>YRa!R~ZN@IQ z@#K@y3h^wyck2R8Ygb3;FYCpC5!ywi^pR`MhQ}CeEZuW>+miH8y{pySDr>Ixp5DW)0-c`lqDRm4Fy@FcG(Hq+LaDuX+e<(z2Lmu~n)8c3Z-pTQs% z?!GNL8~yM#eC!I?1~z#!4EJ+U%2X>%8c6M;`IeL}Z*eH^uBW}GwLAUo>blbR6Tx$N7WcdaFo@0u-*7m*&>r1mj3PTvnC`=OG;lyVN>NQ z1!h082p4}k!LI$%Sx*D{cp&mlOcwNW!}7PjVpoxA%kHiaDT_G3>VW24F&WZIYWSu`aS!kcyXGO9#!a8Aof$ zB)YXJ**^f6QVh0XPq(oCZ)zQWC{4v$-Z*(Te))cCKGC4E?h}`xsbIW0CU+*j`|zic zLN%01@V7UL#N9OWC)Kc6lKGWlE?`D4s8IGpM7yT|1*ROVxZ&YntP_kNzyzF#jORoT0s^JmY&-UGKJnsFy%qC)!yubYH`6dJRJkIF$%;E zZ3G&y<@K)bBimYPY_x6>MZ^ZzVf`e#IbALGXPmpz8C&h@rhuV#+%v>f`}h#Yto_-=W%&2$j8-?YY=#A@}gQhyIYdn%)xE1aK5`jnSt^P*n{*ZBz|w9&!g*@hwo+n z96o?qDnQdyi#P{WG}xV zWUta=SC&aDhjd=1XF(c?jn( zgzu8Yh8bY4B6QyuzguSxv+Bi40I45wllc;=TG>^$fzJaV`uRiBn7xBIX=N1h=AqAW zO`Y2fm>d_-$JW9|e^ByVtAo$v+CuNtP#9^`*{CE~1JImOe74VgO9Xts+as>>qyZyS znygD@Gmnp99-X#!&z-FL@dvkN4_gj{)dwAYYou@ab9vTizHaV$PXK*R%T9Nn*Yv6{ z=?eeZq+b~*-0}w1J1f{q{i{B9yP=DHCLM)HDAeE!=yzUiG#q|i>BW`WGPmKxT zb6`OUdOop%wPi8*#&ACr2a_sqrGjWPQG#>RKDlz2_o0|Y$_0DKbrRq+8F(x&l7U+stvJn$T^Vd@QqQ`7Cgx*5o&iY+Ux=^1cC;WEuhARh zHujtE<84ab42IX=jcmDDt8V$L|3q4?LHgg}4mw_wRhcOaQ13n@qiX z$Bm2|FH(Mqt$j21Bc3s)h8YeeB`Z}c^ zqU+)4IM~iK+VXJYo(OiN^jUAQh}pXD&4iLOha12R2fl?~svR{xq?45(xbZRd-Kb+B zX4UVtb)JSw7los~c>cCPuEQV4J4K+F*eDQ#H>V zNiNwSlkb&*8yxc4TaXE-Pm3quiJ#LXb#zavGJb6YSenuw~wGt6V?c; z=NJa-H&UY5kxW;?UcMqJBrL}`I`=;0b3lb>BAg=LT%v-e3axHKe!Hc;h#B(vr=y&+ zQH3@~3E#rVF9irSc#sx3I)0~ZjWXPwfE^>FjH=Da69A9$Un~@7xgyylE{1*ec2+Zj z(9p$(-Ze9Gv^IH0*(gR;;`0(hLl`?Tr`g1^_Ni?JXm_xHvf?C;Lhi(aKdc(SDyE1X z#nhl{E_popV)04Cp;J(BGD)ug)C>Qq(yAhkdqMZ9fmDyFmqH}Q4)Q;7KR#y}Xda%5 zK0VB>_CvN8gS3tnN1iV8WPB>=#Xe7sP=3pO^&506`O%I3wqiIH=7al}BD4ETv>uOD z#q^-+<_!^{4Hoa8LdbGZ;ThXmO|&Bo)pm-O z8)__yoJVjEJIo%I_4B6TCuQ^xSM+iw!fyzq;H+OLLez)Jguo9)O`*I4*IBuOn@E%3 zo+NA96krcc2z#q}IS0V8(NI6A6VjUZIS+xmQ{n z0^oPI%<=<7Cp(ZZD{U4yEdxx$o~jYfaF959s=ZsQ>AU2h+bW}|*;kh}d1t&7xG&*v zrw>!t_KZGe@#ZFcOR_BnhoaBCH`Eeu!?EBdgPn^D>sqs4{`3)`WSF8fSB&3=gRN^+ zMGj|%NVAnP+bkEB2E)%Wst=2gGnPo4{*?2YF6g-vLN^g^DdJf4oaPrr5RMTJ)4JE8 zMj?oCzbk<0N`mL754}Fxf*YM0<%5FYG`|^?0-p+BY+K=`r_~ge6MySLW`pmYoYK1W z%Qb2wC1uZB7TLYN;-226GCwWyj%X8q0b2~0z2=#3QHji&||Bp6XLlfx8l zoTqsDI4`dt3ossw47i^e58J4=k&o|*z1A$5iDSf8fpUSM!>7(VurwB!#tCvnYi1Al{k zKg4%xWpaSX;7h?v<4HmwVzxuy-ODh3!pfjhi2Oj0$sld_-Rxp#;%eYr%Wr)CM6ikS zMyJMuFbCQNZurx#NVSrsg`Umx_m55|Y+8_3S3i;JbG?%?<9ZSw8U`Oe6uP6A)4|pU z6-n7~8a#d)xs=gx)&~Xa(8r5i$CE!Z6Fqv317G(3p$De&OcgPihzh2Jz>_`Bv0SX*FLF)pK%F6}MNRIdlSkfTSW>Vj^r%gs zrq#jXqvZEHH1)ow{2v~DRj)**O)mLH-u-YFjLUAv4i(UIP(sc@8BJ!sf_TRpakz3T}NYoi+Vl`qnP1bo}~Z|2WQ zSoN!{jHG4-zkl6k)N`Q;)fNb^rrdo;0Rv8dDjKuE0E<>k2@qxQowf&0)%>eo+d zErQ18RSb_>{d8Eef>dG7KJ7ivd$qn6SBX4J>(#k=eBP@*=kl@#jgoDV%#M)zop1Xi zqZe9S{*pVWkT;uY?VzITmx!Ht6@1*8?V5>}wn53-zzS62V-ieF9k4|sZJqjlq z6|{tO9SRpUKzi;OOrxn+!S7$9VE5(^wnK3>$An9w~-Zb$Q`Ms9?nr%<^lyE3dly`afP#LGJd z{t@6Gbmp&{c?FcvCgQ(`jXdKNMYhvKa6*Y_v(NS=WBWWmw)IBGpAKNVFor#tWyeHG zNRzU(grtko*sT6Zlfq-jsGjeo+Tx!d)m!^QVB>+cIB{M_Vi@!lt7sHpVW%T}LXU^E zkUY5{&l#(b4|9vN=3f?E{vCvdn~(*}J3?2(UNw@3RuIu%Yw`2n9dI9!y)|V;Eu9tU z@fG`a{Q|Ug*j(fP`?tB>)FYsZWDzU3*k)t~VB2<%Y}DF4CX>Zds4r zdMFb7bjg=AgHtCQ*`S>`r(Y8QlP~aGr*)IA7{p#oY0xPKSd6>UV(8a?$LUIz{$Pn7 zUt7=P7i$XxIf|AA?sl6VKo<^J^k6zx!SHwLl|=e<$IOxK9;v!dqB2gDZ(Y{pqo_&1 zWdOAhASfd{YfbZlfwJsO*FgiC#(epBOs0+ky0ld47DWoPmN9OFuqzm}W2FKVT$Wu@ z6>r8X-R+0Th;#iRE_oAB$7^mouoyofjB-Ul!%hWFbxbwX?1+Fq0edD)z 
zjQQ3hocDqQmY|)%zJ#;7tMNGF;E)%p3!i9U))dBAfl5HM4cef%ZwT|(XKFh^ymr3XfZm`f(WA< z{M6BV??ecqj@~#V)b+54`2&iC_;&3>mF&9||*3+oh- z7F+Og)Av9V#8DAHPB&N@M;3;jK-Ryo=zo2r^`du2iVRtcEmx;>z?AkkN-_&6$Q3SVenR+w`sli7R6ED@Z zDngc3o1GIn>^ihG>P7!rRlIt)E)PKBg}}}aXmVgimAy~U_M(i#h;sb(oX;;2hzF|) z`-u1;|IqUnU!i{>OzYJL`#cvL76c$w9&|+QLRdxXYQcASBi(|Bh3YDn!zlLby9)cd zD>DJQo*oc$1hXcWW^4H@e6ctE+HA@>eO8Jrb488{T4ZVMxU5kh0S`WmZHj~d^! zbd*{XQjLSE;FZm1ZsHabC&r1W@`oz_4ZUp;g#*-_ZB>aw6B8%lJRs@tg_WE$8)hHSITayTeF~|7dbiVRB_1rCO)cTjK z+_k0O`WbRfrA02qWE)ubA#r%abD~zr(%0{_<2$Z{8DyFOYbjyMW}1Mc@qBJ~3NX3W z-HEur+ZVQDkd0vJBO*8-1L}Ro2^nRovS{`V3ejWjPEUfUW}Fn?vbQPJZdnL&xp{M> ziZW*jWztX17Ib2f8zD1|ruUV;fB^0Bq0Wc^eL#$A+zUCQ_auha!MQ^VQTc2mlynl{ zI1FmD;PPR%VcE3|8+OQg`pzvQW1gi`^Xsk5e(6D-x+RHf^X0r`?r6+$rz+vtHE|cK z^FjC6j!J6jn4?aKt^?wrpBvEbuUP$Onz-EaC)t!AcWQ7y3H%-Rqh!0c_onye^Idpg zKq5KSr>E`8>SRZEmKgadB?*VnOVpP*iqyD%=cY!)&H48B47Au>Jr*VbvoN!8tOyzk zzptv8N@OyRi(H75=1jYsYmEQO^|p=py&lxy$at;D?GV!8-}~&Hrj^s7qGfD3_Neu5 z8>w?(;*)nU7TER?1lwWfhqICM3_s#P$Z_iQ50)Z$+yZ97GaeUyg(3(06zlk85CmZ! zcf!c+Qu;P6rK%Uc#>1SHnozdh<9^~D%CFZ?oetubsu>4Sr-~LFniB80TA=b9p~*|X zk5-h39zDps{7DNl15kXjrKC8u=D-#SU7*?zDEGsoLW$=ZckbYq{o|W!_j#6GlhVdF zt$rD6(7~yl4&$3SqmIU$etEDj)lMY)1lVwFmO(9a@mk#~fpg~^g4aq??PdZONXaRKlJv?7>KP zEe0?Kth#!J^g*y+#c~ycBI~_upU8ke7ecX???u5OcfmAaZ1AlBQ6`KrZ5U}Wuv{* z(Vh*ZLJFr9&+g~@gdN{?AC0Yyb%9;; z*LQ1^f$fSB%fg}ZNVw!n0lQg_uq|^yC|zaFACR2>(yu*QmN!Qh2xHYxvT$QrPafOt z2%kQixMr-i0GQa!z*EFcNZP! z@Y5n8Q35;a%knPIG>t>TG~D#}*a*=*#HfAxWU$D* z&q^;VUJa9~>FH4%HX_d)5KmZv`vUqycF(~9q~HR%;|!3n?YoUFvA`a?oj<7e6*H2< zdz|K0DM^SBjsjKc=Rc`@N%-vB)x*hlSappy zpHyxEhRreh4MZ`6Furix&i3(Y2%6?}?wtywEL(Q(2oZfmC<8Q8Ja7u8(nk!+$I*o=i`@b4<@H zBV3BE3a3TPvsruSe@>faAn$;(z9z$G z$AvHG|0O>=+`nc2^Th8t*U|%J!NSP*6-2XVpTm%BvNC9SJMXtP(4D=#WK6*dD(~6( ziEd|0@_v_p<{zTm7nFO5LELMLBxIBUAF8j=CVL&x^2-@Gos@um1d=G@sw(qimd70^ zG!2d|f;=-{i%!w%*{r8Yrg;{h8%!6SeaSCgFtY7Ou@g)Pf|j0F{;gzrMV`C@r5{{56DFK$jZak5&; zp>^HH2bJRgS5Hak2EpND??q&aWGxHsacS=Pm1qU(WvqW1@Ynn?qqfzK>Kf)!>~-u% z^%)NfqMs?De~H_v$j8NX3zNGqjd3WOeUohC4w!v2fGLcZ)()Nt_bVPniZz^NiPtP- zca(~SfJu|*%sP1qL3zP+{U`!r*6t!Q@C*rtg*ugOxIbRrAGsKo1d7soPR}((ulcG#;Xz z)Z+AfQ`_6hRBFYT)p;hbPX=TyGzkB7D4-e#cdeTKT$jWwRIZ{>ZXOj1|BMYkkDf{I z_g8=y@1t$c?*b=RXs5PTaSH2!{@6)t^f^~r(Q0Gu#z7Ucnm9!2@u`+f?zvQ|T+f8u z2y0x{R3C;`Pn>C#wSE3DT5VO^;*lxgk|G;-u?8p4Ths14X^%BmMm!|@@VMU%Zr+lo zO1JTK4^jANI0X_;{aA=U>;`W5QY)xK8vSCBUOrtdFkO#Omk*bw^L4;CPx^PU1A3ue z&EpldJ49Ut2P-%!uksw&U;qU8&O_#=dpWx20j3T z$Hp4{rttU$QFb-#jIG_E1AF>;ffyz2N~3VFY7ev6z?1LEr9zc{9+fw0^ZS>;d)bk2 zjAbQ&(Q_d6TRCkF$u>i2;Bv5HyuB0O z?Adh~5+;cJJeZo18;E{=L~>R7N~xnICidd_s5M~=-dUfi=x(Z|!PJQAEDbTNAI7Jy z`j)UnWgNjfgyMV%dPkR)KGvsrcy+J}H#D4bb7&{hQD8M*9V(R+FeH8WmJ!S^6CLxZ|Vn1)HY{O6&lyc=B_4#UyDZ z6|-}bt#q_&P|zZ82N$qTYa9!alG3BqmU6MrT|0po{}l>?x-IY#_uOYGMI%5JgM5bU z%zts)GfIFW*QkIZQ+bPKzgO<;Q8YUg*3Al{O&v=dGR(*Tdqmhp;SZEm$oz=a+f3z7 zP>AN!baH-Y*BQZVCZFc2k-s~GyBrFiaH&+n$IMB9OoHLKs}B;>Y}^|-0F$aCnq<$- z9C#!Qou0O0)u0b(h!|2{NV?R}xZB8oh}JEhoH}|j|CjE=ReAF3UT=EJUwa6`w(`2K zZ^EgB!?ou-UmR^dE5jcp1x&)3V@2^+Xf>N_HOgLcJ-PXqhYwO8c6l1Hv_>`Yan>3W z2sK4v?5hkb8lM%R8yRcLPQBilGEdi+Ll-xVlIOu(o@y$&NdROy1=aRbja^F# z;D35dc!TQv>>^qG_b1a1ePIS#{a4(15&gFpv}Y8o&D1&u7ZatEvZ!kEJ)1e%t#~s% z%CzBF2#rg!xHGJi`lXJh0Un6djdC$}X-(MJsFrg7=vmEeP|=XG`sUiH4>xWLZ`D)w zip-cQ@ia$llzMZm?~dO`-a_qr<<`!@%#1yUzym~+dBPh^H$+?kG9+3RN!D|=;W($QiZsUFE1wS%=mIC zlc0nS;d(tcp;2uOJHmCG}YmRRr=2p_~Bgt=6y#{o;ppd@@WD~5l&TdV9s?0-S_5W;wq z%{;nMzwQ{ylzCt&1ND==uRvlvZ0)p#yJc@py4_*BI(ikdie#Bp+bo#HCmoo34|fn0 zo%ScK-n`7)o2X=;Rs4QYlaJ1s=YD;<|Nd+2vo~3YvoP1^M3gyhWx;cHR{=WEIP-xU 
zEp+CcgByvJmuJNAVaM*GW>r5C_S(8s8q_MoMZ^C*6g}}MiXw2?~3vq8pk=%F+n!|5e8k>0hHA*J~1KU_L z-?pSN#1jMImrwwp_lV*AGfUU&#A}u^1*1ZW*D`tUnpiUVpL@t=j%t`WCvWMkgB>Z#9x@7%$ZwDkiGP4R;1kzS-{Mj(=`xx;v0N z-PnMag@+eodHYX)5cY1+%lG63<3UmlgVbd#*woi|s38oX0S}nJai+Y1_{`_AqF)@eDI?yVaa2xOM8E z8Twq`6<1=!ASp&v?|}wv^s{qb3v&3iU#SSOxiqK&e4{??PWY-R4n$cDIvkN)+=~{5 zuQ<_m+5E9gLC|_>_S#uefs#>g6hLWUmV3dO0$Szh#e39?hmd$Q2K$EY8vUJ-&p;+t z=n7ILVl7lrO^ zIi7yNS1X7xj-+wH zC6Y|YE2Cf;nBBr%#H7131-rh+;j0q3#i>O4x8|IZnV~CIRo7N+c;E0np3ZhK8#!1U z%7Rw$ZJ5s&6=d!hwr-i&Jat1wyNFLQi>pw0OLmtU1eZ@#yaIaf+u%)aoKtSUxSa*6 z@7X%9;H=sAUq99yPBa z3vh#C`qt&CQ~Ru`>HH?1Q$iiTj*>(Ma&`8eT1vRK!fNsj`4n0AdxsvuH?I05UQ$P2 zM7ip~!!QFwtO2k+Oq1&kpS3;t-ujE^&sv;ncE0aQSHbUi0i_>M{d6clgftN7Hz`x} zgU60zYh%$?oWYbG@CMI%(;hnHAzZxSTkcKkjRkuVGHC_Go<$O+u^$ti~N6}&qn1l*;}fwm84N8_jh zq9$ClQQK6&^&HU3{L~|k1rk5j5ecllR%wg9H0E*sCE8^>? zYbj;t^&z;@e%(Ypx~TdBd!ahi^hh;)!#0Bo%+zjh+WzX5t=Ik=+)bC!{VfG*G;w;^ z{64$RT>5-mb41$Azb9ARy@Nb!!cC51-;W^_uJ(Eq0FcHxu_PQfL{u$}{to;01wP|E z(czf(q>AZl1hjm}&R=0}p@RtLx3W1%yl!!!lqaUpw^zfCnr%?^q4o6<4^;CM2H&hU>D#}K30Dh>FM5godIRK@g~Qz1K2HmO>W?OG zS}v@dZj{H5VSgm9>kH2eI6l#O8>Nz7k5fVsT6T%Dj&SCYx!<|Y1}YuWAZ{g++L4XL zviMEmiT{wssLw;UE&XmEQPPHFS-k}w_knaXxCw^_^L$cFS7RG2-n8a?nkUfOd-ti&V@dYBfNebz{bRtpX6B3kJRKa1q<$Gw0~ zY-yL%uQENTaVUyC+2OIT`KVq%n@`3=1bHZjLJYbg=z~ox(+3$e;DzrjT%xtnAO7*< zoA!)cN64l<=vOzyZy}GqL`U0?OpZt>rusG&_eAo}2&`vhh?bG<5l{O7(V<+@UB;RWenYHAob4#9cx&Sd zI+__Pgvl?yIutH7+6noiq6_7;hXY&4+=?&Z#Vph8!9||$Q z4*UTbx*~m0Cm9n`cfPHtZ2h9(Gk#wsDEAXqEt;;95=&pr)>xlPt#Z+(xkfFj;EXe* z@koxn{one_2Ati8mva*qC#adxGP!qODLEFxp|~13PZ#%%6n-Ja?gd(XV`8gDg*?$5 zXx=eiNkb>?N%cSi6t#0>gT2Z+ngui*K^4GG3K96i?fomfSzQ%SH^SpB=7>Y>*Z;G3 zrss-|D8pjPZ6ZUG{J#7%PZx!Vy+IXDH^zdhq~}W~FH%X_N7>rx&);cspd~v-OVlgD zSc1Go=;$&h=rDO7Th}}Fl3~8ABX!AY_C%`fP8z+h(6@}(4C<3tf8rRSj?Mz7 z5q;FGqiR)7LC}W{*%4RMIdVX$l6T%qqv_M~M%EOV(ieq?ep|PMkEx5TTYhIpzo}Y~ zr9cm==@-8@_3b(GR{@ZAXKZ{2GNyLSPPBb|{U%We zY*A_EaU5&m{(DBiPBvvYb$&Z)9vdfVjhJeLv2o=^c+F5K^(H$#ctG}CSwU7?9tgpB zz3txORq@~b7L@Gf7n$@e%s;I3^wS4UISl#7n6^h1h|q0oRO{srJ_`+5#*(*cUFAP? z@YI3V;)*d;(?QMiS@h(x4m0GJFnU{eC7-Z}p98A{L)tPP$?Omrw4!|LOl*5dIQkvT6*+zT( z2`0No(=a6FG3J+#8C(ukzxHryM4kkq)zvAnP&$=z1 zb(P5_XYVdyK14-VA{IYFwC7?!!u1{Xj&9+c=pVb$yEj0v96-}Y930u)RRAoOOtM8l zTN;`EAg1S668Rl0q;hUF(WJNU7k9dC|U)|G)*hEq-Ul#3QeOrAqJIMz*lY+JKi_W76&k`u`dj`)_J0~!RxtZ5GvR#9l7iZ2;hfZ$!18K&rQpAP4Kd!;Wa zXT-mX-|Bj>FDJOV@r-F+xpj*8o;tkwx)U7zGa)327v{`+$<~d8rkD^42z=o5MvfQ6 zqC-h3Uiu>nOyul&d}QsBA5g-&E&ZB9Y(CWsWD2r_mDGb;dcJuIk#+QZfJJ?}iG3pu z#oNK09wi)ug1va)h%ETu_4Y z-J3OAX9tCi;e;)5XxB5b<2FjSMhHlQSd50ua#ANC^*6Wu0bHNn)ScGhQIu!0^7hT&K9jP@PKi z*yi?qs-kx*{B-9gPo-Tiu=vSz>msWu#E*HdFfkH!Ei@`CWBOzh>c5D8m<%z@K;E?1 z!=#Y+Ss?{__E%PUx`cW_xBGVku7yKO@O!G>P?WtZf@jTAV28^S5QWPr?DTp9&mlBG z2Gj_W_sChL9~dDeh{=0UKH$0ijKrf#e#K5j!wBI9k1H$Roe5Tb{IdkHR9zh!i*%_P z4u$!9t+=-@Q&UM-lbkKhKx*H}4u;EoEm2 z^pYE@x1(&*#lPrq4Lv8FZ;EiI4ZKS;Lsn%_8ZEmiy)u_nXIt zloil+ytolRh&co(WYI-+`!1~g4Mg8_R)Y&vl7_++@>GjvH4C^ug=%;DKhl2LphvQc zw|!dgou*8n3Jed$rJjN*j@K%;%ztC=`$09krx|w|Z?_BA&aslpI(m=hE??-cKOsHg z-w|0xfIf1t)(_^caYoJBE!(O%NxC;smHGoP5n7w7?dkaBaPHSlo>8_^2ZpX89%7km zA2uW{>^lpe_O4ZXoHJOixmx_Q{N07p;NX10s`gpkl-<->#cak)CDDg0#>GS-OwjlE zVNT}n1A|b>k#E&u8;6ga?UhP2+H5LxB`%ip$kFZJipng4PtN&ehq+n8!Q*K&Zig7I zuBlRW6RXJQX)vB)ipL&u4_IDZf@nAkPc8ovoS+U@a#+wU9JBXFrFhOW%pN6R4o=X; z+k@9Y%prA%x(St-LM$}Jjwn!f4VhJ2P;`rB?Ke;~ycjVCpD$-HmnfRDY;||Uci7OO zT?L$^>vqXcmRG$k((8=!?pZ@Zp*%y%U8JK0^DCl#-0(u{ib_e8y99E!_XkUGm|a{i z)^E?a-=0C4nkG22>hWl27mUutHkbCN|5x&^7bF}{Ub4Ix;5JXN=3JQ0{%3QYyT4uP z;imI}bMvL{D}%Gr#%t~K(Ay%63gBQp>B_90Y$j(p?? 
z)K^4789>t=h@ewY$5UU{EycfnstL*a;J@lec@*+UOX}!w5$X%xPE$n|&B4l5OPkJE zhfI@)il0W&?43aC=8p1K>NKWPLmbG}bMod9yLm0f(M_lK34XCw>4qEhZCJ^gjedRR z$Yl1s_#mO7wN4}Gcn{G?>JbNQ&W>B8F$|`C%WA$rlJteUG^V#Cy@vB?G^j?oikcoe z!mUzXLUl*tFy0fvm=U4GrZdAnta2I373#Gqf=(b`je{23Nm+gH`|CNe$!j_j?R)Av z_t^FW9tq`=qg0JEaU-bH+&ED5=Cc8Vj@;gtNvo30W#p~KL<`B2mapY# z0y*Zm2pZkPs6d?DRjTI6YNmdvnzke()AGRQ+}~2xsLzgfT(^xrcA@%|xZ(E7poCIE zk!q7-+K4%gebi~nOC%r=2W<_92^x4XrEnF(`mhib9KoFnRfU9aqF&NNXfBF)C5>KO z4r9!JBBEYzixuw0A)_+zyi^6cba0+gRajQ>eF^9IE-m>PMJf&J2bd)Vw~dIb*A=9^ zVq6ynY*^jHFj`|0QW&D%BCi{SRex{4ql1m0zLF;9sxnsdm)atB?0`OdI!l%Gb8FIx zGLX2Vyxi|ov8ToeOd4ZUW`p@#Uixw?>!Co`SUI3*_AHL1oVG%T4|W0G9Mc3eVnxuP z6E?p>Cl0-Pw-?adzVMFYEGtL!(kL|9NqdwM2R zz5uOE_5K^xgY)|ZIdYKQ$MCvN8h>{)#UI|!QVPLp%>taf&9M@5t6YV#npc&`XjA?K z;#9IGCz{>?bgW&qwq$L*V3Eergct`tg^Omc4TxmUCC;t|6<9bA8sZoc?z7XIsC(@m zPhPZ-zAjrSF@)@OHvJhstGdaJpUt`3#=_Op>E@_zGPBc3U)a zw?T{9G?_fGqP4;EocM(pZT1^wyE?>|Qpk9px#YsNWupYz=R8rr!&xu-NuGrytSPL2KW-!Fb@LYi(dQW0 zK4sXFA$#YZklnH&tTxmf$&kpW=yW!tQmS`<6AnJg1;V6IUB%h>GuXVe$wy^#TecmD zbrFa#g#-b$eZ5XD)WN$8)WSsb1U)eh$s~KOo9T=hY7D)-uH3j;6r?%1XANrNNq!`nQduun8dlYESW*`NVeUpNb34{}imxwKmM8k`ouD?>mtrNbQ zl`I$``H|r37;Qm4D0KK+yfiY6N%~ZMX1l2^5iU66B}gIq>mXo(WqxSIzSya4Ct@Lt zmgsJP_Y06;Cosu{HZ}PK#FA22uTu|Lbu(-u9C4@nHl`gpKusojiH-RW7f5=4 zr9LT%9=w#E`W~r~c+l>~_deJW)G6L#l@9?)!?hQ)r4E(*gp@UJU)#cg{^^iNVo-JOkT~RDQQ@3y~q($1HX4W z>W;QZct=3!WY3P9^en{6WLEE(9{JV5Cad?Gt(tH`0M};~)*4@3(NZhYdm+qK?l`Yj z71I3lj*>-q?9A)M^U`0=4~z_vQ|?!@yP1fa2@!@6{yN;aO^;tR_Qeq+5`;!AxDS>t z@RVfuUn%9|-1`~cv#L z*IOYfc2%RnQZoiD6Z1=MmN7)2w)CgO;(kw|kDg~O7nhKO$rkYMYK)KL-Q2r(J*D-{ z1tq0%so-cLnAMDPsk**)HEFYQhkIWi&%xA|FbB^>#rvMPHtXZkF+-Q5$=R8iil(PE zvULebNekP<$~AgySUh#SmS=Y^-d5KBebrVTH5cdoHs^Qdquwi_h2uQOmTvQ60yUPJ zRdu>8r(wKo0^43}T}>->b4Rm@j-}I8*%xhBPSGb8l2sc~`+P0c^{2Bjj-~S!p)C|1 z&Zp-RJ9me}-6jhI?z?g^bLAIpc2kLJL`C$N%`?9j!jH>m?i{%1K4y-qa-QAk4 ziG8!9l-vXFpu;$xXXdVKn8mNTNm*>^U}cay~D3c%XqLtJuh z%zzDbr%!9W9bNHecevJ_!qvWFrh(myd}A^RR-!LtR?+rw&tzWT@UV6sYRI2w*nV&9 z{fa+heuJr2VWbr8HH#ZxJ8y69=0wzyykkTJj{qJSo7eX}GrPBMUSc)%mtIxQ&*W^; z)<%f54p69LrODY=uja(`EJiJ<+A2oHp;?E+9 zk;Y`WxuPK+At9lf`j1X!iUgEK+3+A7UpPbv96T5b_a7e_nBW=7-QCx1MlH!FI_j$O zwsyxIGw@B{_BZ^rSB_$b6dPG<;afEZ{I5?xj){Y(p@?;QOV3K5Lqwy`hzuVCV(skk zt3arxbu0duO@t_YQH%Wl%=l2KgawrqemsM>ohza#NjbAdc6{Q11I48`g~10P%&zJzYm-G2QBv zt5#-8hF&OLQ7~^z6Gi9^SIyX3-?+rcqVR+%X{BAn!GIhOSM^ZMt<-ZHVzve&KudIV z?5l%}1~*i~n~r$1)tyIrl1}Htf7gi#OsAy+dTQ$alU-w=VXZ-~Vwd}^Ph9M4;V?kO z*0z2!FgJwW#OeY{z1KW*cxNieS{E^7WK?r>8WaCIOwvEQTuQTq;<{~JU28g6fgdBs z5E9k6rWJ^&h?b40EekTkhS3}(8=qyBL|DqzcX0jRGWhpsZq$%9-#Cn64A={6C5&0u zm4dS+r+xuHKq|6h6iR$86^^?Z5lh4$S=IaK*T1M^KyuB9O-<95)TbY;C0zp?{`qaB$UrNQbY^*6 z%x{A)sNmzUwLT0*u$uW9s6(rHR*A4*|EDSW`!znk87(Q`sL+>|x&HBZfQ3U802$$o zB#}9+gP9B#f9QZ;O?L<6HXCs-*$y z47sj&I;AMgk-EtO>I_&vMkg8~6;9p90CfhhAEWX;Ui1sYe-OJr$+Iy0q?B$^UT-ME ztXjWxcG#F1H0ukM2Vce(+gvOdJ-Ewfru3Lz_TGX6C)ykqSe?PEvA)kyJR2Tf-AtS$ zhB^Q0FQ0I^#y4%~;4wOoFcP_id5Z=MR6~E}3!5&1U}W3fz|c z`!?|O&EHQ4^FI9qCj$c+8HY;0%y{4Lq`SA#5G~|6zwh7=B?s1-Kefl}^5P@^G#A-f z#l>_9h2wM;%8CJ8K^J(Tcxumd0QkLm@TJk*uCth$N`G;ghA~1A0Ny* z6WyFGNtm0rXh_ZF#El2xK^YYnd*!4OQHkfG=DeqEwS-fW=KA|W8wf~J-a{fxKx&a% zNEVWX{!W!?a}`-l3;YLa?=fP$nIHHQEf4oPgRQ+}Ln5u2H`MO;J{3vz>wOSqCoFmT z8O}cA=vGXEvp%z~s3#EO$nRVCH3+KKj5EwHS7no_#9(cW4VZ1!> zUu}nmytfB;9E1hVy*5RCynk-x+B6%xUY4QXTqDcx@9ha>=H|N8=F)r9C8nkE_}E9k zpaQ|CK@}7fbYS%^PR8Ee`+ZYW2^-(O{ic{|x>-B0UbrV&@Y>Etp-=Hp{l&3QUsHh+ zUcOWRW&Xsv=u@$0(wD)ZRX?&wJa^F-WD0PaZ`Getk!=4ASdpjo)iLGWh``0nTKOSQ z?VD`M+riX@dh`2J_<_d)MN!@2#|xbb!*+xC(+li;VZ+e(&9isQ8(ZO~Uxl%a#aci7 znw`Ck*$JQI;oxX;FgM@*F`?^W-9d4`r0P;@n)mR-yEzm3vho$U36Mc23`8xFRzwF` 
z!RPh4Gu3s{Teyn z+sh(4m@F<-Ee1z~J7#e)TN@QZ$h0oVo=HV)?MMQ@D%MVVvF7h(k@x)PHTCtY<47A_ z_n)XHcjJSDon!m_e(KDbs}l5hUz3vD>=v5S=5KSJ-)*!^eSd#L9{=?0b)K+@8*9* z!;ms5eQRrL%YoGRS7!(Ja|sW9?^0e}o>ZL`e37Hn?-0ihF%p%>XBL*03qh>cQG<{u zdBz_W@B*LNE{TG_9Gd9|`Tk}9G)Qf~PEy#7$ffDHbkR~=UM?GWAuj-VN9xXbDhK3dNj*U42)2vDAQkenxOnBQUCUMDG zCfW)}9WNqv4Bqwp;NmZ%eV$sUo#KP(%B^>LwuJ=APMfGKIGZ4lR;I_*@tE*Uh44+1 zN#phQN=?)0FrE@x=j9y^y82TB3+H}ZP(JPp%9O;Qxz$KxwA1r*%Zm}gzth30E^D_P zm(%tN1RYOg;j?^{hrLM2lAAg1_E3Gw2oewnhw^~uj<(NLkKX0a1K|}at~Kv(`88V} zYAn~ z;6r>)C0%Z}r>k=B30*>A1*-ED)&yy-r>DRC(r#uCycWl`A0&E6w0->ho|WbJ+fNV< z!87OLKdebp28^f6{$OtG#D(usCsVyD$P~(seE3Dbe-$rD;WbRL%Frx^1hS%iAtZD) zY9f3?VLkbMg+mRukiFkae3piDRV*6Lt?>rz7+eJ9c3dcw3rT_u(%S!^_y0>~pqRia z?b3c_+$j>G!85R<5PS{p(VfpR{lo5E_^a!=IjrB=H)iREB{$TH zX!t8t;$T8*iaV7{Os6S1Mo97PqRqqk2Ag(;@rv35{4UO>GYA2}k7VKDe&MEa<;ALZ zVt>Igl~H0Wr3vdO@@yyS8Ur%O>B%MtIKI4-lvX~(dMMjv!k0hm3Pr4cBY&x~t1Yvj zYUlGSl;?hGYG>T)`n`+1UW@{ohdQ1{wdHCuwvX!!Jt^W&c*K*wYwFwjnnJ$F%TrXy zrxBAxLW(vt*Qx0$lTyKx-opFIVmpSVk1#kyK@c@{A-Bz3{fgiIaQaPx5|mxZsEf(a zgf&Jw2G1nL>C8u59-k>q`VF9JVrn$q$kRBC%~GzafTU6p-U`ab;8s$2icL}~%A|i@ z_4neLa)}AMt{!eJT#|k6b&ytjHad0`{MSUxYNSV>DXuyvN?hXKyiE5NV3t%XX{0X` zNp}Oe|9H_(4T0fuj|aWFU23^@stF#bU#5-6e+?l6?Tn{f9n7uI6sFiT8@6^wgBX>J zEVq<|^s`(H`GyvRT|Ykg49pOPim#)l5`$Su8MS~?A_7H3161iv2?MpM@a5Ms(ez{} zVK=JR`^n}&QR}I6pO?J6_U?Q{B|rj5Rc5ym@Nm>g&GUu75Z{bjyA>b7KC1Bk)78}` z_ru1y^n8YtA%76y-}}1?wG7QcB9d#x-bd437URj)$D3I^*7@ytv(R*HR6w;T?qS%T z$LjB{w-0Z01_N2lJr0Uw!XLJufA0{Y{OtNesHX>j}FHUAWx2s6(`<_5}=jU&--@v$Bfn z1%P>}LH7m9|vgWG0m(<8-IDsIm@Mefx9eeNMgD?^L6{M$?F2y2nUt#Xz znEFBqyHH8e^X@E2bu@QypQ5aDI_i5?b;1{FCprWK3jU!% zfc^h#1!Z5`Cn1VFu#eqHX$b5;b75p4Oq@*-KHLF(+az?o2{<-#7<2;n8M&_8I@iQ) zV%*(0L_C&)ByZA*Map&uH67nnXr)L*k$UdSZug8RwXb@S#+_P27cK}VPkynNqqK3d zu1rS5sO#0oJA1XcXwUhxmGIoFMeWP z;^?p4X0=`^r7r&hHh7&LjanG& zniS}3Bc$?-J)5Vvn;$?(vFP4$|C$)?apLg=KHG84gFz*RXJBZ$1gNczc{Ip`11Sn0 zB)%D*7m3`jB@HAv%v;6>HHdz9F*Vm`;a^yUS#I)hs1W(%p*A)P_JlVY`Rie7s59Rk z@TzefqH+1D==p_sf_=16@!tSmeEOOj*YzzJ{|(#a)W<)T4-c_kN?z*aps=9luWmZq zBL>9!FsPp1L!NBq9{nP=lFGX;FsM1=D5d|FC#YG!47uU>Jl(ljfmq^E)Hz_}dX)NP z8|RgJ^SfH~q<5~=^!$_^pWdV3F!(>ZfYu}le1TvnCGdJl|GZumuw#7k=FSe03=&By zG+h=9WX=AvM1=}Jx8Dmmqhdx)$vj4@(luV*vQm?!HrCc^-SCfdQN!HS(FJn9X;h%q zn#PuPfq|HktC@t?&sX6uVO}(0cLtA2KWcog@1M;f0PY48x`GRK#D)2>FE2tCM41Wm z?%SadRS+@eXS0v3)m9A?LE$!liuWIC1#WnYzDr4HLP3%Vd<$U!<9~!k=57HF7p zr(_(4C}2mct2tD}WD4n0fL+1(V^3TAdp%zVLe1GL;|Hyz-EmVF85kcb-k%e6{pK4>DX?u;F)*>K+DxY99)Wk&Pj48n z4GM@Q=nR}~b`WEEKty9E^gQhZry&D-luIvM6~n&6b!veK4xS+z5@mP`7TP$)d_T!N zm1M~dd8o!suc)=!)P7QiigqF>ShGV)2?xtT)wn_pmlaO1A&evcY~*SUWIb@8D%>)5 zd~yp9e&G_9`=?olvn3AxaENhq`fK!pYN?qHEFKmS=r= zrN*u!xo(BWr1hSC5rfD^&+n_eIkA zKyymY^|UE{O_+k1un)kMH*nQe*Q*NVl!qF1c!%B$XVN<~cbI=xdM9Y}7Jwjf|SoUu$ zW$*C_nr>0a{t>7G77{@OIG=>*W&*RZUJoga^`fp4BAOV;)<2+RGf17Vm7cdIb zJ!ylK|1YBa_tFz^0l@A5ucNx=Ko9nXL_C2lEv0TYo&-MeN=ggQ6gVIaH#Whu=^#`P zw1e=ke_d67IGZrZ>YzyDKedfBAVcFU`x168L7zNGQT$E-r&wo-1QYX2%)#MprOkp) zrOAM0rOSa%rNf5f>3Y(>Apr;PBwmV-H)9QbT5^c4R~1?j)4d%!$c@gno3SU8I(%35 z%kt;v{lARpj=nWa9{g$(C&W~*C%fB}#2fnMkH?K77iW;-I1H^cmMgz4zm$5_+uJKS zGIH`g`EpBvaMgd#X5xD;eF#m~OPtOSXq=O4oI$`XmcW}mf7s^a`y1*iqEYz>K3jdZ z=@y^)yY);$-LGM(KHhYmF%DoyE*Q(s0ZZF-EwI(=&26iI9Clr+PYwHt8r_=hHx?7; zk~k{=Rgw1VKhc9S zXRJvaQesSz%GVeAmW(umOo-yv^0&`U1}L0?Gs}}~`T*0cD<8n5)2O}V5UyZxzdK9V zocEy5RM$0l0rd?TVWW*Ox=S(A2(2c#ozOQlEMd%PEl_PT+KX-2XHrG;KO!YbDlEWH z7h2lDIj=501f;+9zUl>4-%H0q}?J0h=wxr)dU;_qKa4HC4Vu81&X)XD-Zg%J5^12bFSX;h|}s>GVc z9AWnjfq2PuWdtWsxT^xaS@8ssbm*#q(O<^mDKZ` z!r2cw>P9T?`9U!76FNg^l15hN9lPZkcUtmpOq~|o#&>^tzgcWU7J?;EB$zxjlvUN7saKhal!t(SW>?QBl2 
zKV;^aSUBh&J60f`F1_m_^>WlJNKq~Z%Z}VHoRthE5`et5RJU~9)}$J5gMG*Z5P{Gd z%3)I=S6Hf6Lg7iXkV^=q@JA~17AK_T_^+n$rEdfYa>%K!qq6|w+m@u+P)7tGv9 z=wJ`Y%T!$n+}qdJV1yqn`72+rdC#hC->=2f`1SZLDmGvd{>EsrWq8c+x7haa{1q zv#Mw{1PCF+Sh88?1AHcfXB;f0c{oyNCzJ0{GIvTyoUGh=G7BWKxVu%=Srt^kG;SHM z(SwEpAR0F$#sR!7-?ok8X+AH19BZCXz?9p z!&WjyXSYFFV}*pd`{qPkj96PZEcCx)m+=mJOs1V&rAk!|%+S1ieDa6o(BSt-mPs(f zTz9|cy8?K1DcP9~U(QnI8-}_csEGAnef5k3GGfnM;tUvB2ZV*mN*7o?bbC1l-bh{> zKn76}X|SVWKq)WMr8tOy>?BGw{1rbDHvepu1-H^hOEb~wT|{N+($xlbJ4O_~Yqu!l zQ9o~1rJl${l~1dJH#~DXagkdQa3ra)$A-*zqeeZnAluqU*a+#%Di{In8Q0letpjlD$wB)-|wO)u`#u% z{C>{;jBGD-f?)KzecIL3({sM~f@fyJDnPWW23sBC=vB z(hR&$mC=&bSmQKP%Q*-izf(nlO@llx*kXM8#PT{e5#Di>;WzI_$=;l^qQSL`T48W( zXX`8a4K049{o}aJ2}GR&3v_loULU_sa#P!8c@yF{{lVPi>kx9rINQj)LM8ndj1K2V z+d9!*rV_{2QT>(|5zrD8-ndF+*YJWvm&3@=koz9_tzNs+1ncvJK8ALG{AU+)>imO0E=G2hoVRDPqlSD@ z_i|RT!=kjU%DyP2KdGuOIdEjw^5k?iG_mbB>qm7Rv+aK`a|%ExzW(lc_~4YF6tB`| ze!S=3aqG3W8vzN9gVy3l@Gn#{_A5ywx^VUCd036B+H%;@(zAL}OLW79V({)1{fLtA zJ%}G?9+wIZBG-DRxwrsWB*7Io#~G-84w{Q* zab1n_5Pzz1 z^ZnT!K^r#@Ddivmv*4CH)EQ2yGWzP`YrbkwQf(Q z0VLvIp_oik7P8hm?<=XPCjnafDhntz3$N6aBetP}TiNYvZz>yy93>8(S89h{-Yso6-QFW? zsTJPOj{e>BHo*7iutd9-!wXBn%cDUsT%TC&`SjY+slWsIWBId{xilv3N|DarUKCTB z%b4UP5N*ibR?yLhMZ8fuEsk*bljp+jG#6phA+Hss-j4xr>~%BH*bKfYx=r(3IdV zCDwF$j{WKKUtz~{VWbV!?Im0u_izl|T`2I1|GMAo(bHALqN^*tNL1qm%S{HN9XLGt zSjBmCfMY>l96+fkE3Tz$5bumLj+ObVbv{8gzl3UnIijcrY+Kal zWIZw}zeV$Rd>KR$8&(!c-tB*=Q(>(#|33%ImMd{_Es+!(B$4${((LZW$*$!6oaJGA zwBdR?a+JpyYvmUo;xNr%0ly$)STUNc><&$FTQR0T7u8|?jzpH;{ThMw^nYI((epJk{b;QbHR@ z;hOGAl$QPD3)4mOP;Z(opUFeB&BHX1pFEr@pil+S)@hiHXFa?rKIgLNj-~QAkMYJE z+a}zHD2jpBXtO`%)5yJh4V^Y>5!I$h;O18&h+pX9;>&lkBPqprjD7Y7M;okdE~UqK}W6>jh1O{c9N%J+pM9z-;}~85OO5 zMJuF*yf#IOn=Qr#kJ$PmVQMPn2JZ1pI3Wdf!h5;!aB@=LYu>BE*1J)$DO0$v0 z4DJ}WjJ4kV+V_I<3Q!}xZeNFfB9RBQ^ zg`1yX(0SB6_#&6<%N4>{y(mn;X}JHgfXbVWVv{rJseu-A}Vy-6f^D=A5 zhUMW6@hG9aI__MqG$GR_0K;B@HH%&{JKM8q}|&cMhfNj`PJzkHK5qL3D9UAG2};oOAE zy!P>fI)QFig!hFx4Q(FouQmby5H5gJ<)8kA5!t56$qw^~V}O)pXS3yIKX`LUlF%r) z*3<@N$P!Vw1c1i-5M?~z8WIrA4qQP?(@n%bO2U{D$|@A7FA`rCgDqTZ zE2&g(PrpaR4t2gRRAKF9E_r6R1CQMxKYA}p?T4Rx;O!!~|7>s zB_C#{(}}m35)33{_b#dR_S}U!fRKt-S9&MtrULXO;EGE?KyNtMWTIW%B0A~q`(fv=vIHp{ald9(Yu7X(iujNz`; zpYA5+v*u7CgHq&M@m;+6Z|jF^dgL@x?>6GfYp9AiV?hSDceMULxdUeDFkb=ZeXfJh zbc&goX3S_dC4#A?(clcxon=d9Vg-2t}n=F4^-Y^P1Lm^5Kos5RG_ zaQ>|<5iW6+x~2Z>TGHj6(t$WuQNsZXzt8 zS!4Tyz&68GqiHtHWJ}9cf0sB#;Y_7U=Ud6M*e_qJ>9fryuG_Dzrt#aUeJdqu;}E3G z873V_DI@4XN|skQXnC&--xe=1xWyhA13=o?6LmeT#2a%Nc&j((Kd{M-S(t?aUY7v!sbSF>z>lv1F5l=7YEOXd2p|sm4b_izXc=ys@zO)Asv^#uMa~{yt-N<&a=)Ya)k`Pu-!OCwgRP z8)?F%bGB`0xnk*}gSEE3!Z6!!5g+_N1lasM-diZ_NHx+JTh0lqU+aoKe2d#84xeD@pCRZo0Q$%^8J(+xYwUL`epZYZ<{swi9&8g%pd<>TEg~++5 zPMvqR1zS2Pu&g?0t!Zhh?b6>2bO5gg`+o2h#=IO=mgQ+zF>&L_+PbYYCW@o5Hl48i zWNU`QO1PdB9PF06sNKlUr+L$iw#1I`xmn*2`U*AsU68u0QKEraLaFR?vM|XNS)yF$ zu<^6;H!U&}M%vVCm5j9u1_wT6J#wa_LhM@Ojld@(LLRit>_}BEyj&M z3oT-U_$dE{J}zUTU`WSFp?;+iz(+<1B&O|iaHD~=r#$QpZo`Y41;lc0+OBmEd-=pI z7sJF2gxCUn7tSZ>R|Y7}?q(@4a#ww-dnnIE73r9Cx{;`a zB%ul2l|Ak0`?Kw7W3j*iJ1m$Q-lMLv7fIAzWqs_!4kb_h%0Q;SxWFMG33lX%kA2&c z)vSz+Z}vM3S>TGpYcx=!;@k6m<~&+G&emZH#dMP>V@O_$;n{Zk0r|4JVrmc?oH-l$BRl?RyPAu1TX(X&>^wNxFZYNT=@03f^44x&6rkf&~ z1{1PMN{1IpF}Yml@_{t+MlfV!C)=t*+KduF+s-`Qax^22;%jK;oI zwcOep5h>YRmtzzv&4%%P6HevdS(sq)%}r%u?J&q)GzdD2bA((ESU5+;_-BZ;yWmaV zMYb&o^-~UFfy~C2WZQ*{%T8aCI715Vh0aEeRI}2je3puYeOg~D1*J_RNJ~&$bDO^2 z^B>%uQl6I1lLvKW&^Vk=&7Wq|py>EPlfWM|eN2edvUsCE{6eL7)sh%^-}eN3VSa4Q zW0fN`#3>?}7Y)@o2;5r`#Ac8QnbBU*(w-}HEGVVqq{N=Xp5fc3kaFifmGwinTF>mR zo<=tx;hc7RPAfLNE6s}M*3Y+QhPu)Q@P9&ql9_eS5u21v6;_kFF5{h|)l*3Tn?v<2 
zdvCor4ci3LnUwXW%3AXe_W+tjt7aW8{7RKQr!>y7AnG<&Pm{=EeBr!0Qi}LyM4tKJ z!{BwsQaQDjI_K8=j5u361eh}pn54v;v|#RJZp^%NU9ZZc>N;&zJ_|OM;nGe(JtQ#y zQmH&9Xx74E$kup3Ss|89^;@X$0AKEH)@zFE< zZi&NaJnmi_dVc#eWHasIBzEX*B0P?Tz%YsM2Hw1jx`iyuRoA}DHXD-!s1q^6%$*#o zJVLW`6Me?^xt^#|4*>Zx$@8%#VGp1xwG0os=VC;o4ZodwbARW)S4qE38+fDk7eeo> zaWTt5{(Nu6VaQj65jhI~W?g>aNx^|A(kJ`}>9bn|>^E_k^}~=%b2Je~oUvD6xu}PX zB=ZdbLnNTBYUt;E|02~&b5C>sb@~i=@X0|n>A}a-{rRRBXc{{@fppS^WcB$Qf_r77 zOWqfAD@PsG6xecZ4fodscH)H+bxbCq%W~$ng_&yN8l^+{LS`p-Lp;Pk=vkZiu`I=w zh9obtgfL8lRE&~J@?_dXmQou1>p5Y{Fe2e3Tz*X@!m81b{b zuE*Cq97nxrhd}b>v6W*s;i^Y^<#N5{_dVI|bg0W!8Ygr;^5-Cr`Cw2=@JNo==kF(< z*@(kTUYJcEQ-{Bih)M(UsAwnv!B$mf6UE^_Xgzsj)b%CCl~Cse>BI^#Z7z|Y78}$G zi3Db1?KIjeJ$BPP^raQ3^RPT5YNo$N1#lH=%qq>RF8V?|+oO}$OXiBcEhV)vPa9kY zep_57K4=+>ja;?A-a~2^+BJPq1H?9jpyHC2JI88%7RG~ZE`Sr~dU`s7PEg;Bvftv9 zDfWYb$POs)%(--YV4*(Uqa}K4W~|t>)FZSPERlEgh8Zly_XvuY4Xe%O5RKp8ogB@J z8r-~FI7vLaX`={>XE~x4hm}~ocs=~>7XYrN@?I)dlpQqKP}N^9x66BbMe8{9eTpWF z9t!sn)MST9%RcKWK9@WWJ!eNV;kdc5sPcKZc?3)=GC#y+FH_V0nrN;Teu-Ii9om5O zr#fYsMA(mYckowzDyY9~HMP~S@YTiJJ|jezdfZ{{k6-sN`uVI7U@`>x#It5em2K|( zbqTIS8D%T)j?~^aXo&)qftp$S^b9QbVv3Tr&)im8yN4qv4gAL+DrY|tKNtjMNgSFY zhyrFW7)q-d!P#CtL4= z-sw>G2M+O`Y-U<7`t!UIoE&K9r<2`3PW-i&LB7L&^~~?gw1=yqsPclc95CzYF8l` zLMO8hBl5UIdgImLyA@K<9%OL)dPYIHu&p4sc(J3WM)+#AGD%!eyWG0A8S~Z##i7OJ zz>-P!^Kj~mOwt)Ss1c_$;~*+R$Z4q0axj9Ck&=wJQ*t4-u8~}YJJ++fXb&_erz6VW zK`*vaON7oRRf?XS?zpp3(**dd(GEgItgwoVu(>oKU9%1YV!_9dE;-hT(VC1Oaq+rZ z{K`tQ#OB6p)ZsB$U-H@&W@B(Q4vt-te8yNIc1Z#YyLxS@HlQxrmkW0Na=XdMZ?wwx ze%MG_Q%$zxN}FpG{*<4_&((ZW4Bn9xPW(us_hC;F;OsxEi! zmh(kkln4}oWWTY(69>Av;L;)xr*R?6S7xrNmz@*I%!mk-wyBh6Agoz1+2_p~ue>yS zvckgXm|qQ}k@!Fc8G%G{kdaSZaEB^F#f<)4Vb2I!SoE!2z1q@INW5yW*|=}q>5znO zERzB&eKy+ZlnJv-t11S~QXu#GP^XaDAc6I8CzaC+bN(a1BG=0JY=!yx>(?T}s__)5 zz31wc=3I%5co+>d19SQX}-^GW)eE<_aLh(OlgB_-d9bKwemz z(ABk;et%cFBZZq5iw9lqSaM?2O%ENf&TA!fYO`U1%taa8u~PM^%F{QFZFp1qDoer# z(+|8xfDE#tOutZXF_)l7kR=NJ_O4*|15q4t^>x*>REcTmTcRWxmiFxK=GFem$>9{| zl@;`?Ph8AUvd`?>rlUYtL^)7uCj(NwR{~@0TEn6(uhe^0^#rzxtK&MPN;&-n@}Cn8TnmlhEiv0mVGe*A+J=HFNIHxiK2qH`*1 z2z#Y%=Gr{(LgpxcKIT1~ige9tWU9a-{^>>&_p#s(`VBe!rlr6kh`NMTFtaS2( z&s@`x^&nn&KU+lBLUVCgZ+L~wFs_ake|0Eu@go$^(@PXlm<{5b^=8qBwD5G zQA>m#egE!g%+B@X)c`;&2Iplp6F8lJRLVF%?;tW^`7BA9_~fl*ADdFHfoZlaSgt7r zd+8)u6b?w=O#^!K@ja&TVmobxFhQD3`{fwS!J+tAK&fio7$j*eO@T?X>Er=-_N&_m zQ%0q!6!)Qzn33Rg1dh6Rt@KAIagGfF#`ar8)2&RcxaTq!IgGlW3-|1R_=8tCB_P)$Xdlnt-Fa*%uZQ^>ZRZM;XrR!K4*`73M zm@h!p{_BzSDn!m1MYJ259Ir%53*RA&(zi9aU7Xn7X5p%Y*mp0;-lSKy&;LNB-2RWw zMyLlWjn}0G=K5p7WR@{~C`c>n5TFXJ3e;4L0oa zDu*%3apDLF)+y<_A6~HFT*uZ|3Ufv-_OI81E9KD^}~Jq zcNQBI7oZKMOOhur84gR~{re>RiO{tGApw3(G4lULXoG`A0O>~rc;nxv`kTr2=Udpv zCjgmp@C&WxfBmfppj*{nY@2tX+Uml657NUp&a$w@HyW(?YktmN+h9yg_|5OE!LemF<<^;ty5??%k zlEqM|a~S>k4_EmeV!ed?s5nx`U#~iX6%e|q<9;U2o!&2PBqTFDk*cl!2rw#~uc@hO z35<-A;!^4nfl)w<6No=qBpn$blmld8;$w1poIJ5d4zvQ>|3~Nt@foNXy1nG#;j$p) z1vtO5cwf`floP&Rt4Th##Q~=wTUOntj7RK^xSr;7zr=Hk4(K;P>;2tvu_tf)Hm3U- zUt~m`&POOUA{rggNfp|ND2M>_h5lImn8yDv5;R0nADG6|?PZJ2!v4#KAGjbKjl|a8 z-VZz&N;<@dY+8>lu?M$?6|V5p2D#hn_&cEK31#{z9w-nRggaTa)4uU8rWj-DZGJ?P z)bHWIWCo>@sJLD*z6L;CXZQFF&ND?sVA+I9?9We6G2)aIB zdTb|{?9cIVaWzj^+X9T~0zB81(2X<)wxa&3SExeS7SF)hk64&FgTq=RYPcxDk+Q;^ zmqLl5)CWj}GAKG4#TmfOe9p2zr;Z;LWdG_yP%Ssal+;$5k zu=(B+VDnHaQoul6zyiE9!!5unG>VYFPk;^;Uq#+ug@)&}0$!~mTwm{2*3Lpg`#~-7@s&N?d zbPhGC{`73X0Vjp}a3kfcr~Atf|Ka{X*sf!A^w&Ld*%27AKBRRe{Na#0UccpeCW9Y- z0~3V+?RN$we%jpa&$+6W=BR8P8= zVobNs>Yl{6G^e}%M{Jh6ycd zv<7OSsPa#-mM%iQ+pdxW&kc^W9y!ZkFrvMf{=vMu-eqCS^;u~xgIsM|-1ITCVWF=B z{z_oh$7IuIWB{`^1dRny&&EX@(+DVR?V|$XAF<^}bqr2^!9@2w8>j64Q@+)V_sP!6 
z%8hdZbr=v5F~U6dueu5Ke7rWuUB|yTz@OIW=QFYzl7o?R&5s;E@5fB+_0BI$aE~P_|?h*k(I;Fe2C5Mpi9LYgoXc)Q& z-h_*(2b;BC@;wu zZN<;)njK{;{M%gB+OelTa?`tcS|E}FkVh;M0;|fD+S!i;vcYNI`6*j0Sdu({J?F8d z;B9O9r|Vn&hRtlPsq}^o;)JouYE`*Nql74~wLAfph$s<YczX?uUvtBX%1hS8rk5PFN0t zz7-YCjiSg6ikmK5CH>D{n;ok4%4B0c`p41+G!Y=c!w=Hx!p4bpf88WcxqI5{Jx5lv z8Wes0(2!b2@?vopar3jg^nw~+Xm3z^O?iwT_c1;wzV#Z}=#1>tzv>sgygn5LggX9x z0bX7VRy@|PSkiyY1XgUPSVRz~B-;OICO0qJlgiBo%lE zlgUU{%F8{LUKh079=^HCEo(s0tmE_zpCwqwL0w+9*+uq4u13;?FSn6gjc9>>B!J`& z-q)w8UQQcX!ZZTCpb4gE2jsh@syIh;B-wk@T;AuzvwoJEDJN(s8*?|Z*x6~N#|yUI zH(nMdCI^$a6hi{G1_ow5x*gzj;GBu|*ly+Z zX@sf|k~|&sP-Zn)`P~2JKAHvXi1BUs?3edVDrExHd~L%vf7V?g0azjCNbH9vK3@C< z?ZgcUwf9-a&G`hNvP}}*{m!G{-;up(QP(rDglIw?e>^GGnVcp&Ge5 zm_K9y88B~Ce_8So(lKWPZ-WItU<=bY9=@KY*0o)56?LB8+Y459ro0QJa^KRtfB933 z?$?wdH#nT<_C=}fgJMuySz*nXJ0K|Y!`HU4D&U#(%s~>c!uMC5;Phv{rME%7j|<(o zAr%bmLl<3({4Eq@rW-l7Yd88Fwi&LZ|HJ3f{7xo}6cE*(IiJHJZWmn0F$BbWvM?*m z4hnQa;h@?N30>`R)ZGA9bD<6d^=+phHNYNcj5Lsh*I>yc4g+~CxHwD1__b0Xo+>^H z@yxN1->e^+ofa0mV#$-Z=Xwup^B*a#K}gUp3O-S7AoOdmHj6|ZW)e*GTsHwBrQ6lJ zz!J@Qm}vSM@C#*7$#a&U<1f@%LgmXE$WRnvPO~>IiyL_e??DEjZ4U_;YOuN}2X*#V zZJrMcn%!HJ#xp+42;xDam2+`GGyk|x!Kj0lM57_)jvehjE?A2G{jc|RL|_H_)h0k7 znGFae#ZJd*ItfozZ~SnTLc;z0ImpUU zknSlb0{+(+Iu;XhWlQW2?OyxFmg2Hz+7|`FKHYpMegUD~EXFRlh_3 zqALlYQs{PtW9Mae(7K&u)56Wo!(;rQAne*Lt~Stz^?j8*Ee-!KUg5*pup!xQ3bu}( zps-;%A|+}RqfVlx2weF)OHPCUFJQChtOo6(mrD_Kl~7pDNn?Rk4B%&-AR`oCjYjb=yBnf z8pZ}22mRBl&@Va6BT+25BqQ6OT9B8g>!v9I2ji@+rhuZ(8SThgySw>q>Pc##9Ln1t zC*zG}X{`G7ho^*3Z?@O1N!yu)dxFG`7=p}vd=|Xf#(RQQe0l;@sFEG=QC7r1xQ2%0 zhZ2_t%ia;FaA-*kd8&3%I>sN^QgPLSHV+jZ9GDWu?IXWfIK74i&LOx>)%D-rjEwZP z#j=Gz-rm_6uUa^~ui_x|w4PQellM*WYIpbPYsCCz;l-n?QynC(RxB9$!61_bV}f6~ zK(T$k9Zcg1>^R3v6Zxs%P8W7`@V~?S!fJp`JxCIPp^t-tJo5T!0#?Ize^Al*)fe1? zrLjC7R_N0q_p|KFTiRL0P-UdmpvYl|E@;A)*_{v&d48#vODrTTKlmVRB>9Zv52_>D zr}wVl6VkXLQ==baAs?05Q5mVXza#@Fk9}o`?*(NP2FoVPd=-KhaKTBh?vfzIf|10; zmlt#4V!?R#bLp_YSt%LqP@cls}z572alf!941znvzzZ zBv3m;N)vpZl{CemsN6T(pi#wIM=GIPiJFG8Ig{Ld;@eAs;tmB+BQPa(QwVwTiK>55 z+R$rSnp3rNtpslG`I2_BoY(N_F*9>DaYtMMTB2{6gypBY(wDSsa`u`!5x6<8Ua&m} z=F0Q@Ye;@LY3Wz7I|9o3B8ka+5Uv5t`Fjwa3EDyt?8{Cz((m?L(MpX!E@-#cIf3R? zHdME|ojm6`@8QWc15W}s*yK%a7R%jY{dO2s$*cFL?XM7TtnAx;nH-J%W+gTkus%#wJay zcq<0Wg(uN{m7|S!!)z#kYPKiwF?6H+Tk< z!inf}A4TXsC`1dzW+7H0l5(SdMQG5E(d!wd(yH49rilku#1_-C`j5)l%hIgrod#jk z;Th#?l6mHmo}kRNXB+phH4>nXMv^k7zaCFV4EPsy0!Wm6E}KZgCSla<;Q}SDBG<-IiOImmO2|%5Zzi(v9}a+m_I;)13oGsCJoW2=Pc& zGemuwR5Uhdrje^rx6bmqs%8GP=!=tlQSaT9B;Ak*T*-msUAgGyWv{}LtF8RYi(YH! 
zPU20mjk7{ymqx>D&h@$NCF5u9h@VxKpa8}Vt=nD?qt~qq_M$q`n4rxT=M4W20fuFiQ$36M*pqv2=%%%eo3PdG#e!=-h_;mg!QbEqJXo>Ys5^N$jAE5)2xFpH_h*RF?C;vU_OCl}f_@?_Z%QC~XEAD~-hKVM~8*-w97^Dz)u zlCOXz*#{Q8iBalAQOq_;s5z0iZfdyRBJAz{xFOpBcjz92-s6yvVmxy>SK6wB&%hlHXq) zvxhv39jWMZVhL>$Htp7743lvbMu9v5uubUFPRQa4^V99-TKoDP6GNdN;8zZ%)OPJ+ ztt6j(^rr@`r5Ti*kP#0uE6gHrzp|G<2nRFYjcMN_x63&3AZ3dtb2@Up5SjM8AqRTU z6MO(tmXmwD;~*q;t-6}dx{EO=Ubo6na7p?bflQK5^P$rbQn6tb){W;I2WU`7o>kW$ z{VdZku1f=56ju#zJMr?z(ZKKN zV3X$``RRb=_6#+}&!9MJa@6Z&)SBYrs2SnuUB3G1-h%yL@Pk0zF_@@>9l#ez1eEOX zT~bltkInT8jqFPlVAnraM6OKTg@; z;)d#Y?j;Hu&?vE^+S~JR?arQ#Cqa&KZm#$RovdKXDopU!fwnBZr4r?`AI_@iVaZBi zhBS*l@e~?j@7TTTP3G664Xm6U#vlb82WZBVa`4RQ_pCoLfMI|xL~jJe zodi$kH9!OSg13@-aQZUV_v?}4=4N5Rt1rWYl7jX>sW+tw-N8Nq>UXkz^5VLu&Zb6V zQ10^2AN4xM>m2xo>m!uC>Zs3J|--mez~igV#^Z+OHN5~wYIRhikxrMzurCBf$Y!crMi6_lT?Qh zUDS^7e{SIb@H1N`4Zkki$NGSSKxP_WJaT$n@Wlp0FZsuf^Dm07PYlktyay~zT751# zW*d&DETJW{N9Ir({n$JiVp2d4VDNrvu*`dv=VlW&=Xna{vjq2{ARAtD`y}^l`qL$c zF^uHgVqPk2*a?R}f#du9>8lyp947b!kyZpMH{Bv9ewN_^t3P1mj~+GroaaB4^|Hl9 zrIc@F!S-tKz9}7sd0npET#~lzxtD$aEW@3i7K74a*GhuwC1KJ>{)lTJu6eMGrgK%C1JZ zcY};~^PXjfYp7-mSY`m(;mEZ7(%6#`f8d@YruzLv{TW95?Pv$&FS|w@RngY-onmHG z{iv_^V5!GYZy?i|DiK6hDbRilYBZ_r)Z`{$eu%=q1^)u*L9qZ z61)8CUgR-(X`cET0_9na{(luhj1<8Sh;v=gihVP(_{Kg2v@S}EHY}4uONEc4cT>rC zfH1>{avIsNq3#9Gx`CPJHG9_F%?M~Ld-Rmd+_+GtnEiE6&=WYTxj7kd&;e{i$f`WjA@o{Y%p*lc{S|XgHGTb#=~0$D7MuH)S^$ zIb6El$eWrehM^X&BjHlJOt+~r%Xt$qd>r`5ic>ySF0#jlU&1LKj9Jh(Y7IsfvF+OBb$l z+W1QXXtA5c8W?ybd{|Dbet|R+c`9HbMBAg$mH}ly-GTXg^=qzmFG| ztk<*>^#m(h!eF`U-pz8PwDlv$mV$wwB>Gz~)M+Fwn&_BfvL0dag3}jomr<)=8yx}j zcb)S4wN4Se*fo*aBub6ud%)4CeSC>#m2FX#AY1p8#HQM-rVYu*)ehsgKP#`#v_+ysqzgSB~ z`J>jsD5^Lb=bL+Ps8p(D+$0V7FtsvD`4qoa0I;?`G;mfuA|OBmeQVv@>*Fb_Uxgkiv*q}3(7vgb0@-Ey z!IU#o@&6QbJeu@=WJ&;M?wlId8rm|_o~G-h-loNx_A`B1;S1k8AKL-7hrWMMvF0gN zan4|5YAt=#D>#4ZR_T(r6)G$%il$=^9^d|`Jyjbq078tdimi~Mwl@7l3T6OVX&m7o+!hnCaP(*Qg8i;pNLCa}5 zlp0c7W&@^62fQZu0jlLx@{ykNY+9rhym#}RpxVB__FuJ3hubYkprM{eg_QrnTgdl{ zmUkVwZ>>RG@|bXj_%G7C*u{FDM=YFCwH`{NQ&%q)R<3dBKc$GL8{8Tw3m4V?0H5!t zUjK+1E+#x&5dgiR3L^upJO;uG)6*9(JeC?^NWqJt@;2!Uy*$O-#}Cwh*B)u^qP_u> zfvqJ7EXs>AsQ9ObL0go?i0Ob=G(AHMPlcM^mwavJn_^(vq5DacbEv@I6QnRKwghwf zGXb>_&6fa33aG(o)XGRbCihs`?pOQ2P_B&Skcf~c9{O(-g`9scOB!`x)K^#q6T?Et ziB41()vDn{@Jr$jtU$ZDxBSQMH*YN%reV7lZ`6({ZPoN(Bn?8i>{s-vNS2xsw#~Amd zr6ra2H>wLL(_MKe{#4@6@u5;9OVXc-pQ{HetZ>SIQOYA`{&I_Py7(${WiOddW0yg$ z+Izp|>k;L&_dzeIxf286#q%Ru0qP_7P=N{k>>c_WL5UWJ&ANc4u-}2m7w<_r4wkAA zs>NS0Cpv|?_#}qqNGu8fGt8#J%?^WPw^n^cEH+D+;cS!A^a9c zD5kf!ejwJ$QP3h1(i_JSh`3*7o2n0_<%t6nJG4rhomXLFhp&bwWKKi*W{{S*rP&!Q z4cAILqqQ306n*t~Aa>95P;PENa0_stP+4iTb{^*Fy4~)g?bfGjhYr+XgMDI|*rc6W zChT1yh&*WkaWx<_j+sXN_taofp2?}icufMJ?Ed1Lw=XFMQzJ;Z}9?wp<=F3+Q@vR1?2 zTr7Di=&M!j9654*R@zEqqIQa70z} zYBxO2`+o0F&w@-O;O|M^txm96nyv7B<{wb&5=UGyZ%*W4QPqg;RH?Z;V0e_lkvcbS!6Ng~`Kd{~4c!G3^ zD*0!LMzt|_;Q2W&`+y$;<}vPxkBR|{_}o8xMQ$(T8{q@uyY-MrG-knLo@MOc;X&p$ zI>q=dQT}n%|D!kJkJkjAil*$=k?@&*A^oqPG6oDr1tZw`-~aFbay3A82LO_w$+UX^ z^;0oGZB2%ZdEvj}whwMM;d=G~F8}pYD!0ll<8+rl6a2sY)*d$CNX>PSYzh6(B>ra` z%wJQR0QT;?c1&@;-P=88bOT})b&sIqQ;F0RW3|S z*Rm24m=nK!`wCzkz5=`DZum_P?TBBxv7Qqt~Oo|h*0Iq$5B zD=||BOjsD_Y`A>XNi80VVrJ_G_U zD**@=BDY8)v*FbG+W1qgS$r?8#F{#~0h8r{!+%ZypvsGNrvtG(_`&ClHX-IQCfm>2 zC7bv>dbJM%a_mc zA`_P)3f|in+x#{bNE(jRIjZo5c{MXhdY%<$g%wiO$qCpkw(1=1bwrOUdPV6xWE1Xo z1WsZAf|YCByW``xHuxN|Ov`}Gp!rncrJ{(Y=6QV#b?8D_4RR#^Ms~P{ZXnwQXchj) z6H79+rlLv?|4Pm+8)LsCsZPBm{rHz#rRpudh^0?%oXqsTl<3W2)@b3|=?9I+TbP*? z)*6H<_Crb&FhnlSo2Q9{ALkvjuyK4lF)=XQGQVy(O^jR*W|LZC@QFLv*({RI?7%O3O_-H{mY0?) 
zWZ_=$v2cDk>UE9F$FeFcfU5dN_fwwMc^QAsEnZ5lhiJa zZe`Mq5^Sgu-C~8vA8!XSGWExg6`cHn$s}u0#!X$-FgRxSkEoz*9;q|c#R-kdxWGa} zj-*V}gh{wMm-Q8A^|q|;g$`)f6uzO|8{knL^*nto1spn!1~GAd9RgX7gm&q@>aJhI z>y(c-D><}aEVC-?OmI(;20vRqU+a&j8;{a7A07ZWL|TkSR#q7X$Do>K5<CdmS8Q)DMfD(h496CHgz_|eZo%B!A;fX zVtA)`R?eN8aJiz_xAV0;8uoNNMAYlw&Jdt8lpFW|64Z6G+05l73kAH_K-*Sv|sn1$e_bv#cqnP0k&ga^m(Zb>$d6+cP1%d!6ODg8E+T-DqP9G>anyI_X3$)!Dk4 zVzsrdcau?g93DZIh1BWsp`Zj@kMi6}v5I%#R)HjZI(LiBvmb4uu)g|1JRV4by1F`T z+8;|5tj@!T`+V^8a8-&t_*omQT$@KXAg~|St-*z+A!|P!urMWKXMxi8o87B-yyi>(h%W zfXrzYx!miTTHdY$y&&1S^iD!nNo)cn1nV+2Mvg9*6Y^Eu6 zxLDrdww!=9nOgH)9I%xUu3oYl)$Eax`yJy7+7J72V(mLwdU+{wuZrtZVo2~y|7}A+ zCQF#q)>l(T=3Uj%dS>zru#AM z4B7am92_go))!tW0BEKuA^$nq{ZOex7$D+UlIb{YwlQ885`o&GUGz4cEjc*!a~M`2 zBc*enAdN0qkG61eW4p!VKJ&h!vl_!96$QonfO;l1UFK}R9=xQ$38b@;a6RZ_%Xyan zdraj$veRd)3#!u#=}My_lSdcs#!l%#tx_9z$rW|HdQVj;gJ;Th>{zy+@;V=E?RV#8 z22!Uw;7B!UdBr?i0_Zx1#^USoaqNJyDq;N+Mjg(`Sm>g%%b|`VL(|^?Jefr=Yix$F z`>M{GmpRh1>MZI?De3)qZkq;K#Dvqgl$3B&vjs=!%lETc3nzY^zW_4c(np5Cj%OdN zd3z+}8O={J(gLJ9Vj!Wl(d-HUtn}~n;1+5C;cWn*1~vjH9DK{pK9&xaFP2K0h|3(D z#7ZIuZbkLk;qOAbPD4eKk7+5%9S8K?Ha=Fn9@^y*$Z}?GW_tsWn9N%mo%}O@5neBm zEezVNHHy&H$p(1KbhG>OnHo0A9I&v})e8fJHELOt3M+;9G+`fUv#FcSEpDae&6Rip z%c*RCg!vDoKPg^4u0(uyKik{Q{Ah1=-5Q}dhF{lA4@r9qoFVSKYOHDp($;XO>l$t+ zMY1~i19tf5tEed6UarB+s7h(;?2%Rn{y*tJA%ptlLoCxp8FMvRTCO^%z9=~x3R?|j z_1b7xXg?6v9)+6XO%jFY%*mXz9H#Uyi&4uO&!ombQwj!-FWGg8dUv7rR;GdlfDu3Z zX9xutbaTt!)*cbpg)tb=StHTR-Xz$xhc%b;3^{8L{#K0tf~f{dm{mV?F zjpTd7K3CESL5s62vY?(2ad&`P`R;N}%cLXigYl`JcXNz?slLN#xpqU=8=w(0IxLC- zz!U*cBx)5B9otTsre77pdB$BQJ4vC{k>AVKYc4nUpxd8En6K6Ko98%ARThpS(pr@B zRwvcNvs;v_sWDEj0I4tMEup1HewwQj0^0Y?s-z-`Jv#w%6x(i(sId|6g>6=S6^sCP zoN1(1_~yDgllLP?-oWsW$ zL?>+X;EYDqLjEw#>>3AWgtZ5+8vq`N!ZtL7o0E~VA{X^*3ik{jmeyy|5P_)#;Btgm&iFOEM<8$d-sGN?e zfh&{IdTr{V*3XG$YU{6xU_=!t`-4NHQJ_)JLi0WNE4ney@%&H9UVvVN+O$eu!OU+I zP%g60VuIyQKKE~%;YR_oHk3{_)S0eAZ$N#i@-{3UB?a5%L zDIf2N+yWpwY*}O|D%OQ}Vbm$bgA?9Y%VWmkL!#dKFB#1z4w0vp!?7%+dFL)t9e`AU z8b>iJjIYwAJIp_4Xz9-R76>2$z%~6He-L_(8pm^3lb70MhdB=JDZ4^NSiGj+OQu=L16q2+4%{&coz;pw;;)46TiKU>{>t#8S<+V(B4H>MJsTlNKa z`=n3e8jjP7IswbB`C*#tP!oL?xo4=AVxlF|F>rRKa}<8_U9{5l6pP|}&2tqUIZpEq zgI+Qgd&eUvE>tQDfElr}PPneK(E!BPu5(WsJ=D%IcJ%Je$>stM_0>FdcTKlz@K%DGEDvC*V`I|h5k)M9vB}E66!i0Q>{wuZg zKSc!&JiZ44SM#s@s_T+{K;c(##_H!SAP=K<1tCW*K0`pE9_OMZorah~R71@m5p4im z&{2kVQ8X`9$g|3N9z2cp{>H2ML{6T->_=K0F$f@}h5d9xD%e3tlCn*|WB7hNf^bOZ zCh%OZ{v@AY7$~NT1h5>TcxrRUs!WNFVNCGa7zc851jXQ;7gu{@VIGdxmpCZ)Wq87l z1D8_4SgJ2JFMeulRElhROt{^cCRecUVpW^!yS!t(IH~Ji3X|_-OAa-1HQcTFRUpM3 zD0OZ`_r+0hqb%=`_bB{uh$+HY_tvmh2Ns&EM)@JG>Dg8D-p_Tcm5w(kokQGld?lfq zKCr61Hx=n*Xrbi24_VOo+}Eqp=1FY`s4j9mgMUo&|6<3w(c^PZospef>J|PUqlb!O zg5x00`G7r4Hp|@^ks2J|y%){uJeUaV7g#e<9$y+&O*vY=D4ZrvA^`}l^s(~Z~|11ExtnqlW^ymJ968||N9!BeP$3J+iy`+YPx~X#*e>H-o;y80o*FE-V znoZ+2Os~>AdR$g99#v3~mk?i_)CHo%@t0(_OP@ke{o zH1f*nVY1#q1nw;h%PLlz$M~o7Vqiorq|9f5&he#x)>sD~Qfl3ZOiksgx~&d0R+APu zwF#>-efRoiWM|#1{-JHbkjVULJoSEm{TMPj?NtN3M-GZs_mMB_9nuYnvBWp5?`F=J z>Y2XgLClH{FmAan7BTS;Q{Gg|{|ef${_RXZACJk8Tl3z024s=J&`YSD@Qe?XpMoFn zN0mt-3wpH)M08nc*pkcFf>D-AhVYw1bE4?AaHj+hUbSyI@cJP4A!k!37AA6TV9fZ4 zyi@-wEdHr$qyne;J*w6YvO4W}4OwAhGL>WDKY682a7^Rgam0Ggpz-luOS|MSxVM|K zrqtQ-mWV6@`_*R+M`Mu^ZEP(w2E68>%MxK0)$E&>vB|wEdc0$Vl3%=4(?%}80uZScPhJjaU!pp6p7yqv28W2rOMRPnYmVqVX&wk zbsZ~A4k#@ng?Pyc3I;G?jne6{H~rf6oQQ`}G>LN+-iL$yAeuNsSFAAR@S0Y0-3v*3Od*G(_bzyt}$sLf4+X zj%im{zl0`O=UYrzD4gN-g^c#9b{$5)!AbP;R^*#2jxD)g%$k?Q2JDvEZ>k6!BGTdeG&t*=3sPd|zn>3PparaHzP7a?L5d zIO|@P=OmD$%n9sE&&y^K>wO>F(wF1?>6OVoHogsn4aBkFuKF)?oQhfgB?#+%1f0iu z^a}V**w|6+pDp)?FZbQ)nkw-<*w@%$5jjs|Lz22+7KpN?J+)Z55=#XiU|i4-BD_WG 
z*g43!UelKF@>VK9hNIQ`h{p;4tWsd97T%KqgXfzLzB?C!(%i*8MHeZ%*G?X4c*T}A zAI_@Ps}h!D$7V=;h%LbDeN}T$9kdl4&Ce;bVQc9MOumB%Ts(13mW>j#1H`L=YeP7ib^SH4PY4C9<1lAvct`py`$b%F8iksBQK zv(N(?;2mLNGe%GpOko8OZS2X9X1}>ecIV4dC$lb-nk|Ft1)_Xs6vfG6>YnK_DedRqFM z8%?KSV78n1Iy>E1ZKYJ}D8rgcP09Mm_ourNwkO+TGBX9L{tWz@J<;ey;QS^-{p-_~ zHQ@^5HM%idSNKTJxr8woJQm@$*6yDZ+cA{V#z2|S?{Pq{4Ezj)6j3Z3xC{GxHVD=k zd@0mX-3umS96Qig+TWHmy0ay%Ygn2<-F5BMi>{QQeph>O)bJV%bNSPPyk~C_5wW@M zE2ybdE8^EoBBM4!msYiNQf0h|DjWSXH!yfL`m3egeo1A?d|$5T%6o{=CRg}=KmNg5 zntszG=L4g6vP$3s&Ml9_!B4b1_vL-SyNq$iE||?t&N^^Y(GRTStAeOn=?4m$!g`#{ zPzxOC;i{QawNlF(*YQJn;icws&Sm)f^a+%)jDiVVJJXwzI!@8j9K@Z1B;$zm3^c5j zGpOH@=w2KCN$eJ8MT9~GoO89VnIjVwRUUzH@_e3N%uhZ(xR7{>;&?X2S?d0VsuYMj zUNAPNW6XVb-$5u{jXX3FZcv~nifCplU%2USt}M}scW%C@y7G=)&ZyKZJ#Avr5(#Tk z{MjSv;@V*9a3n<`d~i;U%<@`Z>3uIC!~7bp&ovkgwb>7ipa^}AT)_KVn!eZFKs0JN zHag{-NS{{E)6S+Mc5ghj^W)JTknuaw;pjNy5W>G55Y|xyIClJiid9&0UddO>dG3Oe zBtpD&tzqSfTgs!>XNKnCmvi+w&SM)1*~ky|TZlMqYn{*0)Q9*&1kfsW_jtjzl}Kct z7$=bnVkH{v)3V4&860HOH?tP6bvbrEnWSM}# zSIaTrG_zV?r9?-)$`v4f71mxKZf^MGbnk^x(mH1BcS)Z29l`O8G^Yfcj>eWcWo_1S z?7AzPR#+IYao(IP)AY;VSp;s*ym?Pt51sCl{A#?9AOHHIZk>mStvIIzC&DJjPNd~D zpw;b~tBk}7-oOgw=$=$nG!6~-v@+)rCvs^#UC5_x6BO>DJdqAJ386-gUNdyZ@lM;H z&dYDzX%1=X-)Gy(x2l!S<+Hn{Nr?v_`hDHi>~wx;xS|J<-*FBE;6C~_4PvW@CkZq# zV>6?aXK86`vQbobQB$7K+_2?Pa|&mxSPOv5Anq{evoaxcc10GRzhWi$^d5V2N!ew; zJocvTWKDS3oN1*PByeV|gr7 zY!sS|EjX8md3|>eG)?U-6#Qa2j7U-c^za!ZO(N^LS7~&0gS5eZ=>q&%s%F`%B*z-w zFck$gO&cu0^R*9HF4hU}5hBv?SPFvg#BSmFj&HzQ4vv&+zIxf&B6^wP^{ry^&|9wH zF>Y>#=agMuYmF)YVgXF{u_|)Qb{E~pGV`_QcBo6`ml^jeYiYP@Pv(W!xgL%QUc=4X z$~-yVda;7FC*9)>kJ$8O$!adpEsm~!&BBHXbAQA^x$VL-wlL0bytaRiEo*$Dh*jj^ zZBSr|*I$!84*3=UynlMmql#CSy!pBT9ycR)-Rw@0rarcx%eM9IuS^UGw1&rGi2kQ@ z04}|o{R4e#H0p#EoA#I9}oY~N$h37L~XV4f?Jh#ScWI-()H1D9@ac#%K5 z7u!e##Q2bgOMbJaWy(?G*AIIt4;*}T%$ghuE>!VA$k+v_MU~3ME5vIQS5^aPX--?| zF{n$szSN{HcY=?r<(vBP(0lFcF$WoA$P<-t8CB|G;rJ4F{uZ5&SM8)x@ZA3FltW+N zoFP#7y?agq>zW9bC$jRKQY~A$+>=(C>B(xw_LeH9EeCR5)2jE}gqu!F!8L{rGcQCx z7v^YxNT%K@j155B%APLE$e;j}BYN4xK3twGH7m*DiE`2DNocq$<{f=sckQx#=QP=U zJKUTNMDgizS1J;%SxCEXmX_vrtnPh$qOS~3P%d*lmMe^}hSG$q_Hc|JXnDD3lTUZu zJ$^}HlYBgsc*KwyZf?H&D%B;zurWalq#Xa%eXKi3M`KMMu3yJLR^hM`wC$+>txMpX|Eo1RIE3I8cpj{bNE)+cLs()aP`>i zqOd51ul52#Al^r=`H47mgy3{tH2N>!XWY`&7T}qljD|&d_bmoT&s^&LG-j~kN$;X! 
zoiWk8_Rbl%ycBIoJj8T;_AE(v_9FYJNHb5}l4DJF!Qm|A-$}aAQ)0*IAv61@~VYfPY)Z-Cx4I12r4id9AJ|N|NAel_PaBPS<|6Lga5yL2Ap(HXUp)|K=OG`_$1wf<+GWt z-&+S;Mja^da6v)A2oK4Bj^4laufV@Gh=DIOkRf$6&c#_($)Dy+ z$>{a5sFxP_;r(md z4-DH3*e4xKVfERu=Tpl!7TLyo_v{lKH85qLjMSj!sEFj@tODfOcn^d6%g&QfXUH~maFNXfMy_phJ|B= zb=gqzCQXN3v#mE8*4SW#rcVG)F9kF5i0T>#SPPivrRG=KlKBIgQ(ZN6C4Sy2;_~~b z4}iaLy@@Hka+KAL>}Br>CAp79Gm;Lj(Af4*3MZd1cz!$ozM&+*L`Pn1HChY2i*F=I z<}uaNU?!;yI@YBupc@fJ9UosCE*M*C z9hQS@vyv;-N~-9MTnzX3#UwlOABI1~Cr;tv0U%jR_4D)1V}kqp$0qdB-~>Uon8ixL^hdM&ZzW)R-I zbgz>;mV>RX*!&1pd@urFsR>Cx24v0u{`1yl0$w7ktanTzxt-wV|4a8W0n!3^)b zEkijjdKlRsD=`ZK+(q2evP-!pCj5&Z;%R0v6EpJOH>Z%}d2!l!t<&UkR5Xyz;vj#T ztM~)4FbZrOm%Dc<^(q;hsR5(8n4-U-;2e>3L|-GIXyK3awI*E8bjw&^99<(g03=1W zn-likn;fmYXapK0J!|7tu&Y&5#X_44{s6pE*BjCf|KIK$ERXhy1E?33uI&A)7Y=ww zsih@ky3~d#&DzD0Vn_*jo0`4#H8IEEzhJeHN|1pHzSS^c;v(QUYqE~j;0(Z{nrE^kRkE8eYsav9+Du_S=$LJD9PbWIuHGVcvI%PZwUXl2-Y9t<(#i^~-O_Yxjn0XE+AX{}v$@%UQgM=0^1ju29b${c z>>ID|b!16Q%=M}${D49ti~=zzy#RZ(Fl_N0_T5o5SPN4SDU)Eo6YyEa*TYX1CpHGc+VQq9wRPb~kx8bX61ohPz@FMN4eaAuE zAi5D+)M=|{`_kS=_im22r>S++G>7uz(J)qA_A$_pY;(eS`0D8}Q@nMl&XpB~M zUe#%(_iq82nHoZ=xQy-AIp^1eX17GxW4gxPMcr(^5?nq^yMZb;Uu@PH?(mpAK$u01ng@A{#vQ_M>ysx?;M}fm4 zm#1Y#;{7)F&zouaDK}u`2ic)b^fc+-rt8jp!6?5o58vYlK(NDrJ-?45V}6uSC!!2p zE)3-#N^v*;wcmXMGaOwvaO#f})_xKxQokIC;#*4c@$syP<7T}&9`*Gk>l@@uBy$ zWTE&Ah>JO~1qSAH8`koc%Xx_KTUL19$*$++V1^QJA?XsOuQjRCYx75&EGwkf%idQ( zq5_^5=ZA$&3qC%0I1XEb((^5@EYv_!J@OVcmdRQ7?e}BOR}{~w=_RLT-=rnWFG^GG z$C`Xz`2?|+GXr~0ake)fO_nI~gxhr9lFTM$TWOo=j_vQbv_myE$t?j=u zbcsqg3X0Ow4T{nw-5}lF9fBea0+IvL-7z#G-CY9%(hM>Z0}gq{y|?22pZEQCzMSiv z>zWU17OW@k_}w|r$_;SXz8YM|$c^{jc_cgC3su7Fv-pK{>f*q;GN^15&jBw!4pzf* z`I$X6(tEi=&;6zZ>SIT_CE%2toSr^dLPewut_rA*^p_Pc zsDMo8M$;ZYl@}R5NQ(z>vx)1E2NJtUph^!-C;+B#OXgCW{9`?+$;R5Bu5Z#)lf^#8~=W)q{*HY^5GQ|v9saz8H`r7B6*4%flsjwqm4c}jVUTf zDSHng1^eduk}b#@$$TFh-kNmh6v$9Lue(}OhHcXO(Z^qGjPID4btu9-jRfyS#Nel0zH-smAV z%LrZW2t?~zf4H{JKgM`co!-JgO@4g-wP7E@yb-^qJ)k3CDS;S&z&1sg_}mv}iZb60 zm!pMw8y1vn)n?mouZyI&@X=>M(Jrr*b}@ECH{!qMK;K&P(OQYwF>YxczNi+?+i68S zj+GrA96T2afso;ZXWOB>Qr{;pb=#+FXErsgXhBH#sN-Tur*dweb~{}B9d~7+|MVXM z!5AflEh_~|#<^ZZUj5yo0>zld!GB z95p^#hV!Nt*E`|?Go80mG?ON+3S&fE;uS^i>!xLsq7^c0G$a;LYHW3bGoFaIj)C0? 
z8+6>>+{W34kWaM4Ud)WOa}6lr7Q)cbi2xE(SlbZHGyM}M7z_{Lcb^?8w0yMv%@;G@ zua?KG3dco`bR`A`{&L91B$4Z0urw)^&{#K>FzlFR+&e>i(FP?X7tl^7bQO3V=P}RT z!nnJf;KoxVyzL72SiUWv*bk=Z z<*FNcawD`1WORIVJ4gr30pD-!E+6d^s<3*JF(iSY$ycmpqm-tGv+UG#v9L3~w^yy% zF?XL-Dr$=UlW9e^JR{@njpf+H{_wEVU|_oIAa=NDN?d}wKwMk`@seI_3zQMfbl&t$<$Y`73Sv>0{nH_V7wh&eUWtk^CqAuB<}xfp?}|-DZ~YTeYab-8e!!@&YBr z^%s&oJK}+-<NLNQJ zv=kL^b{aOtex@E^C;!rD4gI){O<;Trs~YHY>VYUf^%F5*XN3JJ^hT&ZL)cqq7-cFz z&uv`4L;ZlH1Y!gWBl~gz;2689Mf3J_kNQZL2b#{qtWyWr8FDS`=Z#lvu(Rf&irzsB z@?*{jh&Qx?t(gk4@G>AjbGythTUV~+)X4#XoPEu8DXVt}#;FjKFv_VWXX7M$dO2-2 z4G;321r|{@IryCFgdbmhV{M3>Tyqoo>tEYx3X-P>w_&Z{+02s8#4+?~`}h$o?3d@y z#S&SL?%h$w!lP!+it&Lp?`?SlPurkgKj}mD7wp~wP_l_D8`-59eR6gh)jsRn%uVXq5@7dp+M|U5T}EZ4 zzz_bKvRR?0M5V=&*+Et%tuzv`qsMQq~pP^5UMNrJ}Xlg6RM%p@u1fH3`o!}&**zTaVti=w1 zKIynxYAm09@BPV%Z*%W93hW9({qV4Y$bfPe<62Lq=n}o<9C_S8JzQW)blE%GIX!dc z3|u9NHX|i|;?xTCnN_w+_YedkNfmUPn1i6Pyh@kTA$7mtVH>Pz|IL~Ai&iqhpGJs+ z?*k47mA1*fZ`Gc4gd~b{i)PT+m^v)A#fI5<19o=Z;dt*)$*1r3nSAOlo10}D{qu^< zrIsrFmvg~_LDuK)(3$0Vo5O{jGx3;7Scd*{=NNG}loQx4Tja}IL2UPwS@Xb#dHhl( zktS-CxOa#w*P9m1qJgC zv1vcgdi`^fE?h49pT1!@V8T_|tSwIf9)f8lSU8cl)GlYCgv|c@9bNxQ@Yc$Tv*jOt z&YC<8(x7H9#YDQumXs*d#sl}K)y52FdBh61rte>t?Cu@Hyq zQ5+U1+wB+%`C1s1mM^By^*{lSlj;4bL*O&I4hs|~F=oBxvd}=oUZYmw*|g2stJSTi zTI}uc{Ib{ATJC$re~nnXd{(TGVC25>RLibSvWKVHjtOendjKP|Wq?d?YIj{ZWer@9 zN+`^X4@P%dd)b-l9rf-XULp_1($f~>jfYtuaW@A{{kWdDrEc9LD@~e3juEiOCf084 zzNkhM61CnuBRssB?R@?Hh^#bmHnFF~ZgQ`x+4kpmy(f^m{PPC8)RLSM>X5p_rU}oD z`MoOLvS`sc@b&0M?}{v0(*_%;BsM&jqG_r@A;qK zR|W(sMBfIu)dtLx7jL<4RN2gl6<=Q4R!1B7-)?puDH$E#t1l96KSZt~bMnkYe|GL0 zGFl(L7%c&EsxGS2&6Sfwdwlyt9o%B|X{WXr%LBBR-ikO^mfPQjY@IwMNRk~~8atF0 z@365>?U^_KvfdlXmtj8f{WZDR(TatR0?9C(l|a-!E!-Su?dWPXO^(Y__B4ifj#my^Yc4{Xe1xPhFRl7XOt|^PA%e{I5pH#4%=z1ZUZa}OS5hPK zhqe7r`NvJYl0M}1a~~V~=jTo8Z8Qg*OC_2(CVFY7x6WU7ZJ9LS*GB>29TO&%Mff#6 zp~V--BRM$x*%o=Ps6!OIDH40_)2Kp*NqR>wyGCnMSz-cu?GsHZ`-RB~hgs(Qkksn6 z-*`F~S;2zj3G?7tV?b@E)1>~jSjiqOVfC!VUAc&W^@}$%TGRG@nluXe2O0+tdsI~bJAI@;|u@P_f+W6oGnf68l6KmCP7j|qK8@+kXfI? zpwd`!I_}$x?n23Swned2w^z)twAc4HN*q7 zd9(tl()GggK59rE9ZohwAf_oMtV0C75mk$}UQ98a)3yY#7Jr&%n;+MpJ+k9XCmvuL zavw;b_-h|4)&u*sA12}d$*(#L zM)j5=e~NfeG%3G`I{(+Eo}g&Qgw(7BaqhnimJKmhIS)#b)C~`^ zG||e@(FRD{ee#Ar{HqMwB!?_KdDtn*hWs{pmC=Cm*Ybr@b{_2r=T7ON1!!abh}>Bj zESe&YjNpOCauR*-jqbKS5?9Wet>@pksR(M-h_gW{4vib{)6t-z+k1Y;zF~ZjJ8|U# zkI6dj&8`3K>H?WuTd80y`k6|-ndr}}vHG8Jr2Ay-)Bi3w|9_PKjj(^|(*LK+{|$7( zI!mMaQvoX7gAx)#bMjyB9pu)YFKpFhI#C|H+A^V^S(<@TQRufo`Z$+*l@v&LorSy_ z#^Ju;YSxy5GL(L*KU9eI9ldzg>#v+?`&39M`~SWfbV9`M1oG_Qq#~x{ zASk9{1I-D$Rhgy>Y1?}Bxa

yHtX@pbjW`%1V zgk4>!HIdcA8^_x<7g_p=2HToa{NmfTLJ(JSjSp3qU?BhvI1WIhLq|sEHm6=LBEia= z8>Y`h*NKS@=o>sT4Za_AYtk*|P)$79-t+z6HO!x&vXX|3W+9Jduzrmxo}~`BM^!k} z63hLXs{(*bHR#km)M=o>2B>*4t(0b`VyY;`Q_Ty}q*?0QYZ3lI)C^=DI?JVjTHpq6 zl>v1Cv~cZS{*qLZuRB2_OA5!TD9!fxtWVxpQ2<@2*0#&mR_Wy)eg9V=nL zn2&D$=GO$4K1lb;u06bu?)PFltkJDSM8uDtmn7OQUEIT(YmzXK6T}xS$;9Ee7CgD1 zifO%6Z=`TT{3TZ8-kKZO^L`KOZIGpdJhVk|uW%%s^^9h2ZYf9ONPNPoa#!F7)-bBw z-pT84e?0#L`MgjAbKdh`GIOW!P9X_ZthRHpi}8*};H_uqbAtYf1t_k)ote{P!m*ek zF*lE6l_n#3508JSeEleN zccs{SwfblH%1VONKVwG?WNX1cgO6PS-~cp-!u-%&0zg>LnVT-7GtC;+#+mz9bPKsPW#yVrKM0MqqI{Loa~te`W@h6>~+?E4})^iSx! zv#p2Ju8ihyeF8VTOT*n>5jQ(}ezp_jj&(=+j<|tDPLgH~SUA>G4k*H&K=OraRPsOCrcosOI7?S5jxA-(>kCK4ePTO4T?(Zp) zWT{m{4kHKBCjaAR*nJehEDn{k{@UaMJ4XAmf3pSuPq2_J=%l$TdLub>WAsQh z^EQ=GmH)lNpc4|Lrb-V9hHZ$?c_=C~^=LZ^hPh7^w^b`J{qBXOPhKl$RVi=>8w zLx60|Z;2V}0n}yE^jYXjq>>62WYWF+!eRoA4)kWVqUnoO46&jqjN)U{{ta+<#En9J z=9%vZm)uC0xDgLPP&|w6gjQwnTX}t{D}`-i&)rna;{2{(y^4P+!X2bg>|x$)3(GY& z@b>LOE=0&|8maRVB+D!7_QE?;;y;Lv{}c6tengd@Wxu7tk&kJ&;84T)5>n+j^n~OL z5Jt?6YTm&n_^g+qakB8?(QnQyF;wP>HGPw1;+t16`^0iHWfU%2X`tN)y~R2`-KFb{ znkC;)f5owiLTqlEpW+_@&c7ihNHcT(YSihj*39c*6t{&!pS#Q`ol2_fqN59ND}lQSm1eT;L2ggC zpvS~=W3qpV%HPYHBEf156J~Yf`N%VRwI|h(XEWOW!5%5Vm7Th$1Z{rq40~axp#1TM zPVLw5f9>Q(Fi?>e-*IoEyR(EZ1JMuURu4eKYh^&@BA=Mq$ ze<#6j(C+W6theZZXw^QVCBCai3-#m}_s# zBryEv)zTp=l#9+Dm$Ma_?`mE1N^O^r==MYYeC%ND^&(c}Idbp+S>uuQsRt`M`O|#D zwJs7GBeH(T8CSS=z!x2g0@A*UN$WQ@{9ji=_=H&ec?Y93E>_fLUG;$h%$r~90+M>} z&(;Oo#}=UZazA|X+eq91A@XX@Gbr8$!x%KEcMkrcP_;>gH1u4nQE2s|B6a;v!z)ow zeQ(Uezg4kGB5UR2Vp9%7t3FOJK-B)a(8?D6R3QGp{r`ew2TxR+*yYG{P<{U$ax2-u z=7lt4-L_b6YEv;kuP+=!8-(OHVG>g8^clZ{yGDAsj(VtPaW{*B{pw-cSx&q9Vy}fC zB$SGGlyTzb{Gvlp^j*ij<41|}roE;gBiRU%tcuJ9-PLV#vG3#gEbKK-MJx9Q(Z`$s zMdYW{pxlG(L{0;y1tmtp8bf@v2jyt!pI6^e;z^|^CYcESVlPt6^Y2pF_xi>a^q@Ds zTWVBa5;*U=dpoS@x%?j5iRUVl+TDvoWFIYx!}zhOM<2rctR`VqS`_NjP+06X|p ztO;{X!rUO~w8ksWgg}TkO#pUf%!R;t6Ip|KrM(l-0O)jfVT2#PM9kJ+lJdn^k;o4! 
z=qANg10)yl3Qx0A-uEokbglJ3eU^dv^d+r~nD09VgILG44C~dLhl8r@Ku;G(5N!|c{pOMm z!yQWM$RhQ-M3!+4kdph4S3Vi@uTdSkKqIhHlP(Dw*hMA&Bz8`n{EEQj((V!2t}mix6bVG&)X*=^W&>kpQcGq zFBRVzum?yiO1l}UGm)Lkq1#-~dqEkIr}jU>s3)NTeG@&Ul+Xn@&kCRfhtcVZC43_S zfR3Mo9jWMna+2>;Tq{2gYlR_yZv>>AdG`z8e$d5wocl_LG9c@TS%C!Kk@$p_yjQp* zp~CPB^hKCfh;mLd{o%tRmm%Zx#_sp;Vug_mcx+az0xnXv28YRcmh3^+m?Ey5a9Ude zsZXI*Ws8FzDDWaijI+I6CEQZ8Nw_y6t*S)$T?WXs7?~bmW-$qYS)6n7x7AC!XYQ@Wa_R+FrpnCuZiK#LmH6IoBIe zCN<4dZG=o_6E+JT8U{~XxbKf0SAoAYX`%gsKrKxaF12;Y+0{=t%ip9g z#Et3z`MW4~b%PuM=UCtAJKx$pjm7`E9YQbjw*ei40^-KXm&%vKJ}ML#J>5FUNG_67Zn~qa;AH`S=C*2`MYqc*)8CsVfE96LTnVaojmyy#(0`#;3}?NI25$ z8s@$KA>D0u%5_YV?9>}^$;qeC#WDiB1TwY(n@_)f>Hl#&v{9Kbnqp)1exRZy?)EPb z{pm;8wjfP@6a-`amw)R0qQmVQvBPc0|oUF0~@26tV9*^^X{ChP2FD?8bXNKhFYNkLf5-{Wyy?+ z$Iy2zu>-AVo8o({9y5au+_ffetzx1~Cvc07!M5B9 zdeQ{=F~m`hxy#0K`TE|&KUN>oGqssIJ7h-oo*NP{SSIy@VHiW6>vp;rUmYNKBfwrR+%0SQ4<{)vj8j%SjlZ^qkd6o+1IP6q#ls4^e8?d*NN&x^t;)O(5 z7;-`49lU_eC|r2`>!irpNBNVzqvSHD%?I19LXuy_PL{PyVXVvC6bP#7`h6vN`=)c} zm8pn0%c0xaC?4WvTx8K;Ekk5{BYz)o zyq13#%WG0+$7SkbrPut?^>;mnd#vate(uuW0H3_-+kp+Ww zl79d~P^+W*bN2Jqi;cu!Kz7H>K~}R13sC0KH;qJ@ez>%;+ zkQ#r6=jOKF)c|Pq`{+fwHASIPLBoU(gXg4O_|&o=mnQq>8hs9^GmbS>GDN;^B2&QP zHmjbK6eqUNl-m>+f#3e~>%v5`<^7@Vw?n>V8@$FX>u&q;H4hUUrRZNVFNs-?MDJCa zpG$rNORo1;xC!ot$0G(e#jO{}hjNEHeFv!V!qT7Ha(sMuy9t+mvFwFy$CDp*q7ygK zDu+JaPmwgxf8lV)Hd>Z-ZWo!-7>uB^n;g(=8f7^od zJv&-Q!}?TDrd3QscmKy397uV8>$9}c!{7DpHpSLH;>|&0!!(CgSKj-mFMgv8xZ0??B#Rc0&LeP znYc1$9I&|?PRK{*&iKG-4KpMylGLMh$?LVL58Z}av1YmN44&|H<)zqT<`_TtH@Y2L z(7fkak_RQ@x=Ri;)~iN_`QkHu9x!Q?FKibIS_>KJV#=ziOg(nU?Pt6}H)PsaT<-ht ztX*kMpdOJ@e!GX2?{QX>x3xB0gEVH@uXk?<7{Hhy`(o0CD^6Z8MSEv|e>O(Y;gtu0 zLwWN4^UXQX>eZ1*VRBf?Oy%+4{Ax48oOZfF0C6|m>2~D* zl*(dVI{yhIbNbdk8|(!FoxV)nc|Heo#;x-Ql2)6I=E;UIVS&erBwp5;2Uik{@>Gvu`G zTyR>`YTJm!iP`>^1L||?-J*n$GoLaK=52%bczIV35;y_zu<1Rex(QR^U#4)Z1; zGa+1vte)Bp!F>dtvb-iJYwB7^W!4{lgY-3tOR~JJ>B!y-INxtN1lS>(7nYqr@6S0g z(ok1ozHJ~nm+E=HI)`Wn9e&M3&A&@i62Dj7_w?%e*I59sIy>gfo97BrQ)EoJuV z)X<5t+X}W=_I0A8!V*-iXoDy0Sh+dP^JvTpC)tEexMTypt{h4+-IrZ3ASCZ^E;S;DoQ*H@?G$R6=@pM>_)tdIEc&sv{&MH8REX-ub%?{l6!D9 z;56O#);OF|i8jEAgX*?mm1lsJqp8hn@o!xkhAr;L1v<|oZ}O5HyUW>OH})+s0FXrf z80eFS%igviT^!%jpmGnqK|xtF=#}X*cYXde|8E9;{HhATQ=%zkXd z)p3*TBF*q2E+JZZ!>H@vY_{(ewen><#SU){T-a?dPaUYz7KHe`p2o|tqREC{gk2cK zuOW)W8Upq|5AHj#W1v}YG^HGvGjJ`Q?rdGz|F~}Yz8gBezUNx`p{Pw{|11_BX+3S- z6j#?6&Et868Hx-t)(};=jE0obdKq{XwHbMaZ}cWW4td9UpaQj3n~HR=Z*xgp71u_4 zUw8O0&CS`QoQt*VblkqmT5(3c3I%|TM`pz=M+*(#e5Nxxy3`-()AA&HA1V;zJ5ICiP`cip zgKo69JxRTyL;gN1#BXxW^EK?;*-$;LX3o;sZ+y{#X~Shzo|kyoT&4KP7?f3<`uGsf z`RQ4en7cn0&$p{ci&}Qt43*;%mr)pEI9$_!Dg_zQ@Aket{q&yKv|_&<(hoT{X)~R` zcgPg+I=2{UIUC3yv`{SEOqxPJBEiIXZ^is$9hi8v^ttvxDG!^@a6tXx-R3)fvqc4` zS0iccnTt<()ct(4g5oJ@-KP9!=9H!Fy&zLxiiTCU9);yIj&>gwnJ)RWFMAGvRM9ac zHDp`Y$2m@o$@gvpJY=a^f^4jh>H-uXxQ-0^g#+@`Gc!7 zWhHMe@b&arf1S~bill%%{N)u_Y&>b7Z@TsiA8=lz6Q9y&*mh9uHrNR%51Dn%hrVbm z6y!(}#ck=D1hQYo?@Ip&S$1mkcR#alJv_S#*-~r~=Jr}uV@V#?vH@+|r-bNYcF`gj z{SAcqmbQQF6*XKOBM8}Je%`8 zwbie&f+~u--5e(BRG4B2Fx~f+*yITwZRKOA9M#0hyWihlci~|Nmw1nDz(PR zupMtz7kO62;wQ`Q#@#6z1l&!%UEKWg=X2PNTg`4`N)9w7b8eAXWLox}1L}_J&Fi=5Ai{K%4I;;U!M$Y2UFhJ!vR1E?pEY z$;bziy*pWiIJjyLzVFeSz5U@m`n0bSXQ}r`EZHMwZ?7E*IuR3J*#N^G{XF#nQg*`; z(h*=yhOK!X0`6K;drGgV*bVVoU5C!1q7L(-5aQuQ7hZyP9)-AOGC@N2S!pvx9l-88 zL)dP;=iP;9?5V>hW()8(u_LeZf*S0#HTdR%PwLL?eYtecEp~l=FJI}*Xz7B@B<{3Y z7sC}kj@v4YT$~*EuLYI9)~fPm z-i_FAzqIB&wS1KEKA+I#wbk>-%VO2JMwA(mn!4D^K_~D!S z?MPq5Kfr7w*wXj0r-pu?gC&+$RiQO-nI%#78T^Zka;jk*`iAl8Ug+3&6U zxy79fTy#lU+>1$f1V8sORMGTX((;6KDCqic(65fwQ^P0Xn$OC0 z1#~MTS(KV-3l2R;B!j7sl7|BxvHF&oD=Wm!cTWtJwgh)(2Pqrb4pr|SOB*~q2&_7~ 
ztg#4-A@8y29g1y2z0Q}eXRn+iuN6+@Y+^p|Tc_+iRo8Z%FTC15KY8d3Y>$;4VLQ=( zn%OTSHDBHCZIS6YzGRgQ)?ap-7A@rJ2gB23)Utg*FFCC0}@glbY$i%hMn zu(OeoM|aj%Dr67+^exn^+H!Ox--UgoFv41qgN#XA!*Pktc4knjBDp)QEt ze`vx7I|Dk6`fuwErDq_-uD|Aw2Ox5o#2qzTYoE`YERt?Y*@FbV%E{D8!8v|Ty80`u z8V3C3$vuZwp7`a=3EfH;Z7lr0t;69?E$Z)8Cp!bBgO=mEFB?> z=3Dn5A2S7kFj|^n!@-=A=vFuE;tb!B%UT?)DZ3EEErsQXy!n*=dHR@#Ta*b4o?9Rhwk-D875{W?Ac^1v(VC#4dX@G4xDyls&QDE zWO~P6+%%(PusB{ittzsidHqfl3BIDcBX;ddkD!TcyH?zx5wow@B`eTnv| zjiIByhm{37aSHNSsZTRL%OU~r!&n@=<57c@$kM!Un%;g3HihehF4FR=eF6qLx)NNp z=XuKf@dT1N0<%rrZch7L0VzYG=S%NPr!IeVy}h>sQ*5&DOv2slG-|vP{l&RciyNwwpqjjO4QtO( z3V@~^GCZT-2?OY!Y$i0c*&Tk~SuIJhI9H>6^la&F9XO7bwyu3bS+KKgE$OLXVZFr* zOwJN`Xncx3XeTQ^<0DW>%ugFYZO)+;*>mQCnnOPyv^pGW`M70oCq(kFSNI0)cD>y0 z+$3#yCF%H`zcyOUZwQq`H|R)O*XmWOh(=1>oAQoETsfUuk46%d$WO~$-=5m;juSfH zoyE~|=eWoE(Y$dVebL+Nz(uDb8A2e(Y{c8bIxV`wa$>cbs#wHex1Jn;o9?-AwJv)e zyUvs0gP4A?yO>OZ3dmN;tzhTkZn5&8{87#<SlQ9B^*5l|Z&*BU$*D>VwYJFJp6?^Hsw_`BL>VUJXj55|9gxW&g75 zm7Mh_)~%PzQ{d@NX3~+{48lPaAo_q)Az4E%`39|&(*amM#hJ+|gvl>^U8P?dnH0Fk zRk1i_<&pc5Z@*Np{;=5_$GD2#Zkkk zd_EGo{4N7sVK?sP`|-R9DgSo7k;#(#^;*C63#x`;fZ@7`Pff~Nk*P78&OV)cqw*f# z*O9%vGX+fI_nH;vE)}ED{19V5RBbPntSXpm5{GY1BD8waX?<#ki&iyA=bPj_U9rzHc|R(hR}%$GIWD@59n zk4m0lsyCm%ZuqE#smM^OSNvmi zYc*yO*bhg`bI&JLVUHk6aDLL60J{3cc z)_#Tls3ti~m!eXs@)Que;Z|~H5&gb5`xVJzDI_sqKj`NR(oD@ojcHAn*VsR#1)IDI zUBn!br;Ii|y92Kc7i#`KJxdazT)dNI$-=nkVr1zes$wr_X+?|9yb->0qmF#YIxDzx z<#(&#m~+s08fT1Bn}OJ4x)h3cEyx!~4OQ_~Am0$cn+Y7eeL`B@(+o8eOuX!j$`6@R z1^69YrSF7MSS#ilH2Etay|;?#s@SI(6bfDL)x{<@JcU`2YYX|h&cnuRWK8Q(P2a8u zAJk{aCWXlDKCcCbvXd@J)q}Yk{9x2Bx*m}DB!9MYk24)6Vn(TST&Bm$9V01uu-Q?nMP%J=L;rhPFnr2Jq1KWtlm$_+SFD~l#@d% zJ{o(SF8T10MS1>+NE*NOmXxTTm>aRBvfsx-#kUO%q??>aHL89lxJTqmZ^OYUV)cBN zeH&}cnCPEYGn)0>$PMB8*+ZN3i1p`VWHihavuDp`6q}h(7?mFz@FUwM6j(91ySlZ2 z)`FIn45YN=D#KWC#AbUu_V2S^gY1$0Ib%_}iJbpvs<;$Zp$0FQ)nh25(b;(KlZmd- zu9+YS$j;EJ6K`v>sIbihF>%Uw9lPsav$@nhfpMXCNa>KoPFEalB(Z8K$}1d?TR8J} z;XczCbfco7l`=y^S}2VoW7df?;#rUl?z`(Ln}bUx={p8bqI`D-T!p=RE6w_;7ei9A zuk5V1#RuM zR2eJN85J|HMiVm;A7<-Wi^dP<4v;5wgtkFjvm<9xb}pDlbaP`u1#9$xAEZUp?G>fU z7H_{in|gUz^(8;UXVZScMbmh)o*IM#&kencS6mCuY6l{0rj#mZkeA=xNxn0^^`=n{<_h8;m|zl>2!#{)uLPNZZN8rNRUsC2S4>Q zau7XTLbzHh@_6x54iI~+pYIi>DQFkdaB27oK8*7}EYa}fa z_Z+iJeUAKh2MjmM*xbB(SO3$S*31rTG(cx8AtjqgY9{uzZR-Y$oLd#SKcr1>GgR%- zQ^G`{Btlj5rDz%4yupMsMcrvropU{uAkLB5PbCl zc0#RWL>%gm`*8%rnlgliXy@7{1WmmS*Ed^zdIMZwaIa~m$z&44+1?^HTzkBz%OPct z{N6oVnf)&W7F;RHic9n53lch2OWv(C&D`yC-U=kM1*4H1~0_juZT=R?O!!|N;ur#jyra+}BW z*7A6&nUw3O+3X zphPHOW%vrRC_H?*%L;1@{Ve~rq+pMCSa zZlLa6hxPa<+3a9DiNmz7@{(UintL|4K*0jo12wEE$8Z_-_4H61&vr(^G!RuI zV9L!-j%S>ZWqq8Z;@ok3nt;1)_rPNtaP0~Qoo8H@e+^B`|%vQ^3+l+l*Y23jroHDvzB_k2BD%gHG7 zVh!0he^??OWbtk)_ch>`_fdHTn0N|}ZjQtaioNWSwTXD+zNBVv{V?&}u>RZ`4Np5a zwSkFa#(Zc6@EB}WA{a(Jaytr-RAU=bBk2w0YWhjkX|CQqk?3#s_S{h~&Ok03Ua0wm zDnfeF`cxjWQHHVht|BF=qj&?Nw#Z{wjYtoJKCOL3mx6n&G~x3Um@K+H8LZWtRC^Yr z#R3uI|3FE$sP``HvmEeFVV@d@F^gJc@#QoQy1LNIq!NQrl_$zf#T}tB5>=LXeDUeX z@c*UHBSU*dMx=uUm(y<8$PH((g0&k^q`707PalVUV(keHIeVVzDrUrM3N|4?{#DpbtlFiAn0zQtibd zBH6Lt08=AwF7E0l5Y2s~kNF`Qhc|sVmGOZUjJ3>Vk$+Hn5dE`8M^wyw3>&c8Ri1-` zmxMzSGj~!|h>JEGBlq5fktVN{*0da-8{MnJ2_VSjT*+J>J7xgJ8+x znci4)ow2TxLRxq)zt-257&=tqYDkcNeHitrRoPKr3P76F#O45Ea?FOMS2RRR7Jweo z&-AsFTdPQI)&TM;@xo-|ECZgowUsQ)5;PtbCrffrS^n6e>t&s1nlmjfKrdCPx>G3^ zvG~G)6kFVOaZ6Oi!Owd^d@rMcKhgI6Bg{siXLgolezm$>s4B{Z=`Dgt zUFAkTx_5N_7b2SS(_4JR_NFBy?`Ha{QkAsGdhjg1B(5j5dIo2Ib+Eep9PvJ*G%t|> zBP8N&2pWy5I4>w5fU5(a&Qz^r#{VLhddrPTSC-AqSi3)cxbg0n`j-)u*4)F7&%V&1 zeo@CP%>n1Twk$OILq@(u1m%)m;JR?N%|{%vPA{;S8nB_MZel3NK&2C$=zc_bOc2eg@gcFK)DG?JzB; 
zJ66<7v2dP!7V%6FRN+>Bfqb2%cg?t?8e;fG#i1&QQ`-%OdIgK3jR1HyfP?%JyG@~Je6joqs|wp+&` zg&LaiY2Ib_$t;7l@jRHK<(mG*?1P_u?Y^On7ev27b58z(`ZA)}76&JW;MIm28oZUm z;O*Tn@+1J2#r!8^O(RN@+ZR)}PIcB5eTSsmujoCJW?LV06Z$*Z0Fwrb=0m-JNoq_@ zx4*)IL|VVEjK7?HR-OI*R@bVf!=rm^C}F5r9DChlUUatr|9r-PZhxF?=2+|cb7 z)|6tPJ8v(>aBu!-C6u}DkU4E8Z3Yh%^d#-@Y^P@3NAIhU_d}Z4DbmRG=OPEsV@Gs| z-kAy6zkb#*+D=m|cI^Wt1bw(H*IE2%&LqHd-Er{Jg9Uur2B`>@5b=80=aF*$2H}cN zy#W#`u1awb(U+KRDz-ceB@%1vFafY!O-+B))L1v@#aGr3W21q&XC+nP?YoB)O{r;q z?L>CPs5(&3Yc{{`sfc{x-1BAXE19sVD}Di5p}l)1e3~iw^$2k!p!Sg>rOYAgk}Az* zn>|S2{0H-C327qG9w0uS8aG)7~4Vm(64jn1T2*huVbI`RzK+vyB?o7u%BR ziJ|X?G|bcsCUJvHr^=2K^BPTv9xwMBydG1EZBF|L@#lcwe?`oMYs?6$+SDYEJ4GB#l@SMUXG# zPVTk7lT!LPY5Ry(m-Ni<8&s3?>fO}~wj%#r6^Osvd7rJMD!LtXUlv|y%LWI(Y9a}K za(0svRd+4*za)(OpHD&3|vW~(| z5SpkLV2Dn;u+R9Fn>S|hwA%o_i@&|^35*?NvMTuiC`OATCDUIVCzQ0z+A|)WFnujY z7anBP(Ox1o8KW>*UF-z86wp2)4`$HHW%1~-S(m*!@EXG1fAA>tVX|Zd%+gLo#+aY# zSbxc8eruxs^}GMY-dn#_)ot&?QqqESNJ)#NgfxhBgMf5*gLH!+-CY}`YlC!yfJk?D zZ=@SGu;IJWM}5xcocBL?uZth%wb*OTvF03ejCJ2PEUU9S(*&Oq&k9? z4JVYp8e?FLyp(#;psSHj#Vn|C9$SXk)u}~8S|_AXG8rkpRO7-4$xw6EGZ7eWewWN3 zc+oyl>HIdoK-5jY<2KO3OS!>jXlUbnlwH@5r$s;Hv&D-A?HINBH|?v74P@!wek$xJ zg(vON`rDOyPxj~t-Zk^AyCo(JLo>|P+ZW;ATUKb{Dr3wrxXdohLQ$rLjVmaKf7`!D z@I=oQAU==Kox;Ta8i7Z!=U7=875trZ>)7Ab)pLYu1RUo=#vfn$$pHh5CJPWmMy=>F zdk`oUbF{S)xgB>dz`W0-%54QZodF(;*Bp$Ny2I$OPI)PTpE6AcR{UIgNy6mU3*gyC zuY_ox9WoMxa2~{nFT01zzBh4l;R!j?=B2@xle_ZIwqhUU!jHTtz*XTu0QZH%h8Akv z=}AbV0sDgty?79YoSJ||?D}lcXx)O2^V@qqSG=7Q)Y|>0EA{z$$64QWx&wR1ouR{h zyCmh+*ss)(W4PfQf@W3u=4&{$bj&+>>V_t5{>#>^JO-H6icI61lA3i|O852bsJ)3Y zQ{4fOqXSb~j6)+$ka}Th_F4@YvCAypZrK!9g6V0&(0BqFmL3*@?ba>>qCX!w2ZAkA z+7!mOL{IG~&Tr?nY&|7I ziY)Hz_=qE@*>&kUa6R_-y>sS4hdPI0;nmyj?m%J-XCFcSe{MS=m3y`53 zy{p@7Evifjal}szC;Y&1Ia>oDe2v&>PKjEzCV0>;gw-Bni~ccXrls#OVQ zK@RdH6$y{?UM}dFuA>?joh~`^IL}@a1QU6h)(bnh)i5N1)atCz5e{bZK}*T~Cb3dX zfJ6gNOZOiK;Irm^4~XJ!Ki?anNfI!7;#Wh~m$`r@I!V3?TVdgYb?<#jDt^#Z^D?zY zfRK!j@NlR~1T2*rpTi|%TyNddP;Tt2l!?+e!oG~8ILX)94lYE3`j>JURf6k7B_9AX z{Eq2zgfY9U2RuDT;9Ifw?}qa6VcPABbfSn6WP_Q^wm4bG^#jwv{u-tP2 zjhG6m&o7CyJztadi@INPw0(A%rtMD%I*%OPxwiDVi)9`l+ znh8gHkwnqWTJ7|QOd6EMZrNjN)-P_AnZ4)l{H>8k9oF9la2+zJiGdrrxn2R>ywi!P ztc16|j&?3MDLl{I&m9Izor)0tb-C5VP>(xjEebvU{U5I@q=rqNXJ%k{_hq#6oEV`J zY)H%8u)G2^c*l>4w)r{6HM7ZtzGT9ZYsS*+&J(c~2YG*f*N*FyuLJI0T}G5go3`qp zM4twKC>*j1m*kI{cE)r>a**PBujgRrDl*4Sz4D=X_sQzysNoi|iO!2%*zO{R&^x{I zd#*bd5BIg2<8?zH;vceYq*apFb+ms5pp{rOGSpn)XjI4YFXyxF&Ty?ouW|g>|-GXQ7Jze<5N8PBr+y0 z0O00#>`YVMD?@GB(uAX&RsP{tBgoxbYpUE4snypSFwY)8R=b&5u+@4Gn}F+o`=Ype z@$K;yw*4T8HoyQJ_w>t0=EE=x!J$F0fFjUV@eH9aLDBog=~!BzgJPE6j0X9$elZVM zyED7$gE-2BZsC7T(f>k4PsxqfGd1_sm`Q7Bt(CXS#jC;46T6ujmMeN7K>$P>bf@55 za?{L30iRDIIcjlD&D?_E`&)%q-Bn5cvcz`^uSs@^(+{=l%(G4Hw1>Q;e7h+EZI`?E z?w<+bU3F@7Qw!d~64x_1+OITD`{)gYvJofYddM9Az|NZ$4~K%!g)*0d$+-t`ZJ?tJ+MwenUs z&S&33X`2x1oW1R^aL<_$^k&MtK<_bk_^dD;lZOnx{0>1+R`vIlmG~+{{ zEu|=clNO@A8f^m-7s`jvHxV`fAO76~VA}XlCcnUSLYuU4)BsL=6G}rgNqe^_s|evlbbX7oK+;^8v~rd0G?N?l(U+!ruTtK67~!aWNn6 zyuoQ^wN*|^$)&ZSrmLdDh#7A3`HKLM>aeKM@=nT#+;AolvPJCIb)~B%DR^F8VSbFa_dd_B@TPdr9^Z1WB zZ3bf30BB4)UgNx2WIKaT4!Dm$$qZzi7mt_nF#^zNOldvXP*Jlzneuk{-dN{&fzD1r z^%O>*M=uVX5dzxJ?p<94Ag5nM)j0WPfP%>p=jXK8BD-!cgIbH5wiY3i9`cs!sNw@8 zbxeHEKMmjJS)ZQbG0NnpqziIg(B4P)l4WSKQ6Y%Tn%UmJIXvn%D)ph$we)p}^#YOX z6)XT*l%slT%Qo}zNV(Ms0BudTB!i-#`Day$^>$*OItl-!3K+3`74;Z_FSh=~EF;J8 zC`=!)wyRd?|}yiTWO8?1VQ;VmH-g&E6J!QQi_?aESH;=Ijyp1S#y6 zMaFv(<(P=zaYT+0s#Y^yUrg08C2Z>s`xLl`OQDq;c@{86Y3l`I+k17ko?;-ynBL_W zgSOHuO>`RiGhcmY_5{!OE?6q(M&nb>zUNaP<6Y)z_5&~-J9DT=1}VJf2w**3m2#5m zSDNLD#P!b!^Vhc1#0fPK;rD6~3PWefb66q#8)q~=EWgFtwPfZ$4qF_PoO-{9bl2Vi 
zU0HTT=77!x+)-jbEnK`e4wr4ll(;>J05@uUw_b8S$+QWLj>nu@MC5i#eK^`-ZDx4^z`@DM(Zs~yVm|5_8RP`xgCmcwtB>nm zh)}ea?D^nj**rUMwYPLAV(!){qfcd5iI2Mv=iVgQiZO>{s-%GP1b2cWQY2~QDKxnX z*87xcce3Ws_uHnyV48`;I`E<(r2N}C5X{@vush!p?gnRv92Ry30I*!=6-()Ha5%8~ zgD6{4l?s%~#|TOkr>sxwtVQZdwT3nXJug2@RkLv~rpp^Uh>JByr=bRMmyGggvBBGw zHJV9Q%AoiR;v<=ejJh@!xX^L4RMKX7jZ+ zlF$A4OoTZsh1lk}|Heis*U!nHT)F+i23vg3Ieh1V zup~5C{^P)_BP6wGTkUNS%C$T0#$-3}9TTF6sz2-EOa-@LYWH?vTohz~?+PLh-fKEI zW8l4zF!-#yQlvz7z-A*i<}kOt?YZ-VD<99>)xR&8kWi2=$TzA9Ewt+j*)K-6in#6| zGY727tG;%y+)!|7+9_whcjb63Oj+>E?xbJ4S14wnnPsS9{+55lYgLakc6(rDdvhz; z^Ix;1295Ztcl(5xeqZy8e0~HRp1mBBkMue}`!fBR_6gau)J8iK@-(?X8D-{ck1!n5 zTu&SX+vS4;$?>6iI6&s@SPIv2a5|5>o!;JF$X6|Q3WJ>>tAs0k5QIngjpJ7WL2Aq} zf(@RYMS!EvO?joD?2x?o{ziSUSd^)~^J}*&vUf8=ZFQ`NxTq3$2t-KFv3YGeP{g|xYZevu@YgtdCo#4eW@(@QY2ayGQrPmfqB6JnLKz zjP-+Se7D&`8p=LLdSXrY-z&fPL_N1|lfrtgD2ei^fk=zJ?8Ds3iFxzJB;jKELi47k&Q3#R91PvV}^ecP;LDyAJsIY#t;-NbSlUxeW9zthSH zADge$|l$>}yM!GdthwWM$ugg_2#l%^eU(IcP? zsQZ!VB(9GT>f??EA$h*(ID&z*vnLzRTObsnrp;pv^zC3dd(P%> zNs3;T-_S9{=x}<6<)TchdOLel_QGsWlOKgzJFMflz;)HMpLsL4tOM`1*#icloW$oI zE1Q7|DwQc*>S&CQow<+Xx)FoMB4J7t-F}cz%UO^|M_phr9_eCwcwY<#^ny*7`trtM zVkg-cpLD)($dGy*>Mv7mm?@`e2Zu`EV;HcD7bNEV63uER`Nw)F*#Gl9N70z3`=O-$ zu%X-b!c6w5?*47%@{M#`&RG-li(=vPB5iZsp;uqXbB3U&?(3++;w259eae};oiKCO zcV>%|<>yW66R_`^K>JqPz*^qHBY|Z)jB_V|gu|2cb>it4^*YfJKAHiS0r6TAUCc?7<-(Tu9V{YiW@uu`L6Aez-TPd(?K33VEB5rAN zdnRyTV8E4*OOUw7giS!ZzxXki=PXSI3&r{)9~T*$XLNMgy+UuK%#@nLXb9PHzKOT) zI^NlR^w%@R_S$7T19oGHsNf!|Y=RC2AvVx5V?*hHFeJFw?E7P7|C78qt7_Sk>AtIXNA%59>`^mHcLL8X~mpc6>TMi9+ zwP%-^8=zgYYe=?da}x2JD-e852|F{98d{K*1NFvfi7H*}h$+oY)hlJ_uF`>bEE(yj zN5`I?Hhgwf!n@=rD}x65=!J&UJR>t$fRa9t6r0_;J`EmO57pyoWALH#5-Uvt?^8ud zIhWan&MD?bK>s4Bdfb!_4?e>)ra{Y3kBWF-(n#d^{ntVjAj!#J$odFARpgPF-;2pmAX!zqOKXd_zhT~0^PSnJI-}!p+J%;g;kBSg zl+c(uw~$tpHG?XZ?Ial+f~-4gCN9cZ<8Se}-`057-BxtS8n#9oN+=ooBBWz7y$6RuLWdWhZ`GKZ zUiPKSE07T^rR#UT#3p$*#1yV8Lk%*8P|>PoF@*O^IK;*TZYX4Qko~DoLH#>=w04tQz5=UZ0Ydvo1&pSUx`)lCIN3@^-f<1}W9qv!~W&oS} zK4(Y54a~y~rbr3KAIpwG!TeK=F;S)%3{f>{AaI;{AJ2rfyYGB= zucAz}XzNU}Z%M;QJHDavq+&_HGG?4G31{5W{Eod`Bep7EZc!tJ`#jCbshp88quxzw z`lWJ(j55n$v!3~Frr@}%Q9GR$YU>5Qe5ry*!iidYvhaG`N&3wOS5I`_d8*bs<&mnC zjv|uSxSf~Xuq)@lk)Ab^@zUiyhh0^>!<_5)UnM%H{f?b_erV|Hcjl#6)OxW?o zKo~>*4?(P4t8$uo3p=}T8zUKw*f`>yW+UrFPdUM%Ny}1`%~66t`zxN0>cR8e8g);U zO}pxM>P|+56OUu>_b5Czw#T>B8ia+;U$}r$pvhx_8<$UD=}vhIi3J2z@;ZX$EO$>_ z7r}bO-_k*IhxHF4YY_@o{}K5iq38#suE0V$GHKt(;U>d8M6Gh)DT*iuL zL5S$|?>p5Ond_?@MZx2!@V$xj!tR#oGasEQSe>%=aM;Mvmwvd0@pApUnq>+~AOa8~N{L`qYMUtT< z^K^H(Ba4ceCl}6L>G-NuUBETV%NH+ohQ1|o7d+dSjAR)`WAZO88>H|}h8p8$?&^?K2+$Y44LW=VfIg++OL+JysuWVgghukYN#O7-Q4Pe*LKEviYiI? 
z!y8{TSU(AQHi3F?XGlJYBOEcOi2;90W9fCN#*G;N?}tCk^wv|T`nbIu#s1Iv5yD17 zv(m>HlD80dm`xF~@a9V$lo3bc->tH$qNCB!A=6yxoP~|EB2tY`=%_({o)sny4{XtN zt#xtS!Zy{BJp`l;=oA<`z$H?@B~XT|ElbPK7}~zR@?>*7(qi*43r&q-^#wdx5e}q= z@Z-DmiL%0qGQv3eICeAa$3$dbKvi2{ zP~0b7K31QTvab`jR`15yCw17zUX~$p!Or+AuslwVr6Lyap~K$`$-{zBo_0cutS@b6 z(trFQnY#7ETzJf3?=^z^SHuZvDC%(!$^LhV22|OF`jI7KhcCczCa+_Aw#JL%2XOEC zLTdZz$nA0-?MtY^d2+eGPKEBO3F?4M)M?c-5ewl`^J1c<#JuNA3csW_zi!Z)GIgYk z*wnyK#C#c39ybh{zT(-S{PLD(v`yd}9IEN{wDnMIKRL?F@Yk@~7qI$&30G@zs7$q* zSV(`^!=E)bi?Y7)-QJ=#(1>WRy(d4nxX*+qO)rLLR?^)^^#5nMYr&h<`LI0WkSt2y9sQsd17eIH z&U~MmC>i9JMTIYCXV#K$l9I2N>h^VPATJsAGnAE(YweIkXB!G`7Lc=Mw?-a$`d16s z9~ehjk&wv$(@Uyk)(Q$8L1pJlqZq74tz`wiB+BsxPgDMF!I<|^Ta}`=f5)UE7Ha6; zOU@CX{-^cLqJUMn<@#7=98p2aC=wR=ub10|PrMbZ^`t`kr>4piO&9j}SpdK7=bv(` zKaGmwhmKTk37-9DM}N%s?}ln5^sN}a6H)%n2K~0H{Fbpq3aY^>-=e<$ec9ha{l^IFZ9=EF!l9HE>+f9rhiJd$ zEq(D|NpJ-p|J!;0sr~ue@Jo0w=Y3zgCH`Be|G4u1590s#Ttf$uynI1{phy$B$FQ|Y zUG>(A|CFoN$frod3*3CIo-gf!zeB&(*&XbxJIPr9>!VgCr|8z+I84wds1>hvLY})p zVXg#Djnj_XGxfS;QC`@sE^^1+V>NU<2%FsLEr946*0Q`kFAa7ppDZ9Lo)Ak{x(t>4 zk@=7z81ROKInNL<47v#7mo*rXuyJl3Iwg55_MZb~;VgB==jxXkn1%UIEOwx$zo0j( zk#;im7kdm6yOt-S?<9=2r%pq3+#+^k)S$ndZuYzEGo0B}M=y1gt!!E@+00=YHrE^6 z^)640+DTEqOyxS#aeJbbIt#t6n)BeY_`?9(Fvt0>>W&5;hTh<6nJ4PIW_c8orm< z=_h1&(!|$Zzm`$kpn5SEC0XaaG)A2hoX*{Lo*3j2#oX&@eCFwlvp@Q8` z#LzI^ta7MQGW85VO&S(H9BZs_q^=bHw%t^c71Q0pOPrCp+5g$}p`k3`6x~yNa-!UF z@Ytm{@BBlVTS;q3YHM;b%&9XCO8ab~2@Ov==kPk&BmB#R{V^40 z>5m9+Gp1^d+IANHjN3UT=1x&~;}U%9@kSA;$i?&lN_SIQWG^*7hkq4=@E6jeEutXj z?P=ZyjYz@9o3c9ZvXhBQ0}L6aVxYLZ=TUS)TdZPyfd)(7shO ze6g{kn_x)UI6;D1rKOg-84TcT+>0U1pek?EFM92u|zut>^+C13U#(&?!2Y44=OIWV0x? zl$OEUEot|1@7(pRRBVHjba$bVnVMR9tF+4r-yd~U_x zqahw?Ba_8W!rlmh)mOOUj|>&=*~4L1IU&=H_mL*X3JjvH(lCWoH;(Cj_~oND zjn-wpc=-kva(zmF$YHdz&dZ+1u;e2o@bH`_;8L;~NcZK- zNh%j=hD5p%mT$OOFh80GW?~_ExWo&VMk9!=O=kh)!Nu^cZJA#&i`dnK1#P*ePvsZ*X?)U@$2+dsz0rB7K z+@f!4t;9LEUpKO*`{XXtkQ?U;*eijpZ4|uKntC6*J!oH63G$4U#6SqZ6f_|pA{^lJ zX`A7K#NgsRM*n)4n4dp!whA9(I~PrB6fvB@_XwCyZHR#0NrsHjDy^o#OZn@=TGKt+ zJs3z^s<2~m94-88QyA3TEY>U;o9=dDWucsXdNUhJ_kJ8+uEoGg(0=5WStoA6t_I@a zvBXGv_QFzhNs~EsIz!L^Qlz##Z;RPl0r5RcRGZc4jiXH<4$M8fxsBS`)=0tLf_ZSf zmXW@L?)8>+9`Ud4(!$TL$sBd78l01J*jIVUslF|PDqV@L=Y_?3exYQcWr`F26T%-f zj&wKQYvs+rE=bmQRiG|go&tofa67JL^P101p5G9RtJoPKOjRE(kDvIq6H$=cXUyGv zWIg8;u4y#~PPba3a$e2^!o%*%LhbK`4gfbMQVWqAZZ;NTQ2@=|ubX@6S|qwXrgYen zoYtDkc;Br-AH+ zB)0*-DW6Y09R!zQt;sU>n+xP|C0UBgg4ibuDwt4z@5!rBmV--&nmHgUzIYWBx@G{w4aq0^QZT~u|Ckn@S z>d0;t9YWfVb zYV;y8$3|TdoM(Toy+D3sEI`*Pq8iWl_@|08P?FDBC9xkgsgaGgQdM8^cA=$x+Nkca z`sV&IgcH(Mb52i9vq68Zd%8H0y(IhSH)4vU(fSK?;U2L z+@pK_!XdN$X}kR=S%R2TcOUUhUkjSI6f8Bgg2U})IRb=Qygmycy@t&F@1@&fBOp?avVTN)DaT+jy* z#QZp_lOyEvFkc=jt;@NPyu*NFG}IOJ`dZvSeb7a}3v}-m(#9`F-)r^rN`ykkSUQghG>NB9Xsp-G35W0J82LYzw)`D+_2&_+31 z+P5(2BY4P_-)%0)`AU|0XnmH#`C zkk|YoVatPGK126_M?!U0vAnSd&d(jGXME>;04r$MJm4Q5Xz6oD1*;W2RPn<38eh6m&NmC#cx@TV0N=MnDz*AbJtE2jwLt zJ-o-qst~8OS*ND1V}^rtm*`=Xv4-jMvMR$16~37`95TG+g=+EF*&pBL5JH-6BxJv| z2iBEYpJGor_eFIDj)x3hqAjC$~Cwu)1|CRE?uKCvwqoqaFI+)bL^HiP1L3J#@l4VGwLSgz)N^F%esOg zlH6xo5mH<9YRa_!Sz3|ijtA>(cz^#88OQ9dd_{UL3hk#_RG;%m-h`xdq8goLZ?R>^ zi$X|RkjbIlu+Me#x5&00cHQWfqaj@daAYKdbdX6XVw8WXvX76eN9T*8rlPOGp`MORtbGjSFw!R5CV zzy!lA+g~Vnl0whYikPgyFdVk8@LF@gAP`-@LksrCvx-P7_2`Use<)eJd|pKxwD z3TinBxd%dyXIOWa9Ru1`niKlT6&4W5zT0l>mkg7AxjQSm&xQcjO~YO7BTnR*7s`Fs zHYj#2Czh-+?P?TG37zLr?+X_#xAuJ*+JN)ppZ3w^y~eLj8<20N89Q|jIg#pl*S@A2 z_L|?QWnKfWefaPEzjSI(MO?lKLU&qkW;K0y(e1~u3xNvK>)((FpDt4@9zEBOglfCC zq;C`EPV%s;d54RydB^ughin#eOf%m{%YuYB1slt@S@fyCLgNpVCwu&M;Ue@4G>n&M zNKqcWDXBI6LL) 
zaTG8TT9x})sblSy$$p&==G|~ElmVv>b6N|lpT>2;|3og*$TI^rKTMm=P*o!Cv_nb9 zf4cQheATqL8e;V^ZIMifdz2~0ihlHkrcW-LsllW5N= z7x`6C^Yv`az_&_x?uGA_d@Dy)I;R_bKa2|ywrJkd0n}U5+H7;)zqW7h#A5~zT<%n{ zM=>l0Vlxxz7G((y7YVNhN-*CLAGqA}7VEAY!@*ZoleyQTah{Ed%z;|^&Z?;$J3GtW z5^zu6CMpK(HJAa828H7JsiaC*g}QA8EpM3)9aDBG6-^A&E~VN|Tkj_?N1J@cFSALw zq}nJL_Kb>TjH2DZ!r0=n(qI2war-OOn@jf1z+Hc%bG6Bd_w=)$kS6vDN?!8oA2%|Z zsK~2RFJbnWG^(UE=es_pzoyz=LoBRioGqW4Dz37mo*Khdkt+yy)eAezqPH@G97 zH)=WXY?L3YjA}bCJ?>Je>*4!&xOvu>v&C!&_pyM?5Lnt)0dhX?-z~=z%fQ3oTFoQ- zagW$Q<8Wfp(0b;3r^9c#te~NWIoom9G`YMtPc^_}JikOwhVgwiN!M-WmN>J32mB$U z#mpzN|HjKEy^yT9iE`n;WBGSfdN*aPowi=7FJ|=(OAR(r0{h}m=ykNte40=u6`=(9 zim=c~U{t6&SW~tA{SloQ9#VDqBe|dXZyPn(CffuZ@;s*^Pfrk}=L={s)9-X)F6WH#wv6@{{Y@Uiq}NW5>w3pG z+Z1{7Iw*WJVc!rRQE~P zZouxsuNdrdPjNn;j;DL#n>|k~#c-N6aIpocq}44hc{~$1#(4(ama8?Dq{hGT-9R%TNVUJWsOkV8_@;&ou5*{@Jfp zzS|^X(D8qw$r3#O40a;B(c%FHBG~(^}@<2qD41 zl1K>kNau?CDQ3Y<1C>RCZnEUGxqH06!Mj@desAGP;jugzq_Iw5C@QCCBwKjFzQGfG z-Amoi(30bIBJ<;Y-f-y4T&dG^(4If0^MNC(Ds&fdDIorCQ1VBv_7L63%%iCjWyA=l zkE(x#_$^J@b7->RvZmPf3>9`!N(jotyrk`Fbh=Bv?1NTV=xwGLc5za$G>7qTQA@cG zEqSa_pYrIaZV-I}VAj{4nrs%TrD>c@WGSdp08%U6i_fzyRXeJlU?}m`FKY0Z>%Ho) zA~HU?g1f5>Y}V87h*W#wk|c@M-w3e(iC+Iho{9@(q#(P_IRLc@5-fw9Q0!I$ghAlN zVH$exPN&WsrIt@CCB# z?VPtd9Nb)8y4&%Nbrf-$1MMO}t(y?H$K=k_U-^HAB>!eC+jpK~jvFv9f2y@Vr)uvZ zuW&{VcGDMh*+#>EW9bqyZ(#zh<(YI4N=~3@_u3QW6tuYy^3a+$7#%2uJar*m#5j!3 zq#G;&xtV=(y|@ZnIQ=Y+xe|!%$ZY5^tJfJs6RgplKaScT`x&KRF(t;ElgD`=!x8N5 z?Z3Y&4?SU*zBLVCbog?J{(hoKn9t*&qF_lk5+jVZb}vpY$ubk>ktt~D2VRZXS|oZ|*-V7S@7^C_Uz6a#&z5MR;}RnTarNTL!6pMFhUW#!y}RWq zThG$Z^~>4Ho3qQtaIn?8ORUZqQ3YsP+Q~8gFI}vf@+bsUsk>%o_mNdS0~cnVF>2w3VQe58Yn`fG5pDMYxnU6?}C123qsyczxnl2GPCN=N8lu-wGpFf2-m$SJng{YmC zF?-72)bnEes~q^xa-wX%|Nfe4BTqMvjC7`2AjcPZTDVM;@C*$$TvFW)?zL~b6sGS@ zRzlXomf_4=-s0|O{;93cwQrkee9vaixOY>fGNSgdQVnqG?{ZG+yicY*r`x`ACPbQU z%ubdIX5U}dWn$|n!(GtNW5sJtE$MH-$I8AalOGOwy4r?j6IUSJ|Cm$Khr_w!woU5v zM@ka~&(SReYUrTaPtV?eTl0CWqZrqxEW`xin(dfEx9q{9H zPWsrXBOsgf*q{_3d#sEyx)IJ2FRw1rtG=ts-+$5T?Hn9++#)tov3me<3YzpVMyZ zsL$s^W~B1E_LvCl`Re02f~eOzM;{>FTlC(?&8J^($8OV|4)Dcx$aH2fDFDUe@u_hr zpvArOdpwz2Z_T9*_{aCVG`ES7WDvA^+Nh-=7qX-^R^qhp_F-&oLI1dQ)_xaNr_6O^ zYxleS-1IBb5ESJ95dIr(!3IxHLsA6)Pg*nN2+Q_6j2Dk?2h_(Xc@>5g0$L@19V zWbjr?Az<&DgLMb;RQ!Y1HQ78}4n_}NUKvKW>~pj7uWGVU?Ol|gt@chjbJ8)3FI!Q> z+q&Im ztB06|a2K9lQ-+-NC1l1D3FS?*ebjBP+HAtc;;rPS=DcmRPn;aZ#B3DaF8-m%v}wGq zu1}2uXBNwO!vicTyV+cRN`1J}owqJ_9oR-X$AtThmac65{AEe46G$-b4(f()P!Bli zc2x?LNNv}>a6T(h#{UnzKnJ5~_!XA~Gr$iTm#i=~9(ha!?zR#s2Px`t6dDfHQdX>+ z{7`RQ)=zn(d5kLFBPz!LzvM0CzB{Cw&>H=o_Kti17G#*b+5J?MvtRxdU(+rpc-rS@$zQxp2IhJokf@&Gddwfj zriff&rtVWAYTFRN&C@aX<2`-H@x8(?%Kcw-CYj|<7a_E@Ey@B*D0|ALdskhAj)oRf zOJ?TI?=Hfs?#R=Q0|Qf*B2L~L%;=bXtIwD!c%28`W|$|q%icioYrme56D%~VYGF0x z^~HU#+oDH#$ElCm&Gihy`q`ueD9HDvz{a89~^> zt<@rProqjma@lrU^>C|fht)ETTOz1UL&KRn_~Zm@trr0OtKF_fe9Y#!M;l~1ZMh3qo_g-I5@BpW^(!yZ!w}P9sFm;o6 zHX0L;F`$!anIy!=R9RBabQf;3 z)^?%r=#aPJ$P08CCZj@_!>U9dc0Xg~y<}wDT7=tr4>T%OtzG;gq7Mg`bZ@yNBX-_1 zD1U=@r&?HmzT=D!(;7fXEZt<;%Q_GJnCWuWWY$uGq0U`@4nskd=E);mz*@9B>)Sd_4i(uqwbNAaq zy!F+axR!dDU!hbN`zS4$R^c@r@c6{o<-Sc#mPk~=qF=zX>t}?qQJ}A-X}-{S^4RZr ztm`Ac0~$=U(0+k+BI?Qty)wz1_LZ!zTAP+zh;ZMv2?0XSmDhR%3|>O zpH=?>&u5HA^rRi~bJ%cDn@WK-F2)4#1qeI{xPSTnV* zu&c05+gxxnGi~zTvV1foXbt;m@e3Rr^3Cq0w&k_~a-k+!T`vw@Ao4#OC{%;nyCc|| zc7a(1!tK)VqEBaPrmtr`mWf|@>bg(<=;q^*blok3xpNT;25hx^f3@*MWcf)GL5G7H zN{Xie<0X`7)@dP+%`5~-Wq!w`qUvw$R5tbL`JOd{_PusjXwSFPZDls znNJX=j0MRDxL}(_@VTSGm*3K+f-*gFs#!8J}^}Y zc!dc$)7U}vaGc6s@J6a}YR%I_vhgCQtK3JjPoa;!p5 z+2L;~j8MG&)W?vH4G*TD+ktIY(^I)!B0PgHzb2mgvQtqmjCqwp3R;POZC?g5*Km$C 
zlHTNF?wGaa6LVX-2>}-k!~50`7H1MZORj=fXG7t30XsoZ8Q2cgXNU5RIDaJ?vJ-@< zOlq*Gn-lN%XXb|@VDhe+wc2X!KX_qf6CNFji+E(Ylj&pA`zwAM2d}Crna$lT%V*mxVQGe>Dl@`81OR< z;qGA8_h}(`LaM-5wBJCXZjo`Z6v;JsJ(x5Kqe5AXs$7Xl>wjCiHNQZ?hZRm5j93Wv z$fjo0;nMb$P$D6ZFhnax^1%q1KIX>-@KM}TBuUYO5DU85avHIVXZo8lm;Hs zF3a-i>wk6fiOc!bX7~wvzlV2TQue`#7MEdpN%8$4JbJ+~1s|)HwRHCBj&s{bYtz=q z%7VtB@~6~z(EMjswJar?(qwY>3@V42FfK&ph-?RKBh#WE$0utF(2Dd#ZmF~RSTIwD zR>*MTj{>cTS*=*~WnxykAO3@cT;4&?W)Wpj^5of~K`+`z&VN1WoBB?CSW}Y{D;G&thD0eS}5Ak^45=LQc;$QV1*X0 z58E6v8GsDOb%U<_f<10e zc{tH|``r>aMzw85y!^nfO`ySa!(l-u={LlU)yx`#(GC=;m!&-pDcG#X0Hgz}iA;Jd z$ka&E0d}}~()F(;jVhWHmlKEU-^_4>R+(=NeqdRz+$c`GDRl4w0|Vhw`hWa^iIb}KP0r?y(au@fv$qR(}gaTE3gEM%5_o$H{(f4nS6 zId6y)DMN^QI2#QHLO)^e?Co;p5Yc!sURQs; z8<5nW4BLQQNB{9>7rsK#%FANil$7@cqc>9bn}h@VQSRoiLKwr+%rs1f3>od z5L{Tjc7vS5e<;e>3|zmxqt%aim`f__^<_>aeyQEUw5&ZXgMliA?+IkH=oj+96RX0l z!~)H$ww-s<%m;Rtan7HISIZNvg&hA(zdHL%7*T;C;2w<7n`^+L9o==_?SyYx6>Fil^7iD zafW!WvFewni^6Foo$>9H@Ag}-MfR+`k#7|{R+mCS^9u6VU`}p}w~CHSPI6=VuDh9T zV`}a0w~bdHHF+QkHwnGVo^tfvM%u;NPLstqGo4N1328)xxg%2v9XnIRj-iqgf);FF zu`38N2Hj6OTyhL=efeEz`gR`h@MyPuHl#D#bi)yFoQ#A1V_QQo8tkXKO1JIM)PF*p z_B8S_`CMT?KUN|qQt^&&^pt0#^1h`j4@Yb5Ew%Kq-l}RCL-aCbM_H!SLQ{FdPSj@> z4+k|pvNQnRzlSS(8cx=yHC|-(XCa#et0I#D#BVMNo&;c^vv_&TKDGKhincffm=n%Z zQ$>ijdr+q^G=6tn;3*GJZ*rl8WxGiQd|51*lVO z=}lk7L=9lPSzkx+x){Gu6*l@v?#aA-x2zmHRQUKfOjkhtgO66kA$xu zKK#N|c}|zch~Su>dZvlagpvQnkOJ_LVlZ<#VwoR3<8u=n>vNYf>nM;ry&kx+|GMv zO0!P9!+R;~DN4kDA=Y@#=S#$yB~GIWMz#fY%Y|w=M*Bk#P7ORfCwza|mS^NW2-sK9 zT4c``u&S8PiViXhLEoMSw|a7(s`qS7)J`5$9c8a#-jrTW zkzGOBi${U|A4dBsM*EXHA!frr-DAQbZye|y-j6Q(Z<#?BYaw3~=x1aZE zvA>$Cwj`tX@v0tYZauoN(Tg(j*rnp&YHPvf0Uk%{XyhaBigoFd)0DH4NX}3h(kbJn z@QT;}`+XTApdtO+|B>B=I6TU%YR_rliDb%ev?o&%Qdo)lM zqJpyXG@}a}Vakwf8FCN+`{>lu|6HvMNP2@9lI+K&aVO8j$Sh!`Ovm6;J`r{1G$0qC zS8U2O`|D%t2zqkjksV+mp((@p%Nzd9&GM<0n{&yI%SAxjcT@06mZ_;S6`0S!3T&5{ zg$&f27`)Lm@zDvpJt|s}{pr$ze{+|u`b4w;8Aeo=C~oxji1t2ROK5SvCg9>FcaOwy zc%YqLH5mYP2mkn_npnW>2i9nj+>br5}kybI@)<{dE?l6alJW+lzU1Td)t8?!6BNvc{vm zfz`vrO`4< zr45^AUZyPFo^vJ&<G`2BO05r9XRGntEAMIGelnCr|IL;x z-v-DRXx}Kdp1=ltTDRSuJ6dg4=MpmfoX1#ZXjj;(Hfn?8xr?i~3^6O{eG&Hog zv{Lsp&e#v9qvN?*jnDj}dqhHHQm0F&A|CAI5g$kO>>30qPe(RT%_^Yg-=!%UlfgO45dRA|pKE0GK zG1mn|??VL^t?kMZ{Jwh^3~G;zP_IHh5oy6*J321Z9I40OmrNboMW}zBH{mNAiIC4! zAiKZb2t=IowHdxjUCATfswX}wpEvo3Kraquv35%I643e!zruo;@u8WDTtV%?~$QW3R5928UFiwG65P$_Uw5NY?xDR26EkBKM8r+!w}6nF5S%Ndv=II!HK|7 zG}y0^g_2Ol#T8A*z4eHF-xtd0H$XAt72hns=D4w8wzhG{mO4C?%oBQEni>%=|2{hL z1GQtX+o$;j?zE4zj<7uvWJqBY4bT+d2wRp8ryEx#R_gBDatU_q?)UG~z{^bDUVeu+SLXw`Xd@YPS9d%WgfNo13x!UnlxMv9oY#N2bS+x>j%_!Xxk&bRVJd0wbMEzl zdjj-wNUM3Tv)5GOVG)DA}Y_rr1Ga?PvGtRyze>SsJCviK7JVA;+Ou%}%M0Thkt zHF>CGXe^!lo*Me3YNSZm{$3{y?(0_vlk#9K%OLnT=`N3C< zg^ZJSSk$e%@VR-cM#P2ImZ-Wq-lv0ned8-0p3MV$!5KH7*8+uv_@*>fO4n+oBtqOC zILv&J3fOXe8K%@!U|YTaGPx-X=;kXMOBQ;F7_E|ZxMEAm1F*ZuQHVXoMv@9Bs-Z^Z zmr~Go^KQkaH#b>mtoGOD0z{6yYutjq&VKA~S07zK`*^Mu(HM7U+V_kqIGU8BLP5^x zicF5VIve~Ig?3y8wpbv1o48=|1L)07;_pPjZEfdFxrUCEl!vc5Hu(xO7gXa83G7$? 
zmUG1kkc!cYgoqux+rhUQ?q%tw@OAuWyDF==+5(<9$n;9@ozCFjZzy@6I=;aqFuS-w z@mLz^nYnx19RJBwI79JA=z-6j>cQh(4S(Uz(&?BOYJsso)%LEBDE-NjoppaDeyz11 zAD@mZ_(-A3>)5qA2Ta(5Pm&9z5EK81E%sFpRiWIs-dc$$nplY)sGB8wRH5d){kH0< zn;L&^JQwd=K*?vhl5`94hdifcJwaruz)j>EpFYd`{yCQe?S&1iia_W8Vyp#H<;=Dj zn-FwQHdGiaa>;{L_Pq*ulznt(6MyFcGn&S)d32wym}8Y)m5UaTBG4M(sUX>2wz2inAqXRQw!rlj#Way z63UvSAdf#)Os9;mT_hc#dGr@3(_!D0-06hhwwCk4OMkxU!nBBE6n0)-3+bzML;D92 z4#|!kLM2wB$$Lp;<26yBv$?D+jt<565fQ6Tohx(r>a8i>d+V==-RG4fpx9u>sI|y& zq>tN%q4{0P#nv!tn%lts~@`e;^vhQ{s7`<25mZ` z{#z+mZh=dWLMW*x9&7GIyZ~FiUU^&@G*m>{6;ewV$?oKXa~+gj?FzgtZV~GLaC=ri zVd%GRI3dL6C+}BUiYm&3I7I?|+x4(p*mr|G(Mc6(S?y1yW#H}D3HZ{it-cW!;Kq_b zADl=W{dx=8ub!x-1$qAvH4`M{Dtqec_osfnosnSnISNrmhBd|b}*`JgE#)&zWn)W-yU_aJ*% zp)a+c4&rHf@;w4GG{$*uCI)r2#Asx>sq3}i-xE>P|4$%SCkMq3Zw>ftQD(u>v3MSJ z#ra6y;yRNc+@&G1dU#CR#=?~?@MFKTvsf8d_KR>ZmUKP}1Q0`N9aQ>6z}@?S$>Crx zUdZetyfhZr&9M85ZBl?5j8md1;bVd$uf;WuPM-A%dW=ri>&FswtsIG==YAOlplxc$ z;kSrg&@cPw8t#>0L!m5h(BCjn53WD|_n$5+qDn2+tX=H=AU9u#EqOy$&jK0n2~lqU zt~We(9)BOIXh2((iIKks3V%jIQ*}1hCCk|RNJvJB3!k@h`t*|qv8J~3+9;a=Eux<>b zb!arr;0$gwZ;>c_el<)-O~1BZ0c&4?K{s{owogLZ;SXY2=E+cg1=UIKV#md~%2Ent zus;s)3MxWVWOU$KXsOkr!PLoQW4Dz##m2t+p&RsOH&oBep~4MO8ESsZeEi~%(kNia z?@|6rdEgOyy(*+Zwaeb#ME}SQHwFrV+ufELwqRb%{Yi3=Q?YGx{Lf@hp zth=HQKQ0iu5Z@#!K+Yqbmv^x&za1h5nqc9hh+v>FMenlBF1FtXNX??aId_hr1V_OT zXE%rlkB6S2d7%Wv;xO(mu25CXQL8?kEusTSDAK5e<2g92;Ii5|1%`JZ;u!7kNrma_ zU%hfYXV=v)2;G{WAuT%QKuVIg22m>RCN*d|`7OQ2L?SQL{Q&T|kI5cc>()Yr>Xq+>wqT}pb`#0nJ4~|cMo5i1 z>;7v#b@e^m0fa+PIoWDCrBNuJ%?=5(Q|ln_lV38E|IgK9Qn1;cHSS!LA7pD5}>=PA-j`=p&Z%vZ=8`|Q zQ$e&}Td`WCg5T!Cezc&VqDlghWTmedM3$+Q7zfd-^eWnuC(UH1NZo z|D>G*Qp;vRbuUVzbhyx1C(<{QtKl7~`SrHb@a-oIhnW$d>4zUq+wHs$I8!LP19A&) z?;MDKB;x^3T6?3K2US(ouPaT;PTK66xt&Pr#e1V3#Tz#}Ss!eCH)rgU{ag>xcDsA9 zJm2|A*Z!*o&^J&&&0Ih|XNw(cTwU7nmC5Sh;7w!?LD147QIv0)6|~&kX7gFuuptet z$#;QKz7>bU`*8Yt>FqKb@M#z9ZCbonVw>~!q^n8E^(eqs5W>s=TYm9=ulM`+dif52)pVw1AXLD;_w{jB z3azk_lA5)w?W#c)2WSzLmDVK}s3m$oqwf6DZ?1(aP6_l3YL!!evaSalSxjfpbJeV@ zIMjPLjNPsW_QpM;N3L+7vQrc5s&$K+$bENmsaoF#Tx+KmO+`j<38Tq91i#G zbo+u-Ur`Nn{ok7rJD7Cx0&W6FY0wRLOCH{3WrJ%snQZiJkaeePl-R2y2Vu(RP%{M$x`Nh)Z$cTMh{@wD ztvjHVT7T`D*`^l{qH&lxHmLHjK(`q}@Vxwi0;Bo8wzCcFb4VX5LTU%fy%c`!ED>cQv(k}F8i~C0}*HL zuYlAq?FpCsrqNyn@%BGA-VMGpF;+#J1=`<4P7qhLDG1WZ4)e4a>U(WvUjd6dH5V82 z_5(+)qYtjF24T^y_%( z9=DY*gS_PLM!)vHn>*k2_Wt&85n4Z`W#1sSmE+b`cx(P=HzO-lpd%7p%|2`PiS>LR ztE+ePR^|^MbJezegWSR*U;!7r=*Z%tv(ho-83B_E+f=cit=)3sHiYRo%=FaGJ_zfr z_iOQdM-O9u=U2#&wq95vbRF;vhz`s#V)3*)cD^8V`k{|0IA^HW1cY&pjfY-(W*p@d zW;IieH^++MI76TNqIhhqPRr+Wt2rDMtj@P^ z`8_#)FMBZ)T54xIVb886>8^cVYC<%0-bq(4yh|uj2=h6)-52@(Kt0s>lplJlYhUJU zQ6mxTyY6G2`pu)lXr^9qp4DmR zK1zCmuD>NpN$tt4(E7Yl*jhYy&TuC0A>j9=I+H^EExMq2gtvH8t7OySH;DiT{c=Cv zDnTIxgb|a`ytr-_Da6`3na`Jdh`LQ2YdfrEBZV?PfFExkL=d38bke$)t10d9;rdxt zllk~4+(rd1qmZ22oO(9KM}ln^n~CC?4b(2 zY6^K^1J)3~!2qDJV%V%^-Z+%3el`#U z^i^}A;yi!h&siYEOyg$EkXtmEW4A>T<0RS(bvRjvcpkXUF;6#`)@rLzrxcbgu5Mob60y z>^x|$wwVI`Tl#B9|NnMXxeLy+=Yd*q+{jn(ZryLAO325mKgY8V}-AXj;T-ly*Ai zB3ZHn{XfcKn0HT;NYo95Kiqg!70*~IN`Tfa*-0*FK+I`V9oLi;d z#l>&aHQ<}UdyT6PeA`t}A3ToOuJ$1z{;aR9P;xHJ3f`ZGH|>6E9I7ASfCQPSK(ZyA zC<_D#epxe=eJ)2`OPIm^O{n_J!@W1Ilir*AYZs04OO_0yloZY67 zHrtXEr|osr+h{xy$$*EWGvC?E-v>7{J-tOMEvlQ-o=>tpNKVgCBorK7JQNB{A}`Yz z@?S$^bcB&Dv+;}TX}8i;m3wJkuVFPE5ip5|H~x%^Ui}F{CV#ru{E^+w58%_PpUeaA zO{&f`G?~U-IVLquW~)XBv4BjE3KVq~{ysu6Qr9Q>v*F z#qp|$7aF@h9j|kWm5axA1_jVvg%eg-?H+ak)fm9IQ66Xm34Huu9+gC%s{7%g4PmPI zNXKX}YO7bYlrx88eHk6jT8&Bfhtx7o+v|vCMb$&vaCKiX?ByP|eHY_?v& z>NH%E!$KRXJPpoepYnoRPDLhz#}ZUXSPzc*ikt%vTnSxd^#3FQ9{76ou>^1(m|4C9 zL!7$B5E=UIr#NK<8jq6KmX#yH71$9Zuzxo-n+>U*L^*XGrtD?Uh1 
zC#8lJ119%BN+%Ad;OWE*`1KH=y$V;cDOR zmj_E$={JUmSBWxF$dMvoQ7F>m&Zz^1V48%S{6)cbnjrJ_4* z3$yWQhn(1o<>3!drom@rb{Z+-6KttIfw69V?;HdYxIThjyn<$Gx5cjf@@5e-HP8k` zv&w-ucO2hAD!ii?iwCjpJwfU>dBl4APFuldFO zC4FJ0uLqLoVG^r~*noCBjLIYM1@z}OewBa474KGTRd-+)IQFMFlzg+E* z%8@RY5I3&tb9_$Hb7q18Mp#;Px7J>WV8i2hb-sA<76+H1`9NK zIT)|_q)=TnIkKAh0C~pL?I*d{D zP06H;P1?pvGp{*5Y^Qt_@L}%`o{lP6f=O4j!=Fa`tXN}&5OHb+f-RQM)W5d1bGu4D zisdI_vulX=NrqZFFWD~_vjwV={So36Kbzc@F$r7zCd$@NgONb9!+Lo3%sfI=f~0UK zrOIrtpoxsYMxVN`8w}qN@c<43ENYbw?`tpYeZgH@ziqHa4S4sT&Tt9X78=uQ8ck{O z0`86gANPMYQ`Eek8*A;7Eo4kO1<~Th*z=xs@i$eD|;wLKO&!<4*P-F|pfuXbCRhXbBuW*lq z`H8enH9^X0_Zbo#m027!=}}!DMQB&b9|-tOfz4>0wUi3AY!gSCHZ>MF(K8MnD4q2O zKJFOipMDkmCp`VPO@@SJ7AZzI`+WPYTNmnx#+e%qfA~9@f;L~acXN}FN)WnlNo5u5 zl1q*AaSLjM_uVT>fRQ2Hwm~SdI=9v%q=IyG){mzI>5?kKJE*T1wQ+H9B~RYLYv~h! zb5~{3UkV5pNQ{L@%B(`PDg0jzfCk4FA@lhfP@L30`T;K$K`Y#5eXgUv7yWLYp22@eU^=f3F1)La<Y+7rG)!VuL#Lc&v1?a4fxe&L1U zSM(mnfl!^-1X|%APeDQq-KKGHg|t_uyFP4(kv#`JDs+tED`z|Ruzpv^`=PW(B0L>% zuZPSlx6wwXC54KL_sdS-ErW2^Ydhoe076a8avC*}A*<&^=`?`_H~Gg13~F|jJ~ z+ZcPbjd5?}JqwY;KE=1sF6a7dx<}r3uFZ-(?Erj`*=VlwQP|FTh@rCJ+?iVJ>=O83 zwh*1Rq#}deZuxX_oOt0}&WA}#$B=3_vA9HXdt7V!klR;$dAnsuyY6CC3GjdEk5wmv zPP;FhgE;4hW?*N~k3c0EQBWn4LoQGbh(O&1CQvNW_33cqXlUjN=SAs% zB0nNhG%$2Gyvyy7yAA)a)g|pTJU0Vk5^Q^j?T_BEgY0;l#gSguoz0j&+}}bQ6wI`2 zcJrwFo8uVkhfl(PB`**~vYU@%Fp!7y$u`#g?ymTVgy;BOjqu?6Emz%;#ObF7%20b~ zD=`nL`jc<(Xp1WU%()6P1@|A0?N?A=o3=QY2QBDS3V*T~MSy!%$tit`x$Hr0HCSy{ z^IVJKR87d>iOo2vzcpu;azq-*{9V~0Qi!IB_X&ci!Y$ViDr`&h(}F_Rh(AM03noI^ zU`ud&6yc4B?xrK8(WW0F{7NzeV?jwGOh06Jr)W-z&UgP4^V|8iZ`0v}XWz;E+^%BI zlJ5&Fj&c18xwH{NN_FuU7w4&1F)c!FD9$H;mFn>8kI_bn@6Y+RS}(s*PazOWYD)ar ztmxg5vE5p*gt;9Zri6C6-?rB(HJh5?DEAbC0Jz-n8NW?r)-q5laj?!C_49WBuJR~w zn>WqHeb#K5p(Xdk;Nwh0+0m#@Zea|9{GSj9RYV2j9hF@=>Ml0sbG4{}MY{7Mc8MVm zHF|BtH3<~M8pyg;&!r8by0A+0h*7#VRkb*76XFdmcVt%}>sZ2`vM408gnw)S&(xp` zr8CsH*(Ov>L~m#~SV~(!BE(^m-m6ZIQ>Bim0ZZYp&=b)hWEBQjHpC!S>^(p*lX9smo11m#TRf8UpnIz_V^n&0O&`0 z+kH>D#*K?LSY2)0MYwjlC9ziT5fYX;zEy?f$qgcUsKE=w{E@TOvo>@S7Rtt7Z&!FB z((2^$lqB_ICElcSoW;zdWO(b2>@GK+9}`(K2mcnQKdQ+JoTM#a!(V<&gDrDFM+hJcg!0LFJB3B>wO1%2G z6_`1{PBB&vBXOFf+a?HU#oft~bbES0{k%QG_sd)%VU|%32Z&}oVznKdRYFp{I73yV z@ck16Zz$q*>s~1HOW{&RfZO7Olm)hKxwGXTR&I9t!CG#ihM7ls9Q9%REAq3KEf3pd zD>^{^M^0Y4bh^sdsawHo@qj1MjTL|$CevH(`qCcmqlA^!2Zc>*Q-kj@fMkCS!zldQ zmNuFPNx4^dHca)ai9sR-@~;iCsQ_!=(T^igujK zL@^tEHTYXgm0&MIt`jucSEl{Xr!_!_svUL6!a^sQcj-Qnp3du3!XC=_Z1yxuc*P;fEjq_(lw}uITi4@p!y9Y-=h?-Mt zC}uX8y9Co0y}cZnZH3@N1h-A8#dT4mb0!8qC5w2SGAn0D;A zop;?N6>f2mv7zjS526v$)+*zkYy0#1c40AyTx65>YsVndRmrQYaUp?DpJSUay!5$! 
z+=Gy|2G!=6u&fXEl$eQyS$zGPki6>?y761Qt$WE_z?kGw1X0l>Y8&3h=>ooyTS5gH zaa^N?1Sc_~v1L~7Ydy_k>j3nD#SsAS{Gg+w^Y)0jQo>+*;}2VlDD%#+Hs+ogZcv4r z#6e9hjcD1|i1spYI6PQr1L3By0l6&|37BB~iM3~dE>9J>OTNPd|5ZDag2Hr|ecS2} zWuB!#Lw&!(kmdAZ@*RD&`nuGV_$cvR=#yzai9a|*GKoQkkK{1`L9fz#-=N2v$2Nz@ z42vhC$<=GPAFb!eg>nJHUu`hsIpZ1LKER=u**e8%Dw~tYu8-DxrS>wg5QcWrQoX39 z$)RPx#%I~4yf?^OerzI@;zr=FbZCdjL!u;^d`K^>ilh8AKj-!-1DcZLi(3;{Q`!9Y zl9#%*oaM=f_ONhtQDP7j3>oBB`qGgK!8C`OtN z4Go=%1%}vPT-2A6Y~3-qBT{S7cx3J5x%wOf2=Rxqgs^jom*r^E10m-etr^MvkQiR$ zo8u*9)GUv$z`QUDZhz7{QBhHAV&u;sWubWO*CpnJ(8vzyvy)!DAOnbXy))-psQtpg zfQn0p0ozM|9F_8`&W_9jw`?(|4}O8*WFsQEMA(lKP!b-q4GRlKrcE4_i1+z2&u7A! zYQ2k>B5MRtq&GKBn8z%+Nd2w4xyADsZOH>%4$NqYqM59{H9x)6j58vMAH|{4ogZK} zm)ThF3&*!VpGIScwTP{pTjuEC2AlP00Nb?dRRnX}ZIu-lmzUI4?xtQ#>)vWiF9&Q! z`uL}ro>A>7u?`EhL`-Fi%o*D z)_J4J+{38owBup+l3f?Rn`Q^$rM0t;S&T8L$)M9Q#H&troVaUwc|ub4#h%0v0` z^Z_Tg_1qTIFN67b_{QC<(WlYdyBJ}QETlVbeM&Uy^Lr}(t*PBA9{YcLZ@3DYTx{s7 zO^kglv|M!B__&G{t1k5hhta;1BUP5aEXd=$X?2&H=;w&0IJCwdl~=OAgmm9%?z0x3GCs|$ z2M>1VZaq~D78Dt=P%U*}qe90xl251uZgX;A`Q?a12@VkbsVP6a+?x=TQ>PpIx=Hrw zwCc!DK$Z$M`lRcd_F|TuO&IQOv2ym$pP&2~A{|ycKucJ$Nl8Dl(V84PQr6^>qsWAH z)4}M6+*O}Vwa@sL?Kae-m0I-`;x&OF?3tmO%5FZUT_V2|uO0rA4!TyD6AZ>N zBY_%cWs#5azuj4fVeXmU*>Yr>4`I5iSKF<1#gNl&ihU;$&_BAd^le$56d&~ek{0Y~ z%}Gqcc+RCPz4XbWMnGVR2VrQ9++l#nahl;)WczV8%V zS}12S+1gGqa(8v@Z6UYK9j#*NO0@!b*0Oun-Ueo_Fs~xoW%62I;CtcBHEb;vXU-2I z-mC>^WL}KQplE}b8ZxOB6@62~;+pS@O`HN#-0!TpsLt$UKIAx0rK%BI9$VO(++9$k zmndg9KiOVXqJ_ zA(>GN1`@JCi;d(3sjOJwV0gK?p!~)7e4mFgD^r(}+nBJ6X(%;SQR^UzTAVk8|5{iOPz2cfZt#R*g945R{rf!j^%~)_8 zp&n*n_pU}oscgAuL(N#A+DGiNiz@@?v|_vE<@o{QzJ@SH@LC`qOe)bWvGAuotBzabmPdM&ySzAxhTc-vbdwEg*JZ9f7d^E^&@8P=6j zn}up~8q5mz)CW-2mMa*j$?Zx-u}sV4E9XiiV=7qPwSP+0-|$BWHk*H6Knh#5K2cB# zM>lcXe?vLxXiUe`GEnh;-kpc`>j_st%$QwF_&2;t06^O>K3zK9!uJUsRNP0-5BG-Z zqY8gtM39~nND zEkT-Z5HhBTjQ>pmp|-w?MdOQ<$Rmp;;lK9s&4tx3;-(s?g6XRZ6$Q|Rni!wStn#ep zswJaKW!Sz_C#b|HBs3?zvCu)Er(+X)e`!Yb0yNhy20CO8jgATt%7lezU9k8>h$CqB zOJmZpfCk)YyG4HzT7`Hcpfoc5ND&~G4Z$E4;6YFXyt%}hBQK_xzU0z;J+$Gkpncu_ z;>1Lj3G`{r9U-uVhLI646g^_0PZYP3Pd`ucy1i_aq+3S|%veW-lb<;e!rshwuS04|h zK(=PU=MyeERcbyFU1Z#e<>K+=OgswI%+P-E9>#o(|B>0Z_z)H>&GVQiEjP|WnHRqD3Ndg$J$#ly*h7{LK&+B3iD^vyJf97$C^ z>ldlUqKM1^wJ7L~I)73fIZaK?N@8fK0CL*w;SBy6=%v{(M3S-jI!m?MDyG8H7#E+Z(n{WEF(qj7M z1tztPzdX_#3-~}21xhbE0V`~SY#-=L`%2Jk(umMOIv9Phl`Qc@o;I z_6ge#$_QbsXq)S?dbLaWxUP0UgD!EUO^ z5=EJ1&RGn_{wd?n9S8ECwj8PO#z2NmG}3W!MpG@ zOiYU3*-fK2snx?JdI)zZVrpId1V!X;tmNbdmB38eyBb+kJNdK{r@GrgIDJtBGeYg+ z>x){(zKL5{ORJg0^@R)MDH2D3`WL>c=_pd@d_> zxgTYse5#?srT~ANEe}tp)K{HA=JuBM>eLxB3a2-3QNo2NwIA7#X6X zTP~Z;+?-o2H{P4c%z0*3*_jaDH}pCPa2svn`rOUAN&KYA^mWZsTU?E;(j zaB6es*^;ljOEwj3q|wb?!05Z|$_fATrlNSq(X(@7^9Iuo?>}{5kr(|Y)JN^L{m%6i zOq?itrAUl(9#n-^_}@My0xd{IE;y^t;oSMo_t&pq!1TyPlxew=zd+{2#RZ&gv4IAp#jqR>x7+i1t@Llgh)zx<%*Atkb+>kg2xlK?Yhy%dxtptgFs3-zb#_=Nlp zj;a080qzeL1q=3=D6W`^?+`K4gScEV=4fUN77gav13+!n;z3yI>g6r%bN&={T^eiM za9TZ$Lf%kdNSy^s%%!NU<)fmNY{@Su_(Xh;(t=)?OrltTT1$^S#Cmp0k}vPa<)jg&H>{j9&u%*LP6q2n=k)qn7p%f20UpbEGj)!wCroTV{Lw=F7)-!@CXIZu zl{B^#E9`?YW+4uyAqIZ79;ycf_$iF&P)aK?rqC%D#D%yU>w7L^ng6(#(gAXCiyDb$Z1 zR`;NS`#btbDA);Os^($2cuyl#}e5#IW=f$|5-;r7|ydFexzWzTS4>KE6*+zuAx&`-iKs7d}=5r&wL`Rs_a5y~(pDC1=IZ`PSGCp(4vB&e=3LTb z6K$N~YFTOH`kN`E*T`932$vRtj;9qA)-A)lu-<}D#IcuwdsLsxy0(#afL0hLmjXB_ zW7y4U1ro}P#79!(!6$tXj6Q z8f$?}v|qVa{d;F;=bjqVv1jZ?*zkM=mm#UAM?a(aPc97-AQ`qbATtw-TVeThpa=^4 ze0oEE^oHFek_`ajqA7;b0e>_+GEULCeuKIQN_(zsRP(+A`oV;kf%h8(E{T|svM*@h zDW_htFfU1$T^Ui)&PO0HyE?zUy|oSt3lo(4%z%y{%Z6rN@XvZOrvrH^BA7y9;9PSNX_hw;TDi438gqvAYCSR#Gk!* z`x>fGlj)8%Km4+oi7O`6&Rxc0%FGk}Hh!SAWF%fA)Zp`1*)|&e`}E#}q^-My=j-9z 
zMFEAq>A0PgilU-mu4y1xAOT6gI7(xQiS@w(@Sf&mQo#!KZf>ExADIek&}GEwm0C?xe=Lvx)*EskC%m=TSW|@ZqFi($ZeTbzpUbQa}|#h=M0@ z#^YVK+uBlH02aVpg^=di>Q&#B($i2%0+0C@o zjxa}jw7Iu-_iX(*#?FCy=J;_gO62)(iql2MF$F0nQQUMSgf%bl_$UdW*glw2#;Is( zdLZ>8xup=VZ4(4=6jG1-LQ|F3Kgn2rd>Lbr2ctsovCgPKvP|OsZpV3(MFQl6xls`Y2trec{vR~3Vvx?W@oZa z8!};L?|#duYC*Vw9-OrD?U&AhWrYr}uBrN$98Swl_y^Y+K`e(G=4bZJwwJmd|8#_a zEzDRx47q%fP!(q>SdCH5FRspc(nxp+9dIAzakyaX_R*nngjRc$R;@D!HQ$joFe%iY z+`wfF{kJhfV4G;kgMGlp@_^IMUT~Z$RzTxrS)y(n3wp%bBNcf6Mm%fDwpvuGeASJm zn-vq5stTK!xa{UjQabeCG)_uNF}=4)p;4V#y~MRzqG{B~hLgXn7m~@(>_G9DS;@;= zy=Tzo?~i&LD-;zJo`_ptc0|O?I7WN0E}w7ayw!72$Z1rfEq9c|@YUOWp59+_EVmf; zgvVDjsC@aNZm$#N{d6@mLm488>2Y?V+2(Z@S8=^}!Z%q1e>5Kw*d5nzfE9ACcvb)5 z_#?ERShjOWnTR?2f-E#afJh|O)guPe%K2Y}ke3}be!1tBRKA%#+%T}&(%Gfqq)~P> zMKpRsKiph=+o+<4{zjR-Fb~_OKEuX;vI$#gEG5N0>}FK>~Hp!TvT{jL;I4S6W{JYW{T-VQ;D$PQ~NePYdc_ES~w7K zG$(%h^Pp0C)Drl?ohoJ844J;P{bj!+{CI8%d@{$u*(P8J zr=;1yslfV)^<1ok2!`5UBs1|F+TX;Qp~Bz9=Tr#zlK&+>AW4zPPC&t@_XL=J8KJnR za<(Vu=MJyRn&Krib1T4$5CVE>-jgA|8?R>Ob=)2pxS!OL-R z94tDP3&0rLWmWrGIaiK+Y0V!uk@Wff3@$O$TM5A8Yq`Z8c{41zlMpAsED9rTB?s{j zG=WnoJftXnEX}5A&ru>MvYYd*A_(1^R=?|xW8-8Bxi1F~TmQU%Uy?dl3tMrhF*(=% zWW3Psy7BAstm5Y2=?x>&=n_nvv{-bH9}w}LB8I}M9++qIiX}vcOekgWf+Nh)il^t(q5@bj4ocxeJafXDi6j?l){=U_&s zJ0$LjojmH75us&V;7Z^~7|yx#82#$F`}vBuCkU#O@N2oAx0L=A!=a7RYBFII+B*BG zLRN3am^r**H?3=yc@#1U;IM90ADuXR9I59t^6K$;A-Pt8?0+h5jG@Cy7LP9CM|XDG;I%Y`?iR+jvH(5gQ!6~dj{(1|pPu-A|R?;gCWhxl;B zmO6;(xSjphm|?8wt@lP)1DHlUDCDA>kTt+92nxZ2eu11nKP-=&f#E}(L_&Onx>Ohg zW{BU1C|6|n8BA&D+A}<}{WJg{xItuOWHgpT>fX(H#Z+6;guj2b*(r1iflS@Rs?rs_BL<+Br_e5A4#WK zR_PEDJ1NObrj8Hi^CLtk%_JYKLY0L~>@`T>U0xA$kfkL=NQfMN@AbZ*B&n7T*(*`Z zC_k`rk3sK`3Z@1%ro4Jej(Y9=NU@1S?z-cDel04GG}t~9qOT27Xqb~FA^+;*t4TPa zZ*ZD)jj^Kl@%p>0%*;^gM4Ca_>ms9sx+faMIdGu5R26+m!j5z7VQaG2pomC~&6geJfa`2;wD3iK$CUYgIZCy4lj$(huS%N`({kIH6nGibRuOL(Ev- zlvLz&Z%s8R(cz=B$T}PXcE^8Jfoxw`ed=;PuUk8+T(~db$%_-fVZ-;bQsU(ZP4HHe zu(IWRa6_s%mbogsjUv2UpN`h4!^U%}@n41s4&Yrht3hPb0S(+hIPFp%<7);OwiqP*FtM4orv zq4pBllW<(tqEd%c+#lKcHWOBhXLVQMBIJDlkNUv~4yo`Mrmt$JBqln+e^%K7*i+7} z#^K(U2Agvbw(?LWthroWjEn6N`T(V;JfJ5zB$PdG29D#z4lw#5k??+xsiZWc)`xOV z{ACHS3{A5ifeVnWKhmhPnCgs-jAR3!9dv#Uq`4Pd6`1TgyK#MFob&9$_4Sps3l)e6 zjltYlbz|m^eC`D%bRN<6MJm7Z}usiF+xZ+Nz?|l3(Moiu9 zvhTH*r^I+7lKMzJhj-rI*K3w?Iy^3;J}v1*(_CF}%0v^dO|*?T3^z3JNM?eLLawjO z+cPi36CnK)6!W%io5ki8IRayS!Y_{(gm%bQWT|z>Evh+o=w&j#ZYPWRWINsto7@TQ zysfN-gN_2?N;+J3IHd3Yz~rVd(ov{XmcM=f&icgotls;!Jl{D8e`NWANMVPkA^4v& zzetQ^2GpHTcjghSbi#QR?$ z4~2A@0EOCN`Uj;QG_ek-<>k90I)BC}5u98lV0matvU#gTgz+QAQ~NPHW%b2vPsv$b zx*&we8zR)nlJXe#wZOK5+^zo(IY8+zDguN6-Vi4p{w)v22O1ihiEMX0|9(H^@*z^d z`LN3F&rY9PQ@bRSM)jtI-XO%U`LooVnuqNh%+OHL2C-SB04e?7EFTdu$f!PxBdcyx^pO}zleTwn7n`sgEeh!%8o zZq?Yy<^tJQS$JiS4inA>A8Lh;4f{9x9*}?g5xNS*Pkm=&hz=6U0(bL_{$SU6N~156 zSZ5>Zo5f!~^d@p8x4ZrQ#-GKxFyY|^R}1m==jwJHRm~LcR;A+TGmj;cUHWCh_hzR+K?{=p9UW{<>p!LO= zv(^s2W0T-XYFZ-*rofY8#EYkWgKGHZ>qjd)T%J{GJMzEe_)>L$)VYvHAz%sFz^+)5 zKx1<xAD8K6CoR%lP6L&Q2(Upei3panVdt(j0@5Iu?#$ZP8f0hBjp-3 z+pCi0{ZJ$fy>T8sqJuC9Ul}Uq@2*FU#=hcbJl&pK?3^JnebmV}@oT{}T=^PU4Eu6d zNZE7;XM=#2Oc{o3$Ms&f8DACOoGdGaw8l_vdX^vkH_vBHj`}CZFUey6M7}Iw*x*QI zkvGE5PM>;6*r)2V=9Jb(gv%LRINiIh?9_1#Cx=su4Me4=6B5vRle&!J|4BI`WuRZv zIveoN#kkS(obtf8H7K!7ov$48lZy-YR*uZq;_D2tE4$8?!RS7)=X4TcMoTco$v#(d zWaw7v(*Fq);A2?ou|a0vyCK0)s5?&L`veLc*JHq(Z+K*Ml+?ZiTGl{AMfST!)(juL zYqIJQ8e@^0@8X}5Bb?Dm+z&+;nIv0RT(FG-HbsmbywCwY51;&}pZ%{7Hu=nSPgE4% zV-KVe4TrZ*=kz51hASldfSjclOImX&q22#mQw|pGrv1%y*6aN`j}88;2r8b>SNb)0 zG8XOrFfcY+a z5Vg*tnfmo(g$W?Padc4U3QcMEj}cv7!AzoaQW5FkJ{scD5_N3?2aoq%H`Yr6H&wwVl2oON 
zBUNR{Jz~G;R~pq|x3$s7UY@`5QbCHsoLpY>b4A%m&JHaitAkvxr%EIn8|(l)!mx0R z7pvC!->2sPzGid7 ziZND=j-RU2UKbya_$(&>tF_x9qG|wd>0fK6btSkGe!q~QC11Bumpp9yF>0BJr!bh2 z=6}SmeT;6l0cdK=n?|p947CeKLZuYe;gOM-a|PMiUi4?Z0wMluxH$$lsE`2a`UbjaIgIOu;hZ)%oE#NC0NR^7^QKCKX?R*k6$d69T5 z*) + endif() + set(_GRPC_GRPCPP grpc++) + if(CMAKE_CROSSCOMPILING) + find_program(_GRPC_CPP_PLUGIN_EXECUTABLE grpc_cpp_plugin) + else() + set(_GRPC_CPP_PLUGIN_EXECUTABLE $) + endif() +elseif(GRPC_FETCHCONTENT) + # Another way is to use CMake's FetchContent module to clone gRPC at + # configure time. This makes gRPC's source code available to your project, + # similar to a git submodule. + message(STATUS "Using gRPC via add_subdirectory (FetchContent).") + include(FetchContent) + FetchContent_Declare( + grpc + GIT_REPOSITORY https://github.com/grpc/grpc.git + # when using gRPC, you will actually set this to an existing tag, such as + # v1.25.0, v1.26.0 etc.. + # For the purpose of testing, we override the tag used to the commit + # that's currently under test. + GIT_TAG vGRPC_TAG_VERSION_OF_YOUR_CHOICE) + FetchContent_MakeAvailable(grpc) + + # Since FetchContent uses add_subdirectory under the hood, we can use + # the grpc targets directly from this build. + set(_PROTOBUF_LIBPROTOBUF libprotobuf) + set(_REFLECTION grpc++_reflection) + set(_PROTOBUF_PROTOC $) + set(_GRPC_GRPCPP grpc++) + if(CMAKE_CROSSCOMPILING) + find_program(_GRPC_CPP_PLUGIN_EXECUTABLE grpc_cpp_plugin) + else() + set(_GRPC_CPP_PLUGIN_EXECUTABLE $) + endif() +else() + # This branch assumes that gRPC and all its dependencies are already installed + # on this system, so they can be located by find_package(). + + # Find Protobuf installation + # Looks for protobuf-config.cmake file installed by Protobuf's cmake installation. + set(protobuf_MODULE_COMPATIBLE TRUE) + find_package(Protobuf CONFIG REQUIRED) + message(STATUS "Using protobuf ${Protobuf_VERSION}") + + set(_PROTOBUF_LIBPROTOBUF protobuf::libprotobuf) + set(_REFLECTION gRPC::grpc++_reflection) + if(CMAKE_CROSSCOMPILING) + find_program(_PROTOBUF_PROTOC protoc) + else() + set(_PROTOBUF_PROTOC $) + endif() + + # Find gRPC installation + # Looks for gRPCConfig.cmake file installed by gRPC's cmake installation. + find_package(gRPC CONFIG REQUIRED) + message(STATUS "Using gRPC ${gRPC_VERSION}") + + set(_GRPC_GRPCPP gRPC::grpc++) + if(CMAKE_CROSSCOMPILING) + find_program(_GRPC_CPP_PLUGIN_EXECUTABLE grpc_cpp_plugin) + else() + set(_GRPC_CPP_PLUGIN_EXECUTABLE $) + endif() +endif() diff --git a/service/snpe/server/inference_server.cc b/service/snpe/server/inference_server.cc new file mode 100644 index 000000000..9369cce48 --- /dev/null +++ b/service/snpe/server/inference_server.cc @@ -0,0 +1,109 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Copyright (c) OpenMMLab. All rights reserved. 
+
+#include <arpa/inet.h>
+#include <ifaddrs.h>
+#include <netinet/in.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+
+#include <iostream>
+
+#include "service_impl.h"
+#include "text_table.h"
+
+void PrintIP() {
+  struct ifaddrs* ifAddrList = nullptr;
+  void* tmpAddrPtr = nullptr;
+
+  int retval = getifaddrs(&ifAddrList);
+  if (retval == -1) {
+    return;
+  }
+
+  helper::TextTable table("Device");
+  table.padding(1);
+  table.add("interface").add("ip").eor();
+  for (struct ifaddrs* ifa = ifAddrList; ifa != nullptr; ifa = ifa->ifa_next) {
+    if (ifa->ifa_addr == nullptr) {
+      // some interfaces (e.g. ppp) may carry no address; skip them
+      continue;
+    }
+
+    if (ifa->ifa_addr->sa_family == AF_INET) {
+      tmpAddrPtr = &((struct sockaddr_in*)ifa->ifa_addr)->sin_addr;
+      char addressBuffer[INET_ADDRSTRLEN];
+      inet_ntop(AF_INET, tmpAddrPtr, addressBuffer, INET_ADDRSTRLEN);
+      table.add(std::string(ifa->ifa_name)).add(std::string(addressBuffer)).eor();
+    } else if (ifa->ifa_addr->sa_family == AF_INET6) {
+      // IPv6 addresses live in sockaddr_in6, not sockaddr_in
+      tmpAddrPtr = &((struct sockaddr_in6*)ifa->ifa_addr)->sin6_addr;
+      char addressBuffer[INET6_ADDRSTRLEN];
+      inet_ntop(AF_INET6, tmpAddrPtr, addressBuffer, INET6_ADDRSTRLEN);
+      table.add(std::string(ifa->ifa_name)).add(std::string(addressBuffer)).eor();
+    }
+  }
+  freeifaddrs(ifAddrList);
+  std::cout << table << std::endl << std::endl;
+}
+
+void RunServer(int port = 60000) {
+  // listen on IPv4 and IPv6
+  char server_address[64] = {0};
+  snprintf(server_address, sizeof(server_address), "[::]:%d", port);
+  InferenceServiceImpl service;
+
+  grpc::EnableDefaultHealthCheckService(true);
+  grpc::reflection::InitProtoReflectionServerBuilderPlugin();
+  ServerBuilder builder;
+  // Listen on the given address without any authentication mechanism.
+  builder.AddListeningPort(server_address, grpc::InsecureServerCredentials());
+
+  // Allow messages up to 1 GiB (2 << 29 bytes)
+  builder.SetMaxMessageSize(2 << 29);
+  builder.SetMaxSendMessageSize(2 << 29);
+
+  // Register "service" as the instance through which we'll communicate with
+  // clients. In this case it corresponds to a *synchronous* service.
+  builder.RegisterService(&service);
+  // Finally assemble the server.
+  std::unique_ptr<Server> server(builder.BuildAndStart());
+  fprintf(stdout, "Server listening on %s\n", server_address);
+
+  // Wait for the server to shut down. Note that some other thread must be
+  // responsible for shutting down the server for this call to ever return.
+  server->Wait();
+}
+
+int main(int argc, char** argv) {
+  int port = 60000;
+  if (argc > 1) {
+    port = std::stoi(argv[1]);
+  }
+
+  if (port <= 9999) {
+    fprintf(stdout, "Usage: %s [port]\n", argv[0]);
+    return 0;
+  }
+  PrintIP();
+  RunServer(port);
+
+  return 0;
+}
diff --git a/service/snpe/server/scope_timer.h b/service/snpe/server/scope_timer.h
new file mode 100644
index 000000000..373006125
--- /dev/null
+++ b/service/snpe/server/scope_timer.h
@@ -0,0 +1,34 @@
+// Copyright (c) OpenMMLab. All rights reserved.
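+//
+// ScopeTimer is a small RAII wall-clock timer: the constructor records the
+// current time and, when `print` is true, the destructor reports the elapsed
+// milliseconds. A usage sketch (`LoadModel` is a hypothetical workload, not
+// part of this patch):
+//
+//   {
+//     ScopeTimer timer("load model", /*print=*/true);
+//     LoadModel();
+//   }  // prints "load model: <N>ms" when `timer` goes out of scope
+//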
+
+#pragma once
+
+#include <sys/time.h>
+
+#include <cstdio>
+#include <string>
+
+class ScopeTimer {
+ public:
+  ScopeTimer(std::string _name, bool _print = false) : name(_name), print(_print) { begin = now(); }
+
+  ~ScopeTimer() {
+    if (!print) {
+      return;
+    }
+    fprintf(stdout, "%s: %ldms\n", name.c_str(), (now() - begin));
+  }
+
+  long now() const {
+    struct timeval tv;
+    gettimeofday(&tv, NULL);
+    return tv.tv_sec * 1000 + (tv.tv_usec / 1000);
+  }
+
+  long cost() const { return now() - begin; }
+
+ private:
+  std::string name;
+  bool print;
+  long begin;
+};
diff --git a/service/snpe/server/service_impl.cpp b/service/snpe/server/service_impl.cpp
new file mode 100644
index 000000000..6db484bd3
--- /dev/null
+++ b/service/snpe/server/service_impl.cpp
@@ -0,0 +1,358 @@
+// Copyright (c) OpenMMLab. All rights reserved.
+
+#include "service_impl.h"
+
+#include <algorithm>
+#include <cassert>
+#include <cstring>
+#include <fstream>
+#include <iostream>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "scope_timer.h"
+#include "text_table.h"
+
+zdl::DlSystem::Runtime_t InferenceServiceImpl::CheckRuntime(zdl::DlSystem::Runtime_t runtime,
+                                                            bool& staticQuantization) {
+  static zdl::DlSystem::Version_t Version = zdl::SNPE::SNPEFactory::getLibraryVersion();
+
+  fprintf(stdout, "SNPE Version: %s\n", Version.asString().c_str());
+
+  if ((runtime != zdl::DlSystem::Runtime_t::DSP) && staticQuantization) {
+    fprintf(stderr,
+            "ERROR: Cannot use static quantization with CPU/GPU runtimes. "
+            "It is only designed for DSP/AIP runtimes.\n"
+            "ERROR: Proceeding without static quantization on the selected "
+            "runtime.\n");
+    staticQuantization = false;
+  }
+
+  if (!zdl::SNPE::SNPEFactory::isRuntimeAvailable(runtime)) {
+    fprintf(stderr, "Selected runtime is not available. Falling back to CPU.\n");
+    runtime = zdl::DlSystem::Runtime_t::CPU;
+  }
+
+  return runtime;
+}
+
+void InferenceServiceImpl::Build(std::unique_ptr<zdl::DlContainer::IDlContainer>& container,
+                                 zdl::DlSystem::Runtime_t runtime,
+                                 zdl::DlSystem::RuntimeList runtimeList,
+                                 bool useUserSuppliedBuffers,
+                                 zdl::DlSystem::PlatformConfig platformConfig) {
+  zdl::SNPE::SNPEBuilder snpeBuilder(container.get());
+
+  if (runtimeList.empty()) {
+    runtimeList.add(runtime);
+  }
+
+  snpe = snpeBuilder.setOutputLayers({})
+             .setRuntimeProcessorOrder(runtimeList)
+             .setUseUserSuppliedBuffers(useUserSuppliedBuffers)
+             .setPlatformConfig(platformConfig)
+             .setExecutionPriorityHint(zdl::DlSystem::ExecutionPriorityHint_t::HIGH)
+             .setPerformanceProfile(zdl::DlSystem::PerformanceProfile_t::SUSTAINED_HIGH_PERFORMANCE)
+             .build();
+}
+
+void InferenceServiceImpl::SaveDLC(const ::mmdeploy::Model* request, const std::string& filename) {
+  auto model = request->weights();
+  fprintf(stdout, "saving file to %s\n", filename.c_str());
+  std::ofstream fout;
+  fout.open(filename, std::ios::binary | std::ios::out);
+  fout.write(model.data(), model.size());
+  fout.flush();
+  fout.close();
+}
+
+void InferenceServiceImpl::LoadFloatData(const std::string& data, std::vector<float>& vec) {
+  size_t len = data.size();
+  assert(len % sizeof(float) == 0);
+  const char* ptr = data.data();
+  vec.reserve(len / sizeof(float));
+  for (size_t i = 0; i < len; i += sizeof(float)) {
+    vec.push_back(*(float*)(ptr + i));
+  }
+}
+
+::grpc::Status InferenceServiceImpl::Echo(::grpc::ServerContext* context,
+                                          const ::mmdeploy::Empty* request,
+                                          ::mmdeploy::Reply* response) {
+  response->set_info("echo");
+  return Status::OK;
+}
+
+// Logic and data behind the server's behavior.
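+// Init rebuilds the whole pipeline from the model bytes in the request: open
+// the DLC container, resolve the requested runtime (falling back to CPU when
+// it is unavailable), build the SNPE instance, and pre-allocate one ITensor
+// per model input. A sketch of the call order a client is expected to follow
+// (RPC names are the ones declared by this patch's proto service):
+//
+//   Init(Model{weights, device})  -> status 0 on success
+//   OutputNames(Empty)            -> names of the model outputs
+//   Inference(TensorList{...})    -> one float32 tensor per output
+//   Destroy(Empty)                -> releases the SNPE handles
+//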
+::grpc::Status InferenceServiceImpl::Init(::grpc::ServerContext* context,
+                                          const ::mmdeploy::Model* request,
+                                          ::mmdeploy::Reply* response) {
+  zdl::SNPE::SNPEFactory::initializeLogging(zdl::DlSystem::LogLevel_t::LOG_ERROR);
+  zdl::SNPE::SNPEFactory::setLogLevel(zdl::DlSystem::LogLevel_t::LOG_ERROR);
+
+  if (snpe != nullptr) {
+    snpe.reset();
+  }
+  if (container != nullptr) {
+    container.reset();
+  }
+
+  auto model = request->weights();
+  container = zdl::DlContainer::IDlContainer::open(
+      reinterpret_cast<const uint8_t*>(model.data()), model.size());
+  if (container == nullptr) {
+    fprintf(stdout, "Stage Init: load dlc failed.\n");
+
+    response->set_status(-1);
+    response->set_info(zdl::DlSystem::getLastErrorString());
+    return Status::OK;
+  }
+  fprintf(stdout, "Stage Init: load dlc success.\n");
+
+  zdl::DlSystem::Runtime_t runtime = zdl::DlSystem::Runtime_t::GPU;
+  if (request->has_device()) {
+    switch (request->device()) {
+      case mmdeploy::Model_Device_GPU:
+        runtime = zdl::DlSystem::Runtime_t::GPU;
+        break;
+      case mmdeploy::Model_Device_DSP:
+        runtime = zdl::DlSystem::Runtime_t::DSP;
+        break;
+      default:
+        break;
+    }
+  }
+
+  if (runtime != zdl::DlSystem::Runtime_t::CPU) {
+    bool static_quant = false;
+    runtime = CheckRuntime(runtime, static_quant);
+  }
+
+  zdl::DlSystem::RuntimeList runtimeList;
+  runtimeList.add(zdl::DlSystem::Runtime_t::CPU);
+  runtimeList.add(runtime);
+  zdl::DlSystem::PlatformConfig platformConfig;
+
+  {
+    ScopeTimer timer("build snpe");
+    Build(container, runtime, runtimeList, false, platformConfig);
+  }
+
+  if (snpe == nullptr) {
+    response->set_status(-1);
+    response->set_info(zdl::DlSystem::getLastErrorString());
+    return Status::OK;
+  }
+
+  // setup logger
+  auto logger_opt = snpe->getDiagLogInterface();
+  if (!logger_opt) throw std::runtime_error("SNPE failed to obtain logging interface");
+  auto logger = *logger_opt;
+  auto opts = logger->getOptions();
+  static std::string OutputDir = "./output/";
+
+  opts.LogFileDirectory = OutputDir;
+  if (!logger->setOptions(opts)) {
+    std::cerr << "Failed to set options" << std::endl;
+    return Status::OK;
+  }
+  if (!logger->start()) {
+    std::cerr << "Failed to start logger" << std::endl;
+    return Status::OK;
+  }
+
+  const auto& inputTensorNamesRef = snpe->getInputTensorNames();
+  const auto& inputTensorNames = *inputTensorNamesRef;
+
+  inputTensors.resize(inputTensorNames.size());
+  for (size_t i = 0; i < inputTensorNames.size(); ++i) {
+    const char* pname = inputTensorNames.at(i);
+    const auto& shape_opt = snpe->getInputDimensions(pname);
+    const auto& shape = *shape_opt;
+
+    fprintf(stdout, "Stage Init: input tensor info:\n");
+    switch (shape.rank()) {
+      case 1:
+        fprintf(stdout, "name: %s, shape: [%ld]\n", pname, shape[0]);
+        break;
+      case 2:
+        fprintf(stdout, "name: %s, shape: [%ld,%ld]\n", pname, shape[0], shape[1]);
+        break;
+      case 3:
+        fprintf(stdout, "name: %s, shape: [%ld,%ld,%ld]\n", pname, shape[0], shape[1], shape[2]);
+        break;
+      case 4:
+        fprintf(stdout, "name: %s, shape: [%ld,%ld,%ld,%ld]\n", pname, shape[0], shape[1],
+                shape[2], shape[3]);
+        break;
+    }
+    inputTensors[i] = zdl::SNPE::SNPEFactory::getTensorFactory().createTensor(shape);
+    inputTensorMap.add(pname, inputTensors[i].get());
+  }
+
+  response->set_status(0);
+  response->set_info("Stage Init: success");
+  return Status::OK;
+}
+
+std::string InferenceServiceImpl::ContentStr(zdl::DlSystem::ITensor* pTensor) {
+  std::string str;
+
+  const size_t N = std::min(5UL, pTensor->getSize());
+  auto it = pTensor->cbegin();
+  for (size_t i = 0; i < N; ++i) {
+    str += std::to_string(*(it + i));
+    str += " ";
+  }
+  str += "..";
+  str += std::to_string(*(it + pTensor->getSize() - 1));
+  return str;
+}
+
+std::string InferenceServiceImpl::ShapeStr(zdl::DlSystem::ITensor* pTensor) {
+  std::string str;
+
+  str += "[";
+  auto shape = pTensor->getShape();
+  for (size_t i = 0; i < shape.rank(); ++i) {
+    str += std::to_string(shape[i]);
+    str += ",";
+  }
+  str += ']';
+  return str;
+}
+
+::grpc::Status InferenceServiceImpl::OutputNames(::grpc::ServerContext* context,
+                                                 const ::mmdeploy::Empty* request,
+                                                 ::mmdeploy::Names* response) {
+  const auto& outputTensorNamesRef = snpe->getOutputTensorNames();
+  const auto& outputTensorNames = *outputTensorNamesRef;
+
+  for (size_t i = 0; i < outputTensorNames.size(); ++i) {
+    response->add_names(outputTensorNames.at(i));
+  }
+
+  return Status::OK;
+}
+
+::grpc::Status InferenceServiceImpl::Inference(::grpc::ServerContext* context,
+                                               const ::mmdeploy::TensorList* request,
+                                               ::mmdeploy::Reply* response) {
+  // Get input names and number
+  const auto& inputTensorNamesRef = snpe->getInputTensorNames();
+
+  if (!inputTensorNamesRef) {
+    response->set_status(-1);
+    response->set_info(zdl::DlSystem::getLastErrorString());
+    return Status::OK;
+  }
+
+  const auto& inputTensorNames = *inputTensorNamesRef;
+  if (inputTensorNames.size() != request->data_size()) {
+    response->set_status(-1);
+    response->set_info("Stage Inference: input tensor count does not match the model's inputs.");
+    return Status::OK;
+  }
+
+  helper::TextTable table("Inference");
+  table.padding(1);
+  table.add("type").add("name").add("shape").add("content").eor();
+
+  // Load input/output buffers with TensorMap
+  {
+    // ScopeTimer timer("convert input");
+
+    for (int i = 0; i < request->data_size(); ++i) {
+      const auto& tensor = request->data(i);
+      std::vector<float> float_input;
+      LoadFloatData(tensor.data(), float_input);
+
+      zdl::DlSystem::ITensor* ptensor = inputTensorMap.getTensor(tensor.name().c_str());
+      if (ptensor == nullptr) {
+        fprintf(stderr, "Stage Inference: input tensor `%s` does not exist in the input tensor map\n",
                tensor.name().c_str());
+        response->set_status(-1);
+        response->set_info("cannot find name in input tensor map.");
+        return Status::OK;
+      }
+
+      if (float_input.size() != ptensor->getSize()) {
+        fprintf(stderr, "Stage Inference: input size mismatch, got %zu, expected %zu.\n",
+                float_input.size(), ptensor->getSize());
+        response->set_status(-1);
+        response->set_info(zdl::DlSystem::getLastErrorString());
+        return Status::OK;
+      }
+
+      std::copy(float_input.begin(), float_input.end(), ptensor->begin());
+
+      table.add("IN").add(tensor.name()).add(ShapeStr(ptensor)).add(ContentStr(ptensor)).eor();
+    }
+  }
+
+  // A tensor map for SNPE execution outputs
+  zdl::DlSystem::TensorMap outputTensorMap;
+  // Execute the multiple input tensorMap on the model with SNPE
+  bool success = false;
+  {
+    ScopeTimer timer("execute", false);
+    success = snpe->execute(inputTensorMap, outputTensorMap);
+
+    if (!success) {
+      response->set_status(-1);
+      response->set_info(zdl::DlSystem::getLastErrorString());
+      return Status::OK;
+    }
+
+    table.add("EXECUTE").add(std::to_string(timer.cost()) + "ms").eor();
+  }
+
+  {
+    // ScopeTimer timer("convert output");
+    auto out_names = outputTensorMap.getTensorNames();
+    for (size_t i = 0; i < out_names.size(); ++i) {
+      const char* name = out_names.at(i);
+      zdl::DlSystem::ITensor* ptensor = outputTensorMap.getTensor(name);
+
+      table.add("OUT").add(std::string(name)).add(ShapeStr(ptensor)).add(ContentStr(ptensor)).eor();
+
+      const size_t data_length = ptensor->getSize();
+
+      std::string result;
+      result.resize(sizeof(float) * data_length);
+      int j = 0;
+      for (auto it = ptensor->cbegin(); it != ptensor->cend(); ++it, j += sizeof(float)) {
+        float f = *it;
+        memcpy(&result[0] + j, reinterpret_cast<const char*>(&f), sizeof(float));
+      }
+
+      auto shape = ptensor->getShape();
+
+      ::mmdeploy::Tensor* pData = response->add_data();
+      pData->set_dtype("float32");
+      pData->set_name(name);
+      pData->set_data(result);
+      for (size_t d = 0; d < shape.rank(); ++d) {
+        pData->add_shape(shape[d]);
+      }
+    }
+  }
+
+  std::cout << table << std::endl << std::endl;
+
+  // build output status
+  response->set_status(0);
+  response->set_info("Stage Inference: success");
+  return Status::OK;
+}
+
+::grpc::Status InferenceServiceImpl::Destroy(::grpc::ServerContext* context,
+                                             const ::mmdeploy::Empty* request,
+                                             ::mmdeploy::Reply* response) {
+  snpe.reset();
+  container.reset();
+  inputTensors.clear();
+  response->set_status(0);
+  zdl::SNPE::SNPEFactory::terminateLogging();
+  return Status::OK;
+}
diff --git a/service/snpe/server/service_impl.h b/service/snpe/server/service_impl.h
new file mode 100644
index 000000000..c6b825fdb
--- /dev/null
+++ b/service/snpe/server/service_impl.h
@@ -0,0 +1,78 @@
+// Copyright (c) OpenMMLab. All rights reserved.
+
+#ifndef SERVICE_IMPL_H
+#define SERVICE_IMPL_H
+
+#include <grpcpp/ext/proto_server_reflection_plugin.h>
+#include <grpcpp/grpcpp.h>
+#include <grpcpp/health_check_service_interface.h>
+
+#include <iostream>
+#include <memory>
+#include <string>
+
+#include "DiagLog/IDiagLog.hpp"
+#include "DlContainer/IDlContainer.hpp"
+#include "DlSystem/DlEnums.hpp"
+#include "DlSystem/DlError.hpp"
+#include "DlSystem/ITensorFactory.hpp"
+#include "DlSystem/IUserBuffer.hpp"
+#include "DlSystem/PlatformConfig.hpp"
+#include "DlSystem/RuntimeList.hpp"
+#include "DlSystem/UserBufferMap.hpp"
+#include "SNPE/SNPE.hpp"
+#include "SNPE/SNPEBuilder.hpp"
+#include "SNPE/SNPEFactory.hpp"
+#include "inference.grpc.pb.h"
+
+using grpc::Server;
+using grpc::ServerBuilder;
+using grpc::ServerContext;
+using grpc::Status;
+
+using mmdeploy::Empty;
+using mmdeploy::Inference;
+using mmdeploy::Model;
+using mmdeploy::Reply;
+using mmdeploy::Tensor;
+using mmdeploy::TensorList;
+
+// Logic and data behind the server's behavior.
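+// The service keeps one SNPE instance plus its pre-allocated input tensors
+// as member state, so a server process hosts a single model at a time;
+// calling Init again simply rebuilds that state. Note that a gRPC sync
+// server may run handlers on multiple threads and these members are not
+// guarded by a lock, so callers should serialize Init/Inference/Destroy
+// against each other.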
+class InferenceServiceImpl final : public Inference::Service {
+  ::grpc::Status Echo(::grpc::ServerContext* context, const ::mmdeploy::Empty* request,
+                      ::mmdeploy::Reply* response) override;
+
+  // Init Model with model file
+  ::grpc::Status Init(::grpc::ServerContext* context, const ::mmdeploy::Model* request,
+                      ::mmdeploy::Reply* response) override;
+  // Get output names
+  ::grpc::Status OutputNames(::grpc::ServerContext* context, const ::mmdeploy::Empty* request,
+                             ::mmdeploy::Names* response) override;
+  // Inference with inputs
+  ::grpc::Status Inference(::grpc::ServerContext* context, const ::mmdeploy::TensorList* request,
+                           ::mmdeploy::Reply* response) override;
+  // Destroy handle
+  ::grpc::Status Destroy(::grpc::ServerContext* context, const ::mmdeploy::Empty* request,
+                         ::mmdeploy::Reply* response) override;
+
+  void SaveDLC(const ::mmdeploy::Model* request, const std::string& name);
+
+  void LoadFloatData(const std::string& data, std::vector<float>& vec);
+
+  zdl::DlSystem::Runtime_t CheckRuntime(zdl::DlSystem::Runtime_t runtime, bool& staticQuantization);
+
+  void Build(std::unique_ptr<zdl::DlContainer::IDlContainer>& container,
+             zdl::DlSystem::Runtime_t runtime, zdl::DlSystem::RuntimeList runtimeList,
+             bool useUserSuppliedBuffers, zdl::DlSystem::PlatformConfig platformConfig);
+
+  std::string ShapeStr(zdl::DlSystem::ITensor* pTensor);
+
+  std::string ContentStr(zdl::DlSystem::ITensor* pTensor);
+
+  std::unique_ptr<zdl::SNPE::SNPE> snpe;
+  std::unique_ptr<zdl::DlContainer::IDlContainer> container;
+  std::vector<std::unique_ptr<zdl::DlSystem::ITensor>> inputTensors;
+  zdl::DlSystem::TensorMap inputTensorMap;
+};
+
+#endif  // SERVICE_IMPL_H
diff --git a/service/snpe/server/text_table.h b/service/snpe/server/text_table.h
new file mode 100644
index 000000000..39ea33088
--- /dev/null
+++ b/service/snpe/server/text_table.h
@@ -0,0 +1,209 @@
+/**
+ * \file sdk/load-and-run/src/text_table.h
+ * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
+ *
+ * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ */ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace helper { + +class TextTable { + public: + enum Level { Summary, Detail }; + enum class Align : int { Left, Right, Mid }; + TextTable() = default; + explicit TextTable(const std::string& table_name) : m_name(table_name) {} + TextTable& horizontal(char c) { + m_row.params.horizontal = c; + return *this; + } + TextTable& vertical(char c) { + m_row.params.vertical = c; + return *this; + } + TextTable& corner(char c) { + m_row.params.corner = c; + return *this; + } + TextTable& align(Align v) { + m_row.params.align = v; + return *this; + } + TextTable& padding(size_t w) { + m_padding = w; + return *this; + } + TextTable& prefix(const std::string& str) { + m_prefix = str; + return *this; + } + + template + TextTable& add(const T& value) { + m_row.values.emplace_back(value); + if (m_cols_max_w.size() < m_row.values.size()) { + m_cols_max_w.emplace_back(m_row.values.back().length()); + } else { + size_t i = m_row.values.size() - 1; + m_cols_max_w[i] = std::max(m_cols_max_w[i], m_row.values.back().length()); + } + return *this; + } + + template ::value, bool>::type = 0> + TextTable& add(const T& value) { + std::stringstream ss; + ss << std::setiosflags(std::ios::fixed) << std::setprecision(2); + ss << value; + m_row.values.emplace_back(ss.str()); + if (m_cols_max_w.size() < m_row.values.size()) { + m_cols_max_w.emplace_back(m_row.values.back().length()); + } else { + size_t i = m_row.values.size() - 1; + m_cols_max_w[i] = std::max(m_cols_max_w[i], m_row.values.back().length()); + } + return *this; + } + + template ::value, bool>::type = 0> + TextTable& add(const T& value) { + m_row.values.emplace_back(std::to_string(value)); + return *this; + } + + void eor() { + m_rows.emplace_back(m_row); + adjuster_last_row(); + m_row.values.clear(); + } + + void reset() { + m_row = {}; + m_cols_max_w.clear(); + m_padding = 0; + m_rows.clear(); + } + + void show(std::ostream& os) { + if (m_rows.empty()) return; + auto& last_row = m_rows.front(); + bool first = true; + for (auto& row : m_rows) { + auto& lrow = (last_row.values.size() * char_length(last_row.params.horizontal)) > + (row.values.size() * char_length(row.params.horizontal)) + ? 
last_row + : row; + // line before row + if (lrow.params.horizontal) { + if (not first) os << std::endl; + os << m_prefix; + if (lrow.params.corner) os << lrow.params.corner; + size_t skip_size = 0; + // table name + if (first) { + os << m_name; + skip_size = m_name.length(); + } + for (size_t i = 0; i < lrow.values.size(); ++i) { + auto max_w = m_cols_max_w.at(i) + m_padding * 2; + if (max_w + char_length(lrow.params.corner) <= skip_size) { + skip_size = skip_size - max_w - char_length(lrow.params.corner); + continue; + } + size_t rest = max_w + char_length(lrow.params.corner) - skip_size; + skip_size = 0; + if (rest > char_length(lrow.params.corner)) { + os << std::string(rest - char_length(lrow.params.corner), lrow.params.horizontal); + rest = char_length(lrow.params.corner); + } + if (rest > 0 && lrow.params.corner) os << lrow.params.corner; + } + } else if (first) { + os << m_prefix << ' ' << m_name; + } + first = false; + os << std::endl << m_prefix; + if (row.params.vertical) os << row.params.vertical; + // row + for (size_t i = 0; i < row.values.size(); ++i) { + auto& str = row.values.at(i); + auto max_w = m_cols_max_w.at(i) + 2 * m_padding; + if (row.params.align == Align::Mid) { + mid(os, str, max_w); + } else if (row.params.align == Align::Left) { + os << std::setw(max_w) << std::left << str; + } else { + os << std::setw(max_w) << std::right << str; + } + if (row.params.vertical) os << row.params.vertical; + } + last_row = row; + } + if (last_row.params.horizontal) { + os << std::endl << m_prefix; + if (last_row.params.corner) os << last_row.params.corner; + for (size_t i = 0; i < last_row.values.size(); ++i) { + auto max_w = m_cols_max_w.at(i); + std::string tmp(max_w + m_padding * 2, last_row.params.horizontal); + os << tmp; + if (last_row.params.corner) os << last_row.params.corner; + } + } + } + + private: + void adjuster_last_row() { + if (m_rows.empty()) return; + auto& row = m_rows.back(); + if (row.params.horizontal == 0 or row.params.vertical == 0) { + row.params.corner = 0; + } + if (row.params.horizontal != 0 && row.params.vertical != 0 && row.params.corner == 0) { + row.params.corner = row.params.horizontal; + } + } + + inline void mid(std::ostream& os, const std::string& str, size_t max_w) { + size_t l = (max_w - str.length()) / 2 + str.length(); + size_t r = max_w - l; + os << std::setw(l) << std::right << str; + if (r > 0) os << std::setw(r) << ' '; + } + inline size_t char_length(char c) { return c ? 
1 : 0; } + std::string m_name; + std::vector m_cols_max_w; + size_t m_padding = 0; + std::string m_prefix = ""; + struct Row { + std::vector values; + struct Params { + Align align = Align::Left; + char horizontal = '-', vertical = '|', corner = '+'; + } params; + }; + std::vector m_rows; + Row m_row; +}; + +inline std::ostream& operator<<(std::ostream& stream, TextTable& table) { + table.show(stream); + return stream; +} + +} // namespace helper diff --git a/setup.cfg b/setup.cfg index 207376869..b02db3a5c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -14,3 +14,4 @@ known_first_party = mmdeploy known_third_party = h5py,m2r,mmcls,mmcv,mmdeploy_python,mmdet,mmedit,mmocr,mmseg,ncnn,numpy,onnx,onnxruntime,packaging,pyppeteer,pyppl,pytest,pytorch_sphinx_theme,recommonmark,setuptools,sphinx,tensorrt,torch,torchvision no_lines_before = STDLIB,LOCALFOLDER default_section = THIRDPARTY +skip = service/snpe/client/inference_pb2.py,service/snpe/client/inference_pb2_grpc.py diff --git a/setup.py b/setup.py index 86e5cdf02..ddd853648 100644 --- a/setup.py +++ b/setup.py @@ -2,6 +2,14 @@ import os from setuptools import find_packages, setup +EXT_TYPE = '' +try: + from torch.utils.cpp_extension import BuildExtension + cmd_class = {'build_ext': BuildExtension} + EXT_TYPE = 'torch' +except ModuleNotFoundError: + cmd_class = {} + print('Skip building ext ops due to the absence of torch.') pwd = os.path.dirname(__file__) version_file = 'mmdeploy/version.py' @@ -96,6 +104,74 @@ def parse_requirements(fname='requirements.txt', with_version=True): return packages +def get_extensions(): + extensions = [] + ext_name = 'mmdeploy.backend.torchscript.ts_optimizer' + + if EXT_TYPE == 'torch': + import glob + import platform + + from torch.utils.cpp_extension import CppExtension + + try: + import psutil + num_cpu = len(psutil.Process().cpu_affinity()) + cpu_use = max(4, num_cpu - 1) + except (ModuleNotFoundError, AttributeError): + cpu_use = 4 + + os.environ.setdefault('MAX_JOBS', str(cpu_use)) + define_macros = [] + + # Before PyTorch1.8.0, when compiling CUDA code, `cxx` is a + # required key passed to PyTorch. Even if there is no flag passed + # to cxx, users also need to pass an empty list to PyTorch. + # Since PyTorch1.8.0, it has a default value so users do not need + # to pass an empty list anymore. + # More details at https://github.com/pytorch/pytorch/pull/45956 + extra_compile_args = {'cxx': []} + + # c++14 is required. + # However, in the windows environment, some standard libraries + # will depend on c++17 or higher. In fact, for the windows + # environment, the compiler will choose the appropriate compiler + # to compile those cpp files, so there is no need to add the + # argument + if platform.system() != 'Windows': + extra_compile_args['cxx'] = ['-std=c++14'] + + include_dirs = [] + + op_files = glob.glob( + './csrc/mmdeploy/backend_ops/torchscript/optimizer/*.cpp' + ) + glob.glob( + './csrc/mmdeploy/backend_ops/torchscript/optimizer/ir/*.cpp' + ) + glob.glob( + './csrc/mmdeploy/backend_ops/torchscript/optimizer/passes' + '/onnx/*.cpp') + extension = CppExtension + + # c++14 is required. + # However, in the windows environment, some standard libraries + # will depend on c++17 or higher. 
In fact, for the windows + # environment, the compiler will choose the appropriate compiler + # to compile those cpp files, so there is no need to add the + # argument + if 'nvcc' in extra_compile_args and platform.system() != 'Windows': + extra_compile_args['nvcc'] += ['-std=c++14'] + + ext_ops = extension( + name=ext_name, + sources=op_files, + include_dirs=include_dirs, + define_macros=define_macros, + extra_compile_args=extra_compile_args) + extensions.append(ext_ops) + + return extensions + + if __name__ == '__main__': setup( name='mmdeploy', @@ -128,6 +204,6 @@ if __name__ == '__main__': 'build': parse_requirements('requirements/build.txt'), 'optional': parse_requirements('requirements/optional.txt'), }, - ext_modules=[], - cmdclass={}, + ext_modules=get_extensions(), + cmdclass=cmd_class, zip_safe=False) diff --git a/tests/regression/mmcls.yml b/tests/regression/mmcls.yml index 398f9a8ed..78943b63d 100644 --- a/tests/regression/mmcls.yml +++ b/tests/regression/mmcls.yml @@ -54,6 +54,11 @@ tensorrt: backend_test: *default_backend_test deploy_config: configs/mmcls/classification_tensorrt-fp16_static-224x224.py + pipeline_trt_static_fp16_384x384: &pipeline_trt_static_fp16_384x384 + convert_image: *convert_image + backend_test: *default_backend_test + deploy_config: configs/mmcls/classification_tensorrt-fp16_static-384x384.py + pipeline_trt_static_int8: &pipeline_trt_static_int8 convert_image: *convert_image backend_test: *default_backend_test @@ -108,7 +113,7 @@ pplnn: torchscript: pipeline_ts_fp32: &pipeline_ts_fp32 convert_image: *convert_image - backend_test: False + backend_test: True deploy_config: configs/mmcls/classification_torchscript.py @@ -139,6 +144,18 @@ models: - *pipeline_pplnn_dynamic_fp32 - *pipeline_openvino_dynamic_fp32 + - name: DenseNet + metafile: configs/densenet/metafile.yml + model_configs: + - configs/densenet/densenet121_4xb256_in1k.py + pipelines: + - *pipeline_ts_fp32 + - *pipeline_ort_dynamic_fp32 + - *pipeline_trt_dynamic_fp16 + - *pipeline_ncnn_static_fp32 + - *pipeline_pplnn_dynamic_fp32 + - *pipeline_openvino_dynamic_fp32 + - name: SE-ResNet metafile: configs/seresnet/metafile.yml model_configs: @@ -186,3 +203,13 @@ models: - *pipeline_ncnn_static_fp32 # - *pipeline_pplnn_dynamic_fp32 # - *pipeline_openvino_dynamic_fp32 + + - name: VisionTransformer + metafile: configs/vision_transformer/metafile.yml + model_configs: + - configs/vision_transformer/vit-base-p16_ft-64xb64_in1k-384.py + pipelines: + - *pipeline_ts_fp32 + - *pipeline_ort_dynamic_fp32 + - *pipeline_trt_static_fp16_384x384 + - *pipeline_ncnn_static_fp32 diff --git a/tests/regression/mmdet.yml b/tests/regression/mmdet.yml index cc1f62d26..25b108ea6 100644 --- a/tests/regression/mmdet.yml +++ b/tests/regression/mmdet.yml @@ -185,8 +185,12 @@ models: pipelines: - *pipeline_ts_fp32 - *pipeline_ort_dynamic_fp32 + - deploy_config: configs/mmdet/detection/detection_tensorrt-fp16_dynamic-64x64-608x608.py + convert_image: *convert_image + backend_test: *default_backend_test + sdk_config: *sdk_dynamic # - *pipeline_trt_dynamic_fp32 - - *pipeline_trt_dynamic_fp16 +# - *pipeline_trt_dynamic_fp16 # - *pipeline_trt_dynamic_int8 - *pipeline_ncnn_static_fp32 # - *pipeline_pplnn_dynamic_fp32 @@ -218,6 +222,16 @@ models: - *pipeline_pplnn_dynamic_fp32 - *pipeline_openvino_dynamic_fp32 + - name: Cascade Mask R-CNN + metafile: configs/cascade_rcnn/metafile.yml + model_configs: + - configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py + pipelines: + - *pipeline_seg_ts_fp32 + - *pipeline_seg_ort_dynamic_fp32 
+ - *pipeline_seg_trt_dynamic_fp32 + - *pipeline_seg_openvino_dynamic_fp32 + - name: FCOS metafile: configs/fcos/metafile.yml model_configs: @@ -301,3 +315,11 @@ models: - *pipeline_seg_ort_dynamic_fp32 - *pipeline_seg_trt_dynamic_fp32 - *pipeline_seg_openvino_dynamic_fp32 + + - name: Swin Transformer + metafile: configs/swin/metafile.yml + model_configs: + - configs/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco.py + pipelines: + - *pipeline_seg_ort_dynamic_fp32 + - *pipeline_seg_trt_dynamic_fp32 diff --git a/tests/regression/mmrotate.yml b/tests/regression/mmrotate.yml index 45630304e..d6a9477dc 100644 --- a/tests/regression/mmrotate.yml +++ b/tests/regression/mmrotate.yml @@ -48,3 +48,29 @@ models: - *pipeline_ort_detection_dynamic_fp32 - *pipeline_trt_detection_dynamic_fp32 - *pipeline_trt_detection_dynamic_fp16 + + - name: oriented_rcnn + metafile: configs/oriented_rcnn/metafile.yml + model_configs: + - configs/oriented_rcnn/oriented_rcnn_r50_fpn_fp16_1x_dota_le90.py + pipelines: + - *pipeline_ort_detection_dynamic_fp32 + - *pipeline_trt_detection_dynamic_fp32 + - *pipeline_trt_detection_dynamic_fp16 + + - name: gliding_vertex + metafile: configs/gliding_vertex/metafile.yml + model_configs: + - configs/gliding_vertex/gliding_vertex_r50_fpn_1x_dota_le90.py + pipelines: + - *pipeline_trt_detection_dynamic_fp32 + - *pipeline_trt_detection_dynamic_fp16 + + - name: RoITransformer + metafile: configs/roi_trans/metafile.yml + model_configs: + - configs/roi_trans/roi_trans_r50_fpn_1x_dota_le90.py + pipelines: + - *pipeline_ort_detection_dynamic_fp32 + - *pipeline_trt_detection_dynamic_fp32 + - *pipeline_trt_detection_dynamic_fp16 diff --git a/tests/regression/mmseg.yml b/tests/regression/mmseg.yml index d048f3f7d..1fbeae39d 100644 --- a/tests/regression/mmseg.yml +++ b/tests/regression/mmseg.yml @@ -29,6 +29,11 @@ onnxruntime: sdk_config: *sdk_dynamic deploy_config: configs/mmseg/segmentation_onnxruntime_static-1024x2048.py + pipeline_ort_static_fp32_512x512: &pipeline_ort_static_fp32_512x512 + convert_image: *convert_image + backend_test: False + deploy_config: configs/mmseg/segmentation_onnxruntime_static-512x512.py + pipeline_ort_dynamic_fp32: &pipeline_ort_dynamic_fp32 convert_image: *convert_image deploy_config: configs/mmseg/segmentation_onnxruntime_dynamic.py @@ -40,6 +45,11 @@ tensorrt: sdk_config: *sdk_dynamic deploy_config: configs/mmseg/segmentation_tensorrt_static-1024x2048.py + pipeline_trt_static_fp32_512x512: &pipeline_trt_static_fp32_512x512 + convert_image: *convert_image + backend_test: False + deploy_config: configs/mmseg/segmentation_tensorrt_static-512x512.py + pipeline_trt_static_fp16: &pipeline_trt_static_fp16 convert_image: *convert_image backend_test: *default_backend_test @@ -52,6 +62,10 @@ tensorrt: sdk_config: *sdk_dynamic deploy_config: configs/mmseg/segmentation_tensorrt-int8_static-1024x2048.py + pipeline_trt_static_fp16_512x512: &pipeline_trt_static_fp16_512x512 + convert_image: *convert_image + backend_test: False + deploy_config: configs/mmseg/segmentation_tensorrt-fp16_static-512x512.py pipeline_trt_dynamic_fp32: &pipeline_trt_dynamic_fp32 convert_image: *convert_image @@ -82,6 +96,11 @@ openvino: backend_test: False deploy_config: configs/mmseg/segmentation_openvino_static-1024x2048.py + pipeline_openvino_static_fp32_512x512: &pipeline_openvino_static_fp32_512x512 + convert_image: *convert_image + backend_test: False + deploy_config: configs/mmseg/segmentation_openvino_static-512x512.py + ncnn: pipeline_ncnn_static_fp32: &pipeline_ncnn_static_fp32 convert_image: 
*convert_image @@ -338,3 +357,14 @@ models: pipelines: - *pipeline_ort_static_fp32 - *pipeline_trt_static_fp16 + - name: Segmenter + metafile: configs/segmenter/segmenter.yml + model_configs: + - configs/segmenter/segmenter_vit-s_mask_8x1_512x512_160k_ade20k.py + - configs/segmenter/segmenter_vit-s_linear_8x1_512x512_160k_ade20k.py + pipelines: + - *pipeline_ort_static_fp32_512x512 + - *pipeline_trt_static_fp32_512x512 + - *pipeline_openvino_static_fp32_512x512 + - *pipeline_ncnn_static_fp32 + - *pipeline_ts_fp32 diff --git a/tests/test_apis/test_onnx_passes.py b/tests/test_apis/test_onnx_passes.py index c7dc891c5..a2d77b446 100644 --- a/tests/test_apis/test_onnx_passes.py +++ b/tests/test_apis/test_onnx_passes.py @@ -30,7 +30,7 @@ def test_merge_shape_concate(): except ImportError: pytest.skip('pass not found.') - def _optimize_onnx(graph, params_dict, torch_out): + def _optimize_onnx(ctx, graph, params_dict, torch_out): opt_pass(graph) return graph, params_dict, torch_out @@ -82,7 +82,7 @@ def test_peephole(): except ImportError: pytest.skip('pass not found.') - def _optimize_onnx(graph, params_dict, torch_out): + def _optimize_onnx(ctx, graph, params_dict, torch_out): opt_pass(graph) return graph, params_dict, torch_out @@ -148,7 +148,7 @@ def test_flatten_cls_head(): except ImportError: pytest.skip('pass not found.') - def _optimize_onnx(graph, params_dict, torch_out): + def _optimize_onnx(ctx, graph, params_dict, torch_out): opt_pass(graph) return graph, params_dict, torch_out @@ -188,3 +188,101 @@ def test_flatten_cls_head(): node, idx = _find_next_node(idx + 1, nodes, 'Flatten') assert node is not None + + +def test_fuse_select_assign(): + pytest.importorskip('mmdeploy.backend.torchscript.ts_optimizer.onnx') + + try: + from mmdeploy.backend.torchscript import ts_optimizer + opt_pass = ts_optimizer.onnx._jit_pass_fuse_select_assign + except ImportError: + pytest.skip('pass not found.') + + def _optimize_onnx(ctx, graph, params_dict, torch_out): + opt_pass(graph, params_dict) + return graph, params_dict, torch_out + + class TestModel(torch.nn.Module): + + def __init__(self) -> None: + super().__init__() + + def forward(self, x): + z = x / 2 + y = torch.zeros_like(x) + y[x < 0.5] = z[x < 0.5] + return y + + model = TestModel() + x = torch.rand(1, 4, 8, 8) + + with RewriterContext({}, onnx_custom_passes=_optimize_onnx): + torch.onnx.export( + model, + x, + onnx_file, + input_names=['input'], + output_names=['output'], + dynamic_axes=dict(input={ + 2: 'h', + 3: 'w' + }), + opset_version=11) + + onnx_model = onnx.load(onnx_file) + graph = onnx_model.graph + nodes = graph.node + + node, _ = _find_next_node(0, nodes, 'Where') + assert node is not None + + +def test_common_subgraph_elimination(): + pytest.importorskip('mmdeploy.backend.torchscript.ts_optimizer.onnx') + + try: + from mmdeploy.backend.torchscript import ts_optimizer + opt_pass = ts_optimizer.onnx._jit_pass_common_subgraph_elimination + except ImportError: + pytest.skip('pass not found.') + + def _optimize_onnx(ctx, graph, params_dict, torch_out): + opt_pass(graph, params_dict) + return graph, params_dict, torch_out + + class TestModel(torch.nn.Module): + + def __init__(self) -> None: + super().__init__() + + def forward(self, x): + y = x.unsqueeze(1) + z = x.unsqueeze(1) + return y + z + + model = TestModel() + x = torch.rand(1, 2, 3) + + with RewriterContext({}, onnx_custom_passes=_optimize_onnx): + torch.onnx.export( + model, + x, + onnx_file, + input_names=['input'], + output_names=['output'], + dynamic_axes=dict(input={ + 
1: 'h', + 2: 'w' + }), + opset_version=11) + + onnx_model = onnx.load(onnx_file) + graph = onnx_model.graph + nodes = graph.node + + unsqueeze_count = 0 + for n in nodes: + if n.op_type == 'Unsqueeze': + unsqueeze_count += 1 + assert unsqueeze_count == 1 diff --git a/tests/test_codebase/test_mmdet/test_mmdet_core.py b/tests/test_codebase/test_mmdet/test_mmdet_core.py index 892e7bfdf..4fd43a06c 100644 --- a/tests/test_codebase/test_mmdet/test_mmdet_core.py +++ b/tests/test_codebase/test_mmdet/test_mmdet_core.py @@ -1,13 +1,17 @@ # Copyright (c) OpenMMLab. All rights reserved. +import tempfile + import mmcv import numpy as np import pytest import torch from mmdeploy.codebase import import_codebase +from mmdeploy.core.rewriters.rewriter_manager import RewriterContext from mmdeploy.utils import Backend, Codebase -from mmdeploy.utils.test import (WrapFunction, backend_checker, check_backend, - get_onnx_model, get_rewrite_outputs) +from mmdeploy.utils.test import (WrapFunction, WrapModel, backend_checker, + check_backend, get_onnx_model, + get_rewrite_outputs) import_codebase(Codebase.MMDET) @@ -146,12 +150,76 @@ def test_tblr2bbox(backend_type: Backend): assert rewrite_outputs is not None -def test_distance2bbox(): - from mmdeploy.codebase.mmdet.core import distance2bbox +@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME]) +def test_distance2bbox(backend_type: Backend): + check_backend(backend_type) + deploy_cfg = mmcv.Config( + dict( + onnx_config=dict(output_names=None, input_shape=None), + backend_config=dict(type=backend_type.value, model_inputs=None), + codebase_config=dict(type='mmdet', task='ObjectDetection'))) + + # wrap function to enable rewrite + def distance2bbox(*args, **kwargs): + import mmdet.core.bbox.transforms + return mmdet.core.bbox.transforms.distance2bbox(*args, **kwargs) + points = torch.rand(3, 2) distance = torch.rand(3, 4) - bbox = distance2bbox(points, distance) - assert bbox.shape == torch.Size([3, 4]) + original_outputs = distance2bbox(points, distance) + + # wrap function to nn.Module, enable torch.onnx.export + wrapped_func = WrapFunction(distance2bbox) + rewrite_outputs, is_backend_output = get_rewrite_outputs( + wrapped_func, + model_inputs={ + 'points': points, + 'distance': distance + }, + deploy_cfg=deploy_cfg) + + if is_backend_output: + model_output = original_outputs.squeeze().cpu().numpy() + rewrite_output = rewrite_outputs[0].squeeze() + assert np.allclose( + model_output, rewrite_output, rtol=1e-03, atol=1e-05) + else: + assert rewrite_outputs is not None + + +@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME]) +def test__distancepointbboxcoder__decode(backend_type: Backend): + check_backend(backend_type) + deploy_cfg = mmcv.Config( + dict( + onnx_config=dict(output_names=None, input_shape=None), + backend_config=dict(type=backend_type.value, model_inputs=None), + codebase_config=dict(type='mmdet', task='ObjectDetection'))) + from mmdet.core.bbox.coder import DistancePointBBoxCoder + coder = DistancePointBBoxCoder() + # wrap function to enable rewrite + + wrapped_model = WrapModel(coder, 'decode') + + points = torch.rand(3, 2) + pred_bboxes = torch.rand(3, 4) + original_outputs = coder.decode(points, pred_bboxes) + + rewrite_outputs, is_backend_output = get_rewrite_outputs( + wrapped_model=wrapped_model, + model_inputs={ + 'points': points, + 'pred_bboxes': pred_bboxes + }, + deploy_cfg=deploy_cfg) + + if is_backend_output: + model_output = original_outputs.squeeze().cpu().numpy() + rewrite_output = rewrite_outputs[0].squeeze() + 
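+        # The backend result should match the PyTorch reference within loose
+        # floating-point tolerances; exact equality is not expected across
+        # runtimes.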
assert np.allclose( + model_output, rewrite_output, rtol=1e-03, atol=1e-05) + else: + assert rewrite_outputs is not None @backend_checker(Backend.ONNXRUNTIME) @@ -223,3 +291,52 @@ def test_multiclass_nms_with_keep_top_k(pre_top_k): 'multiclass_nms returned more values than "keep_top_k"\n' \ f'dets.shape: {dets.shape}\n' \ f'keep_top_k: {keep_top_k}' + + +@backend_checker(Backend.TENSORRT) +def test__anchorgenerator__single_level_grid_priors(): + backend_type = 'tensorrt' + import onnx + from mmdet.core.anchor import AnchorGenerator + + from mmdeploy.apis.onnx import export + from mmdeploy.codebase.mmdet.core import anchor # noqa + + generator = AnchorGenerator( + scales=[8], ratios=[0.5, 1.0, 2.0], strides=[4]) + + def single_level_grid_priors(input): + return generator.single_level_grid_priors(input.shape[2:], 0, + input.dtype, input.device) + + x = torch.rand(1, 3, 4, 4) + wrapped_func = WrapFunction(single_level_grid_priors) + output = wrapped_func(x) + + # test forward + with RewriterContext({}, backend_type): + wrap_output = wrapped_func(x) + torch.testing.assert_allclose(output, wrap_output) + + onnx_prefix = tempfile.NamedTemporaryFile().name + + export( + wrapped_func, + x, + onnx_prefix, + backend=backend_type, + input_names=['input'], + output_names=['output'], + dynamic_axes=dict(input={ + 2: 'h', + 3: 'w' + })) + + onnx_model = onnx.load(onnx_prefix + '.onnx') + + find_trt_grid_priors = False + for n in onnx_model.graph.node: + if n.op_type == 'GridPriorsTRT': + find_trt_grid_priors = True + + assert find_trt_grid_priors diff --git a/tests/test_codebase/test_mmdet/test_mmdet_models.py b/tests/test_codebase/test_mmdet/test_mmdet_models.py index def6a6283..45de806eb 100644 --- a/tests/test_codebase/test_mmdet/test_mmdet_models.py +++ b/tests/test_codebase/test_mmdet/test_mmdet_models.py @@ -201,8 +201,8 @@ def get_gfl_head_model(): return model -def test_focus_forward_ncnn(): - backend_type = Backend.NCNN +@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME, Backend.NCNN]) +def test_focus_forward(backend_type): check_backend(backend_type) focus_model = get_focus_backbone_model() focus_model.cpu().eval() @@ -222,11 +222,10 @@ def test_focus_forward_ncnn(): wrapped_model=wrapped_model, model_inputs=rewrite_inputs, deploy_cfg=deploy_cfg) - for model_output, rewrite_output in zip(model_outputs[0], - rewrite_outputs[0]): - model_output = model_output.squeeze().cpu().numpy() + for model_output, rewrite_output in zip(model_outputs[0], rewrite_outputs): + model_output = model_output.squeeze() rewrite_output = rewrite_output.squeeze() - assert np.allclose( + torch.testing.assert_allclose( model_output, rewrite_output, rtol=1e-03, atol=1e-05) @@ -1410,18 +1409,19 @@ def test_ssd_head_get_bboxes__ncnn(is_dynamic: bool): 'img_shape': (s, s, 3) }] output_names = ['output'] - input_names = ['input'] + input_names = [] + for i in range(6): + input_names.append('cls_scores_' + str(i)) + input_names.append('bbox_preds_' + str(i)) dynamic_axes = None if is_dynamic: dynamic_axes = { - input_names[0]: { - 2: 'height', - 3: 'width' - }, output_names[0]: { 1: 'num_dets', } } + for input_name in input_names: + dynamic_axes[input_name] = {2: 'height', 3: 'width'} deploy_cfg = mmcv.Config( dict( backend_config=dict(type=Backend.NCNN.value), @@ -1577,3 +1577,118 @@ def test_reppoints_head_points2bbox(backend_type: Backend, ir_type: str): wrapped_model=wrapped_model, model_inputs=rewrite_inputs, deploy_cfg=deploy_cfg) + + +@pytest.mark.skipif( + reason='Only support GPU test', condition=not 
torch.cuda.is_available()) +@pytest.mark.parametrize('backend_type', [(Backend.TENSORRT)]) +def test_windows_msa(backend_type: Backend): + check_backend(backend_type) + from mmdet.models.backbones.swin import WindowMSA + model = WindowMSA(96, 3, (7, 7)) + model.cuda().eval() + output_names = ['output'] + + deploy_cfg = mmcv.Config( + dict( + backend_config=dict( + type=backend_type.value, + common_config=dict(fp16_mode=True, max_workspace_size=1 << 20), + model_inputs=[ + dict( + input_shapes=dict( + x=dict( + min_shape=[12, 49, 96], + opt_shape=[12, 49, 96], + max_shape=[12, 49, 96]), + mask=dict( + min_shape=[12, 49, 49], + opt_shape=[12, 49, 49], + max_shape=[12, 49, 49]))) + ]), + onnx_config=dict( + input_shape=None, + input_names=['x', 'mask'], + output_names=output_names))) + + x = torch.randn([12, 49, 96]).cuda() + mask = torch.randn([12, 49, 49]).cuda() + wrapped_model = WrapModel(model, 'forward') + rewrite_inputs = {'x': x, 'mask': mask} + _ = get_rewrite_outputs( + wrapped_model=wrapped_model, + model_inputs=rewrite_inputs, + deploy_cfg=deploy_cfg) + + +@pytest.mark.skipif( + reason='Only support GPU test', condition=not torch.cuda.is_available()) +@pytest.mark.parametrize('backend_type', [(Backend.TENSORRT)]) +def test_shift_windows_msa(backend_type: Backend): + check_backend(backend_type) + from mmdet.models.backbones.swin import ShiftWindowMSA + model = ShiftWindowMSA(96, 3, 7) + model.cuda().eval() + output_names = ['output'] + + deploy_cfg = mmcv.Config( + dict( + backend_config=dict( + type=backend_type.value, + model_inputs=[ + dict( + input_shapes=dict( + query=dict( + min_shape=[1, 60800, 96], + opt_shape=[1, 60800, 96], + max_shape=[1, 60800, 96]))) + ]), + onnx_config=dict( + input_shape=None, + input_names=['query'], + output_names=output_names))) + + query = torch.randn([1, 60800, 96]).cuda() + hw_shape = (torch.tensor(200), torch.tensor(304)) + + wrapped_model = WrapModel(model, 'forward') + rewrite_inputs = {'query': query, 'hw_shape': hw_shape} + _ = get_rewrite_outputs( + wrapped_model=wrapped_model, + model_inputs=rewrite_inputs, + deploy_cfg=deploy_cfg, + run_with_backend=False) + + +@pytest.mark.skipif( + reason='Only support GPU test', condition=not torch.cuda.is_available()) +@pytest.mark.parametrize('backend_type', [(Backend.TENSORRT)]) +def test_mlvl_point_generator__single_level_grid_priors__tensorrt( + backend_type: Backend): + check_backend(backend_type) + from mmdet.core.anchor import MlvlPointGenerator + model = MlvlPointGenerator([8, 16, 32]) + output_names = ['output'] + + deploy_cfg = mmcv.Config( + dict( + backend_config=dict(type=backend_type.value), + onnx_config=dict( + input_shape=None, + input_names=['query'], + output_names=output_names))) + + featmap_size = torch.Size([80, 80]) + with_stride = True + + wrapped_model = WrapModel(model, 'single_level_grid_priors') + rewrite_inputs = { + 'featmap_size': featmap_size, + 'with_stride': with_stride, + 'level_idx': 0 + } + _ = get_rewrite_outputs( + wrapped_model=wrapped_model, + model_inputs=rewrite_inputs, + deploy_cfg=deploy_cfg, + run_with_backend=False) diff --git a/tests/test_codebase/test_mmrotate/test_mmrotate_core.py b/tests/test_codebase/test_mmrotate/test_mmrotate_core.py index 487cbb627..3afb681a5 100644 --- a/tests/test_codebase/test_mmrotate/test_mmrotate_core.py +++ b/tests/test_codebase/test_mmrotate/test_mmrotate_core.py @@ -6,8 +6,9 @@ import torch from mmdeploy.codebase import import_codebase from mmdeploy.utils import Backend, Codebase -from mmdeploy.utils.test import 
(WrapFunction, backend_checker, check_backend, - get_onnx_model, get_rewrite_outputs) +from mmdeploy.utils.test import (WrapFunction, WrapModel, backend_checker, + check_backend, get_onnx_model, + get_rewrite_outputs) try: import_codebase(Codebase.MMROTATE) @@ -117,7 +118,7 @@ def test_multiclass_nms_rotated_with_keep_top_k(pre_top_k): model_inputs = {'boxes': test_boxes, 'scores': test_scores} import mmdeploy.backend.onnxruntime as ort_apis - backend_model = ort_apis.ORTWrapper(onnx_model_path, 'cuda:0', None) + backend_model = ort_apis.ORTWrapper(onnx_model_path, 'cpu', None) output = backend_model.forward(model_inputs) output = backend_model.output_to_list(output) dets = output[0] @@ -205,7 +206,7 @@ def test_delta_midpointoffset_rbbox_delta2bbox(backend_type: Backend): original_outputs = delta2bbox(rois, deltas, version='le90') # wrap function to nn.Module, enable torch.onnx.export - wrapped_func = WrapFunction(delta2bbox) + wrapped_func = WrapFunction(delta2bbox, version='le90') rewrite_outputs, is_backend_output = get_rewrite_outputs( wrapped_func, model_inputs={ @@ -270,3 +271,141 @@ def test_fake_multiclass_nms_rotated(): assert rewrite_outputs is not None, 'Got unexpected rewrite '\ 'outputs: {}'.format(rewrite_outputs) + + +@pytest.mark.parametrize('backend_type', [Backend.TENSORRT]) +def test_poly2obb_le90(backend_type: Backend): + check_backend(backend_type) + polys = torch.rand(1, 10, 8) + deploy_cfg = mmcv.Config( + dict( + onnx_config=dict(output_names=None, input_shape=None), + backend_config=dict( + type=backend_type.value, + model_inputs=[ + dict( + input_shapes=dict( + polys=dict( + min_shape=polys.shape, + opt_shape=polys.shape, + max_shape=polys.shape))) + ]), + codebase_config=dict(type='mmrotate', task='RotatedDetection'))) + + # import rewriter + from mmdeploy.codebase import Codebase, import_codebase + import_codebase(Codebase.MMROTATE) + + # wrap function to enable rewrite + def poly2obb_le90(*args, **kwargs): + import mmrotate + return mmrotate.core.bbox.transforms.poly2obb_le90(*args, **kwargs) + + # wrap function to nn.Module, enable torch.onnx.export + wrapped_func = WrapFunction(poly2obb_le90) + rewrite_outputs, is_backend_output = get_rewrite_outputs( + wrapped_func, + model_inputs={'polys': polys}, + deploy_cfg=deploy_cfg, + run_with_backend=False) + + assert rewrite_outputs is not None + + +@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME]) +def test_poly2obb_le135(backend_type: Backend): + check_backend(backend_type) + polys = torch.rand(1, 10, 8) + deploy_cfg = mmcv.Config( + dict( + onnx_config=dict(output_names=None, input_shape=None), + backend_config=dict( + type=backend_type.value, + model_inputs=[ + dict( + input_shapes=dict( + polys=dict( + min_shape=polys.shape, + opt_shape=polys.shape, + max_shape=polys.shape))) + ]), + codebase_config=dict(type='mmrotate', task='RotatedDetection'))) + + # wrap function to enable rewrite + def poly2obb_le135(*args, **kwargs): + import mmrotate + return mmrotate.core.bbox.transforms.poly2obb_le135(*args, **kwargs) + + # wrap function to nn.Module, enable torch.onnx.export + wrapped_func = WrapFunction(poly2obb_le135) + rewrite_outputs, is_backend_output = get_rewrite_outputs( + wrapped_func, + model_inputs={'polys': polys}, + deploy_cfg=deploy_cfg, + run_with_backend=False) + + assert rewrite_outputs is not None + + +@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME]) +def test_obb2poly_le135(backend_type: Backend): + check_backend(backend_type) + rboxes = torch.rand(1, 10, 5) + 
deploy_cfg = mmcv.Config( + dict( + onnx_config=dict(output_names=None, input_shape=None), + backend_config=dict( + type=backend_type.value, + model_inputs=[ + dict( + input_shapes=dict( + rboxes=dict( + min_shape=rboxes.shape, + opt_shape=rboxes.shape, + max_shape=rboxes.shape))) + ]), + codebase_config=dict(type='mmrotate', task='RotatedDetection'))) + + # wrap function to enable rewrite + def obb2poly_le135(*args, **kwargs): + import mmrotate + return mmrotate.core.bbox.transforms.obb2poly_le135(*args, **kwargs) + + # wrap function to nn.Module, enable torch.onnx.export + wrapped_func = WrapFunction(obb2poly_le135) + rewrite_outputs, is_backend_output = get_rewrite_outputs( + wrapped_func, + model_inputs={'rboxes': rboxes}, + deploy_cfg=deploy_cfg, + run_with_backend=False) + + assert rewrite_outputs is not None + + +@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME]) +def test_gvfixcoder__decode(backend_type: Backend): + check_backend(backend_type) + + deploy_cfg = mmcv.Config( + dict( + onnx_config=dict(output_names=['output'], input_shape=None), + backend_config=dict(type=backend_type.value), + codebase_config=dict(type='mmrotate', task='RotatedDetection'))) + + from mmrotate.core.bbox import GVFixCoder + coder = GVFixCoder(angle_range='le90') + + hbboxes = torch.rand(1, 10, 4) + fix_deltas = torch.rand(1, 10, 4) + + wrapped_model = WrapModel(coder, 'decode') + rewrite_outputs, is_backend_output = get_rewrite_outputs( + wrapped_model, + model_inputs={ + 'hbboxes': hbboxes, + 'fix_deltas': fix_deltas + }, + deploy_cfg=deploy_cfg, + run_with_backend=False) + + assert rewrite_outputs is not None diff --git a/tests/test_codebase/test_mmrotate/test_mmrotate_models.py b/tests/test_codebase/test_mmrotate/test_mmrotate_models.py index 656a2b4e2..491832655 100644 --- a/tests/test_codebase/test_mmrotate/test_mmrotate_models.py +++ b/tests/test_codebase/test_mmrotate/test_mmrotate_models.py @@ -332,3 +332,354 @@ def test_get_bboxes_of_oriented_rpn_head(backend_type: Backend): model_inputs=rewrite_inputs, deploy_cfg=deploy_cfg) assert rewrite_outputs is not None + + +def get_rotated_rpn_head_model(): + """Oriented RPN Head Config.""" + test_cfg = mmcv.Config( + dict( + nms_pre=2000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(iou_thr=0.1), + max_per_img=2000)) + from mmrotate.models.dense_heads import RotatedRPNHead + model = RotatedRPNHead( + version='le90', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0.0, 0.0, 0.0, 0.0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + test_cfg=test_cfg) + + model.requires_grad_(False) + return model + + +@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME]) +def test_get_bboxes_of_rotated_rpn_head(backend_type: Backend): + check_backend(backend_type) + head = get_rotated_rpn_head_model() + head.cpu().eval() + s = 128 + img_metas = [{ + 'scale_factor': np.ones(4), + 'pad_shape': (s, s, 3), + 'img_shape': (s, s, 3) + }] + + output_names = ['dets', 'labels'] + deploy_cfg = mmcv.Config( + dict( + backend_config=dict(type=backend_type.value), + onnx_config=dict(output_names=output_names, input_shape=None), + codebase_config=dict( + type='mmrotate', + task='RotatedDetection', + post_processing=dict( + score_threshold=0.05, + iou_threshold=0.1, + pre_top_k=2000, + keep_top_k=2000)))) + + # the cls_score's size: (1, 3, 32, 32), (1, 3, 16, 16), + # (1, 3, 8, 8), (1, 3, 4, 
4), (1, 3, 2, 2). + # the bboxes's size: (1, 18, 32, 32), (1, 18, 16, 16), + # (1, 18, 8, 8), (1, 18, 4, 4), (1, 18, 2, 2) + seed_everything(1234) + cls_score = [ + torch.rand(1, 3, pow(2, i), pow(2, i)) for i in range(5, 0, -1) + ] + seed_everything(5678) + bboxes = [torch.rand(1, 18, pow(2, i), pow(2, i)) for i in range(5, 0, -1)] + + # to get outputs of onnx model after rewrite + img_metas[0]['img_shape'] = torch.Tensor([s, s]) + wrapped_model = WrapModel( + head, 'get_bboxes', img_metas=img_metas, with_nms=True) + rewrite_inputs = { + 'cls_scores': cls_score, + 'bbox_preds': bboxes, + } + rewrite_outputs, is_backend_output = get_rewrite_outputs( + wrapped_model=wrapped_model, + model_inputs=rewrite_inputs, + deploy_cfg=deploy_cfg) + assert rewrite_outputs is not None + + +@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME]) +def test_rotate_standard_roi_head__simple_test(backend_type: Backend): + check_backend(backend_type) + from mmrotate.models.roi_heads import OrientedStandardRoIHead + output_names = ['dets', 'labels'] + deploy_cfg = mmcv.Config( + dict( + backend_config=dict(type=backend_type.value), + onnx_config=dict(output_names=output_names, input_shape=None), + codebase_config=dict( + type='mmrotate', + task='RotatedDetection', + post_processing=dict( + score_threshold=0.05, + iou_threshold=0.1, + pre_top_k=2000, + keep_top_k=2000)))) + angle_version = 'le90' + test_cfg = mmcv.Config( + dict( + nms_pre=2000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(iou_thr=0.1), + max_per_img=2000)) + head = OrientedStandardRoIHead( + bbox_roi_extractor=dict( + type='RotatedSingleRoIExtractor', + roi_layer=dict( + type='RoIAlignRotated', + out_size=7, + sample_num=2, + clockwise=True), + out_channels=3, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='RotatedShared2FCBBoxHead', + in_channels=3, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=15, + bbox_coder=dict( + type='DeltaXYWHAOBBoxCoder', + angle_range=angle_version, + norm_factor=None, + edge_swap=True, + proj_xy=True, + target_means=(.0, .0, .0, .0, .0), + target_stds=(0.1, 0.1, 0.2, 0.2, 0.1)), + reg_class_agnostic=True), + test_cfg=test_cfg) + head.cpu().eval() + + seed_everything(1234) + x = [torch.rand(1, 3, pow(2, i), pow(2, i)) for i in range(4, 0, -1)] + proposals = [torch.rand(1, 100, 6), torch.randint(0, 10, (1, 100))] + img_metas = [{'img_shape': torch.tensor([224, 224])}] + + wrapped_model = WrapModel( + head, 'simple_test', proposals=proposals, img_metas=img_metas) + rewrite_inputs = {'x': x} + rewrite_outputs, is_backend_output = get_rewrite_outputs( + wrapped_model=wrapped_model, + model_inputs=rewrite_inputs, + deploy_cfg=deploy_cfg) + assert rewrite_outputs is not None + + +@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME]) +def test_gv_ratio_roi_head__simple_test(backend_type: Backend): + check_backend(backend_type) + from mmrotate.models.roi_heads import GVRatioRoIHead + output_names = ['dets', 'labels'] + deploy_cfg = mmcv.Config( + dict( + backend_config=dict(type=backend_type.value), + onnx_config=dict(output_names=output_names, input_shape=None), + codebase_config=dict( + type='mmrotate', + task='RotatedDetection', + post_processing=dict( + score_threshold=0.05, + iou_threshold=0.1, + pre_top_k=2000, + keep_top_k=2000, + max_output_boxes_per_class=1000)))) + angle_version = 'le90' + test_cfg = mmcv.Config( + dict( + nms_pre=2000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(iou_thr=0.1), + max_per_img=2000)) + head = GVRatioRoIHead( + version=angle_version, + 
bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=3, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='GVBBoxHead', + version=angle_version, + num_shared_fcs=2, + in_channels=3, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=15, + ratio_thr=0.8, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=(.0, .0, .0, .0), + target_stds=(0.1, 0.1, 0.2, 0.2)), + fix_coder=dict(type='GVFixCoder', angle_range=angle_version), + ratio_coder=dict(type='GVRatioCoder', angle_range=angle_version), + reg_class_agnostic=True), + test_cfg=test_cfg) + head.cpu().eval() + + seed_everything(1234) + x = [torch.rand(1, 3, pow(2, i), pow(2, i)) for i in range(4, 0, -1)] + bboxes = torch.rand(1, 100, 2) + bboxes = torch.cat( + [bboxes, bboxes + torch.rand(1, 100, 2) + torch.rand(1, 100, 1)], + dim=-1) + proposals = [bboxes, torch.randint(0, 10, (1, 100))] + img_metas = [{'img_shape': torch.tensor([224, 224])}] + + wrapped_model = WrapModel( + head, 'simple_test', proposals=proposals, img_metas=img_metas) + rewrite_inputs = {'x': x} + rewrite_outputs, is_backend_output = get_rewrite_outputs( + wrapped_model=wrapped_model, + model_inputs=rewrite_inputs, + deploy_cfg=deploy_cfg) + assert rewrite_outputs is not None + + +def get_roi_trans_roi_head_model(): + """Oriented RPN Head Config.""" + angle_version = 'le90' + + num_stages = 2 + stage_loss_weights = [1, 1] + version = angle_version + bbox_roi_extractor = [ + dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=64, + featmap_strides=[4, 8, 16, 32]), + dict( + type='RotatedSingleRoIExtractor', + roi_layer=dict( + type='RoIAlignRotated', + out_size=7, + sample_num=2, + clockwise=True), + out_channels=64, + featmap_strides=[4, 8, 16, 32]), + ] + + bbox_head = [ + dict( + type='RotatedShared2FCBBoxHead', + in_channels=64, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=15, + bbox_coder=dict( + type='DeltaXYWHAHBBoxCoder', + angle_range=angle_version, + norm_factor=2, + edge_swap=True, + target_means=[0., 0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2, 1]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), + dict( + type='RotatedShared2FCBBoxHead', + in_channels=64, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=15, + bbox_coder=dict( + type='DeltaXYWHAOBBoxCoder', + angle_range=angle_version, + norm_factor=None, + edge_swap=True, + proj_xy=True, + target_means=[0., 0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1, 0.5]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) + ] + test_cfg = mmcv.Config( + dict( + nms_pre=2000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(iou_thr=0.1), + max_per_img=2000)) + + args = [num_stages, stage_loss_weights, bbox_roi_extractor, bbox_head] + kwargs = {'version': version, 'test_cfg': test_cfg} + + from mmrotate.models.roi_heads import RoITransRoIHead + model = RoITransRoIHead(*args, **kwargs).eval() + return model + + +@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME]) +def test_simple_test_of_roi_trans_roi_head(backend_type: Backend): + check_backend(backend_type) + + roi_head = get_roi_trans_roi_head_model() + roi_head.cpu() + + seed_everything(1234) + x = [ + 
torch.rand((1, 64, 32, 32)), + torch.rand((1, 64, 16, 16)), + torch.rand((1, 64, 8, 8)), + torch.rand((1, 64, 4, 4)), + ] + proposals = torch.tensor([[[58.8285, 52.1405, 188.2484, 141.5644, 0.5]]]) + labels = torch.tensor([[[0.]]]) + s = 256 + img_metas = [{ + 'img_shape': torch.tensor([s, s]), + 'ori_shape': torch.tensor([s, s]), + 'scale_factor': torch.tensor([1, 1, 1, 1]) + }] + + model_inputs = { + 'x': x, + } + + output_names = ['det_bboxes', 'det_labels'] + deploy_cfg = mmcv.Config( + dict( + backend_config=dict(type=backend_type.value), + onnx_config=dict(output_names=output_names, input_shape=None), + codebase_config=dict( + type='mmrotate', + task='RotatedDetection', + post_processing=dict( + score_threshold=0.05, + iou_threshold=0.1, + pre_top_k=2000, + keep_top_k=2000)))) + + wrapped_model = WrapModel( + roi_head, + 'simple_test', + proposal_list=[proposals, labels], + img_metas=img_metas) + rewrite_outputs, is_backend_output = get_rewrite_outputs( + wrapped_model=wrapped_model, + model_inputs=model_inputs, + deploy_cfg=deploy_cfg) + + assert rewrite_outputs is not None diff --git a/tests/test_csrc/CMakeLists.txt b/tests/test_csrc/CMakeLists.txt index 8f0398709..ad7934e0d 100644 --- a/tests/test_csrc/CMakeLists.txt +++ b/tests/test_csrc/CMakeLists.txt @@ -65,9 +65,13 @@ target_include_directories(mmdeploy_tests PRIVATE ${CMAKE_SOURCE_DIR}/third_party/catch2) target_include_directories(mmdeploy_tests PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) +if (NOT (MMDEPLOY_SHARED_LIBS OR MSVC)) + target_compile_options(mmdeploy_tests PRIVATE $<$<COMPILE_LANGUAGE:CXX>:-fvisibility=hidden>) +endif () + mmdeploy_load_static(mmdeploy_tests MMDeployStaticModules) mmdeploy_load_dynamic(mmdeploy_tests MMDeployDynamicModules) target_link_libraries(mmdeploy_tests PRIVATE - MMDeployLibs - mmdeploy_transform - mmdeploy_opencv_utils) + MMDeployLibs + mmdeploy_transform + mmdeploy_opencv_utils) diff --git a/tests/test_csrc/capi/test_classifier.cpp b/tests/test_csrc/capi/test_classifier.cpp index a490bd224..602012849 100644 --- a/tests/test_csrc/capi/test_classifier.cpp +++ b/tests/test_csrc/capi/test_classifier.cpp @@ -4,34 +4,36 @@ #include "catch.hpp" // clang-format on -#include "mmdeploy/apis/c/classifier.h" +#include "mmdeploy/apis/c/mmdeploy/classifier.h" #include "mmdeploy/core/logger.h" #include "opencv2/opencv.hpp" #include "test_resource.h" using namespace std; -TEST_CASE("test classifier's c api", "[classifier]") { +TEST_CASE("test classifier's c api", "[.classifier][resource]") {
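+ // a leading '.' in a Catch2 tag hides the case from the default test run; + // these resource-dependent cases only execute when selected explicitly.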
auto test = [](const std::string& device_name, const std::string& model_path, const std::vector<std::string>& img_list) { - mm_handle_t handle{nullptr}; + mmdeploy_classifier_t classifier{nullptr}; auto ret = - mmdeploy_classifier_create_by_path(model_path.c_str(), device_name.c_str(), 0, &handle); - REQUIRE(ret == MM_SUCCESS); + mmdeploy_classifier_create_by_path(model_path.c_str(), device_name.c_str(), 0, &classifier); + REQUIRE(ret == MMDEPLOY_SUCCESS); vector<cv::Mat> cv_mats; - vector<mm_mat_t> mats; + vector<mmdeploy_mat_t> mats; for (auto& img_path : img_list) { cv::Mat mat = cv::imread(img_path); REQUIRE(!mat.empty()); cv_mats.push_back(mat); - mats.push_back({mat.data, mat.rows, mat.cols, mat.channels(), MM_BGR, MM_INT8}); + mats.push_back({mat.data, mat.rows, mat.cols, mat.channels(), MMDEPLOY_PIXEL_FORMAT_BGR, + MMDEPLOY_DATA_TYPE_UINT8}); } - mm_class_t* results{nullptr}; + mmdeploy_classification_t* results{nullptr}; int* result_count{nullptr}; - ret = mmdeploy_classifier_apply(handle, mats.data(), (int)mats.size(), &results, &result_count); - REQUIRE(ret == MM_SUCCESS); + ret = mmdeploy_classifier_apply(classifier, mats.data(), (int)mats.size(), &results, + &result_count); + REQUIRE(ret == MMDEPLOY_SUCCESS); auto result_ptr = results; MMDEPLOY_INFO("model_path: {}", model_path); for (auto i = 0; i < (int)mats.size(); ++i) { @@ -42,7 +44,7 @@ TEST_CASE("test classifier's c api", "[classifier]") { } mmdeploy_classifier_release_result(results, result_count, (int)mats.size()); - mmdeploy_classifier_destroy(handle); + mmdeploy_classifier_destroy(classifier); }; auto gResources = MMDeployTestResources::Get(); diff --git a/tests/test_csrc/capi/test_detector.cpp b/tests/test_csrc/capi/test_detector.cpp index ae3b8aa46..0fdb1252d 100644 --- a/tests/test_csrc/capi/test_detector.cpp +++ b/tests/test_csrc/capi/test_detector.cpp @@ -4,33 +4,34 @@ #include "catch.hpp" // clang-format on -#include "mmdeploy/apis/c/detector.h" +#include "mmdeploy/apis/c/mmdeploy/detector.h" #include "mmdeploy/core/logger.h" #include "mmdeploy/core/utils/formatter.h" #include "opencv2/opencv.hpp" #include "test_resource.h" using namespace std; -TEST_CASE("test detector's c api", "[detector]") { +TEST_CASE("test detector's c api", "[.detector][resource]") { MMDEPLOY_INFO("test detector"); auto test = [](const string &device, const string &model_path, const vector<string> &img_list) { - mm_handle_t handle{nullptr}; - auto ret = mmdeploy_detector_create_by_path(model_path.c_str(), device.c_str(), 0, &handle); - REQUIRE(ret == MM_SUCCESS); + mmdeploy_detector_t detector{nullptr}; + auto ret = mmdeploy_detector_create_by_path(model_path.c_str(), device.c_str(), 0, &detector); + REQUIRE(ret == MMDEPLOY_SUCCESS); vector<cv::Mat> cv_mats; - vector<mm_mat_t> mats; + vector<mmdeploy_mat_t> mats; for (auto &img_path : img_list) { cv::Mat mat = cv::imread(img_path); REQUIRE(!mat.empty()); cv_mats.push_back(mat); - mats.push_back({mat.data, mat.rows, mat.cols, mat.channels(), MM_BGR, MM_INT8}); + mats.push_back({mat.data, mat.rows, mat.cols, mat.channels(), MMDEPLOY_PIXEL_FORMAT_BGR, + MMDEPLOY_DATA_TYPE_UINT8}); } - mm_detect_t *results{nullptr}; + mmdeploy_detection_t *results{nullptr}; int *result_count{nullptr}; - ret = mmdeploy_detector_apply(handle, mats.data(), (int)mats.size(), &results, &result_count); - REQUIRE(ret == MM_SUCCESS); + ret = mmdeploy_detector_apply(detector, mats.data(), (int)mats.size(), &results, &result_count); + REQUIRE(ret == MMDEPLOY_SUCCESS);
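+ // detections for all images come back in one flat array; result_count[i] + // gives the number of boxes for image i and also drives the release call.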
auto result_ptr = results; for (auto i = 0; i < mats.size(); ++i) { MMDEPLOY_INFO("the '{}-th' image has '{}' objects", i, result_count[i]); @@ -41,7 +42,7 @@ TEST_CASE("test detector's c api", "[detector]") { } } mmdeploy_detector_release_result(results, result_count, (int)mats.size()); - mmdeploy_detector_destroy(handle); + mmdeploy_detector_destroy(detector); }; MMDEPLOY_INFO("get test resources"); auto &gResources = MMDeployTestResources::Get(); diff --git a/tests/test_csrc/capi/test_model.cpp b/tests/test_csrc/capi/test_model.cpp index 34d952580..6c2aaa1e1 100644 --- a/tests/test_csrc/capi/test_model.cpp +++ b/tests/test_csrc/capi/test_model.cpp @@ -4,10 +4,10 @@ #include "catch.hpp" // clang-format on -#include "mmdeploy/apis/c/model.h" +#include "mmdeploy/apis/c/mmdeploy/model.h" #include "test_resource.h" -TEST_CASE("test model c capi", "[model]") { +TEST_CASE("test model c capi", "[.model][resource]") { auto &gResource = MMDeployTestResources::Get(); std::string model_path; for (auto const &codebase : gResource.codebases()) { @@ -21,11 +21,11 @@ } REQUIRE(!model_path.empty()); - mm_model_t model{}; - REQUIRE(mmdeploy_model_create_by_path(model_path.c_str(), &model) == MM_SUCCESS); + mmdeploy_model_t model{}; + REQUIRE(mmdeploy_model_create_by_path(model_path.c_str(), &model) == MMDEPLOY_SUCCESS); mmdeploy_model_destroy(model); model = nullptr; - REQUIRE(mmdeploy_model_create(nullptr, 0, &model) == MM_E_FAIL); + REQUIRE(mmdeploy_model_create(nullptr, 0, &model) == MMDEPLOY_E_FAIL); mmdeploy_model_destroy(model); } diff --git a/tests/test_csrc/capi/test_restorer.cpp b/tests/test_csrc/capi/test_restorer.cpp index 04670c1c6..bade09941 100644 --- a/tests/test_csrc/capi/test_restorer.cpp +++ b/tests/test_csrc/capi/test_restorer.cpp @@ -4,30 +4,31 @@ #include "catch.hpp" // clang-format on -#include "mmdeploy/apis/c/restorer.h" +#include "mmdeploy/apis/c/mmdeploy/restorer.h" #include "opencv2/opencv.hpp" #include "test_resource.h" using namespace std; -TEST_CASE("test restorer's c api", "[restorer]") { +TEST_CASE("test restorer's c api", "[.restorer][resource]") { auto test = [](const string &device, const string &backend, const string &model_path, const vector<string> &img_list) { - mm_handle_t handle{nullptr}; - auto ret = mmdeploy_restorer_create_by_path(model_path.c_str(), device.c_str(), 0, &handle); - REQUIRE(ret == MM_SUCCESS); + mmdeploy_restorer_t restorer{nullptr}; + auto ret = mmdeploy_restorer_create_by_path(model_path.c_str(), device.c_str(), 0, &restorer); + REQUIRE(ret == MMDEPLOY_SUCCESS); vector<cv::Mat> cv_mats; - vector<mm_mat_t> mats; + vector<mmdeploy_mat_t> mats; for (auto &img_path : img_list) { cv::Mat mat = cv::imread(img_path); REQUIRE(!mat.empty()); cv_mats.push_back(mat); - mats.push_back({mat.data, mat.rows, mat.cols, mat.channels(), MM_BGR, MM_INT8}); + mats.push_back({mat.data, mat.rows, mat.cols, mat.channels(), MMDEPLOY_PIXEL_FORMAT_BGR, + MMDEPLOY_DATA_TYPE_UINT8}); } - mm_mat_t *res{}; - ret = mmdeploy_restorer_apply(handle, mats.data(), (int)mats.size(), &res); - REQUIRE(ret == MM_SUCCESS); + mmdeploy_mat_t *res{}; + ret = mmdeploy_restorer_apply(restorer, mats.data(), (int)mats.size(), &res); + REQUIRE(ret == MMDEPLOY_SUCCESS);
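+ // res[i] is an SDK-owned H x W x 3 uint8 image; it remains valid until + // mmdeploy_restorer_release_result is called below.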
for (auto i = 0; i < cv_mats.size(); ++i) { cv::Mat out(res[i].height, res[i].width, CV_8UC3, res[i].data); @@ -36,7 +37,7 @@ } mmdeploy_restorer_release_result(res, (int)mats.size()); - mmdeploy_restorer_destroy(handle); + mmdeploy_restorer_destroy(restorer); }; auto gResources = MMDeployTestResources::Get(); diff --git a/tests/test_csrc/capi/test_segmentor.cpp b/tests/test_csrc/capi/test_segmentor.cpp index cfaaf4a4f..ef9078aae 100644 --- a/tests/test_csrc/capi/test_segmentor.cpp +++ b/tests/test_csrc/capi/test_segmentor.cpp @@ -4,32 +4,33 @@ #include "catch.hpp" // clang-format on -#include "mmdeploy/apis/c/segmentor.h" +#include "mmdeploy/apis/c/mmdeploy/segmentor.h" #include "opencv2/opencv.hpp" #include "test_resource.h" using namespace std; -TEST_CASE("test segmentor's c api", "[segmentor]") { +TEST_CASE("test segmentor's c api", "[.segmentor][resource]") { auto test = [](const string &device, const string &backend, const string &model_path, const vector<string> &img_list) { - mm_handle_t handle{nullptr}; - auto ret = mmdeploy_segmentor_create_by_path(model_path.c_str(), device.c_str(), 0, &handle); - REQUIRE(ret == MM_SUCCESS); + mmdeploy_segmentor_t segmentor{nullptr}; + auto ret = mmdeploy_segmentor_create_by_path(model_path.c_str(), device.c_str(), 0, &segmentor); + REQUIRE(ret == MMDEPLOY_SUCCESS); vector<cv::Mat> cv_mats; - vector<mm_mat_t> mats; + vector<mmdeploy_mat_t> mats; for (auto &img_path : img_list) { cv::Mat mat = cv::imread(img_path); REQUIRE(!mat.empty()); cv_mats.push_back(mat); - mats.push_back({mat.data, mat.rows, mat.cols, mat.channels(), MM_BGR, MM_INT8}); + mats.push_back({mat.data, mat.rows, mat.cols, mat.channels(), MMDEPLOY_PIXEL_FORMAT_BGR, + MMDEPLOY_DATA_TYPE_UINT8}); } - mm_segment_t *results{nullptr}; + mmdeploy_segmentation_t *results{nullptr}; int count = 0; - ret = mmdeploy_segmentor_apply(handle, mats.data(), (int)mats.size(), &results); - REQUIRE(ret == MM_SUCCESS); + ret = mmdeploy_segmentor_apply(segmentor, mats.data(), (int)mats.size(), &results); + REQUIRE(ret == MMDEPLOY_SUCCESS); REQUIRE(results != nullptr); auto result_ptr = results; @@ -39,7 +40,7 @@ } mmdeploy_segmentor_release_result(results, (int)mats.size()); - mmdeploy_segmentor_destroy(handle); + mmdeploy_segmentor_destroy(segmentor); }; auto gResources = MMDeployTestResources::Get(); diff --git a/tests/test_csrc/capi/test_text_detector.cpp b/tests/test_csrc/capi/test_text_detector.cpp index a53995432..95e1ae493 100644 --- a/tests/test_csrc/capi/test_text_detector.cpp +++ b/tests/test_csrc/capi/test_text_detector.cpp @@ -3,34 +3,35 @@ #include "catch.hpp" // clang-format on -#include "mmdeploy/apis/c/text_detector.h" +#include "mmdeploy/apis/c/mmdeploy/text_detector.h" #include "mmdeploy/core/logger.h" #include "opencv2/opencv.hpp" #include "test_resource.h" using namespace std; -TEST_CASE("test text detector's c api", "[text-detector]") { +TEST_CASE("test text detector's c api", "[.text-detector][resource]") { auto test = [](const string& device, const string& model_path, const vector<string>& img_list) { - mm_handle_t handle{nullptr}; + mmdeploy_text_detector_t detector{nullptr}; auto ret = - mmdeploy_text_detector_create_by_path(model_path.c_str(), device.c_str(), 0, &handle); - REQUIRE(ret == MM_SUCCESS); + mmdeploy_text_detector_create_by_path(model_path.c_str(), device.c_str(), 0, &detector); + REQUIRE(ret == MMDEPLOY_SUCCESS); vector<cv::Mat> cv_mats; - vector<mm_mat_t> mats; + vector<mmdeploy_mat_t> mats; for (auto& img_path : img_list) { cv::Mat mat = cv::imread(img_path); REQUIRE(!mat.empty()); cv_mats.push_back(mat); - mats.push_back({mat.data, mat.rows, mat.cols, mat.channels(), MM_BGR, MM_INT8}); + mats.push_back({mat.data, mat.rows, mat.cols, mat.channels(), MMDEPLOY_PIXEL_FORMAT_BGR, + MMDEPLOY_DATA_TYPE_UINT8}); } - mm_text_detect_t* results{nullptr}; + mmdeploy_text_detection_t* results{nullptr}; int* result_count{nullptr}; - ret = mmdeploy_text_detector_apply(handle, mats.data(), (int)mats.size(), &results, + ret = mmdeploy_text_detector_apply(detector, mats.data(), (int)mats.size(), &results, &result_count); - REQUIRE(ret == MM_SUCCESS); + REQUIRE(ret == MMDEPLOY_SUCCESS);
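+ // each mmdeploy_text_detection_t carries a four-point quad (bbox[0..3]) and + // a score; result_count is reported per image, as in the other task APIs.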
#include "opencv2/opencv.hpp" @@ -12,26 +12,27 @@ using namespace std; -TEST_CASE("test text recognizer's c api", "[text-recognizer]") { +TEST_CASE("test text recognizer's c api", "[.text-recognizer][resource]") { auto test = [](const string& device, const string& model_path, const vector& img_list) { - mm_handle_t handle{nullptr}; + mmdeploy_text_recognizer_t recognizer{nullptr}; auto ret = - mmdeploy_text_recognizer_create_by_path(model_path.c_str(), device.c_str(), 0, &handle); - REQUIRE(ret == MM_SUCCESS); + mmdeploy_text_recognizer_create_by_path(model_path.c_str(), device.c_str(), 0, &recognizer); + REQUIRE(ret == MMDEPLOY_SUCCESS); vector cv_mats; - vector mats; + vector mats; for (auto& img_path : img_list) { cv::Mat mat = cv::imread(img_path); REQUIRE(!mat.empty()); cv_mats.push_back(mat); - mats.push_back({mat.data, mat.rows, mat.cols, mat.channels(), MM_BGR, MM_INT8}); + mats.push_back({mat.data, mat.rows, mat.cols, mat.channels(), MMDEPLOY_PIXEL_FORMAT_BGR, + MMDEPLOY_DATA_TYPE_UINT8}); } - mm_text_recognize_t* results{}; - ret = mmdeploy_text_recognizer_apply_bbox(handle, mats.data(), (int)mats.size(), nullptr, + mmdeploy_text_recognition_t* results{}; + ret = mmdeploy_text_recognizer_apply_bbox(recognizer, mats.data(), (int)mats.size(), nullptr, nullptr, &results); - REQUIRE(ret == MM_SUCCESS); + REQUIRE(ret == MMDEPLOY_SUCCESS); for (auto i = 0; i < mats.size(); ++i) { std::vector score(results[i].score, results[i].score + results[i].length); @@ -39,7 +40,7 @@ TEST_CASE("test text recognizer's c api", "[text-recognizer]") { } mmdeploy_text_recognizer_release_result(results, (int)mats.size()); - mmdeploy_text_recognizer_destroy(handle); + mmdeploy_text_recognizer_destroy(recognizer); }; auto& gResources = MMDeployTestResources::Get(); @@ -59,34 +60,35 @@ TEST_CASE("test text recognizer's c api", "[text-recognizer]") { } } -TEST_CASE("test text detector-recognizer combo", "[text-detector-recognizer]") { +TEST_CASE("test text detector-recognizer combo", "[.text-detector-recognizer]") { auto test = [](const std::string& device, const string& det_model_path, const string& reg_model_path, std::vector& img_list) { - mm_handle_t detector{}; + mmdeploy_text_detector_t detector{}; REQUIRE(mmdeploy_text_detector_create_by_path(det_model_path.c_str(), device.c_str(), 0, - &detector) == MM_SUCCESS); - mm_handle_t recognizer{}; + &detector) == MMDEPLOY_SUCCESS); + mmdeploy_text_recognizer_t recognizer{}; REQUIRE(mmdeploy_text_recognizer_create_by_path(reg_model_path.c_str(), device.c_str(), 0, - &recognizer) == MM_SUCCESS); + &recognizer) == MMDEPLOY_SUCCESS); vector cv_mats; - vector mats; + vector mats; for (const auto& img_path : img_list) { cv::Mat mat = cv::imread(img_path); REQUIRE(!mat.empty()); cv_mats.push_back(mat); - mats.push_back({mat.data, mat.rows, mat.cols, mat.channels(), MM_BGR, MM_INT8}); + mats.push_back({mat.data, mat.rows, mat.cols, mat.channels(), MMDEPLOY_PIXEL_FORMAT_BGR, + MMDEPLOY_DATA_TYPE_UINT8}); } - mm_text_detect_t* bboxes{}; + mmdeploy_text_detection_t* bboxes{}; int* bbox_count{}; REQUIRE(mmdeploy_text_detector_apply(detector, mats.data(), mats.size(), &bboxes, - &bbox_count) == MM_SUCCESS); + &bbox_count) == MMDEPLOY_SUCCESS); - mm_text_recognize_t* texts{}; + mmdeploy_text_recognition_t* texts{}; REQUIRE(mmdeploy_text_recognizer_apply_bbox(recognizer, mats.data(), (int)mats.size(), bboxes, - bbox_count, &texts) == MM_SUCCESS); + bbox_count, &texts) == MMDEPLOY_SUCCESS); int offset = 0; for (auto i = 0; i < mats.size(); ++i) { diff --git 
a/tests/test_csrc/core/test_execution.cpp b/tests/test_csrc/core/test_execution.cpp index 979f06225..5704883cb 100644 --- a/tests/test_csrc/core/test_execution.cpp +++ b/tests/test_csrc/core/test_execution.cpp @@ -4,7 +4,7 @@ #include #include "catch.hpp" -#include "mmdeploy/apis/c/executor.h" +#include "mmdeploy/apis/c/mmdeploy/executor.h" #include "mmdeploy/core/utils/formatter.h" #include "mmdeploy/core/value.h" #include "mmdeploy/execution/expand.h" @@ -30,7 +30,7 @@ TEST_CASE("test basic execution", "[execution]") { static_assert(std::is_same_v); auto c = Then(b, [](Value v) -> Value { return {{"c", v["a"].get() + v["b"].get()}}; }); auto d = SyncWait(c); - MMDEPLOY_ERROR("{}", d); + MMDEPLOY_INFO("{}", d); } template @@ -45,7 +45,7 @@ TEST_CASE("test split", "[execution]") { auto y = GetKey(s, "y"); auto x_v = SyncWait(x); auto y_v = SyncWait(y); - MMDEPLOY_ERROR("x = {}, y = {}", x_v, y_v); + MMDEPLOY_INFO("x = {}, y = {}", x_v, y_v); } TEST_CASE("test when_all", "[execution]") { @@ -56,7 +56,7 @@ TEST_CASE("test when_all", "[execution]") { auto e = Just(500); auto t = WhenAll(a, b, c, d, e); auto v = SyncWait(t); - MMDEPLOY_ERROR("v = {}", v); + MMDEPLOY_INFO("v = {}", v); } void Func() { @@ -65,7 +65,7 @@ void Func() { LetValue(a, [](int& x, int& y) { return Then(Just(x + y), [](int v) { return v * v; }); }); auto v = SyncWait(b); static_assert(std::is_same_v>); - MMDEPLOY_ERROR("v = {}", v); + MMDEPLOY_INFO("v = {}", v); } TEST_CASE("test let_value", "[execution]") { Func(); } @@ -78,7 +78,7 @@ TEST_CASE("test fork-join", "[execution]") { auto xy = WhenAll(x, y); auto v = SyncWait(xy); static_assert(std::is_same_v>); - MMDEPLOY_ERROR("v = {}", v); + MMDEPLOY_INFO("v = {}", v); } TEST_CASE("test ensure_started", "[execution]") { @@ -97,7 +97,7 @@ TEST_CASE("test ensure_started", "[execution]") { std::this_thread::sleep_for(std::chrono::milliseconds(500)); MMDEPLOY_INFO("ensure_started sync_wait"); auto v = SyncWait(c); - MMDEPLOY_ERROR("ensure_started: {}", v); + MMDEPLOY_INFO("ensure_started: {}", v); } TEST_CASE("test start_detached", "[execution]") { @@ -119,7 +119,7 @@ TEST_CASE("test on", "[execution]") { auto b = On(pool.GetScheduler(), a); auto c = SyncWait(b); static_assert(std::is_same_v>); - MMDEPLOY_ERROR("c = {}", c); + MMDEPLOY_INFO("c = {}", c); } mmdeploy_value_t f(mmdeploy_value_t v, void*) { @@ -180,7 +180,7 @@ void TestFunc(const char* sched_name) { } SECTION("Bulk") { auto sender = Just(Value(Value::Array(100))) | Transfer(sched) | - Bulk(100, [](size_t index, Value& v) { v[index] = index; }); + Bulk(100, [](size_t index, Value& v) { v[index] = (uint32_t)index; }); auto [value] = SyncWait(std::move(sender)); std::vector a; std::vector b; @@ -246,7 +246,7 @@ TEST_CASE("test executor C API", "[execution]") { REQUIRE(b); auto c = mmdeploy_executor_sync_wait(b); REQUIRE(c); - MMDEPLOY_CRITICAL("{}", *(Value*)c); + MMDEPLOY_INFO("{}", *(Value*)c); mmdeploy_value_destroy(c); } diff --git a/tests/test_csrc/model/test_directory_model.cpp b/tests/test_csrc/model/test_directory_model.cpp index aa96836a3..6bb8ea357 100644 --- a/tests/test_csrc/model/test_directory_model.cpp +++ b/tests/test_csrc/model/test_directory_model.cpp @@ -9,7 +9,7 @@ using namespace mmdeploy; -TEST_CASE("test directory model", "[model]") { +TEST_CASE("test directory model", "[.model][resource]") { std::unique_ptr model_impl; for (auto& entry : ModelRegistry::Get().ListEntries()) { if (entry.name == "DirectoryModel") { diff --git a/tests/test_csrc/model/test_zip_model.cpp 
b/tests/test_csrc/model/test_zip_model.cpp index b92e50c30..03d8fd4d2 100644 --- a/tests/test_csrc/model/test_zip_model.cpp +++ b/tests/test_csrc/model/test_zip_model.cpp @@ -13,6 +13,7 @@ using namespace std; using namespace mmdeploy; +#if MMDEPLOY_ZIP_MODEL TEST_CASE("test zip model", "[zip_model]") { std::unique_ptr model_impl; for (auto& entry : ModelRegistry::Get().ListEntries()) { @@ -50,3 +51,4 @@ TEST_CASE("test zip model", "[zip_model]") { REQUIRE(!model_impl->Init(buffer.data(), buffer.size()).has_error()); } } +#endif diff --git a/tests/test_csrc/net/test_ncnn_net.cpp b/tests/test_csrc/net/test_ncnn_net.cpp index ea04b43cd..ab09e2f0a 100644 --- a/tests/test_csrc/net/test_ncnn_net.cpp +++ b/tests/test_csrc/net/test_ncnn_net.cpp @@ -10,7 +10,7 @@ using namespace mmdeploy; -TEST_CASE("test ncnn net", "[ncnn_net]") { +TEST_CASE("test ncnn net", "[.ncnn_net][resource]") { auto& gResource = MMDeployTestResources::Get(); auto model_list = gResource.LocateModelResources(fs::path{"mmcls"} / "ncnn"); REQUIRE(!model_list.empty()); diff --git a/tests/test_csrc/net/test_openvino_net.cpp b/tests/test_csrc/net/test_openvino_net.cpp index df5c1be03..e965c59be 100644 --- a/tests/test_csrc/net/test_openvino_net.cpp +++ b/tests/test_csrc/net/test_openvino_net.cpp @@ -10,7 +10,7 @@ using namespace mmdeploy; -TEST_CASE("test openvino net", "[openvino_net]") { +TEST_CASE("test openvino net", "[.openvino_net][resource]") { auto& gResource = MMDeployTestResources::Get(); auto model_list = gResource.LocateModelResources(fs::path{"mmcls"} / "openvino"); REQUIRE(!model_list.empty()); diff --git a/tests/test_csrc/net/test_ort_net.cpp b/tests/test_csrc/net/test_ort_net.cpp index 801ecf9e5..f2f863503 100644 --- a/tests/test_csrc/net/test_ort_net.cpp +++ b/tests/test_csrc/net/test_ort_net.cpp @@ -10,7 +10,7 @@ using namespace mmdeploy; -TEST_CASE("test ort net", "[ort_net]") { +TEST_CASE("test ort net", "[.ort_net][resource]") { auto& gResource = MMDeployTestResources::Get(); auto model_list = gResource.LocateModelResources(fs::path{"mmcls"} / "ort"); REQUIRE(!model_list.empty()); diff --git a/tests/test_csrc/net/test_ppl_net.cpp b/tests/test_csrc/net/test_ppl_net.cpp index aa855bf00..6f2b45501 100644 --- a/tests/test_csrc/net/test_ppl_net.cpp +++ b/tests/test_csrc/net/test_ppl_net.cpp @@ -10,7 +10,7 @@ using namespace mmdeploy; -TEST_CASE("test pplnn net", "[ppl_net]") { +TEST_CASE("test pplnn net", "[.ppl_net][resource]") { auto& gResource = MMDeployTestResources::Get(); auto model_list = gResource.LocateModelResources(fs::path{"mmcls"} / "pplnn"); REQUIRE(!model_list.empty()); diff --git a/tests/test_csrc/net/test_trt_net.cpp b/tests/test_csrc/net/test_trt_net.cpp index b6ab080ee..e567c63fc 100644 --- a/tests/test_csrc/net/test_trt_net.cpp +++ b/tests/test_csrc/net/test_trt_net.cpp @@ -10,7 +10,7 @@ using namespace mmdeploy; -TEST_CASE("test trt net", "[trt_net]") { +TEST_CASE("test trt net", "[.trt_net][resource]") { auto& gResource = MMDeployTestResources::Get(); auto model_list = gResource.LocateModelResources(fs::path{"mmcls"} / "trt"); REQUIRE(!model_list.empty()); diff --git a/tests/test_csrc/preprocess/test_compose.cpp b/tests/test_csrc/preprocess/test_compose.cpp index f2a7994e1..ab3cb5f4a 100644 --- a/tests/test_csrc/preprocess/test_compose.cpp +++ b/tests/test_csrc/preprocess/test_compose.cpp @@ -11,6 +11,7 @@ #include "mmdeploy/core/mat.h" #include "mmdeploy/core/registry.h" #include "mmdeploy/core/utils/formatter.h" +#include "opencv2/imgcodecs/imgcodecs.hpp" #include "opencv_utils.h" #include 
"test_resource.h" #include "test_utils.h" diff --git a/tests/test_csrc/preprocess/test_crop.cpp b/tests/test_csrc/preprocess/test_crop.cpp index 4f23e68a9..00609af39 100644 --- a/tests/test_csrc/preprocess/test_crop.cpp +++ b/tests/test_csrc/preprocess/test_crop.cpp @@ -5,6 +5,7 @@ #include "mmdeploy/core/mat.h" #include "mmdeploy/core/utils/device_utils.h" #include "mmdeploy/preprocess/transform/transform.h" +#include "opencv2/imgcodecs/imgcodecs.hpp" #include "opencv_utils.h" #include "test_resource.h" #include "test_utils.h" diff --git a/tests/test_csrc/preprocess/test_default_format_bundle.cpp b/tests/test_csrc/preprocess/test_default_format_bundle.cpp index 489e8a249..a1fea516f 100644 --- a/tests/test_csrc/preprocess/test_default_format_bundle.cpp +++ b/tests/test_csrc/preprocess/test_default_format_bundle.cpp @@ -3,6 +3,7 @@ #include "mmdeploy/core/tensor.h" #include "mmdeploy/core/utils/device_utils.h" #include "mmdeploy/preprocess/transform/transform.h" +#include "opencv2/imgcodecs/imgcodecs.hpp" #include "opencv_utils.h" #include "test_resource.h" #include "test_utils.h" @@ -37,17 +38,18 @@ void TestDefaultFormatBundle(const Value& cfg, const cv::Mat& mat) { // mat's shape is {h, w, c}, while res_tensor's shape is {1, c, h, w} // compare each channel between `res_tensor` and `mat` - auto step = shape[2] * shape[3] * mat.elemSize1(); + // note `data_type` of `res_tensor` is `float` + auto step = shape[2] * shape[3] * sizeof(float); auto data = host_tensor.value().data(); for (auto i = 0; i < mat.channels(); ++i) { - cv::Mat _mat{mat.rows, mat.cols, CV_MAKETYPE(mat.depth(), 1), data}; + cv::Mat _mat{mat.rows, mat.cols, CV_32FC1, data}; REQUIRE(::mmdeploy::cpu::Compare(channel_mats[i], _mat)); data += step; } } } -TEST_CASE("transform DefaultFormatBundle", "[img2tensor]") { +TEST_CASE("transform DefaultFormatBundle", "[bundle]") { auto gResource = MMDeployTestResources::Get(); auto img_list = gResource.LocateImageResources("transform"); REQUIRE(!img_list.empty()); @@ -55,13 +57,10 @@ TEST_CASE("transform DefaultFormatBundle", "[img2tensor]") { auto img_path = img_list.front(); cv::Mat bgr_mat = cv::imread(img_path, cv::IMREAD_COLOR); cv::Mat gray_mat = cv::imread(img_path, cv::IMREAD_GRAYSCALE); - cv::Mat bgr_float_mat; - cv::Mat gray_float_mat; - bgr_mat.convertTo(bgr_float_mat, CV_32FC3); - gray_mat.convertTo(gray_float_mat, CV_32FC1); + Value cfg{{"type", "DefaultFormatBundle"}, {"keys", {"img"}}}; - vector mats{bgr_mat, gray_mat, bgr_float_mat, gray_float_mat}; + vector mats{bgr_mat, gray_mat}; for (auto& mat : mats) { TestDefaultFormatBundle(cfg, mat); } diff --git a/tests/test_csrc/preprocess/test_image2tensor.cpp b/tests/test_csrc/preprocess/test_image2tensor.cpp index 199c2bc66..024b0e2ad 100644 --- a/tests/test_csrc/preprocess/test_image2tensor.cpp +++ b/tests/test_csrc/preprocess/test_image2tensor.cpp @@ -3,6 +3,7 @@ #include "mmdeploy/core/tensor.h" #include "mmdeploy/core/utils/device_utils.h" #include "mmdeploy/preprocess/transform/transform.h" +#include "opencv2/imgcodecs/imgcodecs.hpp" #include "opencv_utils.h" #include "test_resource.h" #include "test_utils.h" diff --git a/tests/test_csrc/preprocess/test_load.cpp b/tests/test_csrc/preprocess/test_load.cpp index fc825155c..5e99f7a6a 100644 --- a/tests/test_csrc/preprocess/test_load.cpp +++ b/tests/test_csrc/preprocess/test_load.cpp @@ -5,6 +5,8 @@ #include "mmdeploy/core/tensor.h" #include "mmdeploy/core/utils/device_utils.h" #include "mmdeploy/preprocess/transform/transform.h" +#include 
"opencv2/imgcodecs/imgcodecs.hpp" +#include "opencv2/imgproc/imgproc.hpp" #include "opencv_utils.h" #include "test_resource.h" #include "test_utils.h" @@ -46,7 +48,7 @@ void TestLoad(const Value& cfg, const cv::Mat& mat, PixelFormat src_format, } } -TEST_CASE("prepare image, that is LoadImageFromFile transform", "[load]") { +TEST_CASE("prepare image, that is LoadImageFromFile transform", "[.load]") { auto gResource = MMDeployTestResources::Get(); auto img_list = gResource.LocateImageResources("transform"); REQUIRE(!img_list.empty()); diff --git a/tests/test_csrc/preprocess/test_normalize.cpp b/tests/test_csrc/preprocess/test_normalize.cpp index 2b976ed41..7f4270a68 100644 --- a/tests/test_csrc/preprocess/test_normalize.cpp +++ b/tests/test_csrc/preprocess/test_normalize.cpp @@ -4,6 +4,8 @@ #include "mmdeploy/core/mat.h" #include "mmdeploy/core/utils/device_utils.h" #include "mmdeploy/preprocess/transform/transform.h" +#include "opencv2/imgcodecs/imgcodecs.hpp" +#include "opencv2/imgproc/imgproc.hpp" #include "opencv_utils.h" #include "test_resource.h" #include "test_utils.h" diff --git a/tests/test_csrc/preprocess/test_pad.cpp b/tests/test_csrc/preprocess/test_pad.cpp index 77828b625..cc6f425ec 100644 --- a/tests/test_csrc/preprocess/test_pad.cpp +++ b/tests/test_csrc/preprocess/test_pad.cpp @@ -4,6 +4,8 @@ #include "mmdeploy/core/mat.h" #include "mmdeploy/core/utils/device_utils.h" #include "mmdeploy/preprocess/transform/transform.h" +#include "opencv2/imgcodecs/imgcodecs.hpp" +#include "opencv2/imgproc/imgproc.hpp" #include "opencv_utils.h" #include "test_resource.h" #include "test_utils.h" diff --git a/tests/test_csrc/preprocess/test_resize.cpp b/tests/test_csrc/preprocess/test_resize.cpp index 8b7bb6bf8..da0500912 100644 --- a/tests/test_csrc/preprocess/test_resize.cpp +++ b/tests/test_csrc/preprocess/test_resize.cpp @@ -4,6 +4,8 @@ #include "mmdeploy/core/mat.h" #include "mmdeploy/core/utils/device_utils.h" #include "mmdeploy/preprocess/transform/transform.h" +#include "opencv2/imgcodecs/imgcodecs.hpp" +#include "opencv2/imgproc/imgproc.hpp" #include "opencv_utils.h" #include "test_resource.h" #include "test_utils.h" @@ -114,7 +116,9 @@ void TestResizeWithScaleFactor(const Value& cfg, const std::string& device_name, auto transform = CreateTransform(cfg, device, stream); REQUIRE(transform != nullptr); - auto [dst_height, dst_width] = make_tuple(mat.rows * scale_factor, mat.cols * scale_factor); + // keep round policy with resize.cpp + const int dst_height = static_cast(mat.rows * scale_factor + 0.5); + const int dst_width = static_cast(mat.cols * scale_factor + 0.5); auto interpolation = cfg["interpolation"].get(); auto ref_mat = mmdeploy::cpu::Resize(mat, dst_height, dst_width, interpolation); diff --git a/tests/test_csrc/test_resource.h b/tests/test_csrc/test_resource.h index f6a3fb826..8d88cce3e 100644 --- a/tests/test_csrc/test_resource.h +++ b/tests/test_csrc/test_resource.h @@ -129,7 +129,11 @@ class MMDeployTestResources { } // Didn't find 'mmdeploy_test_resources' in current directory. // Move to its parent directory and keep looking for it - return LocateResourceRootPath(cur_path.parent_path(), max_depth - 1); + if (cur_path.has_parent_path()) { + return LocateResourceRootPath(cur_path.parent_path(), max_depth - 1); + } else { + return ""; + } } private: diff --git a/tests/test_mmcv/test_mmcv_cnn.py b/tests/test_mmcv/test_mmcv_cnn.py new file mode 100644 index 000000000..7f7a37b02 --- /dev/null +++ b/tests/test_mmcv/test_mmcv_cnn.py @@ -0,0 +1,32 @@ +# Copyright (c) OpenMMLab. 
All rights reserved. +import mmcv +import torch + +from mmdeploy.utils import Backend +from mmdeploy.utils.test import check_backend, get_rewrite_outputs + + +def test_multiheadattention_ncnn(): + check_backend(Backend.NCNN) + from mmcv.cnn.bricks.transformer import MultiheadAttention + embed_dims, num_heads = 12, 2 + model = MultiheadAttention(embed_dims, num_heads, batch_first=True) + query = torch.rand(1, 3, embed_dims) + + deploy_cfg = mmcv.Config( + dict( + onnx_config=dict(input_shape=None), + backend_config=dict(type=Backend.NCNN.value), + )) + model_outputs = model(query) + rewrite_inputs = dict(query=query) + rewrite_outputs, is_backend_output = get_rewrite_outputs( + wrapped_model=model, + model_inputs=rewrite_inputs, + deploy_cfg=deploy_cfg, + run_with_backend=True) + if is_backend_output is None: + assert rewrite_outputs is not None + else: + assert torch.allclose( + model_outputs, rewrite_outputs[0], rtol=1e-03, atol=1e-05) diff --git a/tests/test_ops/test_ops.py b/tests/test_ops/test_ops.py index 273500f08..e6121880d 100644 --- a/tests/test_ops/test_ops.py +++ b/tests/test_ops/test_ops.py @@ -931,3 +931,168 @@ def test_roi_align_rotated(backend, input_names=['input', 'rois'], output_names=['roi_feat'], save_dir=save_dir) + + +@pytest.mark.parametrize('backend', [TEST_TENSORRT]) +@pytest.mark.parametrize( + 'out_size, clockwise, sampling_ratio, roi_scale_factor,' + ' finest_scale, featmap_strides, aligned', + [(tuple([2, 2]), False, 2, 1.0, 2, list([1.0]), 1)]) +def test_multi_level_rotated_roi_align(backend, + out_size, + clockwise, + sampling_ratio, + roi_scale_factor, + finest_scale, + featmap_strides, + aligned, + input_list=None, + save_dir=None): + backend.check_env() + + if input_list is None: + import numpy as np + input = [ + torch.tensor([[[[1., 2., 5., 6.], [3., 4., 7., 8.], + [9., 10., 13., 14.], [11., 12., 15., 16.]]]]) + ] + rois = torch.tensor([[0., 1.5, 1.5, 3., 3., np.pi / 2]]) + expected_result = torch.tensor([[[[7.5625, 1.9375], [10.375, 4.75]]]]) + else: + input = input_list[0] + rois = input_list[1] + expected_result = input_list[2] + input_name = [('input_' + str(i)) for i in range(len(featmap_strides))] + input_name.insert(0, 'rois') + + inputs = [ + onnx.helper.make_tensor_value_info( + input_name[i + 1], onnx.TensorProto.FLOAT, shape=input[i].shape) + for i in range(len(input_name) - 1) + ] + inputs.append( + onnx.helper.make_tensor_value_info( + 'rois', onnx.TensorProto.FLOAT, shape=rois.shape)) + outputs = [ + onnx.helper.make_tensor_value_info( + 'bbox_feats', onnx.TensorProto.FLOAT, shape=expected_result.shape) + ] + node = onnx.helper.make_node( + 'MMCVMultiLevelRotatedRoiAlign', + input_name, ['bbox_feats'], + 'MMCVMultiLevelRotatedRoiAlign_0', + None, + 'mmdeploy', + featmap_strides=featmap_strides, + finest_scale=finest_scale, + output_height=out_size[0], + output_width=out_size[1], + clockwise=clockwise, + roi_scale_factor=roi_scale_factor, + sampling_ratio=sampling_ratio, + aligned=aligned) + graph = onnx.helper.make_graph([node], 'torch-jit-export', inputs, outputs) + onnx_model = onnx.helper.make_model( + graph, producer_name='pytorch', producer_version='1.8') + onnx_model.opset_import[0].version = 11 + onnx_model.opset_import.append( + onnx.onnx_ml_pb2.OperatorSetIdProto(domain='mmdeploy', version=1)) + + backend.run_and_validate( + onnx_model, [rois, *input], + 'multi_level_rotated_roi_align', + input_names=input_name, + output_names=['bbox_feats'], + expected_result=expected_result, + save_dir=save_dir) + + 
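+# The test below exports the anchor-grid computation as the custom TensorRT op +# 'GridPriorsTRT': GridPriorsTestOps provides a fake forward for tracing and a +# symbolic() that emits mmdeploy::GridPriorsTRT, and the engine output is +# checked against the precomputed anchors.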
+@pytest.mark.parametrize('backend', [TEST_TENSORRT]) +@pytest.mark.parametrize('strides', [(4, 4)]) +def test_trt_grid_priors(backend, strides, input_list=None, save_dir=None): + backend.check_env() + + if input_list is None: + input = torch.rand(1, 3, 2, 2) + base_anchors = torch.tensor([[-22.6274, -11.3137, 22.6274, 11.3137], + [-16.0000, -16.0000, 16.0000, 16.0000], + [-11.3137, -22.6274, 11.3137, 22.6274]]) + + expected_result = torch.tensor([[-22.6274, -11.3137, 22.6274, 11.3137], + [-16.0000, -16.0000, 16.0000, 16.0000], + [-11.3137, -22.6274, 11.3137, 22.6274], + [-18.6274, -11.3137, 26.6274, 11.3137], + [-12.0000, -16.0000, 20.0000, 16.0000], + [-7.3137, -22.6274, 15.3137, 22.6274], + [-22.6274, -7.3137, 22.6274, 15.3137], + [-16.0000, -12.0000, 16.0000, 20.0000], + [-11.3137, -18.6274, 11.3137, 26.6274], + [-18.6274, -7.3137, 26.6274, 15.3137], + [-12.0000, -12.0000, 20.0000, 20.0000], + [-7.3137, -18.6274, 15.3137, 26.6274]]) + else: + input = input_list[0] + base_anchors = input_list[1] + expected_result = input_list[2] + input_name = ['input'] + output_name = ['output'] + + class GridPriorsTestOps(torch.autograd.Function): + + @staticmethod + def forward(ctx, base_anchor, feat_h, feat_w, stride_h: int, + stride_w: int): + a = base_anchor.shape[0] + return base_anchor.new_empty(feat_h * feat_w * a, 4) + + @staticmethod + def symbolic(g, base_anchor, feat_h, feat_w, stride_h: int, + stride_w: int): + from torch.onnx import symbolic_helper + feat_h = symbolic_helper._unsqueeze_helper(g, feat_h, [0]) + feat_w = symbolic_helper._unsqueeze_helper(g, feat_w, [0]) + zero_h = g.op( + 'ConstantOfShape', + feat_h, + value_t=torch.tensor([0], dtype=torch.long), + ) + zero_w = g.op( + 'ConstantOfShape', + feat_w, + value_t=torch.tensor([0], dtype=torch.long), + ) + return g.op( + 'mmdeploy::GridPriorsTRT', + base_anchor, + zero_h, + zero_w, + stride_h_i=stride_h, + stride_w_i=stride_w) + + class GridPriorsTestModel(torch.nn.Module): + + def __init__(self, strides, base_anchors=base_anchors) -> None: + super().__init__() + self.strides = strides + self.base_anchors = base_anchors + + def forward(self, x): + base_anchors = self.base_anchors + h, w = x.shape[2:] + strides = self.strides + return GridPriorsTestOps.apply(base_anchors, h, w, strides[0], + strides[1]) + + model = GridPriorsTestModel(strides=strides) + + backend.run_and_validate( + model, [input], + 'trt_grid_priors', + input_names=input_name, + output_names=output_name, + expected_result=expected_result, + dynamic_axes=dict(input={ + 2: 'h', + 3: 'w' + }), + save_dir=save_dir) diff --git a/tests/test_ops/utils.py b/tests/test_ops/utils.py index 90f3f4e5b..52e563a37 100644 --- a/tests/test_ops/utils.py +++ b/tests/test_ops/utils.py @@ -91,11 +91,13 @@ class TestTensorRTExporter: expected_result=None, save_dir=None): if save_dir is None: - onnx_file_path = tempfile.NamedTemporaryFile().name - trt_file_path = tempfile.NamedTemporaryFile().name + onnx_file_path = tempfile.NamedTemporaryFile(suffix='.onnx').name + trt_file_path = tempfile.NamedTemporaryFile(suffix='.engine').name else: + os.makedirs(save_dir, exist_ok=True) onnx_file_path = os.path.join(save_dir, model_name + '.onnx') - trt_file_path = os.path.join(save_dir, model_name + '.trt') + trt_file_path = os.path.join(save_dir, model_name + '.engine') + input_list = [data.cuda() for data in input_list] if isinstance(model, onnx.onnx_ml_pb2.ModelProto): onnx.save(model, onnx_file_path) else: @@ -131,9 +133,10 @@ class TestTensorRTExporter: ]))) onnx_model = 
@@ -131,9 +133,10 @@ class TestTensorRTExporter:
                 ])))
         onnx_model = onnx.load(onnx_file_path)
+        work_dir, filename = os.path.split(trt_file_path)
         trt_apis.onnx2tensorrt(
-            os.path.dirname(trt_file_path),
-            trt_file_path,
+            work_dir,
+            filename,
             0,
             deploy_cfg=deploy_cfg,
             onnx_model=onnx_model)
@@ -150,7 +153,6 @@ class TestTensorRTExporter:
             from mmdeploy.backend.tensorrt import TRTWrapper
             trt_model = TRTWrapper(trt_file_path, output_names)
-            input_list = [data.cuda() for data in input_list]
             trt_outputs = trt_model(dict(zip(input_names, input_list)))
             trt_outputs = [trt_outputs[i].float().cpu() for i in output_names]
             assert_allclose(model_outputs, trt_outputs, tolerate_small_mismatch)
diff --git a/tests/test_pytorch/test_pytorch_functions.py b/tests/test_pytorch/test_pytorch_functions.py
index 89d9db063..65bda0ecf 100644
--- a/tests/test_pytorch/test_pytorch_functions.py
+++ b/tests/test_pytorch/test_pytorch_functions.py
@@ -1,9 +1,12 @@
 # Copyright (c) OpenMMLab. All rights reserved.
+import os.path as osp
+
 import mmcv
 import numpy as np
 import pytest
 import torch
-import torch.nn.functional as func
+import torch.nn.functional as F
+from packaging.version import parse
 
 from mmdeploy.utils import Backend
 from mmdeploy.utils.test import (WrapFunction, backend_checker,
@@ -61,10 +64,10 @@ def test_group_norm_ncnn():
     input = torch.rand([1, 2, 2, 2])
     weight = torch.rand([2])
     bias = torch.rand([2])
-    model_output = func.group_norm(input, 1, weight, bias, 1e-05)
+    model_output = F.group_norm(input, 1, weight, bias, 1e-05)
 
     def group_norm_caller(input):
-        return func.group_norm(input, 1, weight, bias)
+        return F.group_norm(input, 1, weight, bias)
 
     wrapped_func = WrapFunction(group_norm_caller)
     rewrite_output, _ = get_rewrite_outputs(
@@ -101,10 +104,10 @@ def test_chunk_ncnn():
 @backend_checker(Backend.NCNN)
 def test_interpolate_static():
     input = torch.rand([1, 2, 2, 2])
-    model_output = func.interpolate(input, scale_factor=[2, 2])
+    model_output = F.interpolate(input, scale_factor=[2, 2])
 
     def interpolate_caller(*arg, **kwargs):
-        return func.interpolate(*arg, **kwargs)
+        return F.interpolate(*arg, **kwargs)
 
     wrapped_func = WrapFunction(interpolate_caller, size=[4, 4])
     rewrite_output, _ = get_rewrite_outputs(
@@ -121,10 +124,10 @@ def test_linear_ncnn():
     input = torch.rand([1, 2, 2])
     weight = torch.rand([2, 2])
     bias = torch.rand([2])
-    model_output = func.linear(input, weight=weight, bias=bias)
+    model_output = F.linear(input, weight=weight, bias=bias)
 
     def linear_caller(*arg, **kwargs):
-        return func.linear(*arg, **kwargs)
+        return F.linear(*arg, **kwargs)
 
     wrapped_func = WrapFunction(linear_caller, weight=weight, bias=bias)
     rewrite_output, _ = get_rewrite_outputs(
@@ -236,25 +239,105 @@ class TestTopk:
 
 @backend_checker(Backend.TENSORRT)
 @pytest.mark.parametrize('shape', [[2, 2], [4, 2], [2, 4], [2, 4, 2]])
-def test_triu_trt(shape):
+@pytest.mark.parametrize('diagonal', [0, 1, -1])
+def test_triu_trt(shape, diagonal):
     input = torch.rand(shape)
+    model_output = torch.triu(input=input, diagonal=diagonal)
 
     def triu_caller(*arg, **kwargs):
         return torch.triu(*arg, **kwargs)
 
-    wrapped_func = WrapFunction(triu_caller, diagonal=1)
-    import tempfile
+    wrapped_func = WrapFunction(triu_caller, diagonal=diagonal)
+    rewrite_outputs, is_backend_output = get_rewrite_outputs(
+        wrapped_func,
+        model_inputs={'input': input},
+        deploy_cfg=get_trt_config(['output'], shape=shape),
+        run_with_backend=True)
+    if is_backend_output:
+        rewrite_outputs = rewrite_outputs[0].detach().cpu()
+        assert np.allclose(
+            model_output, rewrite_outputs, rtol=1e-03, atol=1e-05)
+    else:
+        assert rewrite_outputs is not None
+
+
+@backend_checker(Backend.NCNN)
+@pytest.mark.parametrize(
+    'input',
+    [torch.rand(1, 16, 16), torch.rand(1, 3, 16, 16)])
+@pytest.mark.parametrize('dim', [1, 2])
+def test_normalize_ncnn(input, dim):
+    import mmdeploy.apis.ncnn as ncnn_apis
+    from mmdeploy.utils.test import get_onnx_model
+
+    def norm_func(input, dim):
+        return F.normalize(input, p=2, dim=dim)
+
+    wrapped_func = WrapFunction(norm_func, dim=dim)
+    model_inputs = {'input': input}
+    ir_file_path = get_onnx_model(wrapped_func, model_inputs, deploy_cfg_ncnn)
+    assert osp.exists(ir_file_path)
+    ncnn_files_prefix = osp.splitext(ir_file_path)[0]
+    ncnn_apis.from_onnx(ir_file_path, ncnn_files_prefix)
+    param_path, bin_path = ncnn_apis.get_output_model_file(ir_file_path)
+    assert osp.exists(param_path)
+    assert osp.exists(bin_path)
+
+
+@backend_checker(Backend.ONNXRUNTIME)
+@pytest.mark.parametrize(
+    'input',
+    [torch.rand(1, 16, 16), torch.rand(1, 3, 16, 16)])
+def test_masked_fill_onnxruntime(input):
+    mask = input > 0
+    value = float('-inf')
+
+    def masked_fill_caller(*arg, **kwargs):
+        return torch.masked_fill(*arg, **kwargs)
+
+    deploy_cfg_ort = mmcv.Config(
+        dict(
+            onnx_config=dict(input_shape=None),
+            backend_config=dict(type='onnxruntime'),
+            codebase_config=dict(type='mmdet', task='ObjectDetection')))
+
+    wrapped_func = WrapFunction(masked_fill_caller, mask=mask, value=value)
+    rewrite_output, _ = get_rewrite_outputs(
+        wrapped_func,
+        model_inputs={'input': input},
+        deploy_cfg=deploy_cfg_ort,
+        run_with_backend=True)
+    assert rewrite_output is not None
+
+
+@backend_checker(Backend.ONNXRUNTIME)
+@pytest.mark.skipif(
+    parse(torch.__version__) < parse('1.9.0'), reason='requires torch>=1.9.0')
+@pytest.mark.parametrize('x', [torch.rand(1, 3, 16, 16)])
+@pytest.mark.parametrize('y', [torch.rand(1, 3, 4, 4)])
+def test_tensor_setitem(x, y):
     import onnx
-    from mmdeploy.core import RewriterContext
-    onnx_file = tempfile.NamedTemporaryFile(suffix='onnx').name
-    with RewriterContext(
-            cfg=get_trt_config('output', shape),
-            backend=Backend.TENSORRT.value,
-            opset=11), torch.no_grad():
-        torch.onnx.export(wrapped_func, input, onnx_file, opset_version=11)
-    onnx_model = onnx.load(onnx_file)
+    from mmdeploy.utils.test import get_onnx_model
+
+    def setitem_slice(x, y):
+        H, W = y.shape[2:]
+        x[:, :, 2:H + 2, 2:W + 2] = y
+        return x
+
+    wrapped_func = WrapFunction(setitem_slice)
+    model_inputs = {'x': x, 'y': y}
+
+    deploy_cfg = mmcv.Config(
+        dict(
+            onnx_config=dict(input_shape=None),
+            backend_config=dict(type='onnxruntime'),
+            codebase_config=dict(type='mmdet', task='ObjectDetection')))
+    ir_file_path = get_onnx_model(wrapped_func, model_inputs, deploy_cfg)
+
+    onnx_model = onnx.load(ir_file_path)
     nodes = onnx_model.graph.node
-    assert nodes is not None
+    for node in nodes:
+        assert node.op_type != 'ScatterND'
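For context on `test_tensor_setitem`: without the mmdeploy rewriter, recent torch versions typically lower slice assignment to a `ScatterND` node, which several backends cannot execute; the rewrite under test is what removes it. A standalone sketch (plain `torch.onnx.export`, no rewriter; export details vary across torch versions) to inspect what the stock exporter emits:

```python
# Sketch: inspect the ops vanilla torch.onnx.export produces for slice
# assignment (no mmdeploy rewriter involved).
import io

import onnx
import torch


class SetItemSlice(torch.nn.Module):

    def forward(self, x, y):
        h, w = y.shape[2:]
        x[:, :, 2:h + 2, 2:w + 2] = y
        return x


buf = io.BytesIO()
torch.onnx.export(
    SetItemSlice(), (torch.rand(1, 3, 16, 16), torch.rand(1, 3, 4, 4)),
    buf,
    opset_version=11)
graph = onnx.load_model_from_string(buf.getvalue()).graph
# on torch >= 1.9 this set typically contains 'ScatterND'
print(sorted({node.op_type for node in graph.node}))
```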
diff --git a/tests/test_pytorch/test_pytorch_ops.py b/tests/test_pytorch/test_pytorch_ops.py
index 841e3ea7e..e3e49f345 100644
--- a/tests/test_pytorch/test_pytorch_ops.py
+++ b/tests/test_pytorch/test_pytorch_ops.py
@@ -132,15 +132,9 @@ class TestLinear:
 
     def check(self, nodes):
         print(nodes)
-
-        from packaging.version import parse as version_parse
-        version = version_parse(torch.__version__)
-        target = 'Gemm'
-        if version.major <= 1 and version.minor <= 8:
-            target = 'MatMul'
         exist = False
         for node in nodes:
-            if node.op_type == target:
+            if node.op_type in ['Gemm', 'MatMul']:
                 exist = True
                 break
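The relaxed check above reflects that `nn.Linear` has no single fixed lowering: depending on the torch version and the input rank, the exporter emits either `Gemm` or `MatMul` (usually followed by `Add`). A quick way to see this with nothing but stock torch and onnx:

```python
# Sketch: print the op types nn.Linear exports to; a 3-D input typically
# yields MatMul(+Add), a 2-D input typically yields Gemm.
import io

import onnx
import torch

buf = io.BytesIO()
torch.onnx.export(
    torch.nn.Linear(2, 2), torch.rand(1, 2, 2), buf, opset_version=11)
graph = onnx.load_model_from_string(buf.getvalue()).graph
print([node.op_type for node in graph.node])
```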
diff --git a/tests/test_utils/test_timer.py b/tests/test_utils/test_timer.py
index c9997e28a..5258617f8 100644
--- a/tests/test_utils/test_timer.py
+++ b/tests/test_utils/test_timer.py
@@ -8,7 +8,7 @@ def test_count_time():
 
     class test:
 
-        @TimeCounter.count_time()
+        @TimeCounter.count_time('fun1')
         def fun1(self):
             time.sleep(0.01)
 
@@ -19,3 +19,5 @@ def test_count_time():
 
     for i in range(50):
         t.fun1()
+
+    TimeCounter.print_stats('fun1')
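`TimeCounter.count_time` now takes an explicit counter name, and `TimeCounter.print_stats` reports the statistics recorded under that name (the same API that `tools/profile.py` below relies on). A minimal sketch of the pattern; anything beyond the calls visible in this diff is an assumption:

```python
import time

from mmdeploy.utils.timer import TimeCounter


class Demo:

    @TimeCounter.count_time('fun1')  # stats are keyed by the explicit name
    def fun1(self):
        time.sleep(0.01)


d = Demo()
for _ in range(50):
    d.fun1()
TimeCounter.print_stats('fun1')  # summary for the named counter
```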
diff --git a/tools/check_env.py b/tools/check_env.py
index 3718db1bd..e25806fa5 100644
--- a/tools/check_env.py
+++ b/tools/check_env.py
@@ -25,15 +25,15 @@ def check_backend():
     import mmdeploy.apis.onnxruntime as ort_apis
     logger = get_root_logger()
     logger.info(f'onnxruntime: {ort_version}\tops_is_avaliable : '
-                f'{ort_apis.is_available()}')
+                f'{ort_apis.is_custom_ops_available()}')
 
     import mmdeploy.apis.tensorrt as trt_apis
     logger.info(f'tensorrt: {trt_version}\tops_is_avaliable : '
-                f'{trt_apis.is_available()}')
+                f'{trt_apis.is_custom_ops_available()}')
 
     import mmdeploy.apis.ncnn as ncnn_apis
-    logger.info(
-        f'ncnn: {ncnn_version}\tops_is_avaliable : {ncnn_apis.is_available()}')
+    logger.info(f'ncnn: {ncnn_version}\tops_is_avaliable : '
+                f'{ncnn_apis.is_custom_ops_available()}')
 
     import mmdeploy.apis.pplnn as pplnn_apis
     logger.info(f'pplnn_is_avaliable: {pplnn_apis.is_available()}')
@@ -41,6 +41,9 @@ def check_backend():
     import mmdeploy.apis.openvino as openvino_apis
     logger.info(f'openvino_is_avaliable: {openvino_apis.is_available()}')
 
+    import mmdeploy.apis.snpe as snpe_apis
+    logger.info(f'snpe_is_available: {snpe_apis.is_available()}')
+
 
 def check_codebase():
     codebase_versions = get_codebase_version()
diff --git a/tools/deploy.py b/tools/deploy.py
index a1eda321e..0452cec23 100644
--- a/tools/deploy.py
+++ b/tools/deploy.py
@@ -54,6 +54,10 @@ def parse_args():
         help='Image directory for quantize model.')
     parser.add_argument(
         '--quant', action='store_true', help='Quantize model to low bit.')
+    parser.add_argument(
+        '--uri',
+        default='192.168.1.1:60000',
+        help='Remote ipv4:port or ipv6:port for inference on edge device.')
     args = parser.parse_args()
     return args
 
@@ -221,8 +225,8 @@ def main():
 
         if not is_available_ncnn():
             logger.error('ncnn support is not available, please make sure \
-                1) `onnx2ncnn` existed in `PATH` 2) python import ncnn success'
-                         )
+                1) `mmdeploy_onnx2ncnn` exists in `PATH` \
+                2) python import ncnn success')
             exit(1)
 
         import mmdeploy.apis.ncnn as ncnn_api
@@ -266,6 +270,30 @@ def main():
             else:
                 backend_files += [model_param_path, model_bin_path]
 
+    elif backend == Backend.SNPE:
+        from mmdeploy.apis.snpe import is_available
+
+        if not is_available():
+            logger.error('snpe support is not available, please check \
+                1) `snpe-onnx-to-dlc` exists in `PATH` 2) snpe only supports \
+                ubuntu18.04')
+            exit(1)
+
+        import mmdeploy.apis.snpe as snpe_api
+        from mmdeploy.apis.snpe import get_env_key, get_output_model_file
+
+        if get_env_key() not in os.environ:
+            os.environ[get_env_key()] = args.uri
+
+        PIPELINE_MANAGER.set_log_level(log_level, [snpe_api.from_onnx])
+
+        backend_files = []
+        for onnx_path in ir_files:
+            dlc_path = get_output_model_file(onnx_path, args.work_dir)
+            onnx_name = osp.splitext(osp.split(onnx_path)[1])[0]
+            snpe_api.from_onnx(onnx_path, osp.join(args.work_dir, onnx_name))
+            backend_files += [dlc_path]
+
     elif backend == Backend.OPENVINO:
         from mmdeploy.apis.openvino import \
             is_available as is_available_openvino
@@ -331,17 +359,19 @@ def main():
     # for headless installation.
     if not headless:
-        # visualize model of the backend
+        extra = dict(
+            backend=backend,
+            output_file=osp.join(args.work_dir, f'output_{backend.value}.jpg'),
+            show_result=args.show)
+        if backend == Backend.SNPE:
+            extra['uri'] = args.uri
+
         create_process(
             f'visualize {backend.value} model',
             target=visualize_model,
             args=(model_cfg_path, deploy_cfg_path, backend_files,
                   args.test_img, args.device),
-            kwargs=dict(
-                backend=backend,
-                output_file=osp.join(args.work_dir,
-                                     f'output_{backend.value}.jpg'),
-                show_result=args.show),
+            kwargs=extra,
             ret_value=ret_value)
 
     # visualize pytorch model
diff --git a/tools/onnx2dlc.py b/tools/onnx2dlc.py
new file mode 100644
index 000000000..526a9c7f7
--- /dev/null
+++ b/tools/onnx2dlc.py
@@ -0,0 +1,37 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import argparse
+import logging
+
+from mmdeploy.apis.snpe import from_onnx
+from mmdeploy.utils import get_root_logger
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(
+        description='Convert ONNX to snpe dlc format.')
+    parser.add_argument('onnx_path', help='ONNX model path')
+    parser.add_argument('output_prefix', help='output snpe dlc model path')
+    parser.add_argument(
+        '--log-level',
+        help='set log level',
+        default='INFO',
+        choices=list(logging._nameToLevel.keys()))
+    args = parser.parse_args()
+
+    return args
+
+
+def main():
+    args = parse_args()
+    logger = get_root_logger(log_level=args.log_level)
+
+    onnx_path = args.onnx_path
+    output_prefix = args.output_prefix
+
+    logger.info(f'onnx2dlc: \n\tonnx_path: {onnx_path} ')
+    from_onnx(onnx_path, output_prefix)
+    logger.info('onnx2dlc success.')
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tools/onnx2ncnn.py b/tools/onnx2ncnn.py
index 9c17bcab3..129e86940 100644
--- a/tools/onnx2ncnn.py
+++ b/tools/onnx2ncnn.py
@@ -27,13 +27,9 @@ def main():
     onnx_path = args.onnx_path
     output_prefix = args.output_prefix
 
-    logger.info(f'onnx2ncnn: \n\tonnx_path: {onnx_path} ')
-    try:
-        from_onnx(onnx_path, output_prefix)
-        logger.info('onnx2ncnn success.')
-    except Exception as e:
-        logger.error(e)
-        logger.error('onnx2ncnn failed.')
+    logger.info(f'mmdeploy_onnx2ncnn: \n\tonnx_path: {onnx_path} ')
+    from_onnx(onnx_path, output_prefix)
+    logger.info('mmdeploy_onnx2ncnn success.')
 
 
 if __name__ == '__main__':
diff --git a/tools/onnx2pplnn.py b/tools/onnx2pplnn.py
index 5a26a4487..4be570858 100644
--- a/tools/onnx2pplnn.py
+++ b/tools/onnx2pplnn.py
@@ -49,15 +49,11 @@ def main():
     if isinstance(input_shapes[0], int):
         input_shapes = [input_shapes]
 
-    logger.info(f'onnx2ppl: \n\tonnx_path: {onnx_path} '
+    logger.info(f'onnx2pplnn: \n\tonnx_path: {onnx_path} '
                 f'\n\toutput_prefix: {output_prefix}'
                 f'\n\topt_shapes: {input_shapes}')
-    try:
-        from_onnx(onnx_path, output_prefix, device, input_shapes)
-        logger.info('onnx2tpplnn success.')
-    except Exception as e:
-        logger.error(e)
-        logger.error('onnx2tpplnn failed.')
+    from_onnx(onnx_path, output_prefix, device, input_shapes)
+    logger.info('onnx2pplnn success.')
 
 
 if __name__ == '__main__':
diff --git a/tools/onnx2tensorrt.py b/tools/onnx2tensorrt.py
index 78a4558cc..b7e7a7e50 100644
--- a/tools/onnx2tensorrt.py
+++ b/tools/onnx2tensorrt.py
@@ -55,22 +55,18 @@ def main():
     logger.info(f'onnx2tensorrt: \n\tonnx_path: {onnx_path} '
                 f'\n\tdeploy_cfg: {deploy_cfg_path}')
-    try:
-        from_onnx(
-            onnx_path,
-            output_prefix,
-            input_shapes=final_params['input_shapes'],
-            log_level=get_trt_log_level(),
-            fp16_mode=final_params.get('fp16_mode', False),
-            int8_mode=final_params.get('int8_mode', False),
-            int8_param=int8_param,
-            max_workspace_size=final_params.get('max_workspace_size', 0),
-            device_id=device_id)
+    from_onnx(
+        onnx_path,
+        output_prefix,
+        input_shapes=final_params['input_shapes'],
+        log_level=get_trt_log_level(),
+        fp16_mode=final_params.get('fp16_mode', False),
+        int8_mode=final_params.get('int8_mode', False),
+        int8_param=int8_param,
+        max_workspace_size=final_params.get('max_workspace_size', 0),
+        device_id=device_id)
 
-        logger.info('onnx2tensorrt success.')
-    except Exception as e:
-        logger.error(e)
-        logger.error('onnx2tensorrt failed.')
+    logger.info('onnx2tensorrt success.')
 
 
 if __name__ == '__main__':
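All the converter tools in this family share the `onnx_path output_prefix` calling convention and now fail loudly instead of swallowing exceptions. A hypothetical invocation with placeholder paths:

```shell
# placeholder paths; each tool writes backend files under the output prefix
python tools/onnx2ncnn.py work_dir/end2end.onnx work_dir/end2end
python tools/onnx2dlc.py work_dir/end2end.onnx work_dir/end2end
```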
diff --git a/tools/package_tools/README.md b/tools/package_tools/README.md
new file mode 100644
index 000000000..285bb51b8
--- /dev/null
+++ b/tools/package_tools/README.md
@@ -0,0 +1,47 @@
+# Precompiled package
+
+This document describes how to build the MMDeploy precompiled packages.
+
+## Prerequisites
+
+- Download and install Miniconda from the [official website](https://docs.conda.io/en/latest/miniconda.html).
+
+- Create conda environments for python 3.6, 3.7, 3.8 and 3.9, respectively.
+
+  ```shell
+  for PYTHON_VERSION in 3.6 3.7 3.8 3.9
+  do
+    conda create --name mmdeploy-$PYTHON_VERSION python=$PYTHON_VERSION -y
+  done
+  ```
+
+- Prepare MMDeploy dependencies
+
+  Please follow the [build-on-Linux guide](../../docs/en/01-how-to-build/linux-x86_64.md) or [build-on-Windows guide](../../docs/en/01-how-to-build/windows.md) to install the dependencies of MMDeploy,
+  including PyTorch, MMCV, OpenCV, ppl.cv, ONNX Runtime and TensorRT.
+
+  Make sure the environment variables `pplcv_DIR`, `ONNXRUNTIME_DIR`, `TENSORRT_DIR`, `CUDNN_DIR` and `CUDA_TOOLKIT_ROOT_DIR` are exported.
+
+## Run the packaging command
+
+- On the Linux platform,
+
+  ```shell
+  conda activate mmdeploy-3.6
+  pip install pyyaml
+  cd the/root/path/of/mmdeploy
+  python tools/package_tools/mmdeploy_builder.py tools/package_tools/configs/linux_x64.yaml .
+  ```
+
+  If everything goes well, you will get the precompiled packages `mmdeploy-{version}-linux-x86_64-cuda11.1-tensorrt8.2.3.0` and `mmdeploy-{version}-linux-x86_64-onnxruntime1.8.1` in the current directory.
+
+- On the Windows platform, open `Anaconda Powershell Prompt` from the start menu and execute:
+
+  ```shell
+  conda activate mmdeploy-3.6
+  pip install pyyaml
+  cd the/root/path/of/MMDeploy
+  python tools/package_tools/mmdeploy_builder.py tools/package_tools/configs/windows_x64.yaml .
+  ```
+
+  When the build procedure finishes successfully, you will find the `mmdeploy-{version}-windows-amd64-cuda11.1-tensorrt8.2.3.0` and `mmdeploy-{version}-windows-amd64-onnxruntime1.8.1` precompiled packages in the current directory.
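As a sketch, the exports the README asks for might look like this on Linux (every path below is a placeholder for a local installation):

```shell
# placeholder paths; adjust to where the dependencies were installed
export pplcv_DIR=/path/to/ppl.cv
export ONNXRUNTIME_DIR=/path/to/onnxruntime
export TENSORRT_DIR=/path/to/TensorRT
export CUDNN_DIR=/path/to/cudnn
export CUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda-11.1
```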
diff --git a/tools/package_tools/configs/linux_x64.yaml b/tools/package_tools/configs/linux_x64.yaml
index a1dc7bc45..f0e3b0731 100644
--- a/tools/package_tools/configs/linux_x64.yaml
+++ b/tools/package_tools/configs/linux_x64.yaml
@@ -2,7 +2,11 @@ global_config:
   cmake_envs:
     CMAKE_CXX_COMPILER: "g++-7"
     MMDEPLOY_BUILD_SDK: "ON"
-    MMDEPLOY_BUILD_SDK_PYTHON_API: "ON"
+    MMDEPLOY_BUILD_SDK_MONOLITHIC: "ON"
+    MMDEPLOY_BUILD_SDK_CXX_API: "ON"
+    MMDEPLOY_BUILD_EXAMPLES: "ON"
+    MMDEPLOY_SHARED_LIBS: "OFF"
+    OpenCV_DIR: "${OpenCV_DIR}"
 
 local_configs:
   - BUILD_NAME: "mmdeploy-{mmdeploy_v}-{system}-{machine}-onnxruntime{ort_v}"
@@ -15,6 +19,6 @@ local_configs:
     MMDEPLOY_TARGET_DEVICES: '"cuda"'
     MMDEPLOY_TARGET_BACKENDS: "trt"
     TENSORRT_DIR: "${TENSORRT_DIR}"
-    CUDA_TOOLKIT_ROOT_DIR: "/usr/local/cuda-11.3"
+    CUDA_TOOLKIT_ROOT_DIR: "${CUDA_TOOLKIT_ROOT_DIR}"
     CUDNN_DIR: "${CUDNN_DIR}"
     pplcv_DIR: ${pplcv_DIR}/cuda-build/install/lib/cmake/ppl
diff --git a/tools/package_tools/configs/windows_x64.yaml b/tools/package_tools/configs/windows_x64.yaml
index 47d0ccf59..c104a8c78 100644
--- a/tools/package_tools/configs/windows_x64.yaml
+++ b/tools/package_tools/configs/windows_x64.yaml
@@ -1,11 +1,13 @@
 global_config:
-  cmake_flags: ['-G "Visual Studio 16 2019" -A x64 -T v142']
+  cmake_flags: ['-A x64 -T v142']
   cmake_envs:
     MMDEPLOY_BUILD_SDK: "ON"
-    MMDEPLOY_BUILD_SDK_PYTHON_API: "ON"
+    MMDEPLOY_BUILD_SDK_MONOLITHIC: "ON"
+    MMDEPLOY_BUILD_SDK_CXX_API: "ON"
+    MMDEPLOY_BUILD_EXAMPLES: "ON"
+    MMDEPLOY_SHARED_LIBS: "OFF"
     MMDEPLOY_CODEBASES: "all"
     OpenCV_DIR: "%OpenCV_DIR%"
-    spdlog_DIR: '"%spdlog_DIR%"'
 
 local_configs:
   - BUILD_NAME: "mmdeploy-{mmdeploy_v}-{system}-{machine}-onnxruntime{ort_v}"
diff --git a/tools/package_tools/mmdeploy_builder.py b/tools/package_tools/mmdeploy_builder.py
index 1e3125ec7..a4a80aa1a 100644
--- a/tools/package_tools/mmdeploy_builder.py
+++ b/tools/package_tools/mmdeploy_builder.py
@@ -15,6 +15,7 @@ from subprocess import CalledProcessError, check_output, run
 from typing import Dict
 
 import yaml
+from packaging import version
 
 logger = logging.getLogger()
 logger.setLevel(logging.INFO)
@@ -94,8 +95,8 @@ def _create_bdist_cmd(cfg, c_ext=False, dist_dir=None):
         bdist_cmd += f' --plat-name {PLATFORM_TAG} '
 
     # python tag
-    py_flag = 'cp' if c_ext else 'py'
-    python_tag = f'{py_flag}{sys.version_info.major}{sys.version_info.minor}'
+    python_tag = f'cp{sys.version_info.major}{sys.version_info.minor}'\
+        if c_ext else 'py3'
     if 'python_tag' in bdist_tags:
         python_tag = bdist_tags['python_tag']
     bdist_cmd += f' --python-tag {python_tag} '
@@ -126,11 +127,19 @@ def clear_mmdeploy(mmdeploy_dir: str):
     # remove onnx2ncnn and ncnn ext
     _remove_in_mmdeploy('mmdeploy/backend/ncnn/onnx2ncnn')
     _remove_in_mmdeploy('mmdeploy/backend/ncnn/onnx2ncnn.exe')
+    _remove_in_mmdeploy('mmdeploy/backend/ncnn/mmdeploy_onnx2ncnn')
+    _remove_in_mmdeploy('mmdeploy/backend/ncnn/mmdeploy_onnx2ncnn.exe')
     ncnn_ext_paths = glob(
         osp.join(mmdeploy_dir, 'mmdeploy/backend/ncnn/ncnn_ext.*'))
     for ncnn_ext_path in ncnn_ext_paths:
         os.remove(ncnn_ext_path)
 
+    # remove ts_optimizer
+    ts_optimizer_paths = glob(
+        osp.join(mmdeploy_dir, 'mmdeploy/backend/torchscript/ts_optimizer.*'))
+    for ts_optimizer_path in ts_optimizer_paths:
+        os.remove(ts_optimizer_path)
+
 
 def build_mmdeploy(cfg, mmdeploy_dir, dist_dir=None):
     cmake_flags = cfg.get('cmake_flags', [])
@@ -165,6 +174,41 @@ def build_mmdeploy(cfg, mmdeploy_dir, dist_dir=None):
     _call_command(bdist_cmd, mmdeploy_dir)
 
+
+def build_mmdeploy_python(python_executable, cfg, mmdeploy_dir):
+    cmake_flags = cfg.get('cmake_flags', [])
+    cmake_envs = cfg.get('cmake_envs', dict())
+
+    args = [f'-D{k}={v}' for k, v in cmake_envs.items()]
+    args.append(
+        f'-DMMDeploy_DIR={mmdeploy_dir}/build/install/lib/cmake/MMDeploy')
+    args.append(f'-DPYTHON_EXECUTABLE={python_executable}')
+
+    if sys.platform == 'win32':
+        build_cmd = 'cmake --build . --config Release -- /m'
+    else:
+        build_cmd = 'cmake --build . -- -j$(nproc)'
+    cmake_cmd = ' '.join(['cmake ../csrc/mmdeploy/apis/python'] + cmake_flags +
+                         args)
+
+    build_dir = osp.join(mmdeploy_dir, 'build_python')
+    _remove_if_exist(build_dir)
+    os.mkdir(build_dir)
+
+    _call_command(cmake_cmd, build_dir)
+    _call_command(build_cmd, build_dir)
+
+    python_api_lib_path = []
+    lib_patterns = ['*mmdeploy_python*.so', '*mmdeploy_python*.pyd']
+    for pattern in lib_patterns:
+        python_api_lib_path.extend(
+            glob(
+                osp.join(mmdeploy_dir, 'build_python/**', pattern),
+                recursive=True,
+            ))
+    return python_api_lib_path[0]
+
+
 def get_dir_name(cfg, tag, default_name):
     if tag not in cfg:
         logging.warning(f'{tag} not found, use `{default_name}` as default.')
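`build_mmdeploy_python` amounts to a conventional out-of-source CMake build of the SDK Python bindings. Roughly the manual equivalent on Linux, with placeholder paths; `MMDeploy_DIR` must point at an installed SDK tree:

```shell
# placeholder paths; run from the mmdeploy source root
mkdir build_python && cd build_python
cmake ../csrc/mmdeploy/apis/python \
    -DMMDeploy_DIR=/path/to/mmdeploy/build/install/lib/cmake/MMDeploy \
    -DPYTHON_EXECUTABLE=$(which python)
cmake --build . -- -j$(nproc)
```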
@@ -189,8 +233,8 @@ def check_env(cfg: Dict):
     CUDA_TOOLKIT_ROOT_DIR = cmake_envs.get('CUDA_TOOLKIT_ROOT_DIR', '')
     CUDA_TOOLKIT_ROOT_DIR = osp.expandvars(CUDA_TOOLKIT_ROOT_DIR)
-    nvcc_cmd = 'nvcc' if len(CUDA_TOOLKIT_ROOT_DIR) <= 0 else osp.join(
-        CUDA_TOOLKIT_ROOT_DIR, 'bin', 'nvcc')
+    nvcc_cmd = ('nvcc' if len(CUDA_TOOLKIT_ROOT_DIR) <= 0 else osp.join(
+        CUDA_TOOLKIT_ROOT_DIR, 'bin', 'nvcc'))
 
     try:
         nvcc = check_output(f'"{nvcc_cmd}" -V', shell=True)
@@ -234,10 +278,9 @@ def check_env(cfg: Dict):
         patch = re.search(r'#define NV_TENSORRT_PATCH (\d+)', data)
         build = re.search(r'#define NV_TENSORRT_BUILD (\d+)', data)
         if major is not None and minor is not None and patch is not None:
-            tensorrt_version = f'{major.group(1)}.' +\
-                f'{minor.group(1)}.' +\
-                f'{patch.group(1)}.' +\
-                f'{build.group(1)}'
+            tensorrt_version = (f'{major.group(1)}.' +
+                                f'{minor.group(1)}.' +
+                                f'{patch.group(1)}.' +
+                                f'{build.group(1)}')
             env_info['trt_v'] = tensorrt_version
 
@@ -251,7 +294,7 @@ def create_package(cfg: Dict, mmdeploy_dir: str):
     # load flags
     cfg, build_dir = get_dir_name(cfg, 'BUILD_NAME', build_dir)
     cmake_envs = cfg.get('cmake_envs', dict())
-    build_sdk_flag = cmake_envs.get('MMDEPLOY_BUILD_SDK', False)
+    build_sdk_flag = cmake_envs.get('MMDEPLOY_BUILD_SDK', 'OFF')
     if 'TAR_NAME' in cfg:
         cfg, sdk_tar_name = get_dir_name(cfg, 'TAR_NAME', sdk_tar_name)
 
@@ -275,42 +318,54 @@ def create_package(cfg: Dict, mmdeploy_dir: str):
             dist_dir = osp.join(build_dir, 'dist')
             build_mmdeploy(cfg, mmdeploy_dir, dist_dir=dist_dir)
 
-            if build_sdk_flag:
+            if build_sdk_flag == 'ON':
 
                 sdk_tar_dir = osp.join(build_dir, sdk_tar_name)
 
                 # copy lib and install into sdk dir
                 install_dir = osp.join(mmdeploy_dir, 'build/install/')
                 _copy(install_dir, sdk_tar_dir)
+                _copy(f'{mmdeploy_dir}/demo/python',
+                      f'{sdk_tar_dir}/example/python')
                 _remove_if_exist(osp.join(sdk_tar_dir, 'example', 'build'))
 
-                # create sdk python api wheel
-                # for linux
-                python_api_lib_path = glob(
-                    osp.join(mmdeploy_dir, 'build/lib/mmdeploy_python.*.so'))
-                # for windows
-                python_api_lib_path += glob(
-                    osp.join(mmdeploy_dir, 'build/bin/*/mmdeploy_python.*.pyd'))
-                num_libs = len(python_api_lib_path)
-                if num_libs != 1:
-                    logging.info('find multiple mmdeploy_python libraries.')
-                python_api_lib_path = python_api_lib_path[0]
+                # build SDK Python API according to different python version
+                for python_version in ['3.6', '3.7', '3.8', '3.9']:
+                    _version = version.parse(python_version)
+                    python_major, python_minor = _version.major, _version.minor
 
-                sdk_python_package_dir = osp.join(build_dir, '.mmdeploy_python')
-                _copy(PACKAGING_DIR, sdk_python_package_dir)
-                _copy(
-                    osp.join(mmdeploy_dir, 'mmdeploy', 'version.py'),
-                    osp.join(sdk_python_package_dir, 'mmdeploy_python',
-                             'version.py'))
-                _copy(python_api_lib_path,
-                      osp.join(sdk_python_package_dir, 'mmdeploy_python'))
-                sdk_wheel_dir = osp.abspath(osp.join(sdk_tar_dir, 'python'))
-                bdist_cmd = _create_bdist_cmd(
-                    cfg, c_ext=True, dist_dir=sdk_wheel_dir)
-                _call_command(bdist_cmd, sdk_python_package_dir)
+                    # create sdk python api wheel
+                    sdk_python_package_dir = osp.join(build_dir,
+                                                      '.mmdeploy_python')
+                    _copy(PACKAGING_DIR, sdk_python_package_dir)
+                    _copy(
+                        osp.join(mmdeploy_dir, 'mmdeploy', 'version.py'),
+                        osp.join(sdk_python_package_dir, 'mmdeploy_python',
+                                 'version.py'),
+                    )
 
-                # remove temp package dir
-                _remove_if_exist(sdk_python_package_dir)
+                    # build mmdeploy sdk python api
+                    python_executable = shutil.which('python')\
+                        .replace('mmdeploy-3.6', f'mmdeploy-{python_version}')
+                    python_api_lib_path = build_mmdeploy_python(
+                        python_executable, cfg, mmdeploy_dir)
+                    _copy(
+                        python_api_lib_path,
+                        osp.join(sdk_python_package_dir, 'mmdeploy_python'),
+                    )
+                    _remove_if_exist(osp.join(mmdeploy_dir, 'build_python'))
+
+                    sdk_wheel_dir = osp.abspath(osp.join(sdk_tar_dir, 'python'))
+
+                    bdist_cmd = (f'{python_executable} '
+                                 f'setup.py bdist_wheel --plat-name '
+                                 f'{PLATFORM_TAG} --python-tag '
+                                 f'cp{python_major}{python_minor} '
+                                 f'--dist-dir {sdk_wheel_dir}')
+                    _call_command(bdist_cmd, sdk_python_package_dir)
+
+                    # remove temp package dir
+                    _remove_if_exist(sdk_python_package_dir)
 
     logging.info('build finish.')
diff --git a/tools/package_tools/packaging/mmdeploy_python/version.py b/tools/package_tools/packaging/mmdeploy_python/version.py
index b3309754c..806f4defe 100644
--- a/tools/package_tools/packaging/mmdeploy_python/version.py
+++ b/tools/package_tools/packaging/mmdeploy_python/version.py
@@ -1,2 +1,2 @@
 # Copyright (c) OpenMMLab. All rights reserved.
-__version__ = '0.5.0'
+__version__ = '0.7.0'
diff --git a/tools/profile.py b/tools/profile.py
new file mode 100644
index 000000000..d7c1cf980
--- /dev/null
+++ b/tools/profile.py
@@ -0,0 +1,155 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import argparse
+import glob
+import os.path as osp
+
+import numpy as np
+import torch
+from mmcv import DictAction
+from prettytable import PrettyTable
+
+from mmdeploy.apis import build_task_processor
+from mmdeploy.utils import get_root_logger
+from mmdeploy.utils.config_utils import (Backend, get_backend,
+                                         get_input_shape, load_config)
+from mmdeploy.utils.timer import TimeCounter
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(
+        description='MMDeploy Model Latency Test Tool.')
+    parser.add_argument('deploy_cfg', help='Deploy config path')
+    parser.add_argument('model_cfg', help='Model config path')
+    parser.add_argument('image_dir', help='Input directory to image files')
+    parser.add_argument(
+        '--model', type=str, nargs='+', help='Input model files.')
+    parser.add_argument(
+        '--device', help='device type for inference', default='cuda:0')
+    parser.add_argument(
+        '--shape',
+        type=str,
+        help='Input shape to test in `HxW` format, e.g., `800x1344`',
+        default=None)
+    parser.add_argument(
+        '--warmup',
+        type=int,
+        help='warmup iterations before counting inference latency.',
+        default=10)
+    parser.add_argument(
+        '--num-iter',
+        type=int,
+        help='Number of iterations to run the inference.',
+        default=100)
+    parser.add_argument(
+        '--cfg-options',
+        nargs='+',
+        action=DictAction,
+        help='override some settings in the used config, the key-value pair '
+        'in xxx=yyy format will be merged into config file. If the value to '
+        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
+        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
+        'Note that the quotation marks are necessary and that no white space '
+        'is allowed.')
+
+    args = parser.parse_args()
+    return args
+
+
+def get_images(
+        image_dir,
+        extensions=['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif']):
+    images = []
+    files = glob.glob(osp.join(image_dir, '**', '*'), recursive=True)
+    for f in files:
+        _, ext = osp.splitext(f)
+        if ext.lower() in extensions:
+            images.append(f)
+    return images
+
+
+class TorchWrapper(torch.nn.Module):
+
+    def __init__(self, model):
+        super(TorchWrapper, self).__init__()
+
+        self.model = model
+
+    @TimeCounter.count_time(Backend.PYTORCH.value)
+    def forward(self, *args, **kwargs):
+        return self.model(*args, return_loss=False, **kwargs)
+
+
+def main():
+    args = parse_args()
+    deploy_cfg_path = args.deploy_cfg
+    model_cfg_path = args.model_cfg
+    logger = get_root_logger()
+
+    # load deploy_cfg
+    deploy_cfg, model_cfg = load_config(deploy_cfg_path, model_cfg_path)
+
+    # merge options for model cfg
+    if args.cfg_options is not None:
+        model_cfg.merge_from_dict(args.cfg_options)
+
+    deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)
+    if args.shape is not None:
+        h, w = [int(_) for _ in args.shape.split('x')]
+        input_shape = [w, h]
+    else:
+        input_shape = get_input_shape(deploy_cfg)
+    assert input_shape is not None, 'Input_shape should not be None'
+
+    # create model and inputs
+    task_processor = build_task_processor(model_cfg, deploy_cfg, args.device)
+
+    model_ext = osp.splitext(args.model[0])[1]
+    is_pytorch = model_ext in ['.pth', '.pt']
+    if is_pytorch:
+        # load pytorch model
+        model = task_processor.init_pytorch_model(args.model[0])
+        model = TorchWrapper(model)
+        backend = Backend.PYTORCH.value
+    else:
+        # load the model of the backend
+        model = task_processor.init_backend_model(args.model)
+        backend = get_backend(deploy_cfg).value
+
+    model = model.eval().to(args.device)
+    is_device_cpu = args.device == 'cpu'
+    with_sync = not is_device_cpu
+    if not is_device_cpu:
+        torch.backends.cudnn.benchmark = True
+
+    image_files = get_images(args.image_dir)
+    nrof_image = len(image_files)
+    assert nrof_image > 0, f'No image files found in {args.image_dir}'
+    logger.info(f'Found {nrof_image} image files in {args.image_dir}')
+    total_iters = args.num_iter + args.warmup
+
+    if nrof_image < total_iters:
+        np.random.seed(1234)
+        image_files += [
+            image_files[i]
+            for i in np.random.choice(nrof_image, total_iters - nrof_image)
+        ]
+    image_files = image_files[:total_iters]
+
+    with TimeCounter.activate(
+            warmup=args.warmup, log_interval=20, with_sync=with_sync):
+        for image in image_files:
+            data, _ = task_processor.create_input(image, input_shape)
+            model(**data)
+
+    print('----- Settings:')
+    settings = PrettyTable()
+    settings.header = False
+    batch_size = 1
+    settings.add_row(['batch size', batch_size])
+    settings.add_row(['shape', f'{input_shape[1]}x{input_shape[0]}'])
+    settings.add_row(['iterations', args.num_iter])
+    settings.add_row(['warmup', args.warmup])
+    print(settings)
+    print('----- Results:')
+    TimeCounter.print_stats(backend)
+
+
+if __name__ == '__main__':
+    main()
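A hypothetical run of the new latency tool; config, model and image paths are placeholders, while the flags come straight from `parse_args` above:

```shell
# placeholder configs/paths
python tools/profile.py \
    ${DEPLOY_CFG} ${MODEL_CFG} ./images \
    --model work_dir/end2end.onnx \
    --device cuda:0 \
    --shape 800x1344 \
    --num-iter 100 --warmup 10
```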
diff --git a/tools/scripts/build_linux_nvidia.sh b/tools/scripts/build_linux_nvidia.sh
index 082e88fed..f1861726b 100755
--- a/tools/scripts/build_linux_nvidia.sh
+++ b/tools/scripts/build_linux_nvidia.sh
@@ -23,7 +23,7 @@ MMDEPLOY_DIR=${WORKING_DIR}
 
 #####
 # Versions
-PPLCV_VER="0.6.2"
+PPLCV_VER="0.7.0"
 CMAKE_VER="3.23.0"
 #####
 
@@ -337,7 +337,7 @@ pplcv() {
     # build
     mkdir build -p && cd build
-    cmake -DHPCC_USE_CUDA=ON -DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} .. && make -j${processor_num} && sudo make install
+    cmake -DPPLCV_USE_CUDA=ON -DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} .. && make -j${processor_num} && sudo make install
     sudo ldconfig
 
     # generate prebuild and pack into .tar.gz
diff --git a/tools/test.py b/tools/test.py
index 6e276aa67..c656ba4d2 100644
--- a/tools/test.py
+++ b/tools/test.py
@@ -65,6 +65,17 @@ def parse_args():
         help='the interval between each log, require setting '
         'speed-test first',
         default=100)
+    parser.add_argument(
+        '--batch-size',
+        type=int,
+        default=1,
+        help='the batch size for test, would override `samples_per_gpu` '
+        'in data config.')
+    parser.add_argument(
+        '--uri',
+        default='192.168.1.1:60000',
+        help='Remote ipv4:port or ipv6:port for inference on edge device.')
     args = parser.parse_args()
     return args
diff --git a/tools/torch2onnx.py b/tools/torch2onnx.py
index e13110353..1beffce9e 100644
--- a/tools/torch2onnx.py
+++ b/tools/torch2onnx.py
@@ -1,10 +1,13 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 import argparse
 import logging
+import os
 import os.path as osp
 
-from mmdeploy.apis import torch2onnx
-from mmdeploy.utils import get_root_logger
+from mmdeploy.apis import (extract_model, get_predefined_partition_cfg,
+                           torch2onnx)
+from mmdeploy.utils import (get_ir_config, get_partition_config,
+                            get_root_logger, load_config)
 
 
 def parse_args():
@@ -13,7 +16,10 @@ def parse_args():
     parser.add_argument('model_cfg', help='model config path')
     parser.add_argument('checkpoint', help='model checkpoint path')
     parser.add_argument('img', help='image used to convert the model')
-    parser.add_argument('output', help='output onnx path')
+    parser.add_argument(
+        '--work-dir',
+        default='./work-dir',
+        help='Directory to save output files.')
     parser.add_argument(
         '--device', help='device used for conversion', default='cpu')
     parser.add_argument(
@@ -30,29 +36,49 @@ def main():
     args = parse_args()
     logger = get_root_logger(log_level=args.log_level)
 
-    deploy_cfg_path = args.deploy_cfg
-    model_cfg_path = args.model_cfg
-    checkpoint_path = args.checkpoint
-    img = args.img
-    output_path = args.output
-    work_dir, save_file = osp.split(output_path)
-    device = args.device
+    logger.info(f'torch2onnx: \n\tmodel_cfg: {args.model_cfg} '
+                f'\n\tdeploy_cfg: {args.deploy_cfg}')
 
-    logger.info(f'torch2onnx: \n\tmodel_cfg: {model_cfg_path} '
-                f'\n\tdeploy_cfg: {deploy_cfg_path}')
-    try:
-        torch2onnx(
-            img,
-            work_dir,
-            save_file,
-            deploy_cfg=deploy_cfg_path,
-            model_cfg=model_cfg_path,
-            model_checkpoint=checkpoint_path,
-            device=device)
-        logger.info('torch2onnx success.')
-    except Exception as e:
-        logger.error(e)
-        logger.error('torch2onnx failed.')
+    os.makedirs(args.work_dir, exist_ok=True)
+    # load deploy_cfg
+    deploy_cfg = load_config(args.deploy_cfg)[0]
+    save_file = get_ir_config(deploy_cfg)['save_file']
+
+    torch2onnx(
+        args.img,
+        args.work_dir,
+        save_file,
+        deploy_cfg=args.deploy_cfg,
+        model_cfg=args.model_cfg,
+        model_checkpoint=args.checkpoint,
+        device=args.device)
+
+    # partition model
+    partition_cfgs = get_partition_config(deploy_cfg)
+
+    if partition_cfgs is not None:
+        if 'partition_cfg' in partition_cfgs:
+            partition_cfgs = partition_cfgs.get('partition_cfg', None)
+        else:
+            assert 'type' in partition_cfgs
+            partition_cfgs = get_predefined_partition_cfg(
+                deploy_cfg, partition_cfgs['type'])
+
+        origin_ir_file = osp.join(args.work_dir, save_file)
+        for partition_cfg in partition_cfgs:
+            save_file = partition_cfg['save_file']
+            save_path = osp.join(args.work_dir, save_file)
+            start = partition_cfg['start']
+            end = partition_cfg['end']
+            dynamic_axes = partition_cfg.get('dynamic_axes', None)
+
+            extract_model(
+                origin_ir_file,
+                start,
+                end,
+                dynamic_axes=dynamic_axes,
+                save_file=save_path)
+
+    logger.info(f'torch2onnx finished. Results saved to {args.work_dir}')
 
 
 if __name__ == '__main__':
     main()
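With the `output` positional argument replaced by `--work-dir`, a hypothetical invocation looks like this (paths are placeholders; the ONNX file name is now taken from the deploy config's `save_file`):

```shell
# placeholder configs/paths
python tools/torch2onnx.py \
    ${DEPLOY_CFG} ${MODEL_CFG} ${CHECKPOINT} ${IMAGE} \
    --work-dir ./work-dir --device cpu
```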