// Copyright (c) OpenMMLab. All rights reserved.
#ifndef ONNXRUNTIME_NMS_ROTATED_H
#define ONNXRUNTIME_NMS_ROTATED_H

#include <assert.h>
#include <onnxruntime_cxx_api.h>

#include <cmath>
#include <mutex>
#include <string>
#include <vector>

namespace mmdeploy {
// Kernel state for rotated NMS; holds the IoU threshold used by Compute().
struct NMSRotatedKernel {
  NMSRotatedKernel(OrtApi api, const OrtKernelInfo* info);

  void Compute(OrtKernelContext* context);

 private:
  OrtApi api_;
  Ort::CustomOpApi ort_;
  const OrtKernelInfo* info_;
  Ort::AllocatorWithDefaultOptions allocator_;
  float iou_threshold_;
};

// Custom-op descriptor for "NMSRotated": two float inputs, one int64 output, CPU only.
struct NMSRotatedOp : Ort::CustomOpBase<NMSRotatedOp, NMSRotatedKernel> {
  void* CreateKernel(OrtApi api, const OrtKernelInfo* info) const {
    return new NMSRotatedKernel(api, info);
  }
  const char* GetName() const { return "NMSRotated"; }

  size_t GetInputTypeCount() const { return 2; }
  ONNXTensorElementDataType GetInputType(size_t) const {
    return ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT;
  }

  size_t GetOutputTypeCount() const { return 1; }
  ONNXTensorElementDataType GetOutputType(size_t) const {
    return ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64;
  }

  // force cpu
  const char* GetExecutionProviderType() const { return "CPUExecutionProvider"; }
};
}  // namespace mmdeploy

#endif  // ONNXRUNTIME_NMS_ROTATED_H
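
For context, a minimal sketch of how a custom op declared like NMSRotatedOp is typically attached to an ONNX Runtime session through a custom-op domain. This is not part of the header above; the domain name "mmdeploy", the model path, and the main() wrapper are assumptions for illustration only.

#include <onnxruntime_cxx_api.h>

#include "nms_rotated.h"  // this header (include path assumed)

int main() {
  Ort::Env env{ORT_LOGGING_LEVEL_WARNING, "nms_rotated_demo"};

  // Register the custom op in a domain and attach it to the session options.
  static mmdeploy::NMSRotatedOp nms_rotated_op;
  Ort::CustomOpDomain domain{"mmdeploy"};  // domain name is an assumption
  domain.Add(&nms_rotated_op);

  Ort::SessionOptions session_options;
  session_options.Add(domain);

  // Placeholder model path; the model must contain a node that uses NMSRotated.
  Ort::Session session{env, "end2end.onnx", session_options};
  return 0;
}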