mmdeploy/csrc/apis/c/detector.cpp
lvhan028 36124f6205
Merge sdk (#251)
* check in cmake

* move backend_ops to csrc/backend_ops

* check in preprocess, model, some codebase and their c-apis

* check in CMakeLists.txt

* check in parts of test_csrc

* commit everything else

* add readme

* update core's BUILD_INTERFACE directory

* skip codespell on third_party

* update trt_net and ort_net's CMakeLists

* ignore clion's build directory

* check in pybind11

* add onnx.proto. Remove MMDeploy's dependency on ncnn's source code

* export MMDeployTargets only when MMDEPLOY_BUILD_SDK is ON

* remove useless message

* target include directory is wrong

* change target name from mmdeploy_ppl_net to mmdeploy_pplnn_net

* skip install directory

* update project's cmake

* remove useless code

* set CMAKE_BUILD_TYPE to Release by force if it isn't set by user

* update custom ops CMakeLists

* pass object target's source lists

* fix lint end-of-file

* fix lint: trailing whitespace

* fix codespell hook

* remove bicubic_interpolate to csrc/backend_ops/

* set MMDEPLOY_BUILD_SDK OFF

* change custom ops build command

* add spdlog installation command

* update docs on how to checkout pybind11

* move bicubic_interpolate to backend_ops/tensorrt directory

* remove useless code

* correct cmake

* fix typo

* fix typo

* fix install directory

* correct sdk's readme

* set cub dir when cuda version < 11.0

* change directory where clang-format will apply to

* fix build command

* add .clang-format

* change clang-format style from google to file

* reformat csrc/backend_ops

* format sdk's code

* turn off clang-format for some files

* add -Xcompiler=-fno-gnu-unique

* fix trt topk initialize

* check in config for sdk demo

* update cmake script and csrc's readme

* correct config's path

* add cuda include directory, otherwise compile failed in case of tensorrt8.2

* clang-format onnx2ncnn.cpp

Co-authored-by: zhangli <lzhang329@gmail.com>
Co-authored-by: grimoire <yaoqian@sensetime.com>
2021-12-07 10:57:55 +08:00

148 lines
4.3 KiB
C++

// Copyright (c) OpenMMLab. All rights reserved.
#include "detector.h"
#include <numeric>
#include "archive/value_archive.h"
#include "codebase/mmdet/mmdet.h"
#include "core/device.h"
#include "core/graph.h"
#include "core/mat.h"
#include "core/utils/formatter.h"
#include "handle.h"
using namespace std;
using namespace mmdeploy;
namespace {
// Returns the pipeline configuration template shared by every detector handle.
// The "model" entry is a "TBD" placeholder that mmdeploy_detector_create_impl()
// replaces with the real model (or model path) before building the pipeline.
// NOTE(review): returns a mutable reference to a function-local static; callers
// must copy it (as create_impl does via `auto value = ...`) rather than mutate
// the shared instance in place.
Value& config_template() {
  // clang-format off
  static Value v{
    {
      "pipeline", {
        {"input", {"image"}},
        {"output", {"det"}},
        {
          "tasks", {
            {
              {"name", "mmdetection"},
              {"type", "Inference"},
              {"params", {{"model", "TBD"}}},  // filled in by create_impl
              {"input", {"image"}},
              {"output", {"det"}}
            }
          }
        }
      }
    }
  };
  // clang-format on
  return v;
}
template <class ModelType>
int mmdeploy_detector_create_impl(ModelType&& m, const char* device_name, int device_id,
mm_handle_t* handle) {
try {
auto value = config_template();
value["pipeline"]["tasks"][0]["params"]["model"] = std::forward<ModelType>(m);
auto detector = std::make_unique<Handle>(device_name, device_id, std::move(value));
*handle = detector.release();
return MM_SUCCESS;
} catch (const std::exception& e) {
ERROR("exception caught: {}", e.what());
} catch (...) {
ERROR("unknown exception caught");
}
return MM_E_FAIL;
}
} // namespace
// Creates a detector from an already-loaded model handle.
// Returns MM_SUCCESS on success; MM_E_INVALID_ARG if `model` is null.
MM_SDK_API int mmdeploy_detector_create(mm_model_t model, const char* device_name, int device_id,
                                        mm_handle_t* handle) {
  if (model == nullptr) {
    // *static_cast<Model*>(model) below would be UB on a null pointer.
    return MM_E_INVALID_ARG;
  }
  return mmdeploy_detector_create_impl(*static_cast<Model*>(model), device_name, device_id, handle);
}
// Creates a detector from a model path. The path string is forwarded as-is
// into the pipeline config; actual model loading happens during pipeline
// construction inside create_impl.
MM_SDK_API int mmdeploy_detector_create_by_path(const char* model_path, const char* device_name,
                                                int device_id, mm_handle_t* handle) {
  return mmdeploy_detector_create_impl(model_path, device_name, device_id, handle);
}
// Runs detection on `mat_count` input images.
// On success, *results points to one flat array holding all detections
// (grouped per image, in input order) and *result_count to a `mat_count`-sized
// array of per-image detection counts. Both arrays are owned by the caller and
// must be freed with mmdeploy_detector_release_result.
// Returns MM_SUCCESS, MM_E_INVALID_ARG on bad arguments, or MM_E_FAIL.
MM_SDK_API int mmdeploy_detector_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count,
                                       mm_detect_t** results, int** result_count) {
  if (handle == nullptr || mats == nullptr || mat_count == 0 || results == nullptr ||
      result_count == nullptr) {
    return MM_E_INVALID_ARG;
  }
  try {
    auto detector = static_cast<Handle*>(handle);
    Value input{Value::kArray};
    for (int i = 0; i < mat_count; ++i) {
      // BUG FIX: the original read `mats->type` (element 0's type) for every
      // image; each mat must be wrapped with its own data type.
      mmdeploy::Mat _mat{mats[i].height,        mats[i].width, PixelFormat(mats[i].format),
                         DataType(mats[i].type), mats[i].data,  Device{"cpu"}};
      input.front().push_back({{"ori_img", _mat}});
    }
    auto output = detector->Run(std::move(input)).value().front();
    // NOTE(review): logs the entire pipeline output at ERROR level on every
    // successful call; consider demoting to a debug-level log.
    ERROR("output: {}", output);
    auto detector_outputs = from_value<vector<mmdet::DetectorOutput>>(output);

    // Per-image detection counts, in input order.
    vector<int> _result_count;
    _result_count.reserve(mat_count);
    for (const auto& det_output : detector_outputs) {
      _result_count.push_back((int)det_output.detections.size());
    }
    auto total = std::accumulate(_result_count.begin(), _result_count.end(), 0);

    // Build both output arrays in unique_ptrs so a throw below cannot leak.
    std::unique_ptr<int[]> result_count_data(new int[_result_count.size()]{});
    std::copy(_result_count.begin(), _result_count.end(), result_count_data.get());

    std::unique_ptr<mm_detect_t[]> result_data(new mm_detect_t[total]{});
    auto result_ptr = result_data.get();
    for (const auto& det_output : detector_outputs) {
      for (const auto& detection : det_output.detections) {
        result_ptr->label_id = detection.label_id;
        result_ptr->score = detection.score;
        const auto& bbox = detection.bbox;
        // Truncates float coordinates to int; assumes bbox components map
        // positionally onto the C struct's rect fields — TODO confirm layout.
        result_ptr->bbox = {(int)bbox[0], (int)bbox[1], (int)bbox[2], (int)bbox[3]};
        ++result_ptr;
      }
    }
    *result_count = result_count_data.release();  // ownership to the caller
    *results = result_data.release();
    return MM_SUCCESS;
  } catch (const std::exception& e) {
    ERROR("exception caught: {}", e.what());
  } catch (...) {
    ERROR("unknown exception caught");
  }
  return MM_E_FAIL;
}
// Releases the arrays handed out by mmdeploy_detector_apply. Passing null
// pointers is safe (delete[] on nullptr is a no-op).
MM_SDK_API void mmdeploy_detector_release_result(mm_detect_t* results, const int* result_count,
                                                 int count) {
  (void)count;  // kept for API symmetry; both arrays are single allocations
  delete[] result_count;
  delete[] results;
}
// Destroys a detector handle created by the create functions above.
// Accepts nullptr: deleting a null pointer is a well-defined no-op, so no
// explicit guard is needed.
MM_SDK_API void mmdeploy_detector_destroy(mm_handle_t handle) {
  delete static_cast<Handle*>(handle);
}