mmdeploy/csrc/apis/c/classifier.cpp
lzhangzz 46bfe0ac87
[Feature] New pipeline & executor for SDK (#497)
* executor prototype

* add split/when_all

* fix GCC build

* WIP let_value

* fix let_value

* WIP ensure_started

* ensure_started & start_detached

* fix let_value + when_all combo on MSVC 142

* fix static thread pool

* generic just, then, let_value, sync_wait

* minor

* generic split and when_all

* fully generic sender adapters

* when_all: workaround for GCC7

* support legacy spdlog

* fix memleak

* bulk

* static detector

* fix bulk & first pipeline

* bulk for static thread pools

* fix on MSVC

* WIP async batch submission

* WIP collation

* async batch

* fix detector

* fix async detector

* fix

* fix

* debug

* fix cuda allocator

* WIP type erased executor

* better type erasure

* simplify C API impl

* Expand & type erase TC

* deduction guide for type erased senders

* fix GCC build

* when_all for arrays of Value senders

* WIP pipeline v2

* WIP pipeline parser

* WIP timed batch operation

* add registry

* experiment

* fix pipeline

* naming

* fix mem-leak

* fix deferred batch operation

* WIP

* WIP configurable scheduler

* WIP configurable scheduler

* add comment

* parse scheduler config

* force link schedulers

* WIP pipeable sender

* WIP CPO

* ADL isolation and dismantle headers

* type erase single thread context

* fix MSVC build

* CPO

* replace decay_t with remove_cvref_t

* structure adjustment

* structure adjustment

* apply CPOs & C API rework

* refine C API

* detector async C API

* adjust detector async C API

* # Conflicts:
#	csrc/apis/c/detector.cpp

* fix when_all for type erased senders

* support void return for Then

* async detector

* fix some CPOs

* minor

* WIP rework capture mechanism for type erased types

* minor fix

* fix MSVC build

* move expand.h to execution

* make `Expand` pipeable

* fix type erased

* un-templatize `_TypeErasedOperation`

* re-work C API

* remove async_detector C API

* fix pipeline

* add flatten & unflatten

* fix flatten & unflatten

* add async OCR demo

* config executor for nodes & better executor API

* working async OCR example

* minor

* dynamic batch via scheduler

* dynamic batch on `Value`

* fix MSVC build

* type erase dynamic batch scheduler

* sender as Python Awaitable

* naming

* naming

* add docs

* minor

* merge tmp branch

* unify C APIs

* fix ocr

* unify APIs

* fix typo

* update async OCR demo

* add v3 API text recognizer

* fix v3 API

* fix lint

* add license info & reformat

* add demo async_ocr_v2

* revert files

* revert files

* resolve link issues

* fix scheduler linkage for shared libs

* fix license header

* add docs for `mmdeploy_executor_split`

* add missing `mmdeploy_executor_transfer_just` and `mmdeploy_executor_execute`

* make `TimedSingleThreadContext` header only

* fix lint

* simplify type-erased sender
2022-06-01 14:10:43 +08:00

// Copyright (c) OpenMMLab. All rights reserved.

#include "classifier.h"

#include <numeric>

#include "apis/c/common_internal.h"
#include "apis/c/handle.h"
#include "apis/c/pipeline.h"
#include "archive/value_archive.h"
#include "codebase/mmcls/mmcls.h"
#include "core/device.h"
#include "core/graph.h"
#include "core/utils/formatter.h"

using namespace mmdeploy;
using namespace std;

namespace {
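
// Pipeline config template for the classifier: a single "Inference" task whose
// "model" field is a placeholder filled in when a handle is created.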
Value& config_template() {
  // clang-format off
  static Value v{
    {
      "pipeline", {
        {"input", {"img"}},
        {"output", {"cls"}},
        {
          "tasks", {
            {
              {"name", "classifier"},
              {"type", "Inference"},
              {"params", {{"model", "TBD"}}},
              {"input", {"img"}},
              {"output", {"cls"}}
            }
          }
        }
      }
    }
  };
  // clang-format on
  return v;
}
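
// Copies the config template, injects the loaded model, and builds the underlying
// pipeline handle on the requested device.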
int mmdeploy_classifier_create_impl(mm_model_t model, const char* device_name, int device_id,
                                    mmdeploy_exec_info_t exec_info, mm_handle_t* handle) {
  auto config = config_template();
  config["pipeline"]["tasks"][0]["params"]["model"] = *static_cast<Model*>(model);

  return mmdeploy_pipeline_create(Cast(&config), device_name, device_id, exec_info, handle);
}

} // namespace
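
// `mmdeploy_classifier_create` uses the default execution info (exec_info == nullptr);
// the _v2 variant lets the caller pass an mmdeploy_exec_info_t for the pipeline.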
int mmdeploy_classifier_create(mm_model_t model, const char* device_name, int device_id,
                               mm_handle_t* handle) {
  return mmdeploy_classifier_create_impl(model, device_name, device_id, nullptr, handle);
}

int mmdeploy_classifier_create_v2(mm_model_t model, const char* device_name, int device_id,
                                  mmdeploy_exec_info_t exec_info, mm_handle_t* handle) {
  return mmdeploy_classifier_create_impl(model, device_name, device_id, exec_info, handle);
}
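
// Convenience overload that loads the model from disk; the temporary mm_model_t is
// released right after creation since the pipeline keeps its own copy of the model.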
int mmdeploy_classifier_create_by_path(const char* model_path, const char* device_name,
                                       int device_id, mm_handle_t* handle) {
  mm_model_t model{};
  if (auto ec = mmdeploy_model_create_by_path(model_path, &model)) {
    return ec;
  }
  auto ec = mmdeploy_classifier_create_impl(model, device_name, device_id, nullptr, handle);
  mmdeploy_model_destroy(model);
  return ec;
}

int mmdeploy_classifier_create_input(const mm_mat_t* mats, int mat_count, mmdeploy_value_t* value) {
  return mmdeploy_common_create_input(mats, mat_count, value);
}
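
// Synchronous convenience wrapper: packs the input mats into a value, runs the
// pipeline, and unpacks the classification results; `wrapped<>` releases the
// intermediate values automatically.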
int mmdeploy_classifier_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count,
                              mm_class_t** results, int** result_count) {
  wrapped<mmdeploy_value_t> input;
  if (auto ec = mmdeploy_classifier_create_input(mats, mat_count, input.ptr())) {
    return ec;
  }
  wrapped<mmdeploy_value_t> output;
  if (auto ec = mmdeploy_classifier_apply_v2(handle, input, output.ptr())) {
    return ec;
  }
  if (auto ec = mmdeploy_classifier_get_result(output, results, result_count)) {
    return ec;
  }
  return MM_SUCCESS;
}

int mmdeploy_classifier_apply_v2(mm_handle_t handle, mmdeploy_value_t input,
                                 mmdeploy_value_t* output) {
  return mmdeploy_pipeline_apply(handle, input, output);
}

int mmdeploy_classifier_apply_async(mm_handle_t handle, mmdeploy_sender_t input,
                                    mmdeploy_sender_t* output) {
  return mmdeploy_pipeline_apply_async(handle, input, output);
}
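
// Flattens the per-image classification outputs into a single contiguous array of
// mm_class_t; (*result_count)[i] holds the number of labels for the i-th image.
// Both arrays must be released with mmdeploy_classifier_release_result.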
int mmdeploy_classifier_get_result(mmdeploy_value_t output, mm_class_t** results,
                                   int** result_count) {
  if (!output || !results || !result_count) {
    return MM_E_INVALID_ARG;
  }
  try {
    Value& value = Cast(output)->front();

    auto classify_outputs = from_value<vector<mmcls::ClassifyOutput>>(value);

    vector<int> _result_count;
    _result_count.reserve(classify_outputs.size());
    for (const auto& cls_output : classify_outputs) {
      _result_count.push_back((int)cls_output.labels.size());
    }
    auto total = std::accumulate(begin(_result_count), end(_result_count), 0);

    std::unique_ptr<int[]> result_count_data(new int[_result_count.size()]{});
    std::copy(_result_count.begin(), _result_count.end(), result_count_data.get());

    std::unique_ptr<mm_class_t[]> result_data(new mm_class_t[total]{});
    auto result_ptr = result_data.get();

    for (const auto& cls_output : classify_outputs) {
      for (const auto& label : cls_output.labels) {
        result_ptr->label_id = label.label_id;
        result_ptr->score = label.score;
        ++result_ptr;
      }
    }

    *result_count = result_count_data.release();
    *results = result_data.release();

    return MM_SUCCESS;
  } catch (const std::exception& e) {
    MMDEPLOY_ERROR("unhandled exception: {}", e.what());
  } catch (...) {
    MMDEPLOY_ERROR("unknown exception caught");
  }
  return MM_E_FAIL;
}
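
// Frees the arrays allocated by mmdeploy_classifier_get_result.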
void mmdeploy_classifier_release_result(mm_class_t* results, const int* result_count, int count) {
  delete[] results;
  delete[] result_count;
}

void mmdeploy_classifier_destroy(mm_handle_t handle) {
  if (handle != nullptr) {
    auto classifier = static_cast<AsyncHandle*>(handle);
    delete classifier;
  }
}
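
A minimal usage sketch of the synchronous API above. The function name `classify_bgr_image` is hypothetical, and the mm_mat_t field names plus the MM_BGR / MM_INT8 enumerators are assumed from common.h; image loading and detailed error reporting are left out.

#include <cstdint>
#include <cstdio>

#include "classifier.h"

int classify_bgr_image(const char* model_path, uint8_t* bgr_data, int height, int width) {
  mm_handle_t classifier{};
  if (auto ec = mmdeploy_classifier_create_by_path(model_path, "cpu", 0, &classifier)) {
    return ec;
  }
  mm_mat_t mat{};  // field names assumed from common.h
  mat.data = bgr_data;
  mat.height = height;
  mat.width = width;
  mat.channel = 3;
  mat.format = MM_BGR;   // assumed pixel-format enumerator
  mat.type = MM_INT8;    // assumed data-type enumerator (uint8 image data)
  mm_class_t* results{};
  int* result_count{};
  if (auto ec = mmdeploy_classifier_apply(classifier, &mat, 1, &results, &result_count)) {
    mmdeploy_classifier_destroy(classifier);
    return ec;
  }
  // result_count[0] labels for the single input image, flattened in `results`
  for (int i = 0; i < result_count[0]; ++i) {
    printf("label: %d, score: %.4f\n", results[i].label_id, results[i].score);
  }
  mmdeploy_classifier_release_result(results, result_count, 1);
  mmdeploy_classifier_destroy(classifier);
  return MM_SUCCESS;
}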