mmdeploy/csrc/apis/c/classifier.h
lzhangzz 46bfe0ac87
[Feature] New pipeline & executor for SDK (#497)
* executor prototype

* add split/when_all

* fix GCC build

* WIP let_value

* fix let_value

* WIP ensure_started

* ensure_started & start_detached

* fix let_value + when_all combo on MSVC 142

* fix static thread pool

* generic just, then, let_value, sync_wait

* minor

* generic split and when_all

* fully generic sender adapters

* when_all: workaround for GCC7

* support legacy spdlog

* fix memleak

* bulk

* static detector

* fix bulk & first pipeline

* bulk for static thread pools

* fix on MSVC

* WIP async batch submission

* WIP collation

* async batch

* fix detector

* fix async detector

* fix

* fix

* debug

* fix cuda allocator

* WIP type erased executor

* better type erasure

* simplify C API impl

* Expand & type erase TC

* deduction guide for type erased senders

* fix GCC build

* when_all for arrays of Value senders

* WIP pipeline v2

* WIP pipeline parser

* WIP timed batch operation

* add registry

* experiment

* fix pipeline

* naming

* fix mem-leak

* fix deferred batch operation

* WIP

* WIP configurable scheduler

* WIP configurable scheduler

* add comment

* parse scheduler config

* force link schedulers

* WIP pipeable sender

* WIP CPO

* ADL isolation and dismantle headers

* type erase single thread context

* fix MSVC build

* CPO

* replace decay_t with remove_cvref_t

* structure adjustment

* structure adjustment

* apply CPOs & C API rework

* refine C API

* detector async C API

* adjust detector async C API

* # Conflicts:
#	csrc/apis/c/detector.cpp

* fix when_all for type erased senders

* support void return for Then

* async detector

* fix some CPOs

* minor

* WIP rework capture mechanism for type erased types

* minor fix

* fix MSVC build

* move expand.h to execution

* make `Expand` pipeable

* fix type erased

* un-templatize `_TypeErasedOperation`

* re-work C API

* remove async_detector C API

* fix pipeline

* add flatten & unflatten

* fix flatten & unflatten

* add async OCR demo

* config executor for nodes & better executor API

* working async OCR example

* minor

* dynamic batch via scheduler

* dynamic batch on `Value`

* fix MSVC build

* type erase dynamic batch scheduler

* sender as Python Awaitable

* naming

* naming

* add docs

* minor

* merge tmp branch

* unify C APIs

* fix ocr

* unify APIs

* fix typo

* update async OCR demo

* add v3 API text recognizer

* fix v3 API

* fix lint

* add license info & reformat

* add demo async_ocr_v2

* revert files

* revert files

* resolve link issues

* fix scheduler linkage for shared libs

* fix license header

* add docs for `mmdeploy_executor_split`

* add missing `mmdeploy_executor_transfer_just` and `mmdeploy_executor_execute`

* make `TimedSingleThreadContext` header only

* fix lint

* simplify type-erased sender
2022-06-01 14:10:43 +08:00

131 lines
5.1 KiB
C

// Copyright (c) OpenMMLab. All rights reserved.
/**
* @file classifier.h
* @brief Interface to MMClassification task
*/
#ifndef MMDEPLOY_CLASSIFIER_H
#define MMDEPLOY_CLASSIFIER_H
#include "common.h"
#include "executor.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
 * @brief A single classification result for one image.
 */
typedef struct mm_class_t {
int label_id;  /**< category id; presumably an index into the model's class list — TODO confirm */
float score;   /**< confidence score of the prediction */
} mm_class_t;
/**
 * @brief Create classifier's handle from an already-loaded model
 * @param[in] model an instance of mmclassification sdk model created by
 * \ref mmdeploy_model_create_by_path or \ref mmdeploy_model_create in \ref model.h
 * @param[in] device_name name of device, such as "cpu", "cuda", etc.
 * @param[in] device_id id of device.
 * @param[out] handle instance of a classifier, which must be destroyed
 * by \ref mmdeploy_classifier_destroy
 * @return status of creating classifier's handle
 */
MMDEPLOY_API int mmdeploy_classifier_create(mm_model_t model, const char* device_name,
int device_id, mm_handle_t* handle);
/**
 * @brief Create classifier's handle by loading a model from disk
 * @param[in] model_path path of mmclassification sdk model exported by mmdeploy model converter
 * @param[in] device_name name of device, such as "cpu", "cuda", etc.
 * @param[in] device_id id of device.
 * @param[out] handle instance of a classifier, which must be destroyed
 * by \ref mmdeploy_classifier_destroy
 * @return status of creating classifier's handle
 */
MMDEPLOY_API int mmdeploy_classifier_create_by_path(const char* model_path, const char* device_name,
int device_id, mm_handle_t* handle);
/**
 * @brief Use a classifier to get label information of each image in a batch
 * @param[in] handle classifier's handle created by \ref mmdeploy_classifier_create_by_path
 * or \ref mmdeploy_classifier_create
 * @param[in] mats a batch of images
 * @param[in] mat_count number of images in the batch
 * @param[out] results a linear buffer (allocated by the SDK) holding the classification
 * results of all images back to back; it must be freed by
 * \ref mmdeploy_classifier_release_result
 * @param[out] result_count a linear buffer with length being \p mat_count, whose i-th element
 * is the number of classification results for the i-th image. It must be released by \ref
 * mmdeploy_classifier_release_result
 * @return status of inference
 */
MMDEPLOY_API int mmdeploy_classifier_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count,
mm_class_t** results, int** result_count);
/**
 * @brief Release the inference result buffers created by \ref mmdeploy_classifier_apply
 * @param[in] results classification results buffer
 * @param[in] result_count buffer holding the per-image result sizes of \p results
 * @param[in] count length of \p result_count
 */
MMDEPLOY_API void mmdeploy_classifier_release_result(mm_class_t* results, const int* result_count,
int count);
/**
 * @brief Destroy classifier's handle
 * @param[in] handle classifier's handle created by \ref mmdeploy_classifier_create_by_path,
 * \ref mmdeploy_classifier_create or \ref mmdeploy_classifier_create_v2
 */
MMDEPLOY_API void mmdeploy_classifier_destroy(mm_handle_t handle);
/**
 * @brief Same as \ref mmdeploy_classifier_create, but allows to control execution context of tasks
 * via exec_info
 * @param[in] model an instance of mmclassification sdk model
 * @param[in] device_name name of device, such as "cpu", "cuda", etc.
 * @param[in] device_id id of device.
 * @param[in] exec_info execution context description; NOTE(review): presumably NULL falls back to
 * the default context — confirm against the implementation
 * @param[out] handle instance of a classifier, which must be destroyed
 * by \ref mmdeploy_classifier_destroy
 * @return status of creating classifier's handle
 */
MMDEPLOY_API int mmdeploy_classifier_create_v2(mm_model_t model, const char* device_name,
int device_id, mmdeploy_exec_info_t exec_info,
mm_handle_t* handle);
/**
 * @brief Pack classifier inputs into mmdeploy_value_t
 * @param[in] mats a batch of images
 * @param[in] mat_count number of images in the batch
 * @param[out] value the packed value; presumably owned by the caller and released via the
 * value API in \ref common.h — TODO confirm
 * @return status of the operation
 */
MMDEPLOY_API int mmdeploy_classifier_create_input(const mm_mat_t* mats, int mat_count,
mmdeploy_value_t* value);
/**
 * @brief Same as \ref mmdeploy_classifier_apply, but input and output are packed in \ref
 * mmdeploy_value_t.
 * @param[in] handle handle of the classifier
 * @param[in] input input value packed by \ref mmdeploy_classifier_create_input
 * @param[out] output output value; unpack it with \ref mmdeploy_classifier_get_result
 * @return status of the operation
 */
MMDEPLOY_API int mmdeploy_classifier_apply_v2(mm_handle_t handle, mmdeploy_value_t input,
mmdeploy_value_t* output);
/**
 * @brief Apply classifier asynchronously
 * @param[in] handle handle of the classifier
 * @param[in] input input sender that will be consumed by the operation
 * @param[out] output output sender; awaiting it yields a value that can be unpacked with
 * \ref mmdeploy_classifier_get_result
 * @return status of the operation
 */
MMDEPLOY_API int mmdeploy_classifier_apply_async(mm_handle_t handle, mmdeploy_sender_t input,
mmdeploy_sender_t* output);
/**
 * @brief Unpack classification results from an output value
 * @param[in] output output obtained by applying a classifier
 * @param[out] results a linear buffer containing classification results of each image, released by
 * \ref mmdeploy_classifier_release_result
 * @param[out] result_count a linear buffer containing the number of results for each input image,
 * released by \ref mmdeploy_classifier_release_result
 * @return status of the operation
 */
MMDEPLOY_API int mmdeploy_classifier_get_result(mmdeploy_value_t output, mm_class_t** results,
int** result_count);
#ifdef __cplusplus
}
#endif
#endif // MMDEPLOY_CLASSIFIER_H