mirror of
https://github.com/open-mmlab/mmdeploy.git
synced 2025-01-14 08:09:43 +08:00
* minor changes * support windows * fix GCC build * fix lint * reformat * fix Windows build * fix GCC build * search backend ops for onnxruntime * fix lint * fix lint * code clean-up * code clean-up * fix clang build * fix trt support * fix cmake for ncnn * fix cmake for openvino * fix SDK Python API * handle ops for other backends (ncnn, trt) * handle SDK Python API library location * robustify linkage * fix cuda * minor fix for openvino & ncnn * use CMAKE_CUDA_ARCHITECTURES if set * fix cuda preprocessor * fix misc * fix pplnn & pplcv, drop support for pplcv<0.6.0 * robustify cmake * update build.md (#2) * build dynamic modules as module library & fix demo (partially) * fix candidate path for mmdeploy_python * move "enable CUDA" to cmake config for demo * refine demo cmake * add comment * fix ubuntu build * revert docs/en/build.md * fix C API * fix lint * Windows build doc (#3) * check in docs related to mmdeploy build on windows * update build guide on windows platform * update build guide on windows platform * make path of thirdparty libraries consistent * make path consistency * correct build command for custom ops * correct build command for sdk * update sdk build instructions * update doc * correct build command * fix lint * correct build command and fix lint Co-authored-by: lvhan <lvhan@pjlab.org> * trailing whitespace (#4) * minor fix * fix sr sdk model * fix type deduction * fix cudaFree after driver shutting down * update ppl.cv installation warning (#5) * fix device allocator threshold & fix lint * update doc (#6) * update ppl.cv installation warning * missing 'git clone' Co-authored-by: chenxin <chenxin2@sensetime.com> Co-authored-by: zhangli <zhangli@sensetime.com> Co-authored-by: lvhan028 <lvhan_028@163.com> Co-authored-by: lvhan <lvhan@pjlab.org>
83 lines
3.1 KiB
C
83 lines
3.1 KiB
C
// Copyright (c) OpenMMLab. All rights reserved.

/**
 * @file classifier.h
 * @brief Interface to MMClassification task
 */

#ifndef MMDEPLOY_CLASSIFIER_H
#define MMDEPLOY_CLASSIFIER_H

#include "common.h"

#ifdef __cplusplus
extern "C" {
#endif
/**
 * @brief A single classification result: a class label paired with its
 * confidence score.
 */
typedef struct mm_class_t {
  int label_id;  ///< index of the predicted class
  float score;   ///< confidence of the prediction
} mm_class_t;

/**
|
|
* @brief Create classifier's handle
|
|
* @param[in] model an instance of mmclassification sdk model created by
|
|
* \ref mmdeploy_model_create_by_path or \ref mmdeploy_model_create in \ref model.h
|
|
* @param[in] device_name name of device, such as "cpu", "cuda", etc.
|
|
* @param[in] device_id id of device.
|
|
* @param[out] handle instance of a classifier, which must be destroyed
|
|
* by \ref mmdeploy_classifier_destroy
|
|
* @return status of creating classifier's handle
|
|
*/
|
|
MMDEPLOY_API int mmdeploy_classifier_create(mm_model_t model, const char* device_name,
|
|
int device_id, mm_handle_t* handle);
|
|
|
|
/**
|
|
* @brief Create classifier's handle
|
|
* @param[in] model_path path of mmclassification sdk model exported by mmdeploy model converter
|
|
* @param[in] device_name name of device, such as "cpu", "cuda", etc.
|
|
* @param[in] device_id id of device.
|
|
* @param[out] handle instance of a classifier, which must be destroyed
|
|
* by \ref mmdeploy_classifier_destroy
|
|
* @return status of creating classifier's handle
|
|
*/
|
|
MMDEPLOY_API int mmdeploy_classifier_create_by_path(const char* model_path, const char* device_name,
|
|
int device_id, mm_handle_t* handle);
|
|
|
|
/**
|
|
* @brief Use classifier created by \ref mmdeploy_classifier_create_by_path to get label
|
|
* information of each image in a batch
|
|
* @param[in] handle classifier's handle created by \ref mmdeploy_classifier_create_by_path
|
|
* @param[in] mats a batch of images
|
|
* @param[in] mat_count number of images in the batch
|
|
* @param[out] results a linear buffer to save classification results of each
|
|
* image, which must be freed by \ref mmdeploy_classifier_release_result
|
|
* @param[out] result_count a linear buffer with length being \p mat_count to save the number of
|
|
* classification results of each image. It must be released by \ref
|
|
* mmdeploy_classifier_release_result
|
|
* @return status of inference
|
|
*/
|
|
MMDEPLOY_API int mmdeploy_classifier_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count,
|
|
mm_class_t** results, int** result_count);
|
|
|
|
/**
|
|
* @brief Release the inference result buffer created \ref mmdeploy_classifier_apply
|
|
* @param[in] results classification results buffer
|
|
* @param[in] result_count \p results size buffer
|
|
* @param[in] count length of \p result_count
|
|
*/
|
|
MMDEPLOY_API void mmdeploy_classifier_release_result(mm_class_t* results, const int* result_count,
|
|
int count);
|
|
|
|
/**
|
|
* @brief Destroy classifier's handle
|
|
* @param[in] handle classifier's handle created by \ref mmdeploy_classifier_create_by_path
|
|
*/
|
|
MMDEPLOY_API void mmdeploy_classifier_destroy(mm_handle_t handle);

#ifdef __cplusplus
}
#endif

#endif  // MMDEPLOY_CLASSIFIER_H