Mirror of https://github.com/open-mmlab/mmdeploy.git (synced 2025-01-14 08:09:43 +08:00)
47 lines · 1.2 KiB · C++
#include <cstdio>
#include <fstream>
#include <string>

#include <opencv2/imgcodecs/imgcodecs.hpp>

#include "classifier.h"

int main(int argc, char *argv[]) {
  if (argc != 4) {
    fprintf(stderr, "usage:\n  image_classification device_name model_path image_path\n");
    return 1;
  }
  auto device_name = argv[1];
  auto model_path = argv[2];
  auto image_path = argv[3];

  // load the input image; cv::imread yields an empty Mat on failure
  cv::Mat img = cv::imread(image_path);
  if (!img.data) {
    fprintf(stderr, "failed to load image: %s\n", image_path);
    return 1;
  }

  // create a classifier handle from the converted model directory on the given device (device id 0)
  mm_handle_t classifier{};
  int status{};
  status = mmdeploy_classifier_create_by_path(model_path, device_name, 0, &classifier);
  if (status != MM_SUCCESS) {
    fprintf(stderr, "failed to create classifier, code: %d\n", (int)status);
    return 1;
  }

  // describe the decoded image as 8-bit BGR data; mm_mat_t references img.data without copying it
  mm_mat_t mat{img.data, img.rows, img.cols, 3, MM_BGR, MM_INT8};

  // run inference on the single image; res receives the classifications, res_count their number per image
  mm_class_t *res{};
  int *res_count{};
  status = mmdeploy_classifier_apply(classifier, &mat, 1, &res, &res_count);
  if (status != MM_SUCCESS) {
    fprintf(stderr, "failed to apply classifier, code: %d\n", (int)status);
    return 1;
  }

  // print the first returned classification
  fprintf(stderr, "label: %d, score: %.4f\n", res->label_id, res->score);

  // release the results, then the classifier handle
  mmdeploy_classifier_release_result(res, res_count, 1);
  mmdeploy_classifier_destroy(classifier);

  return 0;
}
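The demo prints only the first classification. When the deployed pipeline returns several results per image, a loop such as the one below would print them all. This is a minimal sketch, assuming res is a flat array holding res_count[0] entries for the single input image, matching the way the release call above passes both pointers back:

  // print every classification returned for the single input image
  for (int i = 0; i < res_count[0]; ++i) {
    fprintf(stderr, "label: %d, score: %.4f\n", res[i].label_id, res[i].score);
  }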