// Copyright (c) OpenMMLab. All rights reserved.

#include <fstream>
#include <opencv2/imgcodecs.hpp>  // for cv::imread; likely also reachable via opencv_utils.h

// clang-format off
#include "catch.hpp"
// clang-format on
#include "archive/json_archive.h"
#include "core/mat.h"
#include "core/registry.h"
#include "core/utils/formatter.h"
#include "json.hpp"
#include "preprocess/cpu/opencv_utils.h"
#include "preprocess/transform/transform_utils.h"
#include "test_utils.h"

using namespace mmdeploy;
using namespace mmdeploy::test;
using namespace std;
using nlohmann::json;

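// Helper: verify that a "Compose" transform can be created on the CPU device
// (the mat argument is currently unused).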
void TestCpuCompose(const Value& cfg, const cv::Mat& mat) {
  Device device{"cpu"};
  Stream stream{device};
  auto transform = CreateTransform(cfg, device, stream);
  REQUIRE(transform != nullptr);
}
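
// Helper: same check as above, on the CUDA device.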
void TestCudaCompose(const Value& cfg, const cv::Mat& mat) {
  Device device{"cuda"};
  Stream stream{device};
  auto transform = CreateTransform(cfg, device, stream);
  REQUIRE(transform != nullptr);
}

TEST_CASE("compose", "[compose]") {
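  // Read the test image and wrap it as an mmdeploy Mat in BGR format.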
  const char* img_path = "../../tests/data/images/ocr.jpg";
  cv::Mat bgr_mat = cv::imread(img_path, cv::IMREAD_COLOR);
  REQUIRE(!bgr_mat.empty());  // fail fast if the test image is missing
  auto src_mat = cpu::CVMat2Mat(bgr_mat, PixelFormat::kBGR);
  Value input{{"ori_img", src_mat}};
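
  // Load the demo pipeline config, extract the preprocessing transforms of its
  // first task, and wrap them into a single "Compose" transform.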
  auto config_path = "../../config/text-detector/dbnet18_t4-cuda11.1-trt7.2-fp16/pipeline.json";
  ifstream ifs(config_path);
  REQUIRE(ifs.is_open());  // fail fast if the config file is missing
  std::string config(istreambuf_iterator<char>{ifs}, istreambuf_iterator<char>{});
  // qualify the call: an unqualified `json::parse` would refer to the variable
  // declared on this very line, not to nlohmann::json
  auto json = nlohmann::json::parse(config);
  auto transform_json = json["pipeline"]["tasks"][0]["transforms"];
  auto cfg = ::mmdeploy::from_json<Value>(transform_json);
  Value compose_cfg{{"type", "Compose"}, {"transforms", cfg}};
  INFO("cfg: {}", compose_cfg);
  Device cpu_device{"cpu"};
  Stream cpu_stream{cpu_device};
  auto cpu_transform = CreateTransform(compose_cfg, cpu_device, cpu_stream);
  REQUIRE(cpu_transform != nullptr);
  auto cpu_result = cpu_transform->Process(input);
  REQUIRE(!cpu_result.has_error());
  auto _cpu_result = cpu_result.value();
  auto cpu_tensor = _cpu_result["img"].get<Tensor>();
  INFO("cpu_tensor.shape: {}", cpu_tensor.shape());
  cpu_tensor.Reshape(
      {cpu_tensor.shape(0), cpu_tensor.shape(2), cpu_tensor.shape(3), cpu_tensor.shape(1)});
  auto ref_mat = mmdeploy::cpu::Tensor2CVMat(cpu_tensor);
  INFO("ref_mat, h:{}, w:{}, c:{}", ref_mat.rows, ref_mat.cols, ref_mat.channels());
  Device cuda_device{"cuda"};
  Stream cuda_stream{cuda_device};
  auto gpu_transform = CreateTransform(compose_cfg, cuda_device, cuda_stream);
  REQUIRE(gpu_transform != nullptr);
  auto gpu_result = gpu_transform->Process(input);
  REQUIRE(!gpu_result.has_error());
  auto _gpu_result = gpu_result.value();
  auto gpu_tensor = _gpu_result["img"].get<Tensor>();
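
  // Copy the result back to the host and wait on the stream before reading it.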
  Device _device{"cpu"};
  auto host_tensor = MakeAvailableOnDevice(gpu_tensor, _device, cuda_stream).value();
  REQUIRE(cuda_stream.Wait());
  INFO("host_tensor.shape: {}", host_tensor.shape());
  host_tensor.Reshape(
      {host_tensor.shape(0), host_tensor.shape(2), host_tensor.shape(3), host_tensor.shape(1)});
  auto res_mat = mmdeploy::cpu::Tensor2CVMat(host_tensor);
  INFO("res_mat, h:{}, w:{}, c:{}", res_mat.rows, res_mat.cols, res_mat.channels());
  REQUIRE(mmdeploy::cpu::Compare(ref_mat, res_mat));
}