// Copyright (c) OpenMMLab. All rights reserved.

#include "codebase/mmseg/mmseg.h"

#include <cassert>

#include "core/tensor.h"
#include "core/utils/formatter.h"
#include "preprocess/transform/transform.h"
#include "preprocess/transform/transform_utils.h"

namespace mmdeploy::mmseg {

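// Debug helper: copies `mask` to host memory so it can be inspected; the
// image/text dump below is intentionally left commented out.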
static Result<void> VisualizeMask(const std::string &image_name, const Tensor &mask, int height,
                                  int width, Stream &stream) {
  Device cpu_device{"cpu"};
  OUTCOME_TRY(auto host_mask, MakeAvailableOnDevice(mask, cpu_device, stream));
  OUTCOME_TRY(stream.Wait());
  // cv::Mat mask_image(height, width, CV_32SC1, host_mask.data<int>());
  // cv::imwrite(image_name + ".png", mask_image * 10);
  // ofstream ofs(image_name + ".data");
  // auto _data_ptr = host_mask.data<int>();
  // for (auto i = 0; i < height; ++i) {
  //   for (auto j = 0; j < width; ++j) {
  //     ofs << *_data_ptr++ << ", ";
  //   }
  //   ofs << "\n";
  // }
  return success();
}

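// Post-processing module for MMSegmentation models: restores the predicted
// label mask to the original input image's resolution.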
class Segmentor : public MMSegPostprocess {
 public:
  explicit Segmentor(const Value &cfg) : MMSegPostprocess(cfg) {
    classes_ = cfg["params"]["classes"].get<int>();
    if (classes_ >= 256) {
      throw_exception(eNotSupported);
    }
  }

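  // Validates the inference result, resizes the mask back to the input
  // image's size, and packs everything into a SegmentorOutput.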
  Result<Value> operator()(const Value &preprocess_result, const Value &inference_result) {
    DEBUG("preprocess: {}\ninference: {}", preprocess_result, inference_result);
    // Value res;
    // res = preprocess_result;

    auto mask = inference_result["mask"].get<Tensor>();
    INFO("tensor.name: {}, tensor.shape: {}", mask.name(), mask.shape());
    assert(mask.data_type() == DataType::kINT32);
    assert(mask.shape(0) == 1);
    assert(mask.shape(1) == 1);

    auto height = mask.shape(2);
    auto width = mask.shape(3);

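    // Indices 1 and 2 of `img_metas.ori_shape` hold the original image's
    // height and width, respectively.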
    // Resize the mask back to the size of the input image.
    auto input_height = preprocess_result["img_metas"]["ori_shape"][1].get<int>();
    auto input_width = preprocess_result["img_metas"]["ori_shape"][2].get<int>();
    auto keep_ratio = preprocess_result["img_metas"]["keep_ratio"].get<bool>();

    // Construct the config for the 'Resize' transform op
    Value resize_cfg{{"type", "Resize"}, {"interpolation", "nearest"}};
    resize_cfg["context"]["device"] = device_;
    resize_cfg["context"]["stream"] = stream_;
    resize_cfg["size"].push_back(input_width);
    resize_cfg["size"].push_back(input_height);
    resize_cfg["keep_ratio"] = keep_ratio;
    DEBUG("resize_cfg: {}", resize_cfg);

    // Create the 'Resize' transform op via the registry
    auto creator = Registry<Transform>::Get().GetCreator("Resize");
    assert(creator != nullptr);
    auto transform = creator->Create(resize_cfg);
    assert(transform != nullptr);

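    // Note: nearest-neighbor interpolation is what makes the reinterpretation
    // below safe: every output pixel is a verbatim copy of some input pixel,
    // so the four bytes of each int32 label survive the round trip intact.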
    // Reinterpret the mask from (int32_t, 1 channel) to (int8_t, 4 channels),
    // since ppl.cv doesn't support 'Resize<int>'. The underlying buffer is
    // shared, so no copy is made.
    TensorShape char4_mask_shape{mask.shape(0), 4, height, width};
    TensorDesc desc{device_, DataType::kINT8, char4_mask_shape, mask.name()};
    Tensor char4_mask(desc, mask.buffer());
    // The 'Resize' transform op requires {1, h, w, c}, so `char4_mask` needs
    // to be reshaped
    char4_mask.Reshape({1, height, width, 4});

    // Do 'Resize'
    auto char4_resize_mask = transform->Process({{"img", char4_mask}});
    assert(!char4_resize_mask.has_error());

    auto _char4_resize_mask = char4_resize_mask.value();
    auto _char4_resize_mask_tensor = _char4_resize_mask["img"].get<Tensor>();
    assert(_char4_resize_mask_tensor.shape(1) == input_height);
    assert(_char4_resize_mask_tensor.shape(2) == input_width);

    // Reinterpret the resized tensor back from (int8_t, 4 channels) to
    // (int32_t, 1 channel)
    TensorShape int_resize_mask_shape{1, 1, input_height, input_width};
    TensorDesc int_resize_mask_desc{_char4_resize_mask_tensor.device(), DataType::kINT32,
                                    int_resize_mask_shape, _char4_resize_mask_tensor.name()};
    Tensor _int_resize_mask_tensor{int_resize_mask_desc, _char4_resize_mask_tensor.buffer()};

    SegmentorOutput output{_int_resize_mask_tensor, input_height, input_width, classes_};

    // OUTCOME_TRY(
    //     VisualizeMask("resize_mask", _int_resize_mask_tensor, input_height, input_width,
    //                   stream_));
    return to_value(output);
  }

 protected:
  int classes_{};
};

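// Register `Segmentor` as an 'MMSegPostprocess' module so the SDK can
// instantiate it by name when building the pipeline.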
REGISTER_CODEBASE_MODULE(MMSegPostprocess, Segmentor);

}  // namespace mmdeploy::mmseg