mirror of
https://github.com/open-mmlab/mmdeploy.git
synced 2025-01-14 08:09:43 +08:00
* executor prototype * add split/when_all * fix GCC build * WIP let_value * fix let_value * WIP ensure_started * ensure_started & start_detached * fix let_value + when_all combo on MSVC 142 * fix static thread pool * generic just, then, let_value, sync_wait * minor * generic split and when_all * fully generic sender adapters * when_all: workaround for GCC7 * support legacy spdlog * fix memleak * bulk * static detector * fix bulk & first pipeline * bulk for static thread pools * fix on MSVC * WIP async batch submission * WIP collation * async batch * fix detector * fix async detector * fix * fix * debug * fix cuda allocator * WIP type erased executor * better type erasure * simplify C API impl * Expand & type erase TC * deduction guide for type erased senders * fix GCC build * when_all for arrays of Value senders * WIP pipeline v2 * WIP pipeline parser * WIP timed batch operation * add registry * experiment * fix pipeline * naming * fix mem-leak * fix deferred batch operation * WIP * WIP configurable scheduler * WIP configurable scheduler * add comment * parse scheduler config * force link schedulers * WIP pipeable sender * WIP CPO * ADL isolation and dismantle headers * type erase single thread context * fix MSVC build * CPO * replace decay_t with remove_cvref_t * structure adjustment * structure adjustment * apply CPOs & C API rework * refine C API * detector async C API * adjust detector async C API * # Conflicts: # csrc/apis/c/detector.cpp * fix when_all for type erased senders * support void return for Then * async detector * fix some CPOs * minor * WIP rework capture mechanism for type erased types * minor fix * fix MSVC build * move expand.h to execution * make `Expand` pipeable * fix type erased * un-templatize `_TypeErasedOperation` * re-work C API * remove async_detector C API * fix pipeline * add flatten & unflatten * fix flatten & unflatten * add aync OCR demo * config executor for nodes & better executor API * working async OCR example * minor * dynamic 
batch via scheduler * dynamic batch on `Value` * fix MSVC build * type erase dynamic batch scheduler * sender as Python Awaitable * naming * naming * add docs * minor * merge tmp branch * unify C APIs * fix ocr * unify APIs * fix typo * update async OCR demo * add v3 API text recognizer * fix v3 API * fix lint * add license info & reformat * add demo async_ocr_v2 * revert files * revert files * resolve link issues * fix scheduler linkage for shared libs * fix license header * add docs for `mmdeploy_executor_split` * add missing `mmdeploy_executor_transfer_just` and `mmdeploy_executor_execute` * make `TimedSingleThreadContext` header only * fix lint * simplify type-erased sender
97 lines
3.3 KiB
C++
97 lines
3.3 KiB
C++
// Copyright (c) OpenMMLab. All rights reserved.
|
|
|
|
#include "codebase/mmseg/mmseg.h"
|
|
#include "core/tensor.h"
|
|
#include "core/utils/device_utils.h"
|
|
#include "core/utils/formatter.h"
|
|
#include "opencv_utils.h"
|
|
#include "preprocess/transform/transform.h"
|
|
|
|
namespace mmdeploy::mmseg {
|
|
|
|
class ResizeMask : public MMSegmentation {
|
|
public:
|
|
explicit ResizeMask(const Value &cfg) : MMSegmentation(cfg) {
|
|
try {
|
|
classes_ = cfg["params"]["num_classes"].get<int>();
|
|
little_endian_ = IsLittleEndian();
|
|
} catch (const std::exception &e) {
|
|
MMDEPLOY_ERROR("no ['params']['num_classes'] is specified in cfg: {}", cfg);
|
|
throw_exception(eInvalidArgument);
|
|
}
|
|
}
|
|
|
|
Result<Value> operator()(const Value &preprocess_result, const Value &inference_result) {
|
|
MMDEPLOY_DEBUG("preprocess: {}\ninference: {}", preprocess_result, inference_result);
|
|
|
|
auto mask = inference_result["output"].get<Tensor>();
|
|
MMDEPLOY_DEBUG("tensor.name: {}, tensor.shape: {}, tensor.data_type: {}", mask.name(),
|
|
mask.shape(), mask.data_type());
|
|
if (!(mask.shape().size() == 4 && mask.shape(0) == 1 && mask.shape(1) == 1)) {
|
|
MMDEPLOY_ERROR("unsupported `output` tensor, shape: {}", mask.shape());
|
|
return Status(eNotSupported);
|
|
}
|
|
|
|
auto height = (int)mask.shape(2);
|
|
auto width = (int)mask.shape(3);
|
|
auto input_height = preprocess_result["img_metas"]["ori_shape"][1].get<int>();
|
|
auto input_width = preprocess_result["img_metas"]["ori_shape"][2].get<int>();
|
|
Device host{"cpu"};
|
|
OUTCOME_TRY(auto host_tensor, MakeAvailableOnDevice(mask, host, stream_));
|
|
OUTCOME_TRY(stream_.Wait());
|
|
if (mask.data_type() == DataType::kINT64) {
|
|
// change kINT64 to 2 INT32
|
|
TensorDesc desc{
|
|
host_tensor.device(), DataType::kINT32, {1, 2, height, width}, host_tensor.name()};
|
|
Tensor _host_tensor(desc, host_tensor.buffer());
|
|
return MaskResize(_host_tensor, input_height, input_width);
|
|
} else if (mask.data_type() == DataType::kINT32) {
|
|
return MaskResize(host_tensor, input_height, input_width);
|
|
} else {
|
|
MMDEPLOY_ERROR("unsupported `output` tensor, dtype: {}", (int)mask.data_type());
|
|
return Status(eNotSupported);
|
|
}
|
|
}
|
|
|
|
private:
|
|
Result<Value> MaskResize(Tensor &tensor, int dst_height, int dst_width) {
|
|
auto channel = tensor.shape(1);
|
|
auto height = tensor.shape(2);
|
|
auto width = tensor.shape(3);
|
|
|
|
// reshape tensor to convert it to cv::Mat
|
|
tensor.Reshape({1, height, width, channel});
|
|
auto mat = cpu::Tensor2CVMat(tensor);
|
|
auto dst = cpu::Resize(mat, dst_height, dst_width, "nearest");
|
|
if (channel == 1) {
|
|
auto output_tensor = cpu::CVMat2Tensor(dst);
|
|
SegmentorOutput output{output_tensor, dst_height, dst_width, classes_};
|
|
return to_value(output);
|
|
} else {
|
|
cv::Mat _dst;
|
|
int channel = little_endian_ ? 0 : dst.dims - 1;
|
|
cv::extractChannel(dst, _dst, channel);
|
|
auto output_tensor = cpu::CVMat2Tensor(_dst);
|
|
SegmentorOutput output{output_tensor, dst_height, dst_width, classes_};
|
|
return to_value(output);
|
|
}
|
|
}
|
|
|
|
bool IsLittleEndian() {
|
|
union Un {
|
|
char a;
|
|
int b;
|
|
} un;
|
|
un.b = 1;
|
|
return (int)un.a == 1;
|
|
}
|
|
|
|
protected:
|
|
int classes_{};
|
|
bool little_endian_;
|
|
};
|
|
|
|
// Register ResizeMask as an MMSegmentation post-processing component so the
// pipeline can instantiate it by name from configuration.
REGISTER_CODEBASE_COMPONENT(MMSegmentation, ResizeMask);
|
|
|
|
} // namespace mmdeploy::mmseg
|