mmdeploy/csrc/codebase/mmedit/restorer.cpp
lzhangzz 640aa03538
Support Windows (#106)
* minor changes

* support windows

* fix GCC build

* fix lint

* reformat

* fix Windows build

* fix GCC build

* search backend ops for onnxruntime

* fix lint

* fix lint

* code clean-up

* code clean-up

* fix clang build

* fix trt support

* fix cmake for ncnn

* fix cmake for openvino

* fix SDK Python API

* handle ops for other backends (ncnn, trt)

* handle SDK Python API library location

* robustify linkage

* fix cuda

* minor fix for openvino & ncnn

* use CMAKE_CUDA_ARCHITECTURES if set

* fix cuda preprocessor

* fix misc

* fix pplnn & pplcv, drop support for pplcv<0.6.0

* robustify cmake

* update build.md (#2)

* build dynamic modules as module library & fix demo (partially)

* fix candidate path for mmdeploy_python

* move "enable CUDA" to cmake config for demo

* refine demo cmake

* add comment

* fix ubuntu build

* revert docs/en/build.md

* fix C API

* fix lint

* Windows build doc (#3)

* check in docs related to mmdeploy build on windows

* update build guide on windows platform

* update build guide on windows platform

* make path of thirdparty libraries consistent

* make path consistency

* correct build command for custom ops

* correct build command for sdk

* update sdk build instructions

* update doc

* correct build command

* fix lint

* correct build command and fix lint

Co-authored-by: lvhan <lvhan@pjlab.org>

* trailing whitespace (#4)

* minor fix

* fix sr sdk model

* fix type deduction

* fix cudaFree after driver shutting down

* update ppl.cv installation warning (#5)

* fix device allocator threshold & fix lint

* update doc (#6)

* update ppl.cv installation warning

* missing 'git clone'

Co-authored-by: chenxin <chenxin2@sensetime.com>
Co-authored-by: zhangli <zhangli@sensetime.com>
Co-authored-by: lvhan028 <lvhan_028@163.com>
Co-authored-by: lvhan <lvhan@pjlab.org>
2022-02-24 20:08:44 +08:00

// Copyright (c) OpenMMLab. All rights reserved.

#include <opencv2/core.hpp>

#include "codebase/mmedit/mmedit.h"
#include "core/tensor.h"
#include "core/utils/device_utils.h"

namespace mmdeploy::mmedit {

// Converts the restorer's `output` tensor (expected 1 x C x H x W, float32 in [0, 1])
// into an 8-bit HWC image on the host.
class TensorToImg : public MMEdit {
 public:
  explicit TensorToImg(const Value& cfg) : MMEdit(cfg) {}

  Result<Value> operator()(const Value& input) {
    auto upscale = input["output"].get<Tensor>();
    // Copy the tensor to host memory if it lives on another device.
    OUTCOME_TRY(auto upscale_cpu, MakeAvailableOnDevice(upscale, kHOST, stream()));
    OUTCOME_TRY(stream().Wait());
    if (upscale.shape().size() == 4 && upscale.data_type() == DataType::kFLOAT) {
      auto channels = static_cast<int>(upscale.shape(1));
      auto height = static_cast<int>(upscale.shape(2));
      auto width = static_cast<int>(upscale.shape(3));
      // TODO: handle BGR <-> RGB conversion
      OUTCOME_TRY(auto format, ChannelsToFormat(channels));
      Mat mat(height, width, format, DataType::kINT8, kHOST);
      // View the planar CHW data as a (C x H*W) matrix and transpose it to
      // (H*W x C) so the channels become interleaved.
      cv::Mat_<float> mat_chw(channels, height * width, upscale_cpu.data<float>());
      cv::Mat mat_hwc(height * width, channels, CV_32F);
      cv::transpose(mat_chw, mat_hwc);
      cv::Mat rescale_uint8(height, width, CV_8UC(channels), mat.data<uint8_t>());
      mat_hwc = mat_hwc.reshape(channels, height);
      // convertTo has saturate_cast inside; scale [0, 1] floats to [0, 255] uint8.
      mat_hwc.convertTo(rescale_uint8, CV_8UC(channels), 255.f);
      return mat;
    } else {
      MMDEPLOY_ERROR("unsupported `output` tensor, shape: {}, dtype: {}", upscale.shape(),
                     (int)upscale.data_type());
      return Status(eNotSupported);
    }
  }

 protected:
  static Result<PixelFormat> ChannelsToFormat(int channels) {
    switch (channels) {
      case 1:
        return PixelFormat::kGRAYSCALE;
      case 3:
        return PixelFormat::kRGB;
      default:
        return Status(eNotSupported);
    }
  }

  static constexpr const Device kHOST{0, 0};
};

REGISTER_CODEBASE_COMPONENT(MMEdit, TensorToImg);

}  // namespace mmdeploy::mmedit
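
For reference, the conversion at the heart of TensorToImg can be exercised outside the SDK. The sketch below assumes a planar CHW float32 buffer with values in [0, 1] (the layout handled above) and depends only on OpenCV; the file name and the helper ChwFloatToHwcU8 are illustrative and not part of mmdeploy.

// chw_to_hwc_demo.cpp - minimal sketch of the CHW float -> HWC uint8 conversion
// performed by TensorToImg, using only OpenCV.
#include <opencv2/core.hpp>

#include <cstdio>
#include <vector>

cv::Mat ChwFloatToHwcU8(const float* chw, int channels, int height, int width) {
  // View the planar CHW buffer as a (C x H*W) float matrix (no copy).
  // The OpenCV header constructor needs a non-const pointer; the data is not modified.
  cv::Mat_<float> mat_chw(channels, height * width, const_cast<float*>(chw));
  // Transpose to (H*W x C) so each row holds one pixel's channel values.
  cv::Mat mat_hwc(height * width, channels, CV_32F);
  cv::transpose(mat_chw, mat_hwc);
  // Reinterpret as an H x W image with `channels` interleaved channels.
  mat_hwc = mat_hwc.reshape(channels, height);
  // Scale [0, 1] floats to [0, 255] and saturate-cast to uint8.
  cv::Mat out;
  mat_hwc.convertTo(out, CV_8UC(channels), 255.f);
  return out;
}

int main() {
  const int c = 3, h = 2, w = 2;
  std::vector<float> chw(c * h * w, 0.5f);  // dummy restorer output
  cv::Mat img = ChwFloatToHwcU8(chw.data(), c, h, w);
  std::printf("%d x %d, channels=%d, first pixel value=%d\n", img.rows, img.cols,
              img.channels(), (int)img.at<cv::Vec3b>(0, 0)[0]);
  return 0;
}

On a typical installation this builds with something like `g++ chw_to_hwc_demo.cpp $(pkg-config --cflags --libs opencv4)`; adjust the package name to your OpenCV setup.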