// Copyright (c) OpenMMLab. All rights reserved.

#include <map>

#include "core/utils/device_utils.h"
#include "core/utils/formatter.h"
#include "ppl/cv/cuda/copymakeborder.h"
#include "preprocess/transform/pad.h"

using namespace std;
using namespace ppl::cv::cuda;

namespace mmdeploy {
namespace cuda {

class PadImpl : public ::mmdeploy::PadImpl {
 public:
  explicit PadImpl(const Value& args) : ::mmdeploy::PadImpl(args) {
    // Map the transform's padding_mode string onto ppl.cv's border types. The enumerator
    // spelling depends on the linked ppl.cv version, hence the preprocessor switch.
#if PPLCV_VERSION_MAJOR >= 0 && PPLCV_VERSION_MINOR >= 6 && PPLCV_VERSION_PATCH >= 2
    map<string, ppl::cv::BorderType> border_map{{"constant", ppl::cv::BORDER_CONSTANT},
                                                {"edge", ppl::cv::BORDER_REPLICATE},
                                                {"reflect", ppl::cv::BORDER_REFLECT_101},
                                                {"symmetric", ppl::cv::BORDER_REFLECT}};
#else
    map<string, ppl::cv::BorderType> border_map{{"constant", ppl::cv::BORDER_TYPE_CONSTANT},
                                                {"edge", ppl::cv::BORDER_TYPE_REPLICATE},
                                                {"reflect", ppl::cv::BORDER_TYPE_REFLECT_101},
                                                {"symmetric", ppl::cv::BORDER_TYPE_REFLECT}};
#endif
    if (border_map.find(arg_.padding_mode) == border_map.end()) {
      MMDEPLOY_ERROR("unsupported padding_mode '{}'", arg_.padding_mode);
      throw_exception(eNotSupported);
    }
    padding_mode_ = border_map[arg_.padding_mode];
  }

 protected:
  // `padding` is {left, top, right, bottom}; the input tensor layout is NHWC with N == 1.
  Result<Tensor> PadImage(const Tensor& img, const array<int, 4>& padding) override {
    // Copy the input to this device if it does not already reside there.
    OUTCOME_TRY(auto src_tensor, MakeAvailableOnDevice(img, device_, stream_));

    SyncOnScopeExit sync(stream_, src_tensor.buffer() != img.buffer(), src_tensor);

    auto desc = src_tensor.desc();
    int height = desc.shape[1];
    int width = desc.shape[2];
    int c = desc.shape[3];

    auto dst_height = height + padding[1] + padding[3];
    auto dst_width = width + padding[0] + padding[2];
    TensorShape dst_shape{1, dst_height, dst_width, c};
    TensorDesc dst_desc{device_, desc.data_type, dst_shape, ""};
    Tensor dst_tensor(dst_desc);

    ppl::common::RetCode ret = 0;

    // ppl.cv's CopyMakeBorder is templated on element type and channel count,
    // so dispatch on both before launching on the native CUDA stream.
    cudaStream_t stream = ::mmdeploy::GetNative<cudaStream_t>(stream_);
    if (desc.data_type == DataType::kFLOAT) {
      auto src_buffer = src_tensor.data<float>();
      auto dst_buffer = dst_tensor.data<float>();
      if (3 == c) {
        ret = CopyMakeBorder<float, 3>(stream, height, width, width * c, src_buffer, dst_width * c,
                                       dst_buffer, padding[1], padding[3], padding[0], padding[2],
                                       padding_mode_, arg_.pad_val);
      } else if (1 == c) {
        ret = CopyMakeBorder<float, 1>(stream, height, width, width * c, src_buffer, dst_width * c,
                                       dst_buffer, padding[1], padding[3], padding[0], padding[2],
                                       padding_mode_, arg_.pad_val);
      } else {
        MMDEPLOY_ERROR("unsupported channels {}", c);
        assert(0);
        return Status(eNotSupported);
      }
    } else if (desc.data_type == DataType::kINT8) {
      auto src_buffer = src_tensor.data<uint8_t>();
      auto dst_buffer = dst_tensor.data<uint8_t>();
      if (3 == c) {
        ret = CopyMakeBorder<ppl::cv::uchar, 3>(stream, height, width, width * c, src_buffer,
                                                dst_width * c, dst_buffer, padding[1], padding[3],
                                                padding[0], padding[2], padding_mode_,
                                                (ppl::cv::uchar)arg_.pad_val);
      } else if (1 == c) {
        ret = CopyMakeBorder<ppl::cv::uchar, 1>(stream, height, width, width * c, src_buffer,
                                                dst_width * c, dst_buffer, padding[1], padding[3],
                                                padding[0], padding[2], padding_mode_,
                                                (ppl::cv::uchar)arg_.pad_val);
      } else {
        MMDEPLOY_ERROR("unsupported channels {}", c);
        assert(0);
        return Status(eNotSupported);
      }
    } else {
      MMDEPLOY_ERROR("unsupported data type {}", desc.data_type);
      assert(0);
      return Status(eNotSupported);
    }
    if (ret != 0) {
      MMDEPLOY_ERROR("unexpected exception happened");
      assert(0);
      return Status(eNotSupported);
    }
    return dst_tensor;
  }

 private:
  ppl::cv::BorderType padding_mode_;
};

class PadCreator : public Creator<::mmdeploy::PadImpl> {
 public:
  const char* GetName() const override { return "cuda"; }
  int GetVersion() const override { return 1; }
  ReturnType Create(const Value& args) override { return make_unique<PadImpl>(args); }
};

}  // namespace cuda
}  // namespace mmdeploy

using ::mmdeploy::PadImpl;
using mmdeploy::cuda::PadCreator;
REGISTER_MODULE(PadImpl, PadCreator);