lvhan028 36124f6205
Merge sdk (#251)
* check in cmake

* move backend_ops to csrc/backend_ops

* check in preprocess, model, some codebase and their c-apis

* check in CMakeLists.txt

* check in parts of test_csrc

* commit everything else

* add readme

* update core's BUILD_INTERFACE directory

* skip codespell on third_party

* update trt_net and ort_net's CMakeLists

* ignore clion's build directory

* check in pybind11

* add onnx.proto. Remove MMDeploy's dependency on ncnn's source code

* export MMDeployTargets only when MMDEPLOY_BUILD_SDK is ON

* remove useless message

* fix wrong target include directory

* change target name from mmdeploy_ppl_net to mmdeploy_pplnn_net

* skip install directory

* update project's cmake

* remove useless code

* set CMAKE_BUILD_TYPE to Release by force if it isn't set by the user

* update custom ops CMakeLists

* pass object target's source lists

* fix lint end-of-file

* fix lint: trailing whitespace

* fix codespell hook

* move bicubic_interpolate to csrc/backend_ops/

* set MMDEPLOY_BUILD_SDK OFF

* change custom ops build command

* add spdlog installation command

* update docs on how to checkout pybind11

* move bicubic_interpolate to backend_ops/tensorrt directory

* remove useless code

* correct cmake

* fix typo

* fix typo

* fix install directory

* correct sdk's readme

* set cub dir when cuda version < 11.0

* change the directory that clang-format applies to

* fix build command

* add .clang-format

* change clang-format style from google to file

* reformat csrc/backend_ops

* format sdk's code

* turn off clang-format for some files

* add -Xcompiler=-fno-gnu-unique

* fix trt topk initialize

* check in config for sdk demo

* update cmake script and csrc's readme

* correct config's path

* add cuda include directory, otherwise compilation fails with TensorRT 8.2

* clang-format onnx2ncnn.cpp

Co-authored-by: zhangli <lzhang329@gmail.com>
Co-authored-by: grimoire <yaoqian@sensetime.com>
2021-12-07 10:57:55 +08:00


// Copyright (c) OpenMMLab. All rights reserved.

#include "pad.h"

#include <algorithm>
#include <array>
#include <cassert>

#include "archive/json_archive.h"

using namespace std;

namespace mmdeploy {
PadImpl::PadImpl(const Value& args) : TransformImpl(args) {
  arg_.size[0] = arg_.size[1] = 0;
  // "size" is either a single integer (pad to a square) or a pair [h, w]
  if (args.contains("size") && args["size"].is_number_integer()) {
    arg_.size[0] = arg_.size[1] = args["size"].get<int>();
  }
  if (args.contains("size") && args["size"].is_array()) {
    if (args["size"].size() != 2) {
      throw std::invalid_argument("the length of size should be 2");
    }
    arg_.size[0] = args["size"][0].get<int>();
    arg_.size[1] = args["size"][1].get<int>();
  }
  arg_.size_divisor = args.value("size_divisor", 1);
  arg_.pad_val = args.value("pad_val", 0.0f);
  arg_.pad_to_square = args.value("pad_to_square", false);
  arg_.padding_mode = args.value("padding_mode", std::string("constant"));
}
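
// A hypothetical args value this constructor would accept (a sketch for
// illustration, not taken from the original sources; keys mirror the fields
// parsed above, and "pad_to_square" takes precedence in Process):
//
//   {
//     "size": [800, 1344],        // target [h, w]; a single int pads square
//     "size_divisor": 1,
//     "pad_val": 0.0,
//     "pad_to_square": false,
//     "padding_mode": "constant"
//   }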

Result<Value> PadImpl::Process(const Value& input) {
  INFO("input: {}", to_json(input).dump(2));
  Value output = input;

  auto img_fields = GetImageFields(input);
  for (auto& key : img_fields) {
    Tensor output_tensor;
    auto tensor = input[key].get<Tensor>();
    // expect a single NHWC image with 1 or 3 channels
    assert(tensor.desc().shape.size() == 4);
    assert(tensor.desc().shape[0] == 1);
    assert(tensor.desc().shape[3] == 3 || tensor.desc().shape[3] == 1);

    int height = tensor.desc().shape[1];
    int width = tensor.desc().shape[2];
    if (arg_.pad_to_square) {
      // pad right/bottom so both sides equal the longer edge;
      // padding is ordered {left, top, right, bottom}
      int max_size = std::max(height, width);
      std::array padding{0, 0, max_size - width, max_size - height};
      OUTCOME_TRY(output_tensor, PadImage(tensor, padding));
      output["pad_fixed_size"].push_back(max_size);
      output["pad_fixed_size"].push_back(max_size);
    } else if (arg_.size_divisor != 1) {
      // round height/width up to the nearest multiple of size_divisor
      auto pad_h = (height + arg_.size_divisor - 1) / arg_.size_divisor * arg_.size_divisor;
      auto pad_w = (width + arg_.size_divisor - 1) / arg_.size_divisor * arg_.size_divisor;
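      // e.g. with an assumed height=600, width=800 and size_divisor=32:
      //   pad_h = (600 + 31) / 32 * 32 = 19 * 32 = 608
      //   pad_w = (800 + 31) / 32 * 32 = 25 * 32 = 800  (already a multiple)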
      std::array padding{0, 0, pad_w - width, pad_h - height};
      OUTCOME_TRY(output_tensor, PadImage(tensor, padding));
      output["pad_size_divisor"] = arg_.size_divisor;
      output["pad_fixed_size"].push_back(pad_h);
      output["pad_fixed_size"].push_back(pad_w);
    } else {
      // pad to the fixed size given by "size" ([h, w])
      std::array padding{0, 0, arg_.size[1] - width, arg_.size[0] - height};
      OUTCOME_TRY(output_tensor, PadImage(tensor, padding));
      output["pad_fixed_size"].push_back(arg_.size[0]);
      output["pad_fixed_size"].push_back(arg_.size[1]);
    }
    output[key] = output_tensor;
    for (auto& v : output_tensor.desc().shape) {
      output["pad_shape"].push_back(v);
    }
  }

  INFO("output: {}", to_json(output).dump(2));
  return output;
}
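
// Illustrative metadata Process attaches in the size_divisor branch, assuming
// the image key is "img" and a 1x600x800x3 input with size_divisor=32 (values
// follow the round-up arithmetic above):
//
//   "img":              padded tensor of shape 1x608x800x3
//   "pad_size_divisor": 32
//   "pad_fixed_size":   [608, 800]
//   "pad_shape":        [1, 608, 800, 3]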

Pad::Pad(const Value& args, int version) : Transform(args) {
  // look up the platform-specific implementation (e.g. cpu or cuda)
  auto impl_creator = Registry<PadImpl>::Get().GetCreator(specified_platform_, version);
  if (nullptr == impl_creator) {
    ERROR("'Pad' is not supported on '{}' platform", specified_platform_);
    throw std::domain_error("'Pad' is not supported on specified platform");
  }
  impl_ = impl_creator->Create(args);
}

class PadCreator : public Creator<Transform> {
 public:
  const char* GetName() const override { return "Pad"; }
  int GetVersion() const override { return version_; }
  ReturnType Create(const Value& args) override { return make_unique<Pad>(args, version_); }

 private:
  int version_{1};
};

REGISTER_MODULE(Transform, PadCreator);

}  // namespace mmdeploy
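
// A minimal usage sketch (hypothetical, for illustration only; the lookup
// assumes a registry API shaped like the one used in Pad's constructor above):
//
//   Value args{{"size_divisor", 32}, {"pad_val", 0.0f}};
//   auto creator = Registry<Transform>::Get().GetCreator("Pad", 1);
//   auto pad = creator->Create(args);         // returns a Transform handle
//   auto maybe_output = pad->Process(input);  // Result<Value>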