// Copyright (c) OpenMMLab. All rights reserved.
#include "resize.h"
#include <algorithm>
#include "archive/json_archive.h"
#include "core/tensor.h"
using namespace std;
namespace mmdeploy {
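
// ResizeImpl reads the transform's config:
//   - "keep_ratio" (bool, default false): preserve the source aspect ratio when resizing
//   - "size": either a single int (expanded to {size, size}) or an array [height, width]
//   - "interpolation" (string, default "bilinear"): one of nearest, bilinear, bicubic, area, lanczos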
ResizeImpl::ResizeImpl(const Value& args) : TransformImpl(args) {
  arg_.keep_ratio = args.value<bool>("keep_ratio", false);
  if (args.contains("size")) {
    if (args["size"].is_number_integer()) {
      auto size = args["size"].get<int>();
      arg_.img_scale = {size, size};
    } else if (args["size"].is_array()) {
      if (args["size"].size() != 2) {
        ERROR("'size' expects an array of size 2, but got {}", args["size"].size());
        throw std::length_error("'size' expects an array of size 2");
      }
      auto height = args["size"][0].get<int>();
      auto width = args["size"][1].get<int>();
      arg_.img_scale = {height, width};
    } else {
      ERROR("'size' is expected to be an integer or an array of size 2");
      throw std::domain_error("'size' is expected to be an integer or an array of size 2");
    }
  }

  arg_.interpolation = args.value<string>("interpolation", "bilinear");
  vector<string> interpolations{"nearest", "bilinear", "bicubic", "area", "lanczos"};
  if (std::find(interpolations.begin(), interpolations.end(), arg_.interpolation) ==
      interpolations.end()) {
    ERROR("'{}' interpolation is not supported", arg_.interpolation);
    throw std::invalid_argument("unexpected interpolation");
  }
}
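
// Process resolves the target size for every image field in 'input', in priority order:
// an explicit 'scale' ([h, w]) in the input, then a 'scale_factor', then the configured
// 'img_scale'. When 'keep_ratio' is set, the target is further adjusted to preserve the
// source aspect ratio before the actual resize.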
Result<Value> ResizeImpl::Process(const Value& input) {
DEBUG("input: {}", to_json(input).dump(2));
  Value output = input;

  auto img_fields = GetImageFields(input);
  for (auto& key : img_fields) {
    Tensor src_img = input[key].get<Tensor>();
    auto desc = src_img.desc();
    assert(desc.shape.size() == 4);

    int h = desc.shape[1];
    int w = desc.shape[2];
    int dst_h = 0;
    int dst_w = 0;
    float scale_factor = 0.f;

    if (input.contains("scale")) {
      assert(input["scale"].is_array() && input["scale"].size() == 2);
      dst_h = input["scale"][0].get<int>();
      dst_w = input["scale"][1].get<int>();
    } else if (input.contains("scale_factor")) {
      assert(input["scale_factor"].is_number());
      scale_factor = input["scale_factor"].get<float>();
      dst_h = int(h * scale_factor + 0.5);
      dst_w = int(w * scale_factor + 0.5);
    } else if (!arg_.img_scale.empty()) {
      DEBUG(
"neither 'scale' or 'scale_factor' is provided in input value. "
"'img_scale' will be used");
if (-1 == arg_.img_scale[1]) {
if (w < h) {
dst_w = arg_.img_scale[0];
dst_h = dst_w * h / w;
} else {
dst_h = arg_.img_scale[0];
dst_w = dst_h * w / h;
}
} else {
dst_h = arg_.img_scale[0];
dst_w = arg_.img_scale[1];
}
} else {
ERROR("no resize related parameter is provided");
return Status(eInvalidArgument);
}
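
    // With keep_ratio, treat the larger of (dst_h, dst_w) as the long-edge limit and the
    // smaller as the short-edge limit, then pick the largest scale factor that keeps the
    // resized image within both limits while preserving the source aspect ratio.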
    if (arg_.keep_ratio) {
      int max_long_edge = dst_w;
      int max_short_edge = dst_h;
      if (max_long_edge < max_short_edge) {
        std::swap(max_long_edge, max_short_edge);
      }
      scale_factor = std::min(max_long_edge * 1.0 / (1.0 * std::max(h, w)),
                              max_short_edge * 1.0 / (1.0 * std::min(h, w)));
      dst_w = int(w * scale_factor + 0.5);
      dst_h = int(h * scale_factor + 0.5);
    }
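
    // Skip the actual resize when the target equals the source size and reuse the input tensor.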
    Tensor dst_img;
    if (dst_h != h || dst_w != w) {
      OUTCOME_TRY(dst_img, ResizeImage(src_img, dst_h, dst_w));
    } else {
      dst_img = src_img;
    }
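
    // Record the resize metadata alongside the image: 'scale_factor' is stored as
    // (w_scale, h_scale, w_scale, h_scale) and 'img_shape' as the resulting NHWC shape.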
    auto w_scale = dst_w * 1.0 / w;
    auto h_scale = dst_h * 1.0 / h;
    output["scale_factor"] = {w_scale, h_scale, w_scale, h_scale};
    output["img_shape"] = {1, dst_h, dst_w, desc.shape[3]};
    // output["pad_shape"] = output["img_shape"];
    output["keep_ratio"] = arg_.keep_ratio;

    output[key] = dst_img;
  }
DEBUG("output: {}", to_json(output).dump(2));
  return output;
}

Resize::Resize(const Value& args, int version) : Transform(args) {
  auto impl_creator = Registry<ResizeImpl>::Get().GetCreator(specified_platform_, version);
  if (nullptr == impl_creator) {
    ERROR("'Resize' is not supported on '{}' platform", specified_platform_);
    throw std::domain_error("'Resize' is not supported on specified platform");
  }
  impl_ = impl_creator->Create(args);
}
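
// ResizeCreator exposes this transform to the registry so that pipelines can construct a
// 'Resize' transform by name; REGISTER_MODULE below performs the actual registration.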
class ResizeCreator : public Creator<Transform> {
 public:
  const char* GetName() const override { return "Resize"; }
  int GetVersion() const override { return version_; }
  ReturnType Create(const Value& args) override { return make_unique<Resize>(args, version_); }

 private:
  int version_{1};
};

REGISTER_MODULE(Transform, ResizeCreator);

} // namespace mmdeploy