// Copyright (c) OpenMMLab. All rights reserved.

#include "operator.h"

namespace mmdeploy::graph {

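// DistribOA: transpose an object-of-arrays into an array-of-objects, e.g.
//   {"x": [x0, x1], "y": [y0, y1]}  ->  [{"x": x0, "y": y0}, {"x": x1, "y": y1}]
// Every array in the input object must have the same length.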
Result<Value> DistribOA(const Value& oa) {
  if (!oa.is_object()) {
    return Status(eInvalidArgument);
  }
  Value ao = ValueType::kArray;
  for (auto inner = oa.begin(); inner != oa.end(); ++inner) {
    if (!inner->is_array()) {
      return Status(eInvalidArgument);
    }
    if (ao.empty()) {
      // create one output object per element of the first array
      for (int i = 0; i < inner->size(); ++i) ao.push_back(ValueType::kObject);
    }
    // each array must match the output length, not the number of keys
    if (inner->size() != ao.size()) {
      return Status(eInvalidArgument);
    }
    for (int i = 0; i < inner->size(); ++i) {
      ao[i][inner.key()] = (*inner)[i];
    }
  }
  return ao;
}

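// DistribAO: transpose an array-of-objects into an object-of-arrays, e.g.
//   [{"x": x0, "y": y0}, {"x": x1, "y": y1}]  ->  {"x": [x0, x1], "y": [y0, y1]}
// Every object in the input array must have the same set of keys.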
Result<Value> DistribAO(const Value& ao) {
  if (!ao.is_array()) {
    return Status(eInvalidArgument);
  }
  Value oa = ValueType::kObject;
  for (const auto& inner : ao) {
    // every element of the input array must be an object
    if (!inner.is_object()) {
      return Status(eInvalidArgument);
    }
    if (oa.empty()) {
      // create one output array per key of the first object
      for (auto item = inner.begin(); item != inner.end(); ++item) {
        oa[item.key()] = ValueType::kArray;
      }
    }
    if (inner.size() != oa.size()) {
      return Status(eInvalidArgument);
    }
    for (auto item = inner.begin(); item != inner.end(); ++item) {
      if (!oa.contains(item.key())) {
        return Status(eInvalidArgument);
      }
      oa[item.key()].push_back(*item);
    }
  }
  return oa;
}

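// DistribAA: transpose an array-of-arrays (rows become columns), e.g.
//   [[a0, a1], [b0, b1]]  ->  [[a0, b0], [a1, b1]]
// Every inner array must have the same length.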
Result<Value> DistribAA(const Value& a) {
  if (!a.is_array()) {
    return Status(eInvalidArgument);
  }
  auto ta = Value::Array{};
  for (const auto& inner : a.get_ref<const Value::Array&>()) {
    if (!inner.is_array()) {
      return Status(eInvalidArgument);
    }
    if (ta.empty()) {
      // create one output column per element of the first row
      ta.reserve(inner.size());
      for (int i = 0; i < inner.size(); ++i) {
        ta.emplace_back(ValueType::kArray);
      }
    }
    if (inner.size() != ta.size()) {
      return Status(eInvalidArgument);
    }
    for (int i = 0; i < inner.size(); ++i) {
      ta[i].push_back(inner[i]);
    }
  }
  return ta;
}

}  // namespace mmdeploy::graph
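
// Usage sketch (illustrative only, not part of this file): assuming Value
// supports brace-initialization of nested objects/arrays in the JSON-style
// API used above, DistribOA "transposes" batched inputs like so:
//
//   Value oa{{"img", {img0, img1}}, {"label", {0, 1}}};
//   auto ao = DistribOA(oa).value();
//   // ao == [{"img": img0, "label": 0}, {"img": img1, "label": 1}]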