mmdeploy/csrc/core/operator.cpp
lzhangzz 640aa03538
Support Windows (#106)
* minor changes

* support windows

* fix GCC build

* fix lint

* reformat

* fix Windows build

* fix GCC build

* search backend ops for onnxruntime

* fix lint

* fix lint

* code clean-up

* code clean-up

* fix clang build

* fix trt support

* fix cmake for ncnn

* fix cmake for openvino

* fix SDK Python API

* handle ops for other backends (ncnn, trt)

* handle SDK Python API library location

* robustify linkage

* fix cuda

* minor fix for openvino & ncnn

* use CMAKE_CUDA_ARCHITECTURES if set

* fix cuda preprocessor

* fix misc

* fix pplnn & pplcv, drop support for pplcv<0.6.0

* robustify cmake

* update build.md (#2)

* build dynamic modules as module library & fix demo (partially)

* fix candidate path for mmdeploy_python

* move "enable CUDA" to cmake config for demo

* refine demo cmake

* add comment

* fix ubuntu build

* revert docs/en/build.md

* fix C API

* fix lint

* Windows build doc (#3)

* check in docs related to mmdeploy build on windows

* update build guide on windows platform

* update build guide on windows platform

* make path of thirdparty libraries consistent

* make path consistency

* correct build command for custom ops

* correct build command for sdk

* update sdk build instructions

* update doc

* correct build command

* fix lint

* correct build command and fix lint

Co-authored-by: lvhan <lvhan@pjlab.org>

* trailing whitespace (#4)

* minor fix

* fix sr sdk model

* fix type deduction

* fix cudaFree after driver shutting down

* update ppl.cv installation warning (#5)

* fix device allocator threshold & fix lint

* update doc (#6)

* update ppl.cv installation warning

* missing 'git clone'

Co-authored-by: chenxin <chenxin2@sensetime.com>
Co-authored-by: zhangli <zhangli@sensetime.com>
Co-authored-by: lvhan028 <lvhan_028@163.com>
Co-authored-by: lvhan <lvhan@pjlab.org>
2022-02-24 20:08:44 +08:00

159 lines
4.0 KiB
C++

// Copyright (c) OpenMMLab. All rights reserved.
#include "operator.h"
#include <algorithm>
namespace mmdeploy::graph {
// Appends array[idx] (copied) to `output` for each idx in `idxs`, preserving order.
// Returns eOutOfRange if any index is negative or >= array.size(); `output` is not
// modified in that case. No-op success on empty `idxs`.
Result<void> Gather(const Value::Array& array, const vector<int>& idxs, Value::Array& output) {
  if (idxs.empty()) {
    return success();
  }
  // Validate BOTH ends of the index range up front: a negative index would
  // otherwise index the vector out of bounds (undefined behavior).
  const auto [min_it, max_it] = std::minmax_element(begin(idxs), end(idxs));
  if (*min_it < 0 || array.size() <= static_cast<size_t>(*max_it)) {
    return Status(eOutOfRange);
  }
  output.reserve(output.size() + idxs.size());
  for (const auto& idx : idxs) {
    output.push_back(array[idx]);
  }
  return success();
}
// Move-appends array[idx] to `output` for each idx in `idxs`, preserving order.
// Returns eOutOfRange if any index is negative or >= array.size(); `output` is not
// modified in that case. No-op success on empty `idxs`.
// Note: duplicate indices would move from the same element twice; callers are
// expected to pass distinct indices.
Result<void> Gather(Value::Array&& array, const vector<int>& idxs, Value::Array& output) {
  if (idxs.empty()) {
    return success();
  }
  // Validate BOTH ends of the index range up front: a negative index would
  // otherwise index the vector out of bounds (undefined behavior).
  const auto [min_it, max_it] = std::minmax_element(begin(idxs), end(idxs));
  if (*min_it < 0 || array.size() <= static_cast<size_t>(*max_it)) {
    return Status(eOutOfRange);
  }
  output.reserve(output.size() + idxs.size());
  for (const auto& idx : idxs) {
    output.push_back(std::move(array[idx]));
  }
  return success();
}
// Appends object[key] (copied) to `output` for each key in `keys`, in order.
// A missing key maps std::map::at's std::out_of_range onto eOutOfRange;
// elements appended before the failure remain in `output`.
Result<void> Gather(const Value::Object& object, const vector<std::string>& keys,
                    Value::Array& output) {
  output.reserve(output.size() + keys.size());
  try {
    for (size_t i = 0; i < keys.size(); ++i) {
      output.push_back(object.at(keys[i]));
    }
  } catch (const std::out_of_range&) {
    return Status(eOutOfRange);
  }
  return success();
}
// Move-appends object[key] to `output` for each key in `keys`, in order.
// A missing key maps std::map::at's std::out_of_range onto eOutOfRange;
// elements appended before the failure remain in `output`.
Result<void> Gather(Value::Object&& object, const vector<std::string>& keys, Value::Array& output) {
  output.reserve(output.size() + keys.size());
  try {
    for (size_t i = 0; i < keys.size(); ++i) {
      output.push_back(std::move(object.at(keys[i])));
    }
  } catch (const std::out_of_range&) {
    return Status(eOutOfRange);
  }
  return success();
}
// Moves array[i] into output[idxs[i]] for each i. Requires at least as many
// values as indices, and every index to be a valid position in `output`.
// All indices are validated BEFORE any write so that `output` is left
// untouched on failure (the original indexed the vector unchecked, which is
// undefined behavior for a bad index).
Result<void> Scatter(Value::Array array, const vector<int>& idxs, Value::Array& output) {
  if (array.size() < idxs.size()) {
    return Status(eOutOfRange);
  }
  for (const auto idx : idxs) {
    if (idx < 0 || output.size() <= static_cast<size_t>(idx)) {
      return Status(eOutOfRange);
    }
  }
  for (size_t i = 0; i < idxs.size(); ++i) {
    output[idxs[i]] = std::move(array[i]);
  }
  return success();
}
// Moves the first keys.size() elements of `array` into `output`, pairing
// array[i] with keys[i]. Fails when there are fewer values than keys.
// Note: emplace does not overwrite a key that is already present in `output`.
Result<void> Scatter(Value::Array array, const vector<std::string>& keys, Value::Object& output) {
  if (array.size() < keys.size()) {
    return Status(eOutOfRange);
  }
  size_t index = 0;
  for (const auto& key : keys) {
    output.emplace(key, std::move(array[index]));
    ++index;
  }
  return success();
}
// Transposes an object-of-arrays into an array-of-objects:
//   {k0: [a0, a1], k1: [b0, b1]}  ->  [{k0: a0, k1: b0}, {k0: a1, k1: b1}]
// All member arrays must have the same length. Returns eInvalidArgument if the
// input is not an object, a member is not an array, or lengths disagree.
Result<Value> DistribOA(const Value& oa) {
  if (!oa.is_object()) {
    return Status(eInvalidArgument);
  }
  Value ao = ValueType::kArray;
  for (auto inner = oa.begin(); inner != oa.end(); ++inner) {
    if (!inner->is_array()) {
      return Status(eInvalidArgument);
    }
    // The first member fixes the transposed length.
    if (ao.empty()) {
      for (int i = 0; i < inner->size(); ++i) ao.push_back(ValueType::kObject);
    }
    // Every member array must match the established transposed length ao.size().
    // (The original compared against oa.size() — the number of KEYS — which
    // rejected any valid input whose array length differed from the key count.)
    if (inner->size() != ao.size()) {
      return Status(eInvalidArgument);
    }
    for (int i = 0; i < inner->size(); ++i) {
      ao[i][inner.key()] = (*inner)[i];
    }
  }
  return ao;
}
// Transposes an array-of-objects into an object-of-arrays:
//   [{k0: a0, k1: b0}, {k0: a1, k1: b1}]  ->  {k0: [a0, a1], k1: [b0, b1]}
// All element objects must share the same key set. Returns eInvalidArgument if
// the input is not an array, an element is not an object, or key sets disagree.
Result<Value> DistribAO(const Value& ao) {
  if (!ao.is_array()) {
    return Status(eInvalidArgument);
  }
  Value oa = ValueType::kObject;
  for (const auto& inner : ao) {
    // Each element must be an object. (The original condition was inverted —
    // `if (inner.is_object())` — rejecting exactly the valid inputs.)
    if (!inner.is_object()) {
      return Status(eInvalidArgument);
    }
    // The first element fixes the key set; each key accumulates values via
    // push_back below, so it must be initialized as an ARRAY (the original
    // initialized it to kObject).
    if (oa.empty()) {
      for (auto item = inner.begin(); item != inner.end(); ++item) {
        oa[item.key()] = ValueType::kArray;
      }
    }
    if (inner.size() != oa.size()) {
      return Status(eInvalidArgument);
    }
    for (auto item = inner.begin(); item != inner.end(); ++item) {
      if (!oa.contains(item.key())) {
        return Status(eInvalidArgument);
      }
      oa[item.key()].push_back(*item);
    }
  }
  return oa;
}
// Transposes an array-of-arrays:
//   [[a0, a1], [b0, b1]]  ->  [[a0, b0], [a1, b1]]
// All inner arrays must have the same length. Returns eInvalidArgument if the
// input is not an array, an element is not an array, or lengths disagree.
Result<Value> DistribAA(const Value& a) {
  if (!a.is_array()) {
    return Status(eInvalidArgument);
  }
  auto transposed = Value::Array{};
  for (const auto& row : a.get_ref<const Value::Array&>()) {
    if (!row.is_array()) {
      return Status(eInvalidArgument);
    }
    // The first row fixes the number of columns of the result.
    if (transposed.empty()) {
      transposed.reserve(row.size());
      for (int col = 0; col < row.size(); ++col) {
        transposed.emplace_back(Value::kArray);
      }
    }
    if (row.size() != transposed.size()) {
      return Status(eInvalidArgument);
    }
    for (int col = 0; col < row.size(); ++col) {
      transposed[col].push_back(row[col]);
    }
  }
  return transposed;
}
} // namespace mmdeploy::graph