mmdeploy/csrc/core/operator.h

// Copyright (c) OpenMMLab. All rights reserved.

#ifndef MMDEPLOY_SRC_EXPERIMENTAL_PIPELINE_OPERATOR_H_
#define MMDEPLOY_SRC_EXPERIMENTAL_PIPELINE_OPERATOR_H_

#include "core/value.h"

namespace mmdeploy::graph {

using std::string;
using std::tuple;
using std::vector;
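
// Gather: collect the elements of an array selected by `idxs`, or the members of an object
// selected by `keys`, into `output`.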
MMDEPLOY_API Result<void> Gather(const Value::Array& array, const vector<int>& idxs,
                                 Value::Array& output);

MMDEPLOY_API Result<void> Gather(Value::Array&& array, const vector<int>& idxs,
                                 Value::Array& output);

MMDEPLOY_API Result<void> Gather(const Value::Object& object, const vector<std::string>& keys,
                                 Value::Array& output);

MMDEPLOY_API Result<void> Gather(Value::Object&& object, const vector<std::string>& keys,
                                 Value::Array& output);
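
// Scatter: the counterpart of Gather; distributes the elements of `array` into `output`
// according to `idxs` or `keys` (the array overload expects `output` to be pre-sized, as the
// inline helper below does).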
MMDEPLOY_API Result<void> Scatter(Value::Array array, const vector<int>& idxs,
                                  Value::Array& output);

MMDEPLOY_API Result<void> Scatter(Value::Array array, const vector<std::string>& keys,
                                  Value::Object& output);

inline Result<Value::Array> Gather(const Value::Array& array, const vector<int>& idxs) {
  Value::Array output;
  OUTCOME_TRY(Gather(array, idxs, output));
  return output;
}

inline Result<Value::Array> Gather(Value::Array&& array, const vector<int>& idxs) {
  Value::Array output;
  OUTCOME_TRY(Gather(std::move(array), idxs, output));
  return output;
}

inline Result<Value::Array> Gather(const Value::Object& object, const vector<std::string>& keys) {
  Value::Array output;
  OUTCOME_TRY(Gather(object, keys, output));
  return output;
}

inline Result<Value::Array> Gather(Value::Object&& object, const vector<std::string>& keys) {
  Value::Array output;
  OUTCOME_TRY(Gather(std::move(object), keys, output));
  return output;
}

inline Result<Value::Array> Scatter(Value::Array array, const vector<int>& idxs) {
  Value::Array output(idxs.size(), Value::kNull);
  OUTCOME_TRY(Scatter(std::move(array), idxs, output));
  return output;
}

inline Result<Value::Object> Scatter(Value::Array array, const vector<std::string>& keys) {
  Value::Object output;
  OUTCOME_TRY(Scatter(std::move(array), keys, output));
  return output;
}
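
// Flatten an array of arrays into a single flat array. For every element, `idxs` records the
// index of the inner array it came from; the last entry of `idxs` stores the number of inner
// arrays. Illustrative example: [[a, b], [c]] -> ([a, b, c], idxs = {0, 0, 1, 2}).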
template <class V, std::enable_if_t<is_value_v<std::decay_t<V>>, bool> = true>
Result<tuple<Value, vector<int>>> Flatten(V&& input) {
  if (!input.is_array()) {
    return Status(eInvalidArgument);
  }
  Value output = ValueType::kArray;
  std::vector<int> idxs;
  for (int i = 0; i < input.size(); ++i) {
    auto inner = std::forward<V>(input)[i];
    if (!inner.is_array()) {
      return Status(eInvalidArgument);
    }
    for (auto& item : inner) {
      output.push_back(std::move(item));
      idxs.push_back(i);
    }
  }
  idxs.push_back(input.size());
  return {output, idxs};
}
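
// Unflatten: inverse of Flatten; regroups a flat array into `idxs.back()` inner arrays
// according to `idxs`. Illustrative example: ([a, b, c], idxs = {0, 0, 1, 2}) -> [[a, b], [c]].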
template <class V, std::enable_if_t<is_value_v<std::decay_t<V>>, bool> = true>
Result<Value> Unflatten(V&& input, const vector<int>& idxs) {
  if (!input.is_array()) {
    return Status(eInvalidArgument);
  }
  Value output = ValueType::kArray;
  for (int i = 0; i < idxs.back(); ++i) {
    output.push_back(ValueType::kArray);
  }
  for (int i = 0; i < input.size(); ++i) {
    if (idxs[i] >= output.size()) {
      return Status(eInvalidArgument);
    }
    output[idxs[i]].push_back(std::forward<V>(input)[i]);
  }
  return output;
}

// object of arrays -> array of objects, all arrays must be of same length
MMDEPLOY_API Result<Value> DistribOA(const Value& oa);

// array of objects -> object of arrays, all objects must be isomorphic
MMDEPLOY_API Result<Value> DistribAO(const Value& ao);

// array of arrays -> array of arrays, this is equivalent to transpose
MMDEPLOY_API Result<Value> DistribAA(const Value& a);
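
// Illustrative examples (hypothetical values):
//   DistribOA({"x": [1, 2], "y": [3, 4]}) -> [{"x": 1, "y": 3}, {"x": 2, "y": 4}]
//   DistribAA([[1, 2], [3, 4]])           -> [[1, 3], [2, 4]]
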
} // namespace mmdeploy::graph
#endif // MMDEPLOY_SRC_EXPERIMENTAL_PIPELINE_OPERATOR_H_