// Copyright (c) OpenMMLab. All rights reserved.

#ifndef CORE_TENSOR_H
#define CORE_TENSOR_H

#include <memory>
#include <string>
#include <type_traits>
#include <vector>

#include "device.h"
#include "types.h"

namespace mmdeploy {

using TensorShape = std::vector<int64_t>;
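
// Editorial note: TensorDesc is a plain metadata record -- the device the
// data lives on, the element type (float32 by default), the per-dimension
// extents, and a name identifying the tensor (e.g. a model input/output).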
struct TensorDesc {
  Device device;
  DataType data_type{DataType::kFLOAT};
  TensorShape shape;
  std::string name;
};
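
// Editorial note: Tensor pairs a TensorDesc with a Buffer that holds the
// storage. The defaulted copy operations duplicate the handle, not the
// bytes; deep copies go through CopyFrom/CopyTo below.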
class MMDEPLOY_API Tensor {
 public:
  Tensor() = default;
  Tensor(const Tensor&) = default;
  Tensor(Tensor&&) noexcept = default;
  Tensor& operator=(const Tensor&) = default;
  Tensor& operator=(Tensor&&) noexcept = default;
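
  // Construction from a descriptor: allocate via `allocator`, adopt an
  // existing Buffer, or wrap caller-provided memory. (Comment added
  // editorially; ownership details are inferred from the signatures.)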
  Tensor(const TensorDesc& desc, Allocator allocator = {});  // NOLINT
  Tensor(const TensorDesc& desc, Buffer buffer);
  Tensor(const TensorDesc& desc, std::shared_ptr<void> data);
  ~Tensor() = default;

  const TensorDesc& desc() const;
  const TensorShape& shape() const;
  TensorShape::value_type shape(int dim) const;
  DataType data_type() const;
  const char* name() const;
  int64_t size() const;       // element count
  int64_t byte_size() const;  // size in bytes

  const Buffer& buffer() const;
  Buffer& buffer();
  Device device() const;
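
  // In-place shape change; this header gives no reallocation guarantees, so
  // callers should keep the element count within the existing buffer.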
  void Reshape(const TensorShape& shape);
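
  // Single slice along the first (outermost) dimension -- an editorial
  // reading of the signature; stride and ownership semantics live in the
  // implementation.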
  Tensor Slice(int index);
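
  // Deep copies between tensors and to/from host memory, issued on `stream`
  // (and therefore possibly asynchronous); failures surface via Result<>.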
  Result<void> CopyFrom(const Tensor& tensor, Stream stream = {});
  Result<void> CopyTo(Tensor& tensor, Stream stream = {}) const;

  Result<void> CopyFrom(void* host_ptr, Stream stream = {});
  Result<void> CopyTo(void* host_ptr, Stream stream = {}) const;

  Allocator allocator() { return allocator_; }
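
  // Typed pointer into the underlying buffer; T defaults to void for raw
  // access, and the const overload yields a pointer-to-const.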
  template <typename T = void>
  T* data() {
    return GetNative<T*>(buffer());
  }

  template <typename T = void, typename U = std::add_const_t<T>>
  U* data() const {
    return GetNative<U*>(buffer());
  }

 private:
  void Allocate();

  TensorDesc desc_;
  Allocator allocator_;
  Buffer buffer_;
};
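
// Usage sketch (editorial; the "cpu" platform name and the copy/result
// handling below are illustrative assumptions, not guarantees of this API):
//
//   TensorDesc desc{Device{"cpu"}, DataType::kFLOAT, {1, 3, 224, 224}, "input"};
//   Tensor input{desc};                      // storage comes from the allocator
//   std::vector<float> host(input.size());
//   // ... fill `host` ...
//   auto ec = input.CopyFrom(host.data());   // host -> tensor copy
//   float* p = input.data<float>();          // typed view of the buffer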

// static_assert(sizeof(Tensor) == 80);

}  // namespace mmdeploy

#endif  // !CORE_TENSOR_H