lvhan028 36124f6205
Merge sdk (#251)
* check in cmake

* move backend_ops to csrc/backend_ops

* check in preprocess, model, some codebase and their c-apis

* check in CMakeLists.txt

* check in parts of test_csrc

* commit everything else

* add readme

* update core's BUILD_INTERFACE directory

* skip codespell on third_party

* update trt_net and ort_net's CMakeLists

* ignore clion's build directory

* check in pybind11

* add onnx.proto. Remove MMDeploy's dependency on ncnn's source code

* export MMDeployTargets only when MMDEPLOY_BUILD_SDK is ON

* remove useless message

* target include directory is wrong

* change target name from mmdeploy_ppl_net to mmdeploy_pplnn_net

* skip install directory

* update project's cmake

* remove useless code

* set CMAKE_BUILD_TYPE to Release by force if it isn't set by user

* update custom ops CMakeLists

* pass object target's source lists

* fix lint end-of-file

* fix lint: trailing whitespace

* fix codespell hook

* move bicubic_interpolate to csrc/backend_ops/

* set MMDEPLOY_BUILD_SDK OFF

* change custom ops build command

* add spdlog installation command

* update docs on how to checkout pybind11

* move bicubic_interpolate to backend_ops/tensorrt directory

* remove useless code

* correct cmake

* fix typo

* fix typo

* fix install directory

* correct sdk's readme

* set cub dir when cuda version < 11.0

* change directory where clang-format will apply to

* fix build command

* add .clang-format

* change clang-format style from google to file

* reformat csrc/backend_ops

* format sdk's code

* turn off clang-format for some files

* add -Xcompiler=-fno-gnu-unique

* fix trt topk initialize

* check in config for sdk demo

* update cmake script and csrc's readme

* correct config's path

* add cuda include directory, otherwise compile failed in case of tensorrt8.2

* clang-format onnx2ncnn.cpp

Co-authored-by: zhangli <lzhang329@gmail.com>
Co-authored-by: grimoire <yaoqian@sensetime.com>
2021-12-07 10:57:55 +08:00

120 lines
4.0 KiB
C++

// Copyright (c) OpenMMLab. All rights reserved.
#ifndef MMDEPLOY_SRC_CORE_MPL_SPAN_H_
#define MMDEPLOY_SRC_CORE_MPL_SPAN_H_
#include <iterator>
#include <type_traits>
#include "detected.h"
#include "iterator.h"
namespace mmdeploy {
namespace detail {

// Detects whether T provides an `operator->` (used to unwrap fancy
// pointers / iterators down to a raw pointer).
template <typename T>
using arrow_t = decltype(std::declval<T>().operator->());

// Minimal C++17 analogue of std::to_address: returns `p` itself when it is
// already a raw pointer, otherwise recursively unwraps via `operator->`.
//
// FIX: the second branch was a plain runtime `else if`. That instantiated
// `p.operator->()` even for types without one (hard compile error instead of
// a discarded branch), and let control fall off the end of a function with a
// deduced non-void return type (UB). `else if constexpr` discards the
// untaken branch at compile time, which is the intended behavior.
template <typename T>
constexpr auto to_address(const T& p) noexcept {
  if constexpr (std::is_pointer_v<T>) {
    return p;
  } else if constexpr (detail::is_detected_v<arrow_t, T>) {
    return to_address(p.operator->());
  }
}

}  // namespace detail
// A non-owning view over a contiguous sequence of T, modeled after
// std::span (C++20) but usable in C++17. Holds only (pointer, size);
// copying a Span never copies the underlying elements, and the Span does
// not extend the lifetime of the data it views.
template <typename T>
class Span {
 public:
  using element_type = T;
  using value_type = std::remove_cv_t<T>;
  using size_type = std::size_t;
  using difference_type = std::ptrdiff_t;
  using pointer = T*;
  using const_pointer = const T*;
  using reference = T&;
  using const_reference = const T&;
  using iterator = T*;
  using reverse_iterator = std::reverse_iterator<iterator>;

  // Sentinel for subspan's count meaning "to the end of the span".
  static constexpr size_type npos = static_cast<size_type>(-1);

 public:
  // An empty span (data() == nullptr, size() == 0).
  constexpr Span() noexcept : data_(nullptr), size_(0) {}

  // From an iterator (anything detail::to_address can unwrap) plus a count.
  // clang-format off
  template <typename It,
            std::void_t<decltype(std::addressof(std::declval<It&>()))>* = nullptr>
  // clang-format on
  constexpr Span(It first, size_type size) : data_(detail::to_address(first)), size_(size) {}

  // From an iterator pair [first, last). The enable_if keeps this overload
  // from hijacking (iterator, count) calls whose count converts to size_t.
  template <typename It, typename End,
            std::enable_if_t<!std::is_convertible_v<End, std::size_t>, int> = 0>
  constexpr Span(It first, End last) : data_(detail::to_address(first)), size_(last - first) {}

  // From any contiguous range exposing std::data()/std::size()
  // (std::vector, std::array, another Span, ...).
  template <typename U, typename = std::void_t<decltype(std::data(std::declval<U>()))>,
            typename = std::void_t<decltype(std::size(std::declval<U>()))>>
  constexpr Span(U& v) : data_(std::data(v)), size_(std::size(v)) {}

  // From a C array; the extent N is deduced.
  template <std::size_t N>
  constexpr Span(element_type (&arr)[N]) noexcept : data_(std::data(arr)), size_(N) {}

  // Copying is a shallow (pointer, size) copy.
  // FIX: the original declared this as `template <std::size_t N>`, leaving N
  // undeducible from `const Span&`, so the overload could never be selected;
  // the implicitly generated copy constructor ran instead. Defaulting it
  // keeps that exact behavior and removes the dead template.
  constexpr Span(const Span& other) noexcept = default;

  constexpr iterator begin() const noexcept { return data_; }
  constexpr iterator end() const noexcept { return data_ + size_; }
  constexpr reverse_iterator rbegin() const noexcept { return std::make_reverse_iterator(end()); }
  constexpr reverse_iterator rend() const noexcept { return std::make_reverse_iterator(begin()); }

  // Element access; no bounds checking (UB on empty/out-of-range, as with
  // std::span).
  constexpr reference front() const { return data_[0]; }
  constexpr reference back() const { return data_[size_ - 1]; }
  constexpr reference operator[](size_type idx) const { return data_[idx]; }

  constexpr pointer data() const noexcept { return data_; }
  constexpr size_type size() const noexcept { return size_; }
  constexpr size_type size_bytes() const noexcept { return sizeof(value_type) * size(); }
  constexpr bool empty() const noexcept { return size_ == 0; }

  // Views of the first/last `count` elements; caller must ensure
  // count <= size().
  constexpr Span<element_type> first(size_type count) const { return {begin(), count}; }
  constexpr Span<element_type> last(size_type count) const { return {end() - count, count}; }

  // Subview of `count` elements starting at `offset`; count == npos (the
  // default, same value as the original `-1`) means "through the end".
  // Caller must ensure offset [+ count] <= size().
  constexpr Span<element_type> subspan(size_type offset, size_type count = npos) const {
    if (count == npos) {
      return {begin() + offset, end()};
    }
    return {begin() + offset, begin() + offset + count};
  }

  constexpr Span& operator=(const Span& other) noexcept = default;

  // Deep element-wise equality (note: std::span deliberately has none;
  // this one compares contents, not identity).
  friend bool operator==(const Span& a, const Span& b) {
    if (a.size() != b.size()) return false;
    for (size_type i = 0; i < a.size(); ++i) {
      if (a[i] != b[i]) {
        return false;
      }
    }
    return true;
  }
  friend bool operator!=(const Span& a, const Span& b) { return !(a == b); }

 private:
  T* data_;        // first element of the viewed sequence (not owned)
  size_type size_; // number of elements viewed
};
// Class template argument deduction guides, mirroring std::span's (C++20).
// clang-format off
// (iterator, count) or (iterator, iterator): the element type is the
// iterator's reference type with the reference stripped, so a const_iterator
// deduces Span<const T>.
template <typename It, typename EndOrSize>
Span(It, EndOrSize) -> Span<std::remove_reference_t<iter_reference_t<It>>>;
// C array: element type and implicit size come from the array type.
template <typename T, std::size_t N>
Span(T (&)[N]) -> Span<T>;
// Container with .data()/.size(): deduce from its value_type.
// NOTE(review): for a const container this deduces Span<value_type>
// (non-const element type), yet std::data(const U&) yields a const pointer —
// presumably Span<const value_type> was intended for that case; confirm
// against callers.
template <typename U, typename = std::void_t<decltype(std::declval<U>().data())>,
          typename = std::void_t<decltype(std::declval<U>().size())>>
Span(U& v) -> Span<typename uncvref_t<U>::value_type>;
// clang-format on
} // namespace mmdeploy
#endif // MMDEPLOY_SRC_CORE_MPL_SPAN_H_