apply cmake-format and refactor cmake scripts for building onnx and tensorrt ops (#99)

* apply cmake-format and refactor the cmake scripts for building onnx and tensorrt ops

* add static target

* suppress the 'CMAKE_CUDA_ARCHITECTURES' warning when CMake 3.18 or later is used

* fix typo
lvhan028 2021-10-13 16:42:11 +08:00 committed by GitHub
parent 1ed6bc0fb5
commit 6fdf6b8616
8 changed files with 146 additions and 123 deletions
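
Note: the formatting-only changes below (two-space indentation, no space before parentheses, reflowed long argument lists) are characteristic cmake-format output. As a hedged aside, not part of the commit itself: with the formatter from the cmakelang package installed, such a pass is typically run in place per script, e.g. cmake-format -i CMakeLists.txt (whether the project carries a .cmake-format config file is an assumption here).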

CMakeLists.txt

@@ -1,18 +1,18 @@
-cmake_minimum_required (VERSION 3.10)
-project (mmdeploy_backend_ops)
+cmake_minimum_required(VERSION 3.10)
+project(mmdeploy_backend_ops)
 # ONNXRUNTIME config
 # enable onnxruntime
 option(BUILD_ONNXRUNTIME_OPS "enable ONNXRUNTIME ops" OFF)
 # ONNXRUNTIME search path
-if (BUILD_ONNXRUNTIME_OPS)
-    if (NOT DEFINED ONNXRUNTIME_DIR)
-        set(ONNXRUNTIME_DIR $ENV{ONNXRUNTIME_DIR})
-    endif()
-    if (NOT ONNXRUNTIME_DIR)
+if(BUILD_ONNXRUNTIME_OPS)
+  if(NOT DEFINED ONNXRUNTIME_DIR)
+    set(ONNXRUNTIME_DIR $ENV{ONNXRUNTIME_DIR})
+  endif()
+  if(NOT ONNXRUNTIME_DIR)
     message(ERROR " ONNXRUNTIME_DIR is not found.")
   endif()
 endif()
 # TensorRT config
@@ -20,10 +20,10 @@ endif()
 # enable tensorrt
 option(BUILD_TENSORRT_OPS "enable TensorRT ops" OFF)
 # TensorRT search path
-if (BUILD_TENSORRT_OPS)
-    if (NOT DEFINED TENSORRT_DIR)
-        set(TENSORRT_DIR $ENV{TENSORRT_DIR})
-    endif()
+if(BUILD_TENSORRT_OPS)
+  if(NOT DEFINED TENSORRT_DIR)
+    set(TENSORRT_DIR $ENV{TENSORRT_DIR})
+  endif()
 endif()
 # NCNN config
@@ -31,13 +31,13 @@ endif()
 # enable ncnn
 option(BUILD_NCNN_OPS "enable NCNN ops" OFF)
 # NCNN search path
-if (BUILD_NCNN_OPS)
-    if (NOT DEFINED NCNN_DIR)
-        set(NCNN_DIR $ENV{NCNN_DIR})
-    endif()
-    if (NOT NCNN_DIR)
+if(BUILD_NCNN_OPS)
+  if(NOT DEFINED NCNN_DIR)
+    set(NCNN_DIR $ENV{NCNN_DIR})
+  endif()
+  if(NOT NCNN_DIR)
     message(ERROR " NCNN_DIR is not found.")
   endif()
 endif()
-add_subdirectory (backend_ops)
+add_subdirectory(backend_ops)
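
Taken together, the three option()/search-path blocks mean a typical configure line looks like the sketch below; the paths are placeholders, not part of this commit:

  cmake .. -DBUILD_ONNXRUNTIME_OPS=ON -DONNXRUNTIME_DIR=/path/to/onnxruntime \
           -DBUILD_TENSORRT_OPS=ON -DTENSORRT_DIR=/path/to/TensorRT

Because each block falls back to $ENV{...} when the cache variable is undefined, exporting ONNXRUNTIME_DIR, TENSORRT_DIR, or NCNN_DIR in the environment works as well.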

backend_ops/CMakeLists.txt

@@ -6,19 +6,19 @@ set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
 set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
 # build ONNXRUNTIME ops
-if (BUILD_ONNXRUNTIME_OPS)
-    message("Build ONNXRUNTIME custom ops.")
-    add_subdirectory (onnxruntime)
+if(BUILD_ONNXRUNTIME_OPS)
+  message("Build ONNXRUNTIME custom ops.")
+  add_subdirectory(onnxruntime)
 endif()
 # build TensorRT ops
-if (BUILD_TENSORRT_OPS)
-    message("Build TensorRT custom ops.")
-    add_subdirectory (tensorrt)
+if(BUILD_TENSORRT_OPS)
+  message("Build TensorRT custom ops.")
+  add_subdirectory(tensorrt)
 endif()
 # build NCNN ops
-if (BUILD_NCNN_OPS)
-    message("Build NCNN custom ops")
-    add_subdirectory (ncnn)
+if(BUILD_NCNN_OPS)
+  message("Build NCNN custom ops")
+  add_subdirectory(ncnn)
 endif()

backend_ops/ncnn/CMakeLists.txt

@@ -5,12 +5,19 @@ set(SHARED_TARGET ${TARGET_NAME})
 set(ncnn_DIR ${NCNN_DIR}/build/install/lib/cmake/ncnn)
 find_package(ncnn)
-if (ncnn_FOUND)
-    message(STATUS "ncnn library found!")
-else ()
-    message(FATAL_ERROR "Could not locate ncnn")
+if(ncnn_FOUND)
+  message(STATUS "ncnn library found!")
+else()
+  message(FATAL_ERROR "Could not locate ncnn")
 endif()
-add_subdirectory (ops)
-add_subdirectory (onnx2ncnn)
-add_subdirectory (pyncnn_ext)
+if(NOT ANDROID AND NOT IOS)
+  add_subdirectory(ops)
+  add_subdirectory(onnx2ncnn)
+  add_subdirectory(pyncnn_ext)
+else()
+  # In case of embedded platform, like android, or ios, we only build custom ncnn
+  # ops, and leave the executable converter(onnx2ncnn, pyncnn_ext) built under
+  # the host platforms
+  add_subdirectory(ops)
+endif()
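
The new if(NOT ANDROID AND NOT IOS) branch keys off variables CMake defines when cross-compiling (ANDROID when CMAKE_SYSTEM_NAME is Android, IOS for iOS). A hedged sketch of an Android configure, with placeholder paths:

  cmake .. -DBUILD_NCNN_OPS=ON -DNCNN_DIR=/path/to/ncnn \
           -DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK/build/cmake/android.toolchain.cmake \
           -DANDROID_ABI=arm64-v8a

Under such a toolchain only the ops subdirectory is entered, so the onnx2ncnn and pyncnn_ext converters are still built on the host.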

backend_ops/ncnn/onnx2ncnn/CMakeLists.txt

@@ -1,15 +1,14 @@
 find_package(Protobuf)
 if(PROTOBUF_FOUND)
-    protobuf_generate_cpp(ONNX_PROTO_SRCS ONNX_PROTO_HDRS ${NCNN_DIR}/tools/onnx/onnx.proto)
-    add_executable(onnx2ncnn onnx2ncnn.cpp ${ONNX_PROTO_SRCS} ${ONNX_PROTO_HDRS})
-    target_include_directories(onnx2ncnn
-        PRIVATE
-        ${PROTOBUF_INCLUDE_DIR}
-        ${CMAKE_CURRENT_BINARY_DIR})
-    target_link_libraries(onnx2ncnn PRIVATE ${PROTOBUF_LIBRARIES})
+  protobuf_generate_cpp(ONNX_PROTO_SRCS ONNX_PROTO_HDRS
+                        ${NCNN_DIR}/tools/onnx/onnx.proto)
+  add_executable(onnx2ncnn onnx2ncnn.cpp ${ONNX_PROTO_SRCS} ${ONNX_PROTO_HDRS})
+  target_include_directories(onnx2ncnn PRIVATE ${PROTOBUF_INCLUDE_DIR}
+                                               ${CMAKE_CURRENT_BINARY_DIR})
+  target_link_libraries(onnx2ncnn PRIVATE ${PROTOBUF_LIBRARIES})
 else()
-    message(FATAL_ERROR "Protobuf not found, onnx model convert tool won't be built")
+  message(
+    FATAL_ERROR "Protobuf not found, onnx model convert tool won't be built")
 endif()
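
For reference: protobuf_generate_cpp, from CMake's stock FindProtobuf module, emits the generated pair (here onnx.pb.h and onnx.pb.cc) into ${CMAKE_CURRENT_BINARY_DIR}, which is why that directory is kept on the include path of onnx2ncnn alongside ${PROTOBUF_INCLUDE_DIR}.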

backend_ops/ncnn/ops/CMakeLists.txt

@@ -1,18 +1,5 @@
 # add plugin source
-set(PLUGIN_LISTS constantofshape
-                 expand
-                 gather
-                 shape
-                 tensorslice
-                 topk)
-foreach(PLUGIN_ITER ${PLUGIN_LISTS})
-    file(GLOB PLUGIN_OPS_SRCS ${PLUGIN_ITER}/*.cpp)
-    file(GLOB PLUGIN_OPS_HEADS ${PLUGIN_ITER}/*.h)
-    set(BACKEND_OPS_SRCS ${BACKEND_OPS_SRCS} ${PLUGIN_OPS_SRCS} ${PLUGIN_OPS_HEADS})
-endforeach(PLUGIN_ITER)
-set(BACKEND_OPS_SRCS ${BACKEND_OPS_SRCS} ncnn_ops_register.cpp)
+file(GLOB_RECURSE BACKEND_OPS_SRCS *.cpp)
 add_library(${SHARED_TARGET} SHARED ${BACKEND_OPS_SRCS})
 target_link_libraries(${SHARED_TARGET} ncnn)
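
Replacing the explicit PLUGIN_LISTS with file(GLOB_RECURSE ...) trades maintenance for a known CMake caveat: the glob runs only at configure time, so a newly added source file is not compiled until the project is reconfigured. A hedged variant, not used by this commit and requiring CMake >= 3.12, asks the build system to re-check the glob on every build:

  # re-evaluate the glob at build time instead of only at configure time
  file(GLOB_RECURSE BACKEND_OPS_SRCS CONFIGURE_DEPENDS *.cpp)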

backend_ops/ncnn/pyncnn_ext/CMakeLists.txt

@@ -9,4 +9,6 @@ add_subdirectory(${PYBIND11_DIR} pybind11)
 include_directories(${pybind11_INCLUDE_DIR} ${PYTHON_INCLUDE_DIRS})
 pybind11_add_module(ncnn_ext ncnn_ext.cpp)
 target_link_libraries(ncnn_ext PUBLIC ncnn ${SHARED_TARGET})
-set_target_properties(ncnn_ext PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CMAKE_SOURCE_DIR}/mmdeploy/apis/ncnn)
+set_target_properties(
+  ncnn_ext PROPERTIES LIBRARY_OUTPUT_DIRECTORY
+                      ${CMAKE_SOURCE_DIR}/mmdeploy/apis/ncnn)

backend_ops/onnxruntime/CMakeLists.txt

@@ -5,24 +5,12 @@ set(SHARED_TARGET ${TARGET_NAME})
 include_directories(${ONNXRUNTIME_DIR}/include)
 link_directories(${ONNXRUNTIME_DIR}/lib)
 # add plugin source
-set(PLUGIN_LISTS grid_sample
-                 roi_align
-                 modulated_deform_conv)
-foreach(PLUGIN_ITER ${PLUGIN_LISTS})
-    file(GLOB PLUGIN_OPS_SRCS ${PLUGIN_ITER}/*.cpp ${PLUGIN_ITER}/*.cu)
-    file(GLOB PLUGIN_OPS_HEADS ${PLUGIN_ITER}/*.h ${PLUGIN_ITER}/*.hpp ${PLUGIN_ITER}/*.cuh)
-    set(BACKEND_OPS_SRCS ${BACKEND_OPS_SRCS} ${PLUGIN_OPS_SRCS} ${PLUGIN_OPS_HEADS})
-endforeach(PLUGIN_ITER)
-file(GLOB COMMON_SRCS common/*.cpp common/*.cu)
-set(BACKEND_OPS_SRCS ${BACKEND_OPS_SRCS} ${COMMON_SRCS})
+file(GLOB_RECURSE BACKEND_OPS_SRCS *.cpp)
 set(INFER_PLUGIN_LIB onnxruntime)
-list(APPEND BACKEND_OPS_SRCS "${CMAKE_CURRENT_SOURCE_DIR}/onnxruntime_register.cpp")
 add_library(${SHARED_TARGET} SHARED ${BACKEND_OPS_SRCS})
 target_link_libraries(${SHARED_TARGET} ${INFER_PLUGIN_LIB})
-target_include_directories(${SHARED_TARGET} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/common)
+target_include_directories(${SHARED_TARGET}
+                           PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/common)

backend_ops/tensorrt/CMakeLists.txt

@@ -1,72 +1,112 @@
 set(TARGET_NAME mmlab_tensorrt_ops)
 set(SHARED_TARGET ${TARGET_NAME})
+set(STATIC_TARGET ${TARGET_NAME}_static)
+# to suppress 'CMAKE_CUDA_ARCHITECTURES' warning when cmake version is >= 3.18
+cmake_policy(SET CMP0104 OLD)
 # cuda
-FIND_PACKAGE(CUDA REQUIRED)
-INCLUDE_DIRECTORIES(/usr/local/cuda/include)
+find_package(CUDA REQUIRED)
+include_directories(${CUDA_INCLUDE_DIRS})
+enable_language(CUDA)
-# tensorrt
-find_path(TENSORRT_INCLUDE_DIR NvInfer.h
-          HINTS ${TENSORRT_DIR} ${CUDA_TOOLKIT_ROOT_DIR}
-          PATH_SUFFIXES include)
-if (TENSORRT_INCLUDE_DIR)
-    MESSAGE(STATUS " Found TensorRT headers at ${TENSORRT_INCLUDE_DIR}")
+set(CMAKE_POSITION_INDEPENDENT_CODE ON)
+if(MSVC)
+  set(CMAKE_CUDA_COMPILER ${CUDA_TOOLKIT_ROOT_DIR}/bin/nvcc.exe)
+  set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -Xcompiler=/wd4819,/wd4828")
+  if(HAVE_CXX_FLAG_UTF_8)
+    set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -Xcompiler=/utf-8")
+  endif()
 else()
-    MESSAGE(ERROR " Cannot found TensorRT headers")
+  set(CMAKE_CUDA_COMPILER ${CUDA_TOOLKIT_ROOT_DIR}/bin/nvcc)
+  # Explicitly set the cuda host compiler. Because the default host compiler
+  # selected by cmake maybe wrong.
+  set(CMAKE_CUDA_HOST_COMPILER ${CMAKE_CXX_COMPILER})
+  set(CUDA_NVCC_FLAGS
+      "${CUDA_NVCC_FLAGS} -Xcompiler=-fPIC,-Wall,-fvisibility=hidden")
 endif()
-find_library(TENSORRT_LIBRARY_INFER nvinfer
-             HINTS ${TENSORRT_DIR} ${TENSORRT_BUILD} ${CUDA_TOOLKIT_ROOT_DIR}
-             PATH_SUFFIXES lib lib64 lib/x64)
-find_library(TENSORRT_LIBRARY_PARSERS nvparsers
-             HINTS ${TENSORRT_DIR} ${TENSORRT_BUILD} ${CUDA_TOOLKIT_ROOT_DIR}
-             PATH_SUFFIXES lib lib64 lib/x64)
-find_library(TENSORRT_LIBRARY_INFER_PLUGIN nvinfer_plugin
-             HINTS ${TENSORRT_DIR} ${TENSORRT_BUILD} ${CUDA_TOOLKIT_ROOT_DIR}
-             PATH_SUFFIXES lib lib64 lib/x64)
-set(TENSORRT_LIBRARY ${TENSORRT_LIBRARY_INFER}
-                     ${TENSORRT_LIBRARY_PARSERS}
-                     ${TENSORRT_LIBRARY_INFER_PLUGIN}
-)
-if (TENSORRT_LIBRARY_INFER AND TENSORRT_LIBRARY_PARSERS AND TENSORRT_LIBRARY_INFER_PLUGIN)
-    MESSAGE(STATUS "Find TensorRT libs at ${TENSORRT_LIBRARY}")
+# set virtual compute architecture and real ones
+set(_NVCC_FLAGS)
+set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_52,code=sm_52")
+if(CUDA_VERSION_MAJOR VERSION_GREATER_EQUAL "8")
+  set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_60,code=sm_60")
+  set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_61,code=sm_61")
+endif()
+if(CUDA_VERSION_MAJOR VERSION_GREATER_EQUAL "9")
+  set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_70,code=sm_70")
+endif()
+if(CUDA_VERSION_MAJOR VERSION_GREATER_EQUAL "10")
+  set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_72,code=sm_72")
+  set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_75,code=sm_75")
+endif()
+if(CUDA_VERSION_MAJOR VERSION_GREATER_EQUAL "11")
+  set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_80,code=sm_80")
+  set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_86,code=sm_86")
+endif()
+set(CUDA_NVCC_FLAGS_DEBUG "-g -O0")
+set(CUDA_NVCC_FLAGS_RELEASE "-O3")
+set(CUDA_NVCC_FLAGS "-std=c++11 ${CUDA_NVCC_FLAGS}")
+set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} ${CUDA_NVCC_FLAGS} ${_NVCC_FLAGS}")
+# tensorrt
+find_path(
+  TENSORRT_INCLUDE_DIR NvInfer.h
+  HINTS ${TENSORRT_DIR} ${CUDA_TOOLKIT_ROOT_DIR}
+  PATH_SUFFIXES include)
+if(TENSORRT_INCLUDE_DIR)
+  message(STATUS " Found TensorRT headers at ${TENSORRT_INCLUDE_DIR}")
 else()
-    MESSAGE(ERROR " Cannot found TensorRT libs")
+  message(ERROR " Cannot found TensorRT headers")
 endif()
-find_package_handle_standard_args(
-    TENSORRT DEFAULT_MSG TENSORRT_INCLUDE_DIR TENSORRT_LIBRARY)
+find_library(
+  TENSORRT_LIBRARY_INFER nvinfer
+  HINTS ${TENSORRT_DIR} ${TENSORRT_BUILD} ${CUDA_TOOLKIT_ROOT_DIR}
+  PATH_SUFFIXES lib lib64 lib/x64)
+find_library(
+  TENSORRT_LIBRARY_INFER_PLUGIN nvinfer_plugin
+  HINTS ${TENSORRT_DIR} ${TENSORRT_BUILD} ${CUDA_TOOLKIT_ROOT_DIR}
+  PATH_SUFFIXES lib lib64 lib/x64)
+set(TENSORRT_LIBRARY ${TENSORRT_LIBRARY_INFER}
+                     ${TENSORRT_LIBRARY_INFER_PLUGIN})
+if(TENSORRT_LIBRARY_INFER
+   AND TENSORRT_LIBRARY_INFER_PLUGIN)
+  message(STATUS "Find TensorRT libs at ${TENSORRT_LIBRARY}")
+else()
+  message(FATAL_ERROR " Cannot found TensorRT libs")
+endif()
+find_package_handle_standard_args(TENSORRT DEFAULT_MSG TENSORRT_INCLUDE_DIR
+                                  TENSORRT_LIBRARY)
 if(NOT TENSORRT_FOUND)
-    message(ERROR " Cannot find TensorRT library.")
+  message(ERROR " Cannot find TensorRT library.")
 endif()
-INCLUDE_DIRECTORIES(${TENSORRT_INCLUDE_DIR})
+include_directories(${TENSORRT_INCLUDE_DIR})
 # cudnn
 include_directories(${CUDNN_DIR}/include)
 link_directories(${CUDNN_DIR}/lib64)
 # cub
-if (NOT DEFINED CUB_ROOT_DIR)
+add_definitions(-DTHRUST_IGNORE_DEPRECATED_CPP_DIALECT)
+if(NOT DEFINED CUB_ROOT_DIR)
   set(CUB_ROOT_DIR "${PROJECT_SOURCE_DIR}/third_party/cub")
 endif()
-INCLUDE_DIRECTORIES(${CUB_ROOT_DIR})
+include_directories(${CUB_ROOT_DIR})
 # add plugin source
-set(PLUGIN_LISTS scatternd
-                 nms
-                 roi_align
-                 batched_nms
-                 instance_norm
-                 modulated_deform_conv
-                 multi_level_roi_align
-                 grid_sampler)
+# include_directories(${CMAKE_CURRENT_SOURCE_DIR}/common)
+file(GLOB_RECURSE BACKEND_OPS_SRCS *.cpp *.cu)
-foreach(PLUGIN_ITER ${PLUGIN_LISTS})
-    file(GLOB PLUGIN_OPS_SRCS ${PLUGIN_ITER}/*.cpp ${PLUGIN_ITER}/*.cu)
-    file(GLOB PLUGIN_OPS_HEADS ${PLUGIN_ITER}/*.h ${PLUGIN_ITER}/*.hpp ${PLUGIN_ITER}/*.cuh)
-    set(BACKEND_OPS_SRCS ${BACKEND_OPS_SRCS} ${PLUGIN_OPS_SRCS} ${PLUGIN_OPS_HEADS})
-endforeach(PLUGIN_ITER)
-list(APPEND BACKEND_OPS_SRCS "${CMAKE_CURRENT_SOURCE_DIR}/common_impl/trt_cuda_helper.cu")
-set(INFER_PLUGIN_LIB ${TENSORRT_LIBRARY} cublas cudnn)
+set(INFER_PLUGIN_LIB ${TENSORRT_LIBRARY} ${CUDA_CUBLAS_LIBRARIES} cudnn)
 cuda_add_library(${SHARED_TARGET} MODULE ${BACKEND_OPS_SRCS})
 target_link_libraries(${SHARED_TARGET} ${INFER_PLUGIN_LIB})
-target_include_directories(${SHARED_TARGET} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/common)
+target_include_directories(${SHARED_TARGET}
+                           PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/common)
+cuda_add_library(${STATIC_TARGET} STATIC ${BACKEND_OPS_SRCS})
+target_link_libraries(${STATIC_TARGET} ${INFER_PLUGIN_LIB})
+target_include_directories(${STATIC_TARGET}
+                           PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/common)
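
On the CMP0104 line: setting the policy to OLD silences the new warning by restoring the pre-3.18 behavior, in which an empty CMAKE_CUDA_ARCHITECTURES is tolerated. A hedged alternative, not what this commit does, keeps the NEW behavior and names the architectures explicitly, mirroring the -gencode list above:

  if(POLICY CMP0104)
    cmake_policy(SET CMP0104 NEW)
  endif()
  # virtual/real architectures matching the gencode flags in this script
  set(CMAKE_CUDA_ARCHITECTURES 52 60 61 70 72 75 80 86)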