Mirror of https://github.com/open-mmlab/mmdeploy.git (synced 2025-01-14 08:09:43 +08:00)
[Enhancement]: Update dockerfile and add github release note (#1952)
* update dockerfile and add github release note
* fix typo in get_started
* fix comment
Parent: 8cab33c4f9
Commit: 0196cd0048
.github/release.yml (new file, vendored, 32 lines added)
@@ -0,0 +1,32 @@
+changelog:
+  categories:
+    - title: 🚀 Features
+      labels:
+        - feature
+        - enhancement
+    - title: 💥 Improvements
+      labels:
+        - improvement
+    - title: 🐞 Bug fixes
+      labels:
+        - bug
+        - Bug:P0
+        - Bug:P1
+        - Bug:P2
+        - Bug:P3
+    - title: 📚 Documentations
+      labels:
+        - documentation
+    - title: 🌐 Other
+      labels:
+        - '*'
+      exclude:
+        labels:
+          - feature
+          - enhancement
+          - bug
+          - documentation
+          - Bug:P0
+          - Bug:P1
+          - Bug:P2
+          - Bug:P3
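For context, this config drives GitHub's automatically generated release notes: each merged PR is grouped under the first category whose labels match, and anything left over falls into "🌐 Other". A minimal sketch of how it is used, assuming a hypothetical PR number and GitHub's gh CLI (the label and category names are the ones from the config above):

    # label a hypothetical PR so it appears under "🚀 Features" in the generated notes
    gh pr edit 1234 --add-label enhancement
    # draft a release whose notes are generated according to .github/release.yml
    gh release create v1.0.0 --generate-notes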
.github/workflows/prebuild.yml (vendored, 2 lines changed)
@@ -61,7 +61,7 @@ jobs:
   linux_build_cxx11abi:
     runs-on: [self-hosted, linux-3090]
     container:
-      image: openmmlab/mmdeploy:build-ubuntu18.04-cuda11.3
+      image: openmmlab/mmdeploy:build-ubuntu16.04-cuda11.3
       options: "--gpus=all --ipc=host"
       volumes:
         - /data2/actions-runner/prebuild:/__w/mmdeploy/prebuild
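The self-hosted prebuild job now runs inside the ubuntu16.04-based image. A quick local sanity check of that container, assuming the tag is publicly available on Docker Hub and the host has the NVIDIA container runtime (the run options mirror the workflow above):

    docker pull openmmlab/mmdeploy:build-ubuntu16.04-cuda11.3
    # assumes the CUDA toolchain is on PATH inside the image
    docker run --rm --gpus=all --ipc=host openmmlab/mmdeploy:build-ubuntu16.04-cuda11.3 nvcc --version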
Dockerfile
@@ -27,9 +27,11 @@ WORKDIR /root/workspace
 ENV FORCE_CUDA="1"
 
 # install toolset
-RUN yum install devtoolset-${TOOLSET_VERSION}-gcc devtoolset-${TOOLSET_VERSION}-gcc-c++ -y
+RUN yum install centos-release-scl devtoolset-${TOOLSET_VERSION}-gcc* -y
 
-ENV PATH=/opt/rh/devtoolset-${TOOLSET_VERSION}/root/usr/bin:$PATH
+ENV TOOLSET_DIR=/opt/rh/devtoolset-${TOOLSET_VERSION}/root/usr
+ENV PATH=$TOOLSET_DIR/bin:$PATH
+ENV LD_LIBRARY_PATH=$TOOLSET_DIR/lib:$TOOLSET_DIR/lib64:/usr/local/lib64
 
 # install cuda cudnn
 RUN curl -fsSL -v -o ./cuda_install.run -O $CUDA_URL &&\
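The toolset is now exposed through TOOLSET_DIR instead of a hard-coded PATH entry, and the added centos-release-scl package makes the devtoolset repositories available to yum. A quick way to confirm the wiring inside the image, assuming TOOLSET_VERSION=9 purely for illustration (the real value comes from a build ARG not shown in this hunk):

    export TOOLSET_DIR=/opt/rh/devtoolset-9/root/usr   # assumed TOOLSET_VERSION=9
    export PATH=$TOOLSET_DIR/bin:$PATH
    export LD_LIBRARY_PATH=$TOOLSET_DIR/lib:$TOOLSET_DIR/lib64:/usr/local/lib64
    which gcc      # should resolve to $TOOLSET_DIR/bin/gcc
    gcc --version  # devtoolset gcc, not the system default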
@@ -46,6 +48,10 @@ RUN curl -fsSL -v -o ./onnxruntime.tgz -O https://github.com/microsoft/onnxrunti
     tar -xzvf onnxruntime.tgz &&\
     rm onnxruntime.tgz &&\
     mv onnxruntime* /opt/onnxruntime &&\
+    curl -fsSL -v -o ./onnxruntime.tgz -O https://github.com/microsoft/onnxruntime/releases/download/v${ONNXRUNTIME_VERSION}/onnxruntime-linux-x64-gpu-${ONNXRUNTIME_VERSION}.tgz &&\
+    tar -xzvf onnxruntime.tgz &&\
+    rm onnxruntime.tgz &&\
+    mv onnxruntime* /opt/onnxruntime-gpu &&\
     curl -fsSL -v -o ./tensorrt.tgz -O $TENSORRT_URL &&\
     tar -xzvf ./tensorrt.tgz &&\
     rm -f ./tensorrt.tgz &&\
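With this hunk the image carries the CPU and GPU ONNX Runtime trees side by side. A small sanity check that both unpacked where the ENV lines in the next hunk expect them (paths taken from the diff; the library file names are the ones the onnxruntime release tarballs normally ship):

    ls /opt/onnxruntime/lib/libonnxruntime.so*       # CPU build
    ls /opt/onnxruntime-gpu/lib/libonnxruntime.so*   # GPU build added by this change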
@@ -56,10 +62,11 @@ RUN curl -fsSL -v -o ./onnxruntime.tgz -O https://github.com/microsoft/onnxrunti
 ENV CUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda
 ENV CUDNN_DIR=/opt/cudnn
 ENV ONNXRUNTIME_DIR=/opt/onnxruntime
+ENV ONNXRUNTIME_GPU_DIR=/opt/onnxruntime-gpu
 ENV TENSORRT_DIR=/opt/TensorRT
 
 ENV LD_LIBRARY_PATH=$CUDA_TOOLKIT_ROOT_DIR/lib64:$CUDNN_DIR/lib64:$LD_LIBRARY_PATH
-ENV LD_LIBRARY_PATH=${ONNXRUNTIME_DIR}/lib:$TENSORRT_DIR/lib:$LD_LIBRARY_PATH
+ENV LD_LIBRARY_PATH=${ONNXRUNTIME_GPU_DIR}/lib:$TENSORRT_DIR/lib:$LD_LIBRARY_PATH
 ENV PATH=$TENSORRT_DIR/bin:$PATH
 
 ### install ppl.cv
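After this change the GPU ONNX Runtime libraries are the ones the dynamic linker finds by default, while ONNXRUNTIME_DIR still points to the CPU tree for builds that want it. A quick way to inspect the resulting search path inside the container (plain shell, nothing mmdeploy-specific):

    echo "$LD_LIBRARY_PATH" | tr ':' '\n' | grep -Ei 'onnxruntime|tensorrt|cuda|cudnn'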
@@ -106,7 +113,7 @@ RUN curl -fsSL -v -o ~/miniconda.sh -O https://repo.anaconda.com/miniconda/Mini
     /opt/conda/envs/torch${TORCH_VERSION}/bin/pip install --no-cache-dir torch==${TORCH_VERSION}+cu${CUDA_INT} \
     torchvision==${TORCHVISION_VERSION}+cu${CUDA_INT} -f https://download.pytorch.org/whl/torch_stable.html &&\
     /opt/conda/bin/conda init bash &&\
-    /opt/conda/bin/conda clean -ya &&\
+    /opt/conda/bin/conda clean -ya
 
 ENV CONDA=/opt/conda
 ENV PATH=$CONDA/bin:$PATH
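Dropping the trailing `&&\` after `conda clean -ya` ends the RUN instruction cleanly instead of continuing it onto a blank line, which newer Docker builders flag. Once the env exists, a one-liner confirms the CUDA-enabled PyTorch install (the env name follows the torch${TORCH_VERSION} convention used above; CUDA availability only reports true when the container runs with --gpus):

    /opt/conda/envs/torch${TORCH_VERSION}/bin/python -c "import torch, torchvision; print(torch.__version__, torchvision.__version__, torch.version.cuda, torch.cuda.is_available())"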
get_started (English)
@@ -125,7 +125,7 @@ pip install mmdeploy==1.0.0rc3
 # 2.1 support onnxruntime
 pip install mmdeploy-runtime==1.0.0rc3
 # 2.2 support onnxruntime-gpu, tensorrt
-pip install mmdeploy-runtime-cuda==1.0.0rc3
+pip install mmdeploy-runtime-gpu==1.0.0rc3
 
 # 3. install inference engine
 # 3.1 install TensorRT
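The published CUDA runtime wheel is named mmdeploy-runtime-gpu, hence the typo fix. A minimal end-to-end check of the corrected instruction, assuming a CUDA-capable Python environment and assuming the wheel installs the mmdeploy_runtime module:

    pip install mmdeploy-runtime-gpu==1.0.0rc3
    python -c "import mmdeploy_runtime; print(mmdeploy_runtime.__file__)"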
get_started (Chinese)
@@ -120,7 +120,7 @@ pip install mmdeploy==1.0.0rc3
 # 2.1 support onnxruntime inference
 pip install mmdeploy-runtime==1.0.0rc3
 # 2.2 support onnxruntime-gpu and tensorrt inference
-pip install mmdeploy-runtime-cuda==1.0.0rc3
+pip install mmdeploy-runtime-gpu==1.0.0rc3
 
 # 3. install the inference engines
 # 3.1 install TensorRT