diff --git a/docker/prebuild/Dockerfile b/docker/prebuild/Dockerfile index 1a2505c9c..52888794d 100644 --- a/docker/prebuild/Dockerfile +++ b/docker/prebuild/Dockerfile @@ -19,11 +19,18 @@ ARG TENSORRT_VERSION=8.2.3.0 ARG TORCH_VERSION=1.10.0 ARG TORCHVISION_VERSION=0.11.0 +ARG TOOLSET_VERSION=7 + USER root WORKDIR /root/workspace ENV FORCE_CUDA="1" +# install toolset (clean the yum cache in the same layer so it is not baked into the image) +RUN yum install devtoolset-${TOOLSET_VERSION}-gcc devtoolset-${TOOLSET_VERSION}-gcc-c++ -y && yum clean all + +ENV PATH=/opt/rh/devtoolset-${TOOLSET_VERSION}/root/usr/bin:$PATH + # install cuda cudnn RUN curl -fsSL -v -o ./cuda_install.run -O $CUDA_URL &&\ chmod +x ./cuda_install.run &&\ @@ -93,12 +100,13 @@ RUN curl -fsSL -v -o ~/miniconda.sh -O https://repo.anaconda.com/miniconda/Mini /opt/conda/bin/conda create -n mmdeploy-3.10 python=3.10 -y &&\ export CUDA_INT=$(echo $CUDA_VERSION | awk '{split($0, a, "."); print a[1]a[2]}') &&\ /opt/conda/bin/conda create -n torch${TORCH_VERSION} python=3.8 -y &&\ - /opt/conda/envs/mmdeploy-3.6/bin/pip install --no-cache-dir setuptools wheel pyyaml &&\ + /opt/conda/envs/mmdeploy-3.6/bin/pip install --no-cache-dir setuptools wheel pyyaml packaging &&\ /opt/conda/envs/torch${TORCH_VERSION}/bin/pip install --no-cache-dir onnxruntime-gpu==${ONNXRUNTIME_VERSION} &&\ /opt/conda/envs/torch${TORCH_VERSION}/bin/pip install ${TENSORRT_DIR}/python/tensorrt-*-cp38-none-linux_x86_64.whl &&\ /opt/conda/envs/torch${TORCH_VERSION}/bin/pip install --no-cache-dir torch==${TORCH_VERSION}+cu${CUDA_INT} \ torchvision==${TORCHVISION_VERSION}+cu${CUDA_INT} -f https://download.pytorch.org/whl/torch_stable.html &&\ - /opt/conda/bin/conda clean -ya + /opt/conda/bin/conda init bash &&\ + /opt/conda/bin/conda clean -ya ENV CONDA=/opt/conda ENV PATH=$CONDA/bin:$PATH diff --git a/tools/scripts/build_prebuild_image.sh b/tools/scripts/build_prebuild_image.sh new file mode 100644 index 000000000..41ce4e5bd --- /dev/null +++ b/tools/scripts/build_prebuild_image.sh @@ -0,0 +1,29 @@ +#!/bin/sh + +set -e + 
+ip=${1} +port=${2:-8585} + +date_today=`date +'%Y%m%d'` + +# create http server (backgrounded; without '&' the script would block here forever) +nohup python3 -m http.server --directory /data2/shared/mmdeploy-manylinux2014_x86_64-cuda11.3 $port > tmp.log 2>&1 & +export ip +export port +export CUDA_URL=http://$ip:$port/cuda_11.3.0_465.19.01_linux.run +export CUDNN_URL=http://$ip:$port/cudnn-11.3-linux-x64-v8.2.1.32.tgz +export TENSORRT_URL=http://$ip:$port/TensorRT-8.2.3.0.Linux.x86_64-gnu.cuda-11.4.cudnn8.2.tar.gz +export TENSORRT_VERSION=8.2.3.0 +export TAG=manylinux2014_x86_64-cuda11.3 + +# build docker image +docker build ./docker/prebuild/ -t openmmlab/mmdeploy:$TAG \ +  --build-arg CUDA_URL=$CUDA_URL \ +  --build-arg CUDNN_URL=$CUDNN_URL \ +  --build-arg TENSORRT_URL=${TENSORRT_URL} --build-arg TENSORRT_VERSION=${TENSORRT_VERSION} + +# push to docker hub +docker login +docker push openmmlab/mmdeploy:$TAG