diff --git a/.circleci/scripts/linux/build.sh b/.circleci/scripts/linux/build.sh index fc7827d0a..cd48e9e2c 100644 --- a/.circleci/scripts/linux/build.sh +++ b/.circleci/scripts/linux/build.sh @@ -2,8 +2,10 @@ ARGS=("$@") -cd mmdeploy -MMDEPLOY_DIR=$(pwd) +SCRIPT_DIR=$(cd `dirname $0`; pwd) +MMDEPLOY_DIR=$SCRIPT_DIR/../../.. + +cd $MMDEPLOY_DIR mkdir -p build && cd build cmake .. -DMMDEPLOY_BUILD_SDK=ON -DMMDEPLOY_BUILD_TEST=ON -DMMDEPLOY_BUILD_SDK_PYTHON_API=ON \ -DMMDEPLOY_BUILD_EXAMPLES=ON -DMMDEPLOY_BUILD_SDK_CSHARP_API=ON \ diff --git a/.circleci/scripts/linux/test_full_pipeline.sh b/.circleci/scripts/linux/test_full_pipeline.sh new file mode 100644 index 000000000..468699dd0 --- /dev/null +++ b/.circleci/scripts/linux/test_full_pipeline.sh @@ -0,0 +1,104 @@ +#!/bin/sh + +set -e +# print env +#python3 tools/check_env.py +backend=${1:-ort} +device=${2:-cpu} +current_dir=$(cd `dirname $0`; pwd) +mmdeploy_dir=$current_dir/../../.. +cd $mmdeploy_dir + +work_dir=$mmdeploy_dir/work_dir +mkdir -p $work_dir $mmdeploy_dir/data + +model_cfg=$work_dir/resnet18_8xb32_in1k.py +checkpoint=$work_dir/resnet18_8xb32_in1k_20210831-fbbb1da6.pth +sdk_cfg=configs/mmcls/classification_sdk_dynamic.py +input_img=tests/data/tiger.jpeg + +python3 -m mim download mmcls --config resnet18_8xb32_in1k --dest $work_dir + +if [ $backend == "ort" ]; then + deploy_cfg=configs/mmcls/classification_onnxruntime_dynamic.py + model=$work_dir/end2end.onnx +elif [ $backend == "trt" ]; then + deploy_cfg=configs/mmcls/classification_tensorrt-fp16_dynamic-224x224-224x224.py + model=$work_dir/end2end.engine +elif [ $backend == "ncnn" ]; then + deploy_cfg=configs/mmcls/classification_ncnn_static.py + model="$work_dir/end2end.param $work_dir/end2end.bin" +else + echo "Unsupported Backend=$backend" + exit +fi + +echo "------------------------------------------------------------------------------------------------------------" +echo "deploy_cfg=$deploy_cfg" +echo "model_cfg=$model_cfg" +echo 
"checkpoint=$checkpoint" +echo "device=$device" +echo "------------------------------------------------------------------------------------------------------------" + +python3 tools/deploy.py \ + $deploy_cfg \ + $model_cfg \ + $checkpoint \ + $input_img \ + --device $device \ + --work-dir $work_dir \ + --dump-info + +if [ $backend == "trt" ]; then + echo "Running onnx2tensorrt" + python3 tools/onnx2tensorrt.py \ + $deploy_cfg \ + $work_dir/end2end.onnx \ + $work_dir/temp +fi + +# prepare dataset +wget -P data/ https://github.com/open-mmlab/mmdeploy/files/9401216/imagenet-val100.zip +unzip data/imagenet-val100.zip -d data/ + +echo "Running test with $backend" + +python3 tools/test.py \ + $deploy_cfg \ + $model_cfg \ + --model $model \ + --device $device \ + --log2file $work_dir/test_ort.log \ + --speed-test \ + --log-interval 50 \ + --warmup 20 \ + --batch-size 8 + +echo "Running test with sdk" + +# change topk for test +sed -i 's/"topk": 5/"topk": 1000/g' work_dir/pipeline.json + +python3 tools/test.py \ + $sdk_cfg \ + $model_cfg \ + --model $work_dir \ + --device $device \ + --log2file $work_dir/test_sdk.log \ + --speed-test \ + --log-interval 50 \ + --warmup 20 \ + --batch-size 8 + +# test profiler +echo "Profile sdk model" +python3 tools/profiler.py \ + $sdk_cfg \ + $model_cfg \ + ./data \ + --model $work_dir \ + --device $device \ + --batch-size 8 \ + --shape 224x224 + +echo "All done" diff --git a/.circleci/scripts/windows/test_full_pipeline.ps1 b/.circleci/scripts/windows/test_full_pipeline.ps1 new file mode 100644 index 000000000..001f96aed --- /dev/null +++ b/.circleci/scripts/windows/test_full_pipeline.ps1 @@ -0,0 +1,110 @@ +<# +.SYNOPSIS +A helper script to test tools of MMDeploy Converter on windows. 
+ +.Description + -Backend: support ort, trt + -Device: support cpu, cuda, cuda:0 + +.EXAMPLE +PS> .circleci\scripts\windows\test_full_pipeline.ps1 -Backend ort -Device cpu +#> + +param( + [Parameter(Mandatory = $true)] + [string] $Backend, + [string] $Device +) + +$MMDeploy_DIR="$PSScriptRoot\..\..\.." +Set-Location $MMDeploy_DIR + +$work_dir="work_dir" +New-Item -Path $work_dir, .\data -ItemType Directory -Force +$model_cfg="$work_dir\resnet18_8xb32_in1k.py" +$checkpoint="$work_dir\resnet18_8xb32_in1k_20210831-fbbb1da6.pth" +$sdk_cfg="configs\mmcls\classification_sdk_dynamic.py" +$input_img="tests\data\tiger.jpeg" + +python -m mim download mmcls --config resnet18_8xb32_in1k --dest $work_dir + +if ($Backend -eq "ort") { + $deploy_cfg="configs\mmcls\classification_onnxruntime_dynamic.py" + $model="$work_dir\end2end.onnx" +} elseif ($Backend -eq "trt") { + $deploy_cfg="configs\mmcls\classification_tensorrt-fp16_dynamic-224x224-224x224.py" + $model="$work_dir\end2end.engine" +} else { + Write-Host "Unsupported Backend=$Backend" + Exit +} + +Write-Host "--------------------------------------" +Write-Host "deploy_cfg=$deploy_cfg" +Write-Host "model_cfg=$model_cfg" +Write-Host "checkpoint=$checkpoint" +Write-Host "device=$device" +Write-Host "--------------------------------------" + +python tools\deploy.py ` + $deploy_cfg ` + $model_cfg ` + $checkpoint ` + $input_img ` + --device $device ` + --work-dir $work_dir ` + --dump-info + +# prepare dataset +Invoke-WebRequest -Uri https://github.com/open-mmlab/mmdeploy/files/9401216/imagenet-val100.zip -OutFile $pwd\data\imagenet-val100.zip +Add-Type -AssemblyName System.IO.Compression.FileSystem ; [System.IO.Compression.ZipFile]::ExtractToDirectory("$pwd\data\imagenet-val100.zip", "$pwd\data\") + +Write-Host "Running test with ort" +python tools\test.py ` + $deploy_cfg ` + $model_cfg ` + --model $model ` + --device $device ` + --log2file $work_dir\test_ort.log ` + --speed-test ` + --log-interval 50 ` + --warmup 20 ` + 
--batch-size 8 + + +Write-Host "Prepare dataset" +# change topk for test +$src_topk='"topk": 5' +$dst_topk='"topk": 1000' +$src_pipeline_file="$work_dir\pipeline.json" +$tmp_pipeline_file="$work_dir\pipeline_tmp.json" +Move-Item $src_pipeline_file $tmp_pipeline_file -force +(Get-Content -Path $tmp_pipeline_file) -replace $src_topk, $dst_topk | Add-Content -Path $src_pipeline_file + +Write-Host "test sdk model" + +python tools\test.py ` + $sdk_cfg ` + $model_cfg ` + --model $work_dir ` + --device $device ` + --log2file $work_dir\test_sdk.log ` + --speed-test ` + --log-interval 50 ` + --warmup 20 ` + --batch-size 8` + +# test profiler +Write-Host "Profile sdk model" +python tools\profiler.py ` + $sdk_cfg ` + $model_cfg ` + .\data ` + --model $work_dir ` + --device $device ` + --batch-size 8 ` + --shape 224x224 + +# remove temp data +Remove-Item -Path "$work_dir" -Force -Recurse +Write-Host "All done" diff --git a/.github/scripts/test_mmcls_full_pipeline.sh b/.github/scripts/test_mmcls_full_pipeline.sh deleted file mode 100644 index 178b784f7..000000000 --- a/.github/scripts/test_mmcls_full_pipeline.sh +++ /dev/null @@ -1,67 +0,0 @@ -#!/bin/sh - -set -e -# print env -python tools/check_env.py - -deploy_cfg=configs/mmcls/classification_onnxruntime_dynamic.py -device=cpu -python -m mim download mmcls --config resnet18_8xb32_in1k --dest ../ -model_cfg=../resnet18_8xb32_in1k.py -checkpoint=../resnet18_8xb32_in1k_20210831-fbbb1da6.pth -sdk_cfg=configs/mmcls/classification_sdk_dynamic.py -input_img=tests/data/tiger.jpeg -work_dir=work_dir - -echo "------------------------------------------------------------------------------------------------------------" -echo "deploy_cfg=$deploy_cfg" -echo "model_cfg=$model_cfg" -echo "checkpoint=$checkpoint" -echo "device=$device" -echo "------------------------------------------------------------------------------------------------------------" - -mkdir -p $work_dir - -python tools/deploy.py \ - $deploy_cfg \ - $model_cfg \ - 
$checkpoint \ - $input_img \ - --device $device \ - --work-dir $work_dir \ - --dump-info - -# prepare dataset -wget -P data/ https://github.com/open-mmlab/mmdeploy/files/9401216/imagenet-val100.zip -unzip data/imagenet-val100.zip -d data/ - -echo "Running test with ort" - -python tools/test.py \ - $deploy_cfg \ - $model_cfg \ - --model $work_dir/end2end.onnx \ - --device $device \ - --device $device \ - --log2file $work_dir/test_ort.log \ - --speed-test \ - --log-interval 50 \ - --warmup 20 \ - --batch-size 32 - -echo "Running test with sdk" - -# change topk for test -sed -i 's/"topk": 5/"topk": 1000/g' work_dir/pipeline.json - -python tools/test.py \ - $sdk_cfg \ - $model_cfg \ - --model $work_dir \ - --device $device \ - --device $device \ - --log2file $work_dir/test_sdk.log \ - --speed-test \ - --log-interval 50 \ - --warmup 20 \ - --batch-size 1 diff --git a/.github/workflows/backend-ort.yml b/.github/workflows/backend-ort.yml index ccf3af51f..c36a3ad0a 100644 --- a/.github/workflows/backend-ort.yml +++ b/.github/workflows/backend-ort.yml @@ -43,4 +43,4 @@ jobs: export MMDEPLOY_DIR=$(pwd) export ONNXRUNTIME_DIR=$MMDEPLOY_DIR/../mmdeploy-dep/onnxruntime-linux-x64-1.8.1 export LD_LIBRARY_PATH=$ONNXRUNTIME_DIR/lib:$MMDEPLOY_DIR/build/install/lib:$LD_LIBRARY_PATH - bash .github/scripts/test_mmcls_full_pipeline.sh + bash .circleci/scripts/linux/test_full_pipeline.sh ort cpu diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index e7fe07167..5a9afee1b 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -218,3 +218,106 @@ jobs: env_vars: OS,PYTHON,CPLUS name: codecov-umbrella fail_ci_if_error: false + + build_cuda113_linux: + runs-on: [self-hosted, linux-3090] + container: + image: openmmlab/mmdeploy:ubuntu20.04-cuda11.3 + options: "--gpus=all --ipc=host" + #volumes: + # - /data2/checkpoints:/__w/mmdeploy/mmdeploy_checkpoints + # - /data2/benchmark:/__w/mmdeploy/data + steps: + - name: Checkout repository + uses: 
actions/checkout@v3 + with: + submodules: recursive + - name: Install dependencies + run: | + apt update && apt install unzip + python3 -V + python3 -m pip install opencv-python==4.5.4.60 opencv-python-headless==4.5.4.60 opencv-contrib-python==4.5.4.60 + python3 -m pip install openmim numpy pycuda clip + python3 -m pip install -r requirements.txt + python3 -m mim install $(cat requirements/codebases.txt | grep mmcls) + python3 -m pip list + - name: Build SDK + run: | + export Torch_DIR=$(python3 -c "import torch;print(torch.utils.cmake_prefix_path + '/Torch')") + bash .circleci/scripts/linux/build.sh "cpu;cuda" "ort;trt;ncnn;torchscript" \ + -Dpplcv_DIR=${pplcv_DIR} \ + -DTENSORRT_DIR=${TENSORRT_DIR} \ + -DONNXRUNTIME_DIR=${ONNXRUNTIME_DIR} \ + -Dncnn_DIR=${ncnn_DIR} \ + -DTorch_DIR=${Torch_DIR} + ls build/lib + - name: Install converter + run: | + rm -rf .eggs && python3 -m pip install -e . + export LD_LIBRARY_PATH="/root/workspace/mmdeploy/build/lib:${LD_LIBRARY_PATH}" + python3 tools/check_env.py + - name: Test TensorRT pipeline + run: | + export LD_LIBRARY_PATH="/root/workspace/mmdeploy/build/lib:${LD_LIBRARY_PATH}" + export LD_LIBRARY_PATH="/root/workspace/mmdeploy/mmdeploy/lib:${LD_LIBRARY_PATH}" + bash .circleci/scripts/linux/test_full_pipeline.sh trt cuda + + build_cuda113_windows: + runs-on: [self-hosted, win10-3080] + env: + BASE_ENV: cuda11.3-cudnn8.2-py3.8-torch1.10 + defaults: + run: + shell: powershell + steps: + - name: Checkout repository + uses: actions/checkout@v3 + with: + submodules: 'recursive' + - name: Setup Python Environment + run: | + echo "============================== Info ==============================" + echo "env:path= $env:path" + echo "============================== Info ==============================" + conda info + conda info -e + conda create -p $pwd\tmp_env --clone $env:BASE_ENV -y + conda activate $pwd\tmp_env + python -V + python -m pip install openmim + python -m pip install -r requirements.txt -r requirements/backends.txt + 
python -m mim install "mmcls>=1.0.0rc2" + python -m pip list + - name: Build mmdeploy + run: | + conda activate $pwd\tmp_env + python -V + mkdir build + cd build + cmake .. -A x64 -T v142 ` + -DMMDEPLOY_BUILD_TEST=ON ` + -DMMDEPLOY_BUILD_SDK_CSHARP_API=ON ` + -DMMDEPLOY_BUILD_SDK_PYTHON_API=ON ` + -DMMDEPLOY_BUILD_SDK=ON ` + -DMMDEPLOY_TARGET_DEVICES='cuda' ` + -DMMDEPLOY_TARGET_BACKENDS='ort;trt' ` + -DMMDEPLOY_CODEBASES='all' ` + -Dpplcv_DIR="$env:PPLCV_DIR\pplcv-build\install\lib\cmake\ppl" ` + -DOpenCV_DIR="$env:OPENCV_DIR\build\x64\vc15\lib" ` + -DTENSORRT_DIR="$env:TENSORRT_DIR" ` + -DONNXRUNTIME_DIR="$env:ONNXRUNTIME_DIR" ` + -DMMDEPLOY_BUILD_EXAMPLES=ON ` + -DCUDNN_DIR="$env:CUDNN_DIR" + cmake --build . --config Release -- /m + cmake --install . --config Release + ls $pwd\bin\Release + - name: Install mmdeploy converter + run: | + conda activate $pwd\tmp_env + python -m pip install -e . + python .\tools\check_env.py + - name: Test trt full pipeline + run: | + conda activate $pwd\tmp_env + $env:path = "$pwd\build\bin\Release;" + $env:path + .circleci\scripts\windows\test_full_pipeline.ps1 -Backend trt -Device cuda diff --git a/docker/Base/Dockerfile b/docker/Base/Dockerfile new file mode 100644 index 000000000..c93c0e8c9 --- /dev/null +++ b/docker/Base/Dockerfile @@ -0,0 +1,110 @@ +ARG CUDA_INT=113 + +FROM nvidia/cuda:11.3.0-cudnn8-devel-ubuntu20.04 AS cuda-113 +FROM nvidia/cuda:10.2-cudnn8-devel-ubuntu18.04 AS cuda-102 + +FROM cuda-${CUDA_INT} AS final + +ARG TORCH_VERSION=1.10.0 +ARG TORCHVISION_VERSION=0.11.0 + +# important dependencies +ARG OPENCV_VERSION=4.5.4.60 +ARG PPLCV_VERSION=0.7.0 + +# backends +ARG ONNXRUNTIME_VERSION=1.8.1 +ARG PPLNN_VERSION=0.8.1 +ARG NCNN_VERSION=20221128 +ARG TENSORRT_VERSION=8.2.3.0 +# tensorrt tar file url +ARG TENSORRT_URL + +USER root +WORKDIR /root/workspace + +ENV DEBIAN_FRONTEND=noninteractive +ENV FORCE_CUDA="1" + +RUN apt-get update && apt-get install -y --no-install-recommends \ + apt-utils \ + ca-certificates \ 
gcc-7 \ + g++-7 \ + git \ + vim \ + wget \ + libopencv-dev \ + libprotobuf-dev protobuf-compiler \ + unzip \ + python3-dev python3-pip \ + && rm -rf /var/lib/apt/lists/* + +ENV CUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda +ENV LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH + +# install jdk, onnxruntime, openvino, and other python packages +RUN wget https://download.java.net/java/GA/jdk18/43f95e8614114aeaa8e8a5fcf20a682d/36/GPL/openjdk-18_linux-x64_bin.tar.gz &&\ + tar xvf openjdk-18_linux-x64_bin.tar.gz && rm -rf openjdk-18_linux-x64_bin.tar.gz && \ + wget https://github.com/microsoft/onnxruntime/releases/download/v${ONNXRUNTIME_VERSION}/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}.tgz &&\ + tar -xzvf onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}.tgz && rm onnxruntime-*.tgz &&\ + wget https://github.com/Kitware/CMake/releases/download/v3.25.2/cmake-3.25.2-linux-x86_64.tar.gz &&\ + tar -xzvf cmake-3.25.2-linux-x86_64.tar.gz && rm cmake-*.tar.gz && mv cmake-* cmake &&\ + export CUDA_INT=$(echo $CUDA_VERSION | awk '{split($0, a, "."); print a[1]a[2]}') &&\ + python3 -m pip install --no-cache-dir --upgrade pip setuptools wheel &&\ + python3 -m pip install --no-cache-dir onnxruntime-gpu==${ONNXRUNTIME_VERSION} &&\ + python3 -m pip install --no-cache-dir openvino openvino-dev[onnx] &&\ + python3 -m pip install --no-cache-dir opencv-python==${OPENCV_VERSION} opencv-python-headless==${OPENCV_VERSION} opencv-contrib-python==${OPENCV_VERSION} &&\ + python3 -m pip install --no-cache-dir torch==${TORCH_VERSION}+cu${CUDA_INT} torchvision==${TORCHVISION_VERSION}+cu${CUDA_INT} -f https://download.pytorch.org/whl/torch_stable.html + +# create env +ENV JAVA_HOME=/root/workspace/jdk-18 +ENV PATH=$JAVA_HOME/bin:/root/workspace/cmake/bin:$PATH +ENV ONNXRUNTIME_VERSION=${ONNXRUNTIME_VERSION} +ENV ONNXRUNTIME_DIR=/root/workspace/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION} +ENV LD_LIBRARY_PATH=${ONNXRUNTIME_DIR}/lib:$LD_LIBRARY_PATH + +### install ppl.nn +RUN git clone --depth 1 
--branch v${PPLNN_VERSION} --recursive https://github.com/openppl-public/ppl.nn.git &&\ + cd ppl.nn &&\ + export PYTHON_INCLUDE_DIR=$(python3 -c 'import sysconfig;print(sysconfig.get_paths()["include"])') &&\ + ./build.sh -DPPLNN_USE_X86_64=ON -DPPLNN_USE_CUDA=ON -DPPLNN_ENABLE_PYTHON_API=ON -DPYTHON3_INCLUDE_DIRS=${PYTHON_INCLUDE_DIR} &&\ + cd ./python/package && \ + ./build.sh && \ + cd - && mv pplnn-build/install ./ && rm -rf pplnn-build &&\ + cd /tmp/pyppl-package/dist && \ + python3 -m pip install pyppl*.whl && rm *.whl + +ENV pplnn_DIR=/root/workspace/ppl.nn/install/lib/cmake/ppl +ENV PYTHONPATH=/root/workspace/ppl.nn/install/lib:$PYTHONPATH + + +### build ncnn +RUN git clone --depth 1 --branch ${NCNN_VERSION} --recursive https://github.com/Tencent/ncnn.git &&\ + python3 -m pip install pybind11 &&\ + mkdir ncnn/build && cd ncnn/build &&\ + cmake -DNCNN_VULKAN=OFF -DNCNN_PYTHON=ON -DNCNN_BUILD_TOOLS=OFF -DCMAKE_INSTALL_PREFIX=../install .. &&\ + make -j $(nproc) && make install &&\ + cd .. && python3 -m pip install . 
&& rm -rf ./build + +ENV ncnn_DIR=/root/workspace/ncnn/install/lib/cmake/ncnn + +### install ppl.cv +RUN git clone --depth 1 --branch v${PPLCV_VERSION} https://github.com/openppl-public/ppl.cv.git &&\ + cd ppl.cv &&\ + ./build.sh cuda && mv cuda-build/install ./ && rm -rf cuda-build + +ENV pplcv_DIR=/root/workspace/ppl.cv/install/lib/cmake/ppl + +### install tensorrt +RUN wget -c $TENSORRT_URL && \ + tar -zxvf /root/workspace/TensorRT-${TENSORRT_VERSION}*.tar.gz -C /root/workspace &&\ + rm -rf TensorRT-${TENSORRT_VERSION}*.tar.gz &&\ + ln -sf /root/workspace/TensorRT-${TENSORRT_VERSION} /root/workspace/TensorRT &&\ + cd /root/workspace/TensorRT && rm -rf data doc samples uff &&\ + export PY_VERSION=$(python3 -V | awk '{print $2}' | awk '{split($0, a, "."); print a[1]a[2]}') &&\ + python3 -m pip install ./python/tensorrt-*-cp${PY_VERSION}-none-linux_x86_64.whl + +ENV TENSORRT_DIR=/root/workspace/TensorRT +ENV LD_LIBRARY_PATH=$TENSORRT_DIR/lib:$LD_LIBRARY_PATH +ENV PATH=$TENSORRT_DIR/bin:$PATH diff --git a/docker/Release/Dockerfile b/docker/Release/Dockerfile new file mode 100644 index 000000000..dd6a2a985 --- /dev/null +++ b/docker/Release/Dockerfile @@ -0,0 +1,25 @@ +FROM openmmlab/mmdeploy:ubuntu20.04-cuda11.3 + +ARG MMDEPLOY_VERSION + +ENV BACKUP_LD_LIBRARY_PATH=$LD_LIBRARY_PATH +ENV LD_LIBRARY_PATH=/usr/local/cuda/compat:$LD_LIBRARY_PATH + +# build mmdeploy +RUN git clone --recursive -b $MMDEPLOY_VERSION --depth 1 https://github.com/open-mmlab/mmdeploy.git &&\ + export Torch_DIR=$(python3 -c "import torch;print(torch.utils.cmake_prefix_path + '/Torch')") &&\ + bash mmdeploy/.circleci/scripts/linux/build.sh "cpu;cuda" "ort;trt;ncnn;torchscript" \ + -Dpplcv_DIR=${pplcv_DIR} \ + -DTENSORRT_DIR=${TENSORRT_DIR} \ + -DONNXRUNTIME_DIR=${ONNXRUNTIME_DIR} \ + -Dncnn_DIR=${ncnn_DIR} \ + -DTorch_DIR=${Torch_DIR} &&\ + cd mmdeploy &&\ + python3 -m pip install -U openmim pycuda &&\ + python3 -m pip install -r requirements.txt &&\ + python3 -m pip install -e . 
+ +ENV MMDeploy_DIR="/root/workspace/mmdeploy/build/install/lib/cmake/MMDeploy" +ENV LD_LIBRARY_PATH="/root/workspace/mmdeploy/build/lib:${BACKUP_LD_LIBRARY_PATH}" +ENV PATH="/root/workspace/mmdeploy/build/bin:${PATH}" +ENV PYTHONPATH="/root/workspace/mmdeploy/build/lib:${PYTHONPATH}" diff --git a/tools/scripts/build_base_image.sh b/tools/scripts/build_base_image.sh new file mode 100644 index 000000000..f686a12f9 --- /dev/null +++ b/tools/scripts/build_base_image.sh @@ -0,0 +1,37 @@ +#!/bin/sh + +set -e + +ip=${1} +port=${2:-8585} + +date_today=`date +'%Y%m%d'` + +# create http server +nohup python3 -m http.server --directory /data2/shared/nvidia $port > tmp.log 2>&1 & + +export TENSORRT_URL=http://$ip:$port/TensorRT-8.2.3.0.Linux.x86_64-gnu.cuda-11.4.cudnn8.2.tar.gz +export TENSORRT_VERSION=8.2.3.0 +export CUDA_INT=113 +export TAG=ubuntu20.04-cuda11.3 + +# build docker image +docker build ./docker/Base/ -t openmmlab/mmdeploy:$TAG \ + --build-arg CUDA_INT=$CUDA_INT \ + --build-arg TENSORRT_URL=${TENSORRT_URL} \ + --build-arg TENSORRT_VERSION=${TENSORRT_VERSION} + +docker tag openmmlab/mmdeploy:$TAG openmmlab/mmdeploy:${TAG}-${date_today} + +# test docker image
docker run --gpus=all -itd \ + -v /data2/benchmark:/root/workspace/openmmlab-data \ + -v /data2/checkpoints:/root/workspace/mmdeploy_checkpoints \ + -v ~/mmdeploy:/root/workspace/mmdeploy \ + openmmlab/mmdeploy:$TAG + + +# push to docker hub +docker login +docker push openmmlab/mmdeploy:$TAG +docker push openmmlab/mmdeploy:$TAG-${date_today}