mmdeploy/tools/scripts/build_base_image.sh
RunningLeon 2fcc8dc19d
[Feature]: Add github workflow based on self hosted runner (#1751)
* add win10 workflow

* update

* update and add dockerfile

* update script

* add build docker image script

* update

* support cuda10.2

* update docker file

* update build image tag

* add release docker file

* update release dockerfile

* fix ncnn python build

* test docker workflow

* remove docker build workflow

* fix comments

* add conda-incubator

* update build script and workflow

* fix comments
2023-03-21 11:44:11 +08:00

38 lines
1009 B
Bash

#!/bin/sh
# Build-time setup for the MMDeploy base docker image:
# parse arguments, start a throwaway HTTP server that serves the local
# TensorRT tarball, and export the build arguments used by docker build.
#
# Usage: build_base_image.sh <host-ip> [port]
#   host-ip : IP at which this machine is reachable from the docker build
#   port    : port for the temporary file server (default: 8585)
set -e
# Fail fast with a usage message instead of building a broken TENSORRT_URL.
ip=${1:?usage: $0 <host-ip> [port]}
# BUG FIX: was ${2:8585}, which is substring expansion (offset 8585 -> empty
# string); ':-' is the "default if unset or empty" operator actually intended.
port=${2:-8585}
date_today=$(date +'%Y%m%d')
# Serve the TensorRT tarball over HTTP so the docker build can download it.
# BUG FIX: must be backgrounded with '&'; plain nohup runs in the foreground
# and the script would block here forever.
nohup python3 -m http.server --directory /data2/shared/nvidia "$port" > tmp.log 2>&1 &
export TENSORRT_URL=http://$ip:$port/TensorRT-8.2.3.0.Linux.x86_64-gnu.cuda-11.4.cudnn8.2.tar.gz
export TENSORRT_VERSION=8.2.3.0
export CUDA_INT=113
export TAG=ubuntu20.04-cuda11.3
# Build the base image, then also stamp it with today's date so a
# dated snapshot of every published image is kept.
docker build ./docker/Base/ -t "openmmlab/mmdeploy:${TAG}" \
    --build-arg CUDA_INT="${CUDA_INT}" \
    --build-arg TENSORRT_URL="${TENSORRT_URL}" \
    --build-arg TENSORRT_VERSION="${TENSORRT_VERSION}"
docker tag "openmmlab/mmdeploy:${TAG}" "openmmlab/mmdeploy:${TAG}-${date_today}"
# Smoke-test the fresh image: start a detached container with the benchmark
# data, checkpoints and local mmdeploy checkout mounted inside it.
docker run --gpus=all -itd \
    -v /data2/benchmark:/root/workspace/openmmlab-data \
    -v /data2/checkpoints:/root/workspace/mmdeploy_checkpoints \
    -v ~/mmdeploy:/root/workspace/mmdeploy \
    "openmmlab/mmdeploy:${TAG}"
# Publish both the rolling tag and the dated tag to docker hub.
docker login
docker push "openmmlab/mmdeploy:${TAG}"
docker push "openmmlab/mmdeploy:${TAG}-${date_today}"