add kl_quant chain and polish prepare.sh
parent 19560a5470
commit 2c64a6469e
@@ -43,6 +43,7 @@ def main():
            'inference.pdiparams'))
    config["DataLoader"]["Eval"]["sampler"]["batch_size"] = 1
    config["DataLoader"]["Eval"]["loader"]["num_workers"] = 0

    init_logger()
    device = paddle.set_device("cpu")
    train_dataloader = build_dataloader(config["DataLoader"], "Eval", device,
@@ -67,6 +68,7 @@ def main():
        quantize_model_path=os.path.join(
            config["Global"]["save_inference_dir"], "quant_post_static_model"),
        sample_generator=sample_generator(train_dataloader),
        batch_size=config["DataLoader"]["Eval"]["sampler"]["batch_size"],
        batch_nums=10)
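The hunk above feeds the Eval dataloader into PaddleSlim's offline (post-training) quantization. A minimal, self-contained sketch of the same `quant_post_static` call follows; the model paths and the dummy calibration reader are illustrative stand-ins for the script's exported inference model and its Eval dataloader:

```python
import numpy as np
import paddle
import paddleslim

paddle.enable_static()
exe = paddle.static.Executor(paddle.CPUPlace())

def sample_generator():
    # Illustrative calibration reader: yields single CHW float32 samples.
    # The real script draws these samples from the Eval dataloader instead.
    def __reader__():
        for _ in range(10):
            yield np.random.rand(3, 224, 224).astype("float32")
    return __reader__

paddleslim.quant.quant_post_static(
    executor=exe,
    model_dir="./inference",                 # folder holding inference.pdmodel / inference.pdiparams
    quantize_model_path="./inference/quant_post_static_model",
    model_filename="inference.pdmodel",
    params_filename="inference.pdiparams",
    sample_generator=sample_generator(),
    batch_size=1,                            # matches the Eval sampler batch_size set above
    batch_nums=10,                           # calibrate on 10 batches, as in the diff
    algo="KL")                               # KL-divergence calibration, matching the kl_quant chain
```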
@@ -0,0 +1,18 @@
===========================cpp_infer_params===========================
model_name:MobileNetV3_large_x1_0_kl
cpp_infer_type:cls
cls_inference_model_dir:./MobileNetV3_large_x1_0_kl_quant_infer/
det_inference_model_dir:
cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/MobileNetV3_large_x1_0_kl_quant_infer.tar
det_inference_url:
infer_quant:False
inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml
use_gpu:True|False
enable_mkldnn:False
cpu_threads:1
batch_size:1
use_tensorrt:False
precision:fp32
image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
benchmark:False
generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py
@@ -0,0 +1,18 @@
===========================cpp_infer_params===========================
model_name:ResNet50_vd_kl_quant
cpp_infer_type:cls
cls_inference_model_dir:./ResNet50_vd_kl_quant_infer/
det_inference_model_dir:
cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/slim_model/ResNet50_vd_kl_quant_infer.tar
det_inference_url:
infer_quant:False
inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml
use_gpu:True|False
enable_mkldnn:False
cpu_threads:1
batch_size:1
use_tensorrt:False
precision:fp32
image_dir:./dataset/ILSVRC2012/val/ILSVRC2012_val_00000001.JPEG
benchmark:False
generate_yaml_cmd:python3.7 test_tipc/generate_cpp_yaml.py
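Both new cpp_infer_params files use the same colon-separated key:value layout consumed by the TIPC shell scripts. A hypothetical Python sketch (not part of the repo) that parses such a file and pulls down the quantized inference model named by `cls_inference_url`, mirroring the wget/tar steps in prepare.sh:

```python
import tarfile
import urllib.request

def load_tipc_params(path):
    # Read "key:value" lines into a dict, splitting on the first colon only
    # so values such as "https://..." URLs stay intact.
    params = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("="):   # skip the ===...=== banner
                continue
            key, _, value = line.partition(":")
            params[key] = value
    return params

cfg = load_tipc_params("cpp_infer_params.txt")      # path is illustrative
archive, _ = urllib.request.urlretrieve(cfg["cls_inference_url"],
                                        "kl_quant_infer.tar")
with tarfile.open(archive) as tar:
    tar.extractall(".")                             # e.g. ./ResNet50_vd_kl_quant_infer/
print(cfg["inference_cmd"])                         # ./deploy/cpp/build/clas_system -c inference_cls.yaml
```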
@@ -167,11 +167,11 @@ build/paddle_inference_install_dir/
 
 * The [Paddle Inference library download page](https://paddleinference.paddlepaddle.org.cn/user_guides/download_lib.html) provides Linux prediction libraries built against different CUDA versions; check the page and pick the version that matches your environment.
 
-Taking the `manylinux_cuda11.1_cudnn8.1_avx_mkl_trt7_gcc8.2` build as an example, download and extract it with the following commands:
+Taking the `manylinux_cuda10.1_cudnn7.6_avx_mkl_trt6_gcc8.2` build as an example, download and extract it with the following commands:
 
 ```shell
-wget https://paddle-inference-lib.bj.bcebos.com/2.2.2/cxx_c/Linux/GPU/x86-64_gcc8.2_avx_mkl_cuda11.1_cudnn8.1.1_trt7.2.3.4/paddle_inference.tgz
+wget https://paddle-inference-lib.bj.bcebos.com/2.2.2/cxx_c/Linux/GPU/x86-64_gcc8.2_avx_mkl_cuda10.1_cudnn7.6.5_trt6.0.1.5/paddle_inference.tgz
 
 tar -xvf paddle_inference.tgz
 ```
@@ -50,6 +50,8 @@ if [[ ${MODE} = "cpp_infer" ]]; then
     echo "################### build opencv ###################"
     rm -rf ./deploy/cpp/opencv-3.4.7.tar.gz ./deploy/cpp/opencv-3.4.7/
     pushd ./deploy/cpp/
+    wget -nc https://paddle-inference-lib.bj.bcebos.com/2.2.2/cxx_c/Linux/GPU/x86-64_gcc8.2_avx_mkl_cuda10.1_cudnn7.6.5_trt6.0.1.5/paddle_inference.tgz
+    tar xf paddle_inference.tgz
     wget -nc https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/opencv-3.4.7.tar.gz
     tar -xf opencv-3.4.7.tar.gz