Add tensorrt int8 and fp16 configs for all the remaining codebases (#152)

* add configs for all codebases except mmocr and fix some bugs

* add ocr det

* add fp16 and int8 config for text recog
This commit is contained in:
AllentDan 2021-10-27 15:59:02 +08:00 committed by GitHub
parent 227bebaade
commit 5fdd75fba1
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
38 changed files with 388 additions and 12 deletions

View File

@ -0,0 +1,6 @@
# Shared TensorRT backend base config: builds engines in FP16 mode.
# max_workspace_size=0 is a placeholder; deriving configs override it
# (the task configs below all set 1 << 30).
import tensorrt as trt
backend_config = dict(
    type='tensorrt',
    common_config=dict(
        fp16_mode=True, log_level=trt.Logger.INFO, max_workspace_size=0))

View File

@ -0,0 +1,13 @@
# TensorRT FP16 deploy config for classification with a dynamic batch size.
_base_ = ['./classification_dynamic.py', '../_base_/backends/tensorrt_fp16.py']
# Export ONNX with a fixed 224x224 spatial input.
onnx_config = dict(input_shape=[224, 224])
backend_config = dict(
    # 1 GiB builder workspace (overrides the base config's placeholder 0).
    common_config=dict(max_workspace_size=1 << 30),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    # Shapes are presumably [N, C, H, W]; only the batch
                    # dimension varies (1..64, optimized for 4).
                    min_shape=[1, 3, 224, 224],
                    opt_shape=[4, 3, 224, 224],
                    max_shape=[64, 3, 224, 224])))
    ])

View File

@ -0,0 +1,13 @@
# TensorRT FP16 deploy config for classification with a fully static shape.
_base_ = ['./classification_static.py', '../_base_/backends/tensorrt_fp16.py']
onnx_config = dict(input_shape=[224, 224])
backend_config = dict(
    # 1 GiB builder workspace (overrides the base config's placeholder 0).
    common_config=dict(max_workspace_size=1 << 30),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    # min == opt == max: a single fixed 1x3x224x224 profile.
                    min_shape=[1, 3, 224, 224],
                    opt_shape=[1, 3, 224, 224],
                    max_shape=[1, 3, 224, 224])))
    ])

View File

@ -0,0 +1,12 @@
# TensorRT FP16 deploy config for detection with dynamic input resolution.
_base_ = ['./base_dynamic.py', '../../_base_/backends/tensorrt_fp16.py']
backend_config = dict(
    # 1 GiB builder workspace (overrides the base config's placeholder 0).
    common_config=dict(max_workspace_size=1 << 30),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    # Single image, spatial size dynamic from 320x320 up to
                    # 1344x1344; 800x1344 is the optimization target.
                    min_shape=[1, 3, 320, 320],
                    opt_shape=[1, 3, 800, 1344],
                    max_shape=[1, 3, 1344, 1344])))
    ])

View File

@ -0,0 +1,14 @@
# TensorRT FP16 deploy config for detection with a fixed 800x1344 input.
_base_ = ['./base_static.py', '../../_base_/backends/tensorrt_fp16.py']
# NOTE: input_shape here is (W, H) as a tuple, unlike the list form used by
# other configs in this commit — presumably equivalent; verify downstream.
onnx_config = dict(input_shape=(1344, 800))
backend_config = dict(
    # 1 GiB builder workspace (overrides the base config's placeholder 0).
    common_config=dict(max_workspace_size=1 << 30),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    # min == opt == max: a single fixed 1x3x800x1344 profile.
                    min_shape=[1, 3, 800, 1344],
                    opt_shape=[1, 3, 800, 1344],
                    max_shape=[1, 3, 800, 1344])))
    ])

View File

@ -0,0 +1 @@
# Alias config: reuse the shared FP16 dynamic-shape detection base unchanged.
_base_ = ['../_base_/base_tensorrt_fp16_dynamic-320x320-1344x1344.py']

View File

@ -0,0 +1 @@
# Alias config: reuse the shared FP16 static-shape detection base unchanged.
_base_ = ['../_base_/base_tensorrt_fp16_static-800x1344.py']

View File

@ -0,0 +1,19 @@
# FP16 TensorRT config for a partitioned two-stage detector: the model is
# split into two engines, so two input-shape profiles are declared.
_base_ = ['./two-stage_tensorrt_fp16_dynamic-320x320-1344x1344.py']
partition_config = dict(type='two_stage', apply_marks=True)
backend_config = dict(
    common_config=dict(max_workspace_size=1 << 30),
    model_inputs=[
        # First partition: the image input, dynamic spatial size.
        dict(
            input_shapes=dict(
                input=dict(
                    min_shape=[1, 3, 320, 320],
                    opt_shape=[1, 3, 800, 1344],
                    max_shape=[1, 3, 1344, 1344]))),
        # Second partition: per-RoI box features (presumably RoI-pooled,
        # 256 channels at 7x7); the leading dim is the dynamic RoI count.
        dict(
            input_shapes=dict(
                bbox_feats=dict(
                    min_shape=[500, 256, 7, 7],
                    opt_shape=[1000, 256, 7, 7],
                    max_shape=[2000, 256, 7, 7])))
    ])

View File

@ -0,0 +1 @@
# Alias config: reuse the shared FP16 dynamic-shape detection base unchanged.
_base_ = ['../_base_/base_tensorrt_fp16_dynamic-320x320-1344x1344.py']

View File

@ -0,0 +1 @@
# Alias config: reuse the shared FP16 static-shape detection base unchanged.
_base_ = ['../_base_/base_tensorrt_fp16_static-800x1344.py']

View File

@ -0,0 +1,14 @@
# TensorRT FP16 deploy config for mask-based detection, dynamic resolution.
_base_ = [
    '../_base_/mask_base_dynamic.py', '../../_base_/backends/tensorrt_fp16.py'
]
backend_config = dict(
    # 1 GiB builder workspace (overrides the base config's placeholder 0).
    common_config=dict(max_workspace_size=1 << 30),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    # Spatial size dynamic: 320x320 .. 1344x1344, opt 800x1344.
                    min_shape=[1, 3, 320, 320],
                    opt_shape=[1, 3, 800, 1344],
                    max_shape=[1, 3, 1344, 1344])))
    ])

View File

@ -0,0 +1,15 @@
# TensorRT FP16 deploy config for mask-based detection, fixed 800x1344 input.
_base_ = [
    '../_base_/mask_base_static.py', '../../_base_/backends/tensorrt_fp16.py'
]
# input_shape given as a (W, H) tuple here — presumably equivalent to the
# list form used elsewhere; verify downstream.
onnx_config = dict(input_shape=(1344, 800))
backend_config = dict(
    common_config=dict(max_workspace_size=1 << 30),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    # min == opt == max: a single fixed 1x3x800x1344 profile.
                    min_shape=[1, 3, 800, 1344],
                    opt_shape=[1, 3, 800, 1344],
                    max_shape=[1, 3, 800, 1344])))
    ])

View File

@ -0,0 +1,14 @@
# TensorRT INT8 deploy config for mask-based detection, dynamic resolution.
# Quantization settings come from the inherited tensorrt_int8 backend base
# (not shown in this view).
_base_ = [
    '../_base_/mask_base_dynamic.py', '../../_base_/backends/tensorrt_int8.py'
]
backend_config = dict(
    common_config=dict(max_workspace_size=1 << 30),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    # Spatial size dynamic: 320x320 .. 1344x1344, opt 800x1344.
                    min_shape=[1, 3, 320, 320],
                    opt_shape=[1, 3, 800, 1344],
                    max_shape=[1, 3, 1344, 1344])))
    ])

View File

@ -0,0 +1,15 @@
# TensorRT INT8 deploy config for mask-based detection, fixed 800x1344 input.
_base_ = [
    '../_base_/mask_base_static.py', '../../_base_/backends/tensorrt_int8.py'
]
# input_shape given as a (W, H) tuple — presumably equivalent to the list
# form used elsewhere; verify downstream.
onnx_config = dict(input_shape=(1344, 800))
backend_config = dict(
    common_config=dict(max_workspace_size=1 << 30),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    # min == opt == max: a single fixed 1x3x800x1344 profile.
                    min_shape=[1, 3, 800, 1344],
                    opt_shape=[1, 3, 800, 1344],
                    max_shape=[1, 3, 800, 1344])))
    ])

View File

@ -0,0 +1,11 @@
# TensorRT FP16 deploy config for super-resolution, dynamic input size.
_base_ = [
    './super-resolution_dynamic.py', '../../_base_/backends/tensorrt_fp16.py'
]
backend_config = dict(
    # Fix: every sibling config overrides the backend base's placeholder
    # max_workspace_size=0; without this the TensorRT builder gets no
    # workspace, which severely limits tactic selection.
    common_config=dict(max_workspace_size=1 << 30),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    # Low-resolution input, 32x32 .. 512x512, opt 256x256.
                    min_shape=[1, 3, 32, 32],
                    opt_shape=[1, 3, 256, 256],
                    max_shape=[1, 3, 512, 512])))
    ])

View File

@ -0,0 +1,12 @@
# TensorRT FP16 deploy config for super-resolution, fixed 256x256 input.
_base_ = [
    './super-resolution_static.py', '../../_base_/backends/tensorrt_fp16.py'
]
onnx_config = dict(input_shape=[256, 256])
backend_config = dict(
    # Fix: override the backend base's placeholder max_workspace_size=0,
    # matching every other task config (1 GiB builder workspace).
    common_config=dict(max_workspace_size=1 << 30),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    # min == opt == max: a single fixed 1x3x256x256 profile.
                    min_shape=[1, 3, 256, 256],
                    opt_shape=[1, 3, 256, 256],
                    max_shape=[1, 3, 256, 256])))
    ])

View File

@ -0,0 +1,11 @@
# TensorRT INT8 deploy config for super-resolution, dynamic input size.
# Quantization settings come from the inherited tensorrt_int8 backend base.
_base_ = [
    './super-resolution_dynamic.py', '../../_base_/backends/tensorrt_int8.py'
]
backend_config = dict(
    # Fix: override the backend base's placeholder max_workspace_size=0,
    # matching every other task config (1 GiB builder workspace).
    common_config=dict(max_workspace_size=1 << 30),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    # Low-resolution input, 32x32 .. 512x512, opt 256x256.
                    min_shape=[1, 3, 32, 32],
                    opt_shape=[1, 3, 256, 256],
                    max_shape=[1, 3, 512, 512])))
    ])

View File

@ -0,0 +1,12 @@
# TensorRT INT8 deploy config for super-resolution, fixed 256x256 input.
_base_ = [
    './super-resolution_static.py', '../../_base_/backends/tensorrt_int8.py'
]
onnx_config = dict(input_shape=[256, 256])
backend_config = dict(
    # Fix: override the backend base's placeholder max_workspace_size=0,
    # matching every other task config (1 GiB builder workspace).
    common_config=dict(max_workspace_size=1 << 30),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    # min == opt == max: a single fixed 1x3x256x256 profile.
                    min_shape=[1, 3, 256, 256],
                    opt_shape=[1, 3, 256, 256],
                    max_shape=[1, 3, 256, 256])))
    ])

View File

@ -0,0 +1,13 @@
# TensorRT FP16 deploy config for OCR text detection, dynamic resolution.
_base_ = [
    './text-detection_dynamic.py', '../../_base_/backends/tensorrt_fp16.py'
]
backend_config = dict(
    # 1 GiB builder workspace (overrides the base config's placeholder 0).
    common_config=dict(max_workspace_size=1 << 30),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    # Spatial size dynamic: 320x320 .. 1024x1824, opt 600x800.
                    min_shape=[1, 3, 320, 320],
                    opt_shape=[1, 3, 600, 800],
                    max_shape=[1, 3, 1024, 1824])))
    ])

View File

@ -0,0 +1,15 @@
# TensorRT FP16 deploy config for OCR text detection, fixed 512x512 input.
_base_ = [
    './text-detection_static.py', '../../_base_/backends/tensorrt_fp16.py'
]
onnx_config = dict(input_shape=[512, 512])
backend_config = dict(
    common_config=dict(max_workspace_size=1 << 30),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    # min == opt == max: a single fixed 1x3x512x512 profile.
                    min_shape=[1, 3, 512, 512],
                    opt_shape=[1, 3, 512, 512],
                    max_shape=[1, 3, 512, 512])))
    ])

View File

@ -0,0 +1,13 @@
# TensorRT INT8 deploy config for OCR text detection, dynamic resolution.
# Quantization settings come from the inherited tensorrt_int8 backend base.
_base_ = [
    './text-detection_dynamic.py', '../../_base_/backends/tensorrt_int8.py'
]
backend_config = dict(
    common_config=dict(max_workspace_size=1 << 30),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    # Spatial size dynamic: 320x320 .. 1024x1824, opt 600x800.
                    min_shape=[1, 3, 320, 320],
                    opt_shape=[1, 3, 600, 800],
                    max_shape=[1, 3, 1024, 1824])))
    ])

View File

@ -0,0 +1,15 @@
# TensorRT INT8 deploy config for OCR text detection, fixed 512x512 input.
_base_ = [
    './text-detection_static.py', '../../_base_/backends/tensorrt_int8.py'
]
onnx_config = dict(input_shape=[512, 512])
backend_config = dict(
    common_config=dict(max_workspace_size=1 << 30),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    # min == opt == max: a single fixed 1x3x512x512 profile.
                    min_shape=[1, 3, 512, 512],
                    opt_shape=[1, 3, 512, 512],
                    max_shape=[1, 3, 512, 512])))
    ])

View File

@ -0,0 +1,13 @@
# TensorRT FP16 deploy config for OCR text recognition, dynamic width.
_base_ = [
    './text-recognition_dynamic.py', '../../_base_/backends/tensorrt_fp16.py'
]
backend_config = dict(
    common_config=dict(max_workspace_size=1 << 30),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    # Grayscale (1-channel) crops, fixed height 32; only the
                    # width is dynamic (32..640, optimized for 64).
                    min_shape=[1, 1, 32, 32],
                    opt_shape=[1, 1, 32, 64],
                    max_shape=[1, 1, 32, 640])))
    ])

View File

@ -0,0 +1,15 @@
# TensorRT FP16 deploy config for OCR text recognition, fixed 32x32 input.
_base_ = [
    './text-recognition_static.py', '../../_base_/backends/tensorrt_fp16.py'
]
onnx_config = dict(input_shape=[32, 32])
backend_config = dict(
    common_config=dict(max_workspace_size=1 << 30),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    # Grayscale (1-channel); min == opt == max fixed profile.
                    min_shape=[1, 1, 32, 32],
                    opt_shape=[1, 1, 32, 32],
                    max_shape=[1, 1, 32, 32])))
    ])

View File

@ -0,0 +1,13 @@
# TensorRT INT8 deploy config for OCR text recognition, dynamic width.
# Quantization settings come from the inherited tensorrt_int8 backend base.
_base_ = [
    './text-recognition_dynamic.py', '../../_base_/backends/tensorrt_int8.py'
]
backend_config = dict(
    common_config=dict(max_workspace_size=1 << 30),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    # Grayscale (1-channel) crops, fixed height 32; only the
                    # width is dynamic (32..640, optimized for 64).
                    min_shape=[1, 1, 32, 32],
                    opt_shape=[1, 1, 32, 64],
                    max_shape=[1, 1, 32, 640])))
    ])

View File

@ -0,0 +1,15 @@
# TensorRT INT8 deploy config for OCR text recognition, fixed 32x32 input.
_base_ = [
    './text-recognition_static.py', '../../_base_/backends/tensorrt_int8.py'
]
onnx_config = dict(input_shape=[32, 32])
backend_config = dict(
    common_config=dict(max_workspace_size=1 << 30),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    # Grayscale (1-channel); min == opt == max fixed profile.
                    min_shape=[1, 1, 32, 32],
                    opt_shape=[1, 1, 32, 32],
                    max_shape=[1, 1, 32, 32])))
    ])

View File

@ -0,0 +1,11 @@
# TensorRT FP16 deploy config for semantic segmentation, dynamic resolution.
_base_ = ['./segmentation_dynamic.py', '../_base_/backends/tensorrt_fp16.py']
backend_config = dict(
    # 1 GiB builder workspace (overrides the base config's placeholder 0).
    common_config=dict(max_workspace_size=1 << 30),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    # Spatial size dynamic: 512x1024 .. 2048x2048,
                    # opt 1024x2048.
                    min_shape=[1, 3, 512, 1024],
                    opt_shape=[1, 3, 1024, 2048],
                    max_shape=[1, 3, 2048, 2048])))
    ])

View File

@ -0,0 +1,13 @@
# TensorRT FP16 deploy config for semantic segmentation, fixed 512x1024 input.
_base_ = ['./segmentation_static.py', '../_base_/backends/tensorrt_fp16.py']
onnx_config = dict(input_shape=[1024, 512])
backend_config = dict(
    common_config=dict(max_workspace_size=1 << 30),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    # min == opt == max: a single fixed 1x3x512x1024 profile.
                    min_shape=[1, 3, 512, 1024],
                    opt_shape=[1, 3, 512, 1024],
                    max_shape=[1, 3, 512, 1024])))
    ])

View File

@ -0,0 +1,11 @@
# TensorRT INT8 deploy config for semantic segmentation, dynamic resolution.
# Quantization settings come from the inherited tensorrt_int8 backend base.
_base_ = ['./segmentation_dynamic.py', '../_base_/backends/tensorrt_int8.py']
backend_config = dict(
    common_config=dict(max_workspace_size=1 << 30),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    # Spatial size dynamic: 512x1024 .. 2048x2048,
                    # opt 1024x2048.
                    min_shape=[1, 3, 512, 1024],
                    opt_shape=[1, 3, 1024, 2048],
                    max_shape=[1, 3, 2048, 2048])))
    ])

View File

@ -0,0 +1,13 @@
# TensorRT INT8 deploy config for semantic segmentation, fixed 512x1024 input.
_base_ = ['./segmentation_static.py', '../_base_/backends/tensorrt_int8.py']
onnx_config = dict(input_shape=[1024, 512])
backend_config = dict(
    common_config=dict(max_workspace_size=1 << 30),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    # min == opt == max: a single fixed 1x3x512x1024 profile.
                    min_shape=[1, 3, 512, 1024],
                    opt_shape=[1, 3, 512, 1024],
                    max_shape=[1, 3, 512, 1024])))
    ])

View File

@ -359,5 +359,9 @@ def get_tensor_from_input(codebase: Codebase, input_data: Dict[str, Any]):
from mmdeploy.mmocr.export import get_tensor_from_input \ from mmdeploy.mmocr.export import get_tensor_from_input \
as get_tensor_from_input_mmocr as get_tensor_from_input_mmocr
return get_tensor_from_input_mmocr(input_data) return get_tensor_from_input_mmocr(input_data)
elif codebase == Codebase.MMEDIT:
from mmdeploy.mmedit.export import get_tensor_from_input \
as get_tensor_from_input_mmedit
return get_tensor_from_input_mmedit(input_data)
else: else:
raise NotImplementedError(f'Unknown codebase type: {codebase.value}') raise NotImplementedError(f'Unknown codebase type: {codebase.value}')

View File

@ -45,9 +45,11 @@ def simple_test_mask_of_mask_test_mixin(ctx, self, x, img_metas, det_bboxes,
bboxes_shape, labels_shape = list(det_bboxes.shape), list( bboxes_shape, labels_shape = list(det_bboxes.shape), list(
det_labels.shape) det_labels.shape)
bboxes_shape[1], labels_shape[1] = 1, 1 bboxes_shape[1], labels_shape[1] = 1, 1
det_bboxes = torch.tensor([[[0., 0., 1., 1., det_bboxes = torch.tensor(
0.]]]).expand(*bboxes_shape) [[[0., 0., 1., 1., 0.]]],
det_labels = torch.tensor([[0]]).expand(*labels_shape) device=det_bboxes.device).expand(*bboxes_shape)
det_labels = torch.tensor(
[[0]], device=det_bboxes.device).expand(*labels_shape)
batch_size = det_bboxes.size(0) batch_size = det_bboxes.size(0)
det_bboxes = det_bboxes[..., :4] det_bboxes = det_bboxes[..., :4]

View File

@ -1,3 +1,7 @@
from .prepare_input import build_dataloader, build_dataset, create_input from .prepare_input import (build_dataloader, build_dataset, create_input,
get_tensor_from_input)
__all__ = ['create_input', 'build_dataset', 'build_dataloader'] __all__ = [
'create_input', 'build_dataset', 'build_dataloader',
'get_tensor_from_input'
]

View File

@ -1,4 +1,4 @@
from typing import Optional, Sequence, Union from typing import Any, Dict, Optional, Sequence, Union
import mmcv import mmcv
import numpy as np import numpy as np
@ -188,3 +188,14 @@ def build_dataloader(dataset: Dataset,
return build_dataloader_mmedit(dataset, samples_per_gpu, workers_per_gpu, return build_dataloader_mmedit(dataset, samples_per_gpu, workers_per_gpu,
num_gpus, dist, shuffle, seed, drop_last, num_gpus, dist, shuffle, seed, drop_last,
pin_memory, persistent_workers, **kwargs) pin_memory, persistent_workers, **kwargs)
def get_tensor_from_input(input_data: Dict[str, Any]):
"""Get input tensor from input data.
Args:
input_data (dict): Input data containing meta info and image tensor.
Returns:
torch.Tensor: An image in `Tensor`.
"""
return input_data['lq']

View File

@ -1,3 +1,7 @@
from .prepare_input import build_dataloader, build_dataset, create_input from .prepare_input import (build_dataloader, build_dataset, create_input,
get_tensor_from_input)
__all__ = ['create_input', 'build_dataset', 'build_dataloader'] __all__ = [
'create_input', 'build_dataset', 'build_dataloader',
'get_tensor_from_input'
]

View File

@ -2,7 +2,7 @@ from typing import Any, Optional, Sequence, Union
import mmcv import mmcv
import numpy as np import numpy as np
from mmcv.parallel import collate, scatter from mmcv.parallel import DataContainer, collate, scatter
from mmdet.datasets import replace_ImageToTensor from mmdet.datasets import replace_ImageToTensor
from mmocr.datasets import build_dataloader as build_dataloader_mmocr from mmocr.datasets import build_dataloader as build_dataloader_mmocr
from mmocr.datasets import build_dataset as build_dataset_mmocr from mmocr.datasets import build_dataset as build_dataset_mmocr
@ -189,4 +189,6 @@ def get_tensor_from_input(input_data: tuple):
Returns: Returns:
torch.Tensor: An image in `Tensor`. torch.Tensor: An image in `Tensor`.
""" """
return input_data['img'] if isinstance(input_data['img'], DataContainer):
return input_data['img'].data[0]
return input_data['img'][0]

View File

@ -138,4 +138,4 @@ def get_tensor_from_input(input_data: tuple):
Returns: Returns:
torch.Tensor: An image in `Tensor`. torch.Tensor: An image in `Tensor`.
""" """
return input_data['img'] return input_data['img'][0]

View File

@ -10,7 +10,11 @@ def forward_of_base_segmentor(ctx, self, img, img_metas=None, **kwargs):
"""Rewrite `forward` for default backend.""" """Rewrite `forward` for default backend."""
if img_metas is None: if img_metas is None:
img_metas = {} img_metas = {}
assert isinstance(img_metas, dict) while isinstance(img_metas, list):
img_metas = img_metas[0]
if isinstance(img, list):
img = torch.cat(img, 0)
assert isinstance(img, torch.Tensor) assert isinstance(img, torch.Tensor)
deploy_cfg = ctx.cfg deploy_cfg = ctx.cfg