update docs (#624)
parent f568fe7da5
commit 5fce1e8f8d

@@ -0,0 +1,13 @@
+_base_ = ['./classification_static.py', '../_base_/backends/tensorrt-fp16.py']
+
+onnx_config = dict(input_shape=[384, 384])
+backend_config = dict(
+    common_config=dict(max_workspace_size=1 << 30),
+    model_inputs=[
+        dict(
+            input_shapes=dict(
+                input=dict(
+                    min_shape=[1, 3, 384, 384],
+                    opt_shape=[1, 3, 384, 384],
+                    max_shape=[1, 3, 384, 384])))
+    ])
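The `_base_` inheritance means this new file only overrides what differs from the 224x224 static deployment: the ONNX export shape and the TensorRT optimization profile, whose identical min/opt/max shapes pin the engine to a fixed 1x3x384x384 input. As a quick sanity check of the merged result — a sketch only, assuming an MMDeploy checkout with an mmcv 1.x release that provides `mmcv.Config`, and that the file is saved at the path referenced by the regression config further down — the config can be loaded and inspected directly:

```python
# Sketch: inspect the deploy config after `_base_` inheritance is resolved.
# Assumes mmcv 1.x (which ships mmcv.Config) and that this file is saved as
# configs/mmcls/classification_tensorrt-fp16_static-384x384.py in MMDeploy.
from mmcv import Config

cfg = Config.fromfile(
    'configs/mmcls/classification_tensorrt-fp16_static-384x384.py')

# The input_shape override in this file wins over the 224x224 base.
print(cfg.onnx_config.input_shape)      # [384, 384]

# Settings inherited from ../_base_/backends/tensorrt-fp16.py, merged with
# the 1 GiB workspace set here.
print(cfg.backend_config.common_config)

# Identical min/opt/max shapes -> TensorRT builds a fixed-shape engine.
print(cfg.backend_config.model_inputs[0].input_shapes.input.min_shape)
```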

@@ -559,6 +559,27 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](../
 <td align="center">89.85</td>
 <td align="center">90.41</td>
 </tr>
+<tr>
+<td align="center" rowspan="2"><a href="https://github.com/open-mmlab/mmclassification/blob/master/configs/vision_transformer/vit-base-p16_ft-64xb64_in1k-384.py">Vision Transformer</a></td>
+<td align="center">top-1</td>
+<td align="center">85.43</td>
+<td align="center">85.43</td>
+<td align="center">-</td>
+<td align="center">85.43</td>
+<td align="center">85.42</td>
+<td align="center">-</td>
+<td align="center">-</td>
+</tr>
+<tr>
+<td align="center">top-5</td>
+<td align="center">97.77</td>
+<td align="center">97.77</td>
+<td align="center">-</td>
+<td align="center">97.77</td>
+<td align="center">97.76</td>
+<td align="center">-</td>
+<td align="center">-</td>
+</tr>
 </tbody>
 </table>
 </div>

@@ -24,7 +24,7 @@ The table below lists the models that are guaranteed to be exportable to other backends
 | MobileNetV2 | MMClassification | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/mobilenet_v2) |
 | ShuffleNetV1 | MMClassification | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/shufflenet_v1) |
 | ShuffleNetV2 | MMClassification | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/shufflenet_v2) |
-| VisionTransformer | MMClassification | Y | Y | ? | Y | ? | ? | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/vision_transformer) |
+| VisionTransformer | MMClassification | Y | Y | Y | Y | ? | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/vision_transformer) |
 | FCN | MMSegmentation | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/fcn) |
 | PSPNet[\*static](#note) | MMSegmentation | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/pspnet) |
 | DeepLabV3 | MMSegmentation | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/deeplabv3) |

@@ -8,11 +8,12 @@ Please refer to [install.md](https://github.com/open-mmlab/mmclassification/blob
 ## List of MMClassification models supported by MMDeploy

-| Model | ONNX Runtime | TensorRT | ncnn | PPLNN | OpenVINO | Model config |
-| :----------- | :----------: | :------: | :--: | :---: | :------: | :----------------------------------------------------------------------------------------: |
-| ResNet | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/resnet) |
-| ResNeXt | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/resnext) |
-| SE-ResNet | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/seresnet) |
-| MobileNetV2 | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/mobilenet_v2) |
-| ShuffleNetV1 | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/shufflenet_v1) |
-| ShuffleNetV2 | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/shufflenet_v2) |
+| Model | ONNX Runtime | TensorRT | ncnn | PPLNN | OpenVINO | Model config |
+| :---------------- | :----------: | :------: | :--: | :---: | :------: | :---------------------------------------------------------------------------------------------: |
+| ResNet | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/resnet) |
+| ResNeXt | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/resnext) |
+| SE-ResNet | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/seresnet) |
+| MobileNetV2 | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/mobilenet_v2) |
+| ShuffleNetV1 | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/shufflenet_v1) |
+| ShuffleNetV2 | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/shufflenet_v2) |
+| VisionTransformer | Y | Y | Y | ? | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/vision_transformer) |

@@ -556,6 +556,27 @@ GPU: ncnn, TensorRT, PPLNN
 <td align="center">89.85</td>
 <td align="center">90.41</td>
 </tr>
+<tr>
+<td align="center" rowspan="2"><a href="https://github.com/open-mmlab/mmclassification/blob/master/configs/vision_transformer/vit-base-p16_ft-64xb64_in1k-384.py">Vision Transformer</a></td>
+<td align="center">top-1</td>
+<td align="center">85.43</td>
+<td align="center">85.43</td>
+<td align="center">-</td>
+<td align="center">85.43</td>
+<td align="center">85.42</td>
+<td align="center">-</td>
+<td align="center">-</td>
+</tr>
+<tr>
+<td align="center">top-5</td>
+<td align="center">97.77</td>
+<td align="center">97.77</td>
+<td align="center">-</td>
+<td align="center">97.77</td>
+<td align="center">97.76</td>
+<td align="center">-</td>
+<td align="center">-</td>
+</tr>
 </tbody>
 </table>
 </div>

@@ -25,7 +25,7 @@
 | MobileNetV2 | MMClassification | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/mobilenet_v2) |
 | ShuffleNetV1 | MMClassification | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/shufflenet_v1) |
 | ShuffleNetV2 | MMClassification | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/shufflenet_v2) |
-| VisionTransformer | MMClassification | Y | Y | ? | Y | ? | ? | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/vision_transformer) |
+| VisionTransformer | MMClassification | Y | Y | Y | Y | ? | Y | [config](https://github.com/open-mmlab/mmclassification/tree/master/configs/vision_transformer) |
 | FCN | MMSegmentation | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/fcn) |
 | PSPNet[\*static](#note) | MMSegmentation | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/pspnet) |
 | DeepLabV3 | MMSegmentation | Y | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmsegmentation/tree/master/configs/deeplabv3) |

@@ -54,6 +54,11 @@ tensorrt:
     backend_test: *default_backend_test
     deploy_config: configs/mmcls/classification_tensorrt-fp16_static-224x224.py

+  pipeline_trt_static_fp16_384x384: &pipeline_trt_static_fp16_384x384
+    convert_image: *convert_image
+    backend_test: *default_backend_test
+    deploy_config: configs/mmcls/classification_tensorrt-fp16_static-384x384.py
+
   pipeline_trt_static_int8: &pipeline_trt_static_int8
     convert_image: *convert_image
     backend_test: *default_backend_test

@@ -108,7 +113,7 @@ pplnn:
 torchscript:
   pipeline_ts_fp32: &pipeline_ts_fp32
     convert_image: *convert_image
-    backend_test: False
+    backend_test: True
     deploy_config: configs/mmcls/classification_torchscript.py

@@ -186,3 +191,13 @@ models:
       - *pipeline_ncnn_static_fp32
       # - *pipeline_pplnn_dynamic_fp32
       # - *pipeline_openvino_dynamic_fp32
+
+  - name: VisionTransformer
+    metafile: configs/vision_transformer/metafile.yml
+    model_configs:
+      - configs/vision_transformer/vit-base-p16_ft-64xb64_in1k-384.py
+    pipelines:
+      - *pipeline_ts_fp32
+      - *pipeline_ort_dynamic_fp32
+      - *pipeline_trt_static_fp16_384x384
+      - *pipeline_ncnn_static_fp32
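The new `pipeline_trt_static_fp16_384x384` entry is a YAML anchor, and the VisionTransformer model entry pulls it in (together with the TorchScript, ONNX Runtime and ncnn pipelines) through aliases, so each pipeline is written once and reused by every model. A minimal sketch of how that anchor/alias resolution behaves, using PyYAML on a stand-in snippet that mirrors these keys (illustrative only, not the real regression file or its test harness):

```python
# Sketch: YAML anchors (&name) are resolved into full mappings wherever the
# matching alias (*name) appears, which is how each model entry ends up with
# complete pipeline settings. Stand-in snippet, not the actual config.
import yaml

snippet = """
tensorrt:
  pipeline_trt_static_fp16_384x384: &pipeline_trt_static_fp16_384x384
    backend_test: True
    deploy_config: configs/mmcls/classification_tensorrt-fp16_static-384x384.py

models:
  - name: VisionTransformer
    pipelines:
      - *pipeline_trt_static_fp16_384x384
"""

data = yaml.safe_load(snippet)
vit = data['models'][0]
# The alias expands to the anchored mapping, deploy_config included.
print(vit['pipelines'][0]['deploy_config'])
# -> configs/mmcls/classification_tensorrt-fp16_static-384x384.py
```

After loading, each item under `pipelines` is the full mapping defined under `tensorrt:`, which is why adding one anchored pipeline plus one alias is enough to enable the 384x384 TensorRT fp16 regression test for this model.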