diff --git a/docs/en/03-benchmark/benchmark.md b/docs/en/03-benchmark/benchmark.md
index 854456d98..55724d3e8 100644
--- a/docs/en/03-benchmark/benchmark.md
+++ b/docs/en/03-benchmark/benchmark.md
@@ -373,7 +373,7 @@ Users can directly test the speed through [model profiling](../02-how-to-run/pro
fp16 |
- FCN |
+ FCN |
512x1024 |
128.42 |
23.97 |
@@ -382,7 +382,7 @@ Users can directly test the speed through [model profiling](../02-how-to-run/pro
27.00 |
- PSPNet |
+ PSPNet |
1x3x512x1024 |
119.77 |
24.10 |
@@ -391,7 +391,7 @@ Users can directly test the speed through [model profiling](../02-how-to-run/pro
27.26 |
- DeepLabV3 |
+ DeepLabV3 |
512x1024 |
226.75 |
31.80 |
@@ -400,7 +400,7 @@ Users can directly test the speed through [model profiling](../02-how-to-run/pro
36.01 |
- DeepLabV3+ |
+ DeepLabV3+ |
512x1024 |
151.25 |
47.03 |
@@ -1274,7 +1274,7 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](../
fp32 |
- FCN |
+ FCN |
Cityscapes |
mIoU |
72.25 |
@@ -1287,7 +1287,7 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](../
72.35 |
- PSPNet |
+ PSPNet |
Cityscapes |
mIoU |
78.55 |
@@ -1300,7 +1300,7 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](../
78.67 |
- deeplabv3 |
+ deeplabv3 |
Cityscapes |
mIoU |
79.09 |
@@ -1313,7 +1313,7 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](../
79.06 |
- deeplabv3+ |
+ deeplabv3+ |
Cityscapes |
mIoU |
79.61 |
@@ -1326,7 +1326,7 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](../
79.51 |
- Fast-SCNN |
+ Fast-SCNN |
Cityscapes |
mIoU |
70.96 |
@@ -1339,7 +1339,7 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](../
- |
- UNet |
+ UNet |
Cityscapes |
mIoU |
69.10 |
@@ -1352,7 +1352,7 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](../
- |
- ANN |
+ ANN |
Cityscapes |
mIoU |
77.40 |
@@ -1365,7 +1365,7 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](../
- |
- APCNet |
+ APCNet |
Cityscapes |
mIoU |
77.40 |
@@ -1378,7 +1378,7 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](../
- |
- BiSeNetV1 |
+ BiSeNetV1 |
Cityscapes |
mIoU |
74.44 |
@@ -1391,7 +1391,7 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](../
- |
- BiSeNetV2 |
+ BiSeNetV2 |
Cityscapes |
mIoU |
73.21 |
@@ -1404,7 +1404,7 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](../
- |
- CGNet |
+ CGNet |
Cityscapes |
mIoU |
68.25 |
@@ -1417,7 +1417,7 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](../
- |
- EMANet |
+ EMANet |
Cityscapes |
mIoU |
77.59 |
@@ -1430,7 +1430,7 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](../
- |
- EncNet |
+ EncNet |
Cityscapes |
mIoU |
75.67 |
@@ -1443,7 +1443,7 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](../
- |
- ERFNet |
+ ERFNet |
Cityscapes |
mIoU |
71.08 |
@@ -1456,7 +1456,7 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](../
- |
- FastFCN |
+ FastFCN |
Cityscapes |
mIoU |
79.12 |
@@ -1469,7 +1469,7 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](../
- |
- GCNet |
+ GCNet |
Cityscapes |
mIoU |
77.69 |
@@ -1482,7 +1482,7 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](../
- |
- ICNet |
+ ICNet |
Cityscapes |
mIoU |
76.29 |
@@ -1495,7 +1495,7 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](../
- |
- ISANet |
+ ISANet |
Cityscapes |
mIoU |
78.49 |
@@ -1508,7 +1508,7 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](../
- |
- OCRNet |
+ OCRNet |
Cityscapes |
mIoU |
74.30 |
@@ -1521,7 +1521,7 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](../
- |
- PointRend |
+ PointRend |
Cityscapes |
mIoU |
76.47 |
@@ -1534,7 +1534,7 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](../
- |
- Semantic FPN |
+ Semantic FPN |
Cityscapes |
mIoU |
74.52 |
@@ -1547,7 +1547,7 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](../
- |
- STDC |
+ STDC |
Cityscapes |
mIoU |
75.10 |
@@ -1560,7 +1560,7 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](../
- |
- STDC |
+ STDC |
Cityscapes |
mIoU |
77.17 |
@@ -1573,7 +1573,7 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](../
- |
- UPerNet |
+ UPerNet |
Cityscapes |
mIoU |
77.10 |
@@ -1586,7 +1586,7 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](../
- |
- Segmenter |
+ Segmenter |
ADE20K |
mIoU |
44.32 |
@@ -1627,7 +1627,7 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](../
fp32 |
- HRNet |
+ HRNet |
Pose Detection |
COCO |
AP |
@@ -1648,7 +1648,7 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](../
0.802 |
- LiteHRNet |
+ LiteHRNet |
Pose Detection |
COCO |
AP |
@@ -1669,7 +1669,7 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](../
0.728 |
- MSPN |
+ MSPN |
Pose Detection |
COCO |
AP |
@@ -1690,7 +1690,7 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](../
0.825 |
- Hourglass |
+ Hourglass |
Pose Detection |
COCO |
AP |
diff --git a/docs/en/03-benchmark/benchmark_edge.md b/docs/en/03-benchmark/benchmark_edge.md
index 5590b6c0c..64161eacf 100644
--- a/docs/en/03-benchmark/benchmark_edge.md
+++ b/docs/en/03-benchmark/benchmark_edge.md
@@ -28,9 +28,9 @@ tips:
## mmpose
-| model | dataset | spatial | snpe hybrid AR@IoU=0.50 | snpe hybrid AP@IoU=0.50 | latency(ms) |
-| :---------------------------------------------------------------------------------------------------------------------------------------------------------------: | :--------: | :-----: | :---------------------: | :---------------------: | :---------: |
-| [pose_hrnet_w32](https://github.com/open-mmlab/mmpose/blob/master/configs/animal/2d_kpt_sview_rgb_img/topdown_heatmap/animalpose/hrnet_w32_animalpose_256x256.py) | Animalpose | 256x256 | 0.997 | 0.989 | 630±50 |
+| model | dataset | spatial | snpe hybrid AR@IoU=0.50 | snpe hybrid AP@IoU=0.50 | latency(ms) |
+| :--------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :--------: | :-----: | :---------------------: | :---------------------: | :---------: |
+| [pose_hrnet_w32](https://github.com/open-mmlab/mmpose/blob/1.x/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py) | Animalpose | 256x256 | 0.997 | 0.989 | 630±50 |
tips:
@@ -38,9 +38,9 @@ tips:
## mmseg
-| model | dataset | spatial | mIoU | latency(ms) |
-| :---------------------------------------------------------------------------------------------------------------: | :--------: | :------: | :---: | :---------: |
-| [fcn](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r18-d8_512x1024_80k_cityscapes.py) | Cityscapes | 512x1024 | 71.11 | 4915±500 |
+| model | dataset | spatial | mIoU | latency(ms) |
+| :-----------------------------------------------------------------------------------------------------------------: | :--------: | :------: | :---: | :---------: |
+| [fcn](https://github.com/open-mmlab/mmsegmentation/blob/1.x/configs/fcn/fcn_r18-d8_4xb2-80k_cityscapes-512x1024.py) | Cityscapes | 512x1024 | 71.11 | 4915±500 |
tips:
diff --git a/docs/en/03-benchmark/quantization.md b/docs/en/03-benchmark/quantization.md
index 4f0432031..288c6a548 100644
--- a/docs/en/03-benchmark/quantization.md
+++ b/docs/en/03-benchmark/quantization.md
@@ -29,8 +29,8 @@ Note: [mmocr](https://github.com/open-mmlab/mmocr) Uses 'shapely' to compute I
### Pose detection
-| model | dataset | fp32 AP | int8 AP |
-| :----------------------------------------------------------------------------------------------------------------------------------------------: | :------: | :-----: | :-----: |
-| [Hourglass](https://github.com/open-mmlab/mmpose/blob/master/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/hourglass52_coco_256x256.py) | COCO2017 | 0.717 | 0.713 |
+| model | dataset | fp32 AP | int8 AP |
+| :---------------------------------------------------------------------------------------------------------------------------------------------------: | :------: | :-----: | :-----: |
+| [Hourglass](https://github.com/open-mmlab/mmpose/blob/1.x/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hourglass52_8xb32-210e_coco-256x256.py) | COCO2017 | 0.717 | 0.713 |
Note: MMPose models are tested with `flip_test` explicitly set to `False` in model configs.
diff --git a/docs/en/04-supported-codebases/mmseg.md b/docs/en/04-supported-codebases/mmseg.md
index 2aef73dd0..75ff51629 100644
--- a/docs/en/04-supported-codebases/mmseg.md
+++ b/docs/en/04-supported-codebases/mmseg.md
@@ -1,15 +1,16 @@
# MMSegmentation Deployment
-- [Installation](#installation)
- - [Install mmseg](#install-mmseg)
- - [Install mmdeploy](#install-mmdeploy)
-- [Convert model](#convert-model)
-- [Model specification](#model-specification)
-- [Model inference](#model-inference)
- - [Backend model inference](#backend-model-inference)
- - [SDK model inference](#sdk-model-inference)
-- [Supported models](#supported-models)
-- [Reminder](#reminder)
+- [MMSegmentation Deployment](#mmsegmentation-deployment)
+ - [Installation](#installation)
+ - [Install mmseg](#install-mmseg)
+ - [Install mmdeploy](#install-mmdeploy)
+ - [Convert model](#convert-model)
+ - [Model specification](#model-specification)
+ - [Model inference](#model-inference)
+ - [Backend model inference](#backend-model-inference)
+ - [SDK model inference](#sdk-model-inference)
+ - [Supported models](#supported-models)
+ - [Reminder](#reminder)
______________________________________________________________________
@@ -227,6 +228,6 @@ Besides python API, mmdeploy SDK also provides other FFI (Foreign Function Inter
- Only `whole` inference mode is supported for all mmseg models.
-- PSPNet, Fast-SCNN only support static shape, because [nn.AdaptiveAvgPool2d](https://github.com/open-mmlab/mmsegmentation/blob/97f9670c5a4a2a3b4cfb411bcc26db16b23745f7/mmseg/models/decode_heads/psp_head.py#L38) is not supported by most inference backends.
+- PSPNet, Fast-SCNN only support static shape, because [nn.AdaptiveAvgPool2d](https://github.com/open-mmlab/mmsegmentation/blob/0c87f7a0c9099844eff8e90fa3db5b0d0ca02fee/mmseg/models/decode_heads/psp_head.py#L38) is not supported by most inference backends.
- For models that only supports static shape, you should use the deployment config file of static shape such as `configs/mmseg/segmentation_tensorrt_static-1024x2048.py`.
diff --git a/docs/zh_cn/03-benchmark/benchmark.md b/docs/zh_cn/03-benchmark/benchmark.md
index ba801b7bc..511fe5505 100644
--- a/docs/zh_cn/03-benchmark/benchmark.md
+++ b/docs/zh_cn/03-benchmark/benchmark.md
@@ -118,7 +118,6 @@ GPU: ncnn, TensorRT, PPLNN
-
@@ -142,8 +141,7 @@ GPU: ncnn, TensorRT, PPLNN
fp32 |
fp16 |
-
-
+
YOLOv3 |
320x320 |
14.76 |
@@ -372,7 +370,7 @@ GPU: ncnn, TensorRT, PPLNN
fp16 |
- FCN |
+ FCN |
512x1024 |
128.42 |
23.97 |
@@ -381,7 +379,7 @@ GPU: ncnn, TensorRT, PPLNN
27.00 |
- PSPNet |
+ PSPNet |
1x3x512x1024 |
119.77 |
24.10 |
@@ -390,7 +388,7 @@ GPU: ncnn, TensorRT, PPLNN
27.26 |
- DeepLabV3 |
+ DeepLabV3 |
512x1024 |
226.75 |
31.80 |
@@ -399,7 +397,7 @@ GPU: ncnn, TensorRT, PPLNN
36.01 |
- DeepLabV3+ |
+ DeepLabV3+ |
512x1024 |
151.25 |
47.03 |
@@ -600,6 +598,27 @@ GPU: ncnn, TensorRT, PPLNN
- |
97.77 |
+
+ Swin Transformer |
+ top-1 |
+ 81.18 |
+ 81.18 |
+ 81.18 |
+ 81.18 |
+ 81.18 |
+ - |
+ - |
+
+
+ top-5 |
+ 95.61 |
+ 95.61 |
+ 95.61 |
+ 95.61 |
+ 95.61 |
+ - |
+ - |
+
@@ -727,7 +746,7 @@ GPU: ncnn, TensorRT, PPLNN
37.3 |
37.1 |
37.3 |
- - |
+ 37.2 |
ATSS |
@@ -1250,7 +1269,7 @@ GPU: ncnn, TensorRT, PPLNN
fp32 |
- FCN |
+ FCN |
Cityscapes |
mIoU |
72.25 |
@@ -1263,7 +1282,7 @@ GPU: ncnn, TensorRT, PPLNN
72.35 |
- PSPNet |
+ PSPNet |
Cityscapes |
mIoU |
78.55 |
@@ -1276,7 +1295,7 @@ GPU: ncnn, TensorRT, PPLNN
78.67 |
- deeplabv3 |
+ deeplabv3 |
Cityscapes |
mIoU |
79.09 |
@@ -1289,7 +1308,7 @@ GPU: ncnn, TensorRT, PPLNN
79.06 |
- deeplabv3+ |
+ deeplabv3+ |
Cityscapes |
mIoU |
79.61 |
@@ -1302,7 +1321,7 @@ GPU: ncnn, TensorRT, PPLNN
79.51 |
- Fast-SCNN |
+ Fast-SCNN |
Cityscapes |
mIoU |
70.96 |
@@ -1315,7 +1334,7 @@ GPU: ncnn, TensorRT, PPLNN
- |
- UNet |
+ UNet |
Cityscapes |
mIoU |
69.10 |
@@ -1328,7 +1347,7 @@ GPU: ncnn, TensorRT, PPLNN
- |
- ANN |
+ ANN |
Cityscapes |
mIoU |
77.40 |
@@ -1341,7 +1360,7 @@ GPU: ncnn, TensorRT, PPLNN
- |
- APCNet |
+ APCNet |
Cityscapes |
mIoU |
77.40 |
@@ -1354,7 +1373,7 @@ GPU: ncnn, TensorRT, PPLNN
- |
- BiSeNetV1 |
+ BiSeNetV1 |
Cityscapes |
mIoU |
74.44 |
@@ -1367,7 +1386,7 @@ GPU: ncnn, TensorRT, PPLNN
- |
- BiSeNetV2 |
+ BiSeNetV2 |
Cityscapes |
mIoU |
73.21 |
@@ -1380,7 +1399,7 @@ GPU: ncnn, TensorRT, PPLNN
- |
- CGNet |
+ CGNet |
Cityscapes |
mIoU |
68.25 |
@@ -1393,7 +1412,7 @@ GPU: ncnn, TensorRT, PPLNN
- |
- EMANet |
+ EMANet |
Cityscapes |
mIoU |
77.59 |
@@ -1406,7 +1425,7 @@ GPU: ncnn, TensorRT, PPLNN
- |
- EncNet |
+ EncNet |
Cityscapes |
mIoU |
75.67 |
@@ -1419,7 +1438,7 @@ GPU: ncnn, TensorRT, PPLNN
- |
- ERFNet |
+ ERFNet |
Cityscapes |
mIoU |
71.08 |
@@ -1432,7 +1451,7 @@ GPU: ncnn, TensorRT, PPLNN
- |
- FastFCN |
+ FastFCN |
Cityscapes |
mIoU |
79.12 |
@@ -1445,7 +1464,7 @@ GPU: ncnn, TensorRT, PPLNN
- |
- GCNet |
+ GCNet |
Cityscapes |
mIoU |
77.69 |
@@ -1458,7 +1477,7 @@ GPU: ncnn, TensorRT, PPLNN
- |
- ICNet |
+ ICNet |
Cityscapes |
mIoU |
76.29 |
@@ -1471,7 +1490,7 @@ GPU: ncnn, TensorRT, PPLNN
- |
- ISANet |
+ ISANet |
Cityscapes |
mIoU |
78.49 |
@@ -1484,7 +1503,7 @@ GPU: ncnn, TensorRT, PPLNN
- |
- OCRNet |
+ OCRNet |
Cityscapes |
mIoU |
74.30 |
@@ -1497,7 +1516,7 @@ GPU: ncnn, TensorRT, PPLNN
- |
- PointRend |
+ PointRend |
Cityscapes |
mIoU |
76.47 |
@@ -1510,7 +1529,7 @@ GPU: ncnn, TensorRT, PPLNN
- |
- Semantic FPN |
+ Semantic FPN |
Cityscapes |
mIoU |
74.52 |
@@ -1523,7 +1542,7 @@ GPU: ncnn, TensorRT, PPLNN
- |
- STDC |
+ STDC |
Cityscapes |
mIoU |
75.10 |
@@ -1536,7 +1555,7 @@ GPU: ncnn, TensorRT, PPLNN
- |
- STDC |
+ STDC |
Cityscapes |
mIoU |
77.17 |
@@ -1549,7 +1568,7 @@ GPU: ncnn, TensorRT, PPLNN
- |
- UPerNet |
+ UPerNet |
Cityscapes |
mIoU |
77.10 |
@@ -1562,7 +1581,7 @@ GPU: ncnn, TensorRT, PPLNN
- |
- Segmenter |
+ Segmenter |
ADE20K |
mIoU |
44.32 |
@@ -1603,7 +1622,7 @@ GPU: ncnn, TensorRT, PPLNN
fp32 |
- HRNet |
+ HRNet |
Pose Detection |
COCO |
AP |
@@ -1624,7 +1643,7 @@ GPU: ncnn, TensorRT, PPLNN
0.802 |
- LiteHRNet |
+ LiteHRNet |
Pose Detection |
COCO |
AP |
@@ -1645,7 +1664,7 @@ GPU: ncnn, TensorRT, PPLNN
0.728 |
- MSPN |
+ MSPN |
Pose Detection |
COCO |
AP |
@@ -1666,7 +1685,7 @@ GPU: ncnn, TensorRT, PPLNN
0.825 |
- Hourglass |
+ Hourglass |
Pose Detection |
COCO |
AP |
diff --git a/docs/zh_cn/03-benchmark/benchmark_edge.md b/docs/zh_cn/03-benchmark/benchmark_edge.md
index 5e1733a6c..0a5411300 100644
--- a/docs/zh_cn/03-benchmark/benchmark_edge.md
+++ b/docs/zh_cn/03-benchmark/benchmark_edge.md
@@ -28,9 +28,9 @@ tips:
## mmpose 模型
-| model | dataset | spatial | snpe hybrid AR@IoU=0.50 | snpe hybrid AP@IoU=0.50 | latency(ms) |
-| :---------------------------------------------------------------------------------------------------------------------------------------------------------------: | :--------: | :-----: | :---------------------: | :---------------------: | :---------: |
-| [pose_hrnet_w32](https://github.com/open-mmlab/mmpose/blob/master/configs/animal/2d_kpt_sview_rgb_img/topdown_heatmap/animalpose/hrnet_w32_animalpose_256x256.py) | Animalpose | 256x256 | 0.997 | 0.989 | 630±50 |
+| model | dataset | spatial | snpe hybrid AR@IoU=0.50 | snpe hybrid AP@IoU=0.50 | latency(ms) |
+| :--------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :--------: | :-----: | :---------------------: | :---------------------: | :---------: |
+| [pose_hrnet_w32](https://github.com/open-mmlab/mmpose/blob/1.x/configs/animal_2d_keypoint/topdown_heatmap/animalpose/td-hm_hrnet-w32_8xb64-210e_animalpose-256x256.py) | Animalpose | 256x256 | 0.997 | 0.989 | 630±50 |
tips:
@@ -38,9 +38,9 @@ tips:
## mmseg
-| model | dataset | spatial | mIoU | latency(ms) |
-| :---------------------------------------------------------------------------------------------------------------: | :--------: | :------: | :---: | :---------: |
-| [fcn](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fcn/fcn_r18-d8_512x1024_80k_cityscapes.py) | Cityscapes | 512x1024 | 71.11 | 4915±500 |
+| model | dataset | spatial | mIoU | latency(ms) |
+| :-----------------------------------------------------------------------------------------------------------------: | :--------: | :------: | :---: | :---------: |
+| [fcn](https://github.com/open-mmlab/mmsegmentation/blob/1.x/configs/fcn/fcn_r18-d8_4xb2-80k_cityscapes-512x1024.py) | Cityscapes | 512x1024 | 71.11 | 4915±500 |
tips:
diff --git a/docs/zh_cn/03-benchmark/quantization.md b/docs/zh_cn/03-benchmark/quantization.md
index d65cae896..81f2ada5e 100644
--- a/docs/zh_cn/03-benchmark/quantization.md
+++ b/docs/zh_cn/03-benchmark/quantization.md
@@ -29,8 +29,8 @@
### 姿态检测任务
-| model | dataset | fp32 AP | int8 AP |
-| :----------------------------------------------------------------------------------------------------------------------------------------------: | :------: | :-----: | :-----: |
-| [Hourglass](https://github.com/open-mmlab/mmpose/blob/master/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/hourglass52_coco_256x256.py) | COCO2017 | 0.717 | 0.713 |
+| model | dataset | fp32 AP | int8 AP |
+| :---------------------------------------------------------------------------------------------------------------------------------------------------: | :------: | :-----: | :-----: |
+| [Hourglass](https://github.com/open-mmlab/mmpose/blob/1.x/configs/body_2d_keypoint/topdown_heatmap/coco/td-hm_hourglass52_8xb32-210e_coco-256x256.py) | COCO2017 | 0.717 | 0.713 |
备注:测试转换后的模型精度时,对于 mmpose 模型,在模型配置文件中 `flip_test` 需设置为 `False`。
diff --git a/docs/zh_cn/04-supported-codebases/mmseg.md b/docs/zh_cn/04-supported-codebases/mmseg.md
index d54480603..c96ea6d56 100644
--- a/docs/zh_cn/04-supported-codebases/mmseg.md
+++ b/docs/zh_cn/04-supported-codebases/mmseg.md
@@ -1,14 +1,16 @@
# MMSegmentation 模型部署
-- [安装](#安装)
- - [安装 mmcls](#安装-mmseg)
- - [安装 mmdeploy](#安装-mmdeploy)
-- [模型转换](#模型转换)
-- [模型规范](#模型规范)
-- [模型推理](#模型推理)
- - [后端模型推理](#后端模型推理)
- - [SDK 模型推理](#sdk-模型推理)
-- [模型支持列表](#模型支持列表)
+- [MMSegmentation 模型部署](#mmsegmentation-模型部署)
+ - [安装](#安装)
+ - [安装 mmseg](#安装-mmseg)
+ - [安装 mmdeploy](#安装-mmdeploy)
+ - [模型转换](#模型转换)
+ - [模型规范](#模型规范)
+ - [模型推理](#模型推理)
+ - [后端模型推理](#后端模型推理)
+ - [SDK 模型推理](#sdk-模型推理)
+ - [模型支持列表](#模型支持列表)
+ - [注意事项](#注意事项)
______________________________________________________________________
@@ -230,6 +232,6 @@ cv2.imwrite('output_segmentation.png', img)
- 所有 mmseg 模型仅支持 "whole" 推理模式。
-- PSPNet,Fast-SCNN 仅支持静态输入,因为多数推理框架的 [nn.AdaptiveAvgPool2d](https://github.com/open-mmlab/mmsegmentation/blob/97f9670c5a4a2a3b4cfb411bcc26db16b23745f7/mmseg/models/decode_heads/psp_head.py#L38) 不支持动态输入。
+- PSPNet,Fast-SCNN 仅支持静态输入,因为多数推理框架的 [nn.AdaptiveAvgPool2d](https://github.com/open-mmlab/mmsegmentation/blob/0c87f7a0c9099844eff8e90fa3db5b0d0ca02fee/mmseg/models/decode_heads/psp_head.py#L38) 不支持动态输入。
- 对于仅支持静态形状的模型,应使用静态形状的部署配置文件,例如 `configs/mmseg/segmentation_tensorrt_static-1024x2048.py`
diff --git a/mmdeploy/codebase/base/backend_model.py b/mmdeploy/codebase/base/backend_model.py
index 336747896..ba1870591 100644
--- a/mmdeploy/codebase/base/backend_model.py
+++ b/mmdeploy/codebase/base/backend_model.py
@@ -48,7 +48,7 @@ class BaseBackendModel(BaseModel, metaclass=ABCMeta):
Args:
backend (Backend): The backend enum type.
- beckend_files (Sequence[str]): Paths to all required backend files(
+ backend_files (Sequence[str]): Paths to all required backend files(
e.g. '.onnx' for ONNX Runtime, '.param' and '.bin' for ncnn).
device (str): A string specifying device type.
input_names (Sequence[str] | None): Names of model inputs in
diff --git a/mmdeploy/codebase/base/runner.py b/mmdeploy/codebase/base/runner.py
index ba1ad6c99..08cad70c6 100644
--- a/mmdeploy/codebase/base/runner.py
+++ b/mmdeploy/codebase/base/runner.py
@@ -8,15 +8,22 @@ from mmengine.runner import Runner
class DeployTestRunner(Runner):
+ """The runner for test models.
+
+ Args:
+ log_file (str | None): The path of log file. Default is ``None``.
+ device (str): The device type.
+ """
def __init__(self,
log_file: Optional[str] = None,
device: str = get_device(),
*args,
**kwargs):
+
self._log_file = log_file
self._device = device
- super().__init__(*args, **kwargs)
+ super(DeployTestRunner, self).__init__(*args, **kwargs)
def wrap_model(self, model_wrapper_cfg: Optional[Dict],
model: BaseModel) -> BaseModel:
@@ -46,7 +53,7 @@ class DeployTestRunner(Runner):
log_level: Union[int, str] = 'INFO',
log_file: str = None,
**kwargs) -> MMLogger:
- """Build a global asscessable MMLogger.
+    """Build a globally accessible MMLogger.
Args:
log_level (int or str): The log level of MMLogger handlers.
diff --git a/mmdeploy/codebase/base/task.py b/mmdeploy/codebase/base/task.py
index 53c9cf9be..a6e20ce75 100644
--- a/mmdeploy/codebase/base/task.py
+++ b/mmdeploy/codebase/base/task.py
@@ -24,6 +24,9 @@ class BaseTask(metaclass=ABCMeta):
model_cfg (str | Config): Model config file.
deploy_cfg (str | Config): Deployment config file.
device (str): A string specifying device type.
+ experiment_name (str, optional): Name of current experiment.
+ If not specified, timestamp will be used as
+ ``experiment_name``. Defaults to ``None``.
"""
def __init__(self,
@@ -70,6 +73,12 @@ class BaseTask(metaclass=ABCMeta):
pass
def build_data_preprocessor(self):
+        """Build data preprocessor.
+
+ Returns:
+ BaseDataPreprocessor:
+ Initialized instance of :class:`BaseDataPreprocessor`.
+ """
model = deepcopy(self.model_cfg.model)
preprocess_cfg = model['data_preprocessor']
diff --git a/mmdeploy/codebase/mmseg/deploy/segmentation.py b/mmdeploy/codebase/mmseg/deploy/segmentation.py
index e52a52496..771c2f102 100644
--- a/mmdeploy/codebase/mmseg/deploy/segmentation.py
+++ b/mmdeploy/codebase/mmseg/deploy/segmentation.py
@@ -65,9 +65,9 @@ def _get_dataset_metainfo(model_cfg: Config):
"""Get metainfo of dataset.
Args:
- model_cfg Config: Input model Config object.
+ model_cfg (Config): Input model Config object.
Returns:
- (list[str], list[np.ndarray]): Class names and palette
+ (list[str], list[np.ndarray]): Class names and palette.
"""
from mmseg import datasets # noqa
from mmseg.registry import DATASETS
@@ -106,10 +106,12 @@ class MMSegmentation(MMCodebase):
@classmethod
def register_deploy_modules(cls):
+        """Register deploy modules."""
import mmdeploy.codebase.mmseg.models # noqa: F401
@classmethod
def register_all_modules(cls):
+        """Register all modules."""
from mmseg.utils.set_env import register_all_modules
cls.register_deploy_modules()
@@ -167,7 +169,8 @@ class Segmentation(BaseTask):
`np.ndarray`, `torch.Tensor`.
input_shape (list[int]): A list of two integer in (width, height)
format specifying input shape. Defaults to `None`.
-
+ data_preprocessor (BaseDataPreprocessor | None): Input data pre-
+ processor. Default is ``None``.
Returns:
tuple: (data, img), meta information for the input image and input.
"""
@@ -200,11 +203,11 @@ class Segmentation(BaseTask):
"""
Args:
- name:
- save_dir:
+ name (str): Name of visualizer.
+ save_dir (str): Directory to save drawn results.
Returns:
-
+ SegLocalVisualizer: Instance of mmseg visualizer.
"""
# import to make SegLocalVisualizer could be built
from mmseg.visualization import SegLocalVisualizer # noqa: F401,F403
@@ -236,7 +239,7 @@ class Segmentation(BaseTask):
window_name (str): The name of visualization window. Defaults to
an empty string.
show_result (bool): Whether to show result in windows, defaults
- to `False`.
+ to ``False``.
opacity: (float): Opacity of painted segmentation map.
Defaults to `0.5`.
"""
diff --git a/mmdeploy/codebase/mmseg/deploy/segmentation_model.py b/mmdeploy/codebase/mmseg/deploy/segmentation_model.py
index b1b24f1ad..5ad1c9657 100644
--- a/mmdeploy/codebase/mmseg/deploy/segmentation_model.py
+++ b/mmdeploy/codebase/mmseg/deploy/segmentation_model.py
@@ -29,10 +29,10 @@ class End2EndModel(BaseBackendModel):
backend_files (Sequence[str]): Paths to all required backend files(e.g.
'.onnx' for ONNX Runtime, '.param' and '.bin' for ncnn).
device (str): A string represents device type.
- class_names (Sequence[str]): A list of string specifying class names.
- palette (np.ndarray): The palette of segmentation map.
deploy_cfg (str | mmengine.Config): Deployment config file or loaded
Config object.
+ data_preprocessor (dict | nn.Module | None): Input data pre-
+ processor. Default is ``None``.
"""
def __init__(self,
@@ -73,8 +73,9 @@ class End2EndModel(BaseBackendModel):
Args:
inputs (torch.Tensor): Input image tensor
in [N x C x H x W] format.
- data_samples (List[BaseDataElement]): A list of meta info for
- image(s).
+ data_samples (list[:obj:`SegDataSample`]): The seg data
+ samples. It usually includes information such as
+            `metainfo` and `gt_sem_seg`. Defaults to None.
mode (str): forward mode, only support 'predict'.
**kwargs: Other key-pair arguments.
@@ -93,6 +94,18 @@ class End2EndModel(BaseBackendModel):
def pack_result(self, batch_outputs: torch.Tensor,
data_samples: List[BaseDataElement]):
+ """Pack segmentation result to data samples.
+ Args:
+ batch_outputs (Tensor): Batched segmentation output
+ tensor.
+ data_samples (list[:obj:`SegDataSample`]): The seg data
+ samples. It usually includes information such as
+            `metainfo` and `gt_sem_seg`. Defaults to None.
+
+ Returns:
+ list[:obj:`SegDataSample`]: The updated seg data samples.
+ """
+
predictions = []
for seg_pred, data_sample in zip(batch_outputs, data_samples):
# resize seg_pred to original image shape
@@ -123,9 +136,9 @@ class RKNNModel(End2EndModel):
Args:
inputs (Tensor): Inputs with shape (N, C, H, W).
- data_samples (List[:obj:`DetDataSample`]): The Data
- Samples. It usually includes information such as
- `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
+ data_samples (list[:obj:`SegDataSample`]): The seg data
+ samples. It usually includes information such as
+          `metainfo` and `gt_sem_seg`. Defaults to None.
Returns:
list: A list contains predictions.
@@ -156,12 +169,13 @@ class SDKEnd2EndModel(End2EndModel):
"""Run forward inference.
Args:
- img (Sequence[torch.Tensor]): A list contains input image(s)
- in [N x C x H x W] format.
- img_metas (Sequence[Sequence[dict]]): A list of meta info for
- image(s).
- *args: Other arguments.
- **kwargs: Other key-pair arguments.
+ inputs (Sequence[torch.Tensor]): A list contains input
+ image(s) in [C x H x W] format.
+ data_samples (list[:obj:`SegDataSample`]): The seg data
+ samples. It usually includes information such as
+            `metainfo` and `gt_sem_seg`. Defaults to None.
+ mode (str): Return what kind of value. Defaults to
+ 'predict'.
Returns:
list: A list contains predictions.