From e13076adef521b9c082f767e12bd69c96f6496bb Mon Sep 17 00:00:00 2001
From: VVsssssk <88368822+VVsssssk@users.noreply.github.com>
Date: Thu, 23 Sep 2021 19:14:52 +0800
Subject: [PATCH] [Fix] fix_torchserver1.1 (#844)

* test_torchserver1.1

* test_torchserver1.2

* update

* update mmseg_handler.py

* update docs

* update torchserver

* transfer torchserver to torchserve

* update docs
---
 docs/useful_tools.md                       | 19 ++++++-
 setup.cfg                                  |  2 +-
 tools/{ => torchserve}/mmseg2torchserve.py |  0
 tools/{ => torchserve}/mmseg_handler.py    |  9 ++--
 tools/torchserve/test_torchserve.py        | 59 ++++++++++++++++++++++
 5 files changed, 84 insertions(+), 5 deletions(-)
 rename tools/{ => torchserve}/mmseg2torchserve.py (100%)
 rename tools/{ => torchserve}/mmseg_handler.py (86%)
 create mode 100644 tools/torchserve/test_torchserve.py

diff --git a/docs/useful_tools.md b/docs/useful_tools.md
index 28f9a42ef..8cec9b702 100644
--- a/docs/useful_tools.md
+++ b/docs/useful_tools.md
@@ -304,7 +304,7 @@ In order to serve an `MMSegmentation` model with [`TorchServe`](https://pytorch.
 ### 1. Convert model from MMSegmentation to TorchServe
 
 ```shell
-python tools/mmseg2torchserve.py ${CONFIG_FILE} ${CHECKPOINT_FILE} \
+python tools/torchserve/mmseg2torchserve.py ${CONFIG_FILE} ${CHECKPOINT_FILE} \
 --output-folder ${MODEL_STORE} \
 --model-name ${MODEL_NAME}
 ```
@@ -359,3 +359,20 @@ plt.show()
 You should see something similar to:
 
 ![3dogs_mask](../resources/3dogs_mask.png)
+
+You can also use `test_torchserve.py` to compare the results of TorchServe and PyTorch, and to visualize them.
+
+```shell
+python tools/torchserve/test_torchserve.py ${IMAGE_FILE} ${CONFIG_FILE} ${CHECKPOINT_FILE} ${MODEL_NAME} \
+[--inference-addr ${INFERENCE_ADDR}] [--result-image ${RESULT_IMAGE}] [--device ${DEVICE}]
+```
+
+Example:
+
+```shell
+python tools/torchserve/test_torchserve.py \
+demo/demo.png \
+configs/fcn/fcn_r50-d8_512x1024_40k_cityscapes.py \
+checkpoint/fcn_r50-d8_512x1024_40k_cityscapes_20200604_192608-efe53f0d.pth \
+fcn
+```
diff --git a/setup.cfg b/setup.cfg
index 75fcedc7c..8605ae939 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -8,6 +8,6 @@ line_length = 79
 multi_line_output = 0
 known_standard_library = setuptools
 known_first_party = mmseg
-known_third_party = PIL,cityscapesscripts,cv2,detail,matplotlib,mmcv,numpy,onnxruntime,packaging,prettytable,pytest,pytorch_sphinx_theme,scipy,seaborn,torch,ts
+known_third_party = PIL,cityscapesscripts,cv2,detail,matplotlib,mmcv,numpy,onnxruntime,packaging,prettytable,pytest,pytorch_sphinx_theme,requests,scipy,seaborn,torch,ts
 no_lines_before = STDLIB,LOCALFOLDER
 default_section = THIRDPARTY
diff --git a/tools/mmseg2torchserve.py b/tools/torchserve/mmseg2torchserve.py
similarity index 100%
rename from tools/mmseg2torchserve.py
rename to tools/torchserve/mmseg2torchserve.py
diff --git a/tools/mmseg_handler.py b/tools/torchserve/mmseg_handler.py
similarity index 86%
rename from tools/mmseg_handler.py
rename to tools/torchserve/mmseg_handler.py
index 7fabd46b9..e195f6d5d 100644
--- a/tools/mmseg_handler.py
+++ b/tools/torchserve/mmseg_handler.py
@@ -1,11 +1,11 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 import base64
-import io
 import os
 
 import cv2
 import mmcv
 import torch
+from mmcv.cnn.utils.sync_bn import revert_sync_batchnorm
 from ts.torch_handler.base_handler import BaseHandler
 
 from mmseg.apis import inference_segmentor, init_segmentor
@@ -27,6 +27,7 @@ class MMsegHandler(BaseHandler):
         self.config_file = os.path.join(model_dir, 'config.py')
 
         self.model = init_segmentor(self.config_file, checkpoint, self.device)
+        self.model = revert_sync_batchnorm(self.model)
         self.initialized = True
 
     def preprocess(self, data):
@@ -47,8 +48,10 @@
 
     def postprocess(self, data):
         output = []
+
         for image_result in data:
-            buffer = io.BytesIO()
             _, buffer = cv2.imencode('.png', image_result[0].astype('uint8'))
-            output.append(buffer.tobytes())
+            base64_data = base64.b64encode(buffer.tobytes())
+            base64_str = str(base64_data, 'utf-8')
+            output.append(base64_str)
         return output
diff --git a/tools/torchserve/test_torchserve.py b/tools/torchserve/test_torchserve.py
new file mode 100644
index 000000000..824dee952
--- /dev/null
+++ b/tools/torchserve/test_torchserve.py
@@ -0,0 +1,59 @@
+import base64
+from argparse import ArgumentParser
+from io import BytesIO
+
+import matplotlib.pyplot as plt
+import mmcv
+import requests
+
+from mmseg.apis import inference_segmentor, init_segmentor
+
+
+def parse_args():
+    parser = ArgumentParser(
+        description='Compare the results of TorchServe and PyTorch, '
+        'and visualize them.')
+    parser.add_argument('img', help='Image file')
+    parser.add_argument('config', help='Config file')
+    parser.add_argument('checkpoint', help='Checkpoint file')
+    parser.add_argument('model_name', help='The model name in the server')
+    parser.add_argument(
+        '--inference-addr',
+        default='127.0.0.1:8080',
+        help='Address and port of the inference server')
+    parser.add_argument(
+        '--result-image',
+        type=str,
+        default=None,
+        help='Save the server output to this image file')
+    parser.add_argument(
+        '--device', default='cuda:0', help='Device used for inference')
+
+    args = parser.parse_args()
+    return args
+
+
+def main(args):
+    url = 'http://' + args.inference_addr + '/predictions/' + args.model_name
+    with open(args.img, 'rb') as image:
+        tmp_res = requests.post(url, image)
+    base64_str = tmp_res.content
+    buffer = base64.b64decode(base64_str)
+    if args.result_image:
+        with open(args.result_image, 'wb') as out_image:
+            out_image.write(buffer)
+        plt.imshow(mmcv.imread(args.result_image, 'grayscale'))
+        plt.show()
+    else:
+        plt.imshow(plt.imread(BytesIO(buffer)))
+        plt.show()
+    model = init_segmentor(args.config, args.checkpoint, args.device)
+    image = mmcv.imread(args.img)
+    result = inference_segmentor(model, image)
+    plt.imshow(result[0])
+    plt.show()
+
+
+if __name__ == '__main__':
+    args = parse_args()
+    main(args)
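
A note on the handler change above: because `postprocess` now returns a base64 string instead of raw PNG bytes, any client (not just `test_torchserve.py`) must decode the response before treating it as an image. Below is a minimal client sketch, not part of this patch; it assumes a TorchServe instance already serving a model named `fcn` at `127.0.0.1:8080` (matching the docs example), and the input/output paths `demo/demo.png` and `result.png` are illustrative.

```python
# Minimal client sketch (illustrative, not part of the patch): query a local
# TorchServe endpoint and decode the base64-encoded PNG that the updated
# MMsegHandler.postprocess returns. The endpoint address, model name 'fcn',
# and file paths are assumptions mirroring the docs example above.
import base64

import requests

url = 'http://127.0.0.1:8080/predictions/fcn'  # assumed local deployment
with open('demo/demo.png', 'rb') as image:
    response = requests.post(url, image)

# The handler returns a base64 string rather than raw PNG bytes,
# so decode it back into bytes before writing the image to disk.
png_bytes = base64.b64decode(response.content)
with open('result.png', 'wb') as out_image:
    out_image.write(png_bytes)
```

This mirrors the `requests.post` / `base64.b64decode` round trip performed by `tools/torchserve/test_torchserve.py` in the new file above.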