[Fix] fix_torchserver1.1 (#844)
* test_torchserver1.1
* test_torchserver1.2
* update
* update mmseg_handler.py
* update docs
* update torchserver
* transfer torchserver to torchserve
* update docs
parent cff01b36c9
commit 0b11d58773
@@ -304,7 +304,7 @@ In order to serve an `MMSegmentation` model with [`TorchServe`](https://pytorch.
### 1. Convert model from MMSegmentation to TorchServe

```shell
-python tools/mmseg2torchserve.py ${CONFIG_FILE} ${CHECKPOINT_FILE} \
+python tools/torchserve/mmseg2torchserve.py ${CONFIG_FILE} ${CHECKPOINT_FILE} \
--output-folder ${MODEL_STORE} \
--model-name ${MODEL_NAME}
```
@@ -359,3 +359,20 @@ plt.show()
You should see something similar to:



+You can also use `test_torchserve.py` to compare the results of TorchServe and PyTorch and visualize them.
+
+```shell
+python tools/torchserve/test_torchserve.py ${IMAGE_FILE} ${CONFIG_FILE} ${CHECKPOINT_FILE} ${MODEL_NAME}
+[--inference-addr ${INFERENCE_ADDR}] [--result-image ${RESULT_IMAGE}] [--device ${DEVICE}]
+```
+
+Example:
+
+```shell
+python tools/torchserve/test_torchserve.py \
+demo/demo.png \
+configs/fcn/fcn_r50-d8_512x1024_40k_cityscapes.py \
+checkpoint/fcn_r50-d8_512x1024_40k_cityscapes_20200604_192608-efe53f0d.pth \
+fcn
+```
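For reference, the request/response round trip that `test_torchserve.py` automates can also be done by hand. The sketch below is illustrative rather than part of the change: it assumes a TorchServe instance is already listening on the default `127.0.0.1:8080` with the `fcn` model from the example registered, and it decodes the base64-encoded PNG that the updated `mmseg_handler.py` now returns.

```python
# Illustrative sketch (not part of this PR): query the TorchServe endpoint
# directly and decode the base64-encoded PNG produced by mmseg_handler.py.
import base64

import mmcv
import requests

url = 'http://127.0.0.1:8080/predictions/fcn'   # address and model name from the example above
with open('demo/demo.png', 'rb') as image:
    response = requests.post(url, image)

png_bytes = base64.b64decode(response.content)  # the handler returns a base64 string
seg_map = mmcv.imfrombytes(png_bytes, flag='grayscale')
print(seg_map.shape)
```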
@@ -8,6 +8,6 @@ line_length = 79
multi_line_output = 0
known_standard_library = setuptools
known_first_party = mmseg
-known_third_party = PIL,cityscapesscripts,cv2,detail,matplotlib,mmcv,numpy,onnxruntime,packaging,prettytable,pytest,pytorch_sphinx_theme,scipy,seaborn,torch,ts
+known_third_party = PIL,cityscapesscripts,cv2,detail,matplotlib,mmcv,numpy,onnxruntime,packaging,prettytable,pytest,pytorch_sphinx_theme,requests,scipy,seaborn,torch,ts
no_lines_before = STDLIB,LOCALFOLDER
default_section = THIRDPARTY
@@ -1,11 +1,11 @@
# Copyright (c) OpenMMLab. All rights reserved.
import base64
-import io
import os

import cv2
import mmcv
import torch
+from mmcv.cnn.utils.sync_bn import revert_sync_batchnorm
from ts.torch_handler.base_handler import BaseHandler

from mmseg.apis import inference_segmentor, init_segmentor
@@ -27,6 +27,7 @@ class MMsegHandler(BaseHandler):
        self.config_file = os.path.join(model_dir, 'config.py')

        self.model = init_segmentor(self.config_file, checkpoint, self.device)
+        self.model = revert_sync_batchnorm(self.model)
        self.initialized = True

    def preprocess(self, data):
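The one-line addition above is the substance of this hunk: `revert_sync_batchnorm` converts any SyncBN layers in the loaded model back to plain BatchNorm, presumably because a TorchServe worker runs as a single process with no distributed group for SyncBN to synchronize across. A minimal sketch of what that call does, using a toy module rather than a real MMSegmentation model (illustrative only):

```python
# Minimal sketch (not from this PR): revert_sync_batchnorm swaps SyncBN layers
# for ordinary BatchNorm so a model trained with SyncBN can run in a single,
# non-distributed process such as a TorchServe worker.
import torch
from mmcv.cnn.utils.sync_bn import revert_sync_batchnorm

toy = torch.nn.Sequential(
    torch.nn.Conv2d(3, 8, 3),
    torch.nn.SyncBatchNorm(8),  # stand-in for a SyncBN layer from a config
)
toy = revert_sync_batchnorm(toy)
print(toy)  # the SyncBatchNorm entry is now a BatchNorm-compatible module
```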
@@ -47,8 +48,10 @@ class MMsegHandler(BaseHandler):

    def postprocess(self, data):
        output = []

        for image_result in data:
-            buffer = io.BytesIO()
-            output.append(buffer.tobytes())
+            _, buffer = cv2.imencode('.png', image_result[0].astype('uint8'))
+            bast64_data = base64.b64encode(buffer.tobytes())
+            bast64_str = str(bast64_data, 'utf-8')
+            output.append(bast64_str)
        return output
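With this change, `postprocess` no longer returns raw buffer objects: it PNG-encodes each segmentation map and returns it as a base64 string, which is what the new `test_torchserve.py` below decodes. The round trip, sketched with a fake segmentation map standing in for a real result (illustrative, not part of the PR):

```python
# Illustrative round trip (not part of this PR): the handler PNG-encodes the
# segmentation map and base64-encodes the bytes; a client reverses both steps.
import base64

import cv2
import numpy as np

seg_map = np.random.randint(0, 19, (64, 64), dtype=np.uint8)  # stand-in result

# server side, mirroring the updated postprocess
_, buffer = cv2.imencode('.png', seg_map)
b64_str = str(base64.b64encode(buffer.tobytes()), 'utf-8')

# client side, mirroring tools/torchserve/test_torchserve.py
png_bytes = base64.b64decode(b64_str)
restored = cv2.imdecode(np.frombuffer(png_bytes, np.uint8), cv2.IMREAD_GRAYSCALE)
assert np.array_equal(restored, seg_map)  # PNG is lossless, so the map survives
```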
@@ -0,0 +1,59 @@
import base64
from argparse import ArgumentParser
from io import BytesIO

import matplotlib.pyplot as plt
import mmcv
import requests

from mmseg.apis import inference_segmentor, init_segmentor


def parse_args():
    parser = ArgumentParser(
        description='Compare result of torchserve and pytorch,'
        'and visualize them.')
    parser.add_argument('img', help='Image file')
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument('model_name', help='The model name in the server')
    parser.add_argument(
        '--inference-addr',
        default='127.0.0.1:8080',
        help='Address and port of the inference server')
    parser.add_argument(
        '--result-image',
        type=str,
        default=None,
        help='save server output in result-image')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')

    args = parser.parse_args()
    return args


def main(args):
    url = 'http://' + args.inference_addr + '/predictions/' + args.model_name
    with open(args.img, 'rb') as image:
        tmp_res = requests.post(url, image)
    base64_str = tmp_res.content
    buffer = base64.b64decode(base64_str)
    if args.result_image:
        with open(args.result_image, 'wb') as out_image:
            out_image.write(buffer)
        plt.imshow(mmcv.imread(args.result_image, 'grayscale'))
        plt.show()
    else:
        plt.imshow(plt.imread(BytesIO(buffer)))
        plt.show()
    model = init_segmentor(args.config, args.checkpoint, args.device)
    image = mmcv.imread(args.img)
    result = inference_segmentor(model, image)
    plt.imshow(result[0])
    plt.show()


if __name__ == '__main__':
    args = parse_args()
    main(args)
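The script compares the two results visually only. If a numeric check is ever wanted, the decoded server mask can be compared against the local result directly; a possible extension along these lines (not part of the PR, function and variable names hypothetical):

```python
# Possible extension (not in this PR): compare the TorchServe mask and the
# local PyTorch result numerically instead of only plotting them.
import base64

import cv2
import numpy as np


def masks_match(server_b64, local_result):
    """server_b64 is the base64 body returned by TorchServe; local_result is
    the list returned by inference_segmentor."""
    png_bytes = base64.b64decode(server_b64)
    server_mask = cv2.imdecode(
        np.frombuffer(png_bytes, np.uint8), cv2.IMREAD_GRAYSCALE)
    return np.array_equal(server_mask, local_result[0].astype(np.uint8))
```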