Merge branch 'xiexinchen/rename_apis' into 'refactor_dev'

[Refactor] Rename APIs

See merge request openmmlab-enterprise/openmmlab-ce/mmsegmentation!63

commit e324e8f3d4
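In short, this MR renames the public inference entry points in `mmseg.apis`: `init_segmentor` becomes `init_model` and `inference_segmentor` becomes `inference_model`, while `show_result_pyplot` keeps its name. A minimal before/after sketch of caller code, reusing the config and checkpoint paths that appear in the docs hunks of this diff:

```python
# Before this MR (old API names):
#   from mmseg.apis import init_segmentor, inference_segmentor, show_result_pyplot
#   model = init_segmentor(config_file, checkpoint_file, device='cuda:0')
#   result = inference_segmentor(model, img)

# After this MR (renamed API):
from mmseg.apis import inference_model, init_model, show_result_pyplot

config_file = 'configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py'
checkpoint_file = 'checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth'

# build the model, run inference on one image, and visualize the result
model = init_model(config_file, checkpoint_file, device='cuda:0')
result = inference_model(model, 'demo/demo.png')
show_result_pyplot(model, 'demo/demo.png', result)
```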
@@ -9,7 +9,7 @@ from argparse import ArgumentParser
 import requests
 from mmcv import Config

-from mmseg.apis import inference_segmentor, init_segmentor, show_result_pyplot
+from mmseg.apis import inference_model, init_model, show_result_pyplot
 from mmseg.utils import get_root_logger

 # ignore warnings when segmentors inference
@@ -57,7 +57,7 @@ def parse_args():
     return args


-def inference_model(config_name, checkpoint, args, logger=None):
+def inference(config_name, checkpoint, args, logger=None):
     cfg = Config.fromfile(config_name)
     if args.aug:
         if 'flip' in cfg.data.test.pipeline[
@@ -72,9 +72,9 @@ def inference_model(config_name, checkpoint, args, logger=None):
         else:
             print(f'{config_name}: unable to start aug test', flush=True)

-    model = init_segmentor(cfg, checkpoint, device=args.device)
+    model = init_model(cfg, checkpoint, device=args.device)
     # test a single image
-    result = inference_segmentor(model, args.img)
+    result = inference_model(model, args.img)

     # show the results
     if args.show:
@@ -102,7 +102,7 @@ def main(args):
                 model_info['checkpoint'].strip())
             try:
                 # build the model from a config file and a checkpoint file
-                inference_model(config_name, checkpoint, args)
+                inference(config_name, checkpoint, args)
             except Exception:
                 print(f'{config_name} test failed!')
                 continue
@@ -139,7 +139,7 @@ def main(args):
             # test model inference with checkpoint
             try:
                 # build the model from a config file and a checkpoint file
-                inference_model(config_path, checkpoint, args, logger)
+                inference(config_path, checkpoint, args, logger)
             except Exception as e:
                 logger.error(f'{config_path} " : {repr(e)}')

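Note that in this benchmark script the local helper keeps its body but changes its name from `inference_model` to `inference`; presumably this avoids shadowing the newly imported `inference_model` API, which the helper now calls internally.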
@@ -144,7 +144,7 @@
    },
    "outputs": [],
    "source": [
-    "from mmseg.apis import inference_segmentor, init_segmentor, show_result_pyplot\n",
+    "from mmseg.apis import inference_model, init_model, show_result_pyplot\n",
     "from mmseg.core.evaluation import get_palette"
    ]
   },
@@ -173,7 +173,7 @@
    "outputs": [],
    "source": [
     "# build the model from a config file and a checkpoint file\n",
-    "model = init_segmentor(config_file, checkpoint_file, device='cuda:0')"
+    "model = init_model(config_file, checkpoint_file, device='cuda:0')"
    ]
   },
   {
@@ -186,7 +186,7 @@
    "source": [
     "# test a single image\n",
     "img = 'demo/demo.png'\n",
-    "result = inference_segmentor(model, img)"
+    "result = inference_model(model, img)"
    ]
   },
   {
@@ -596,7 +596,7 @@
     "img = mmcv.imread('iccv09Data/images/6000124.jpg')\n",
     "\n",
     "model.cfg = cfg\n",
-    "result = inference_segmentor(model, img)\n",
+    "result = inference_model(model, img)\n",
     "plt.figure(figsize=(8, 6))\n",
     "show_result_pyplot(model, img, result, palette)"
    ]
@@ -1,7 +1,7 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 from argparse import ArgumentParser

-from mmseg.apis import inference_segmentor, init_segmentor, show_result_pyplot
+from mmseg.apis import inference_model, init_model, show_result_pyplot
 from mmseg.core.evaluation import get_palette


@@ -24,9 +24,9 @@ def main():
     args = parser.parse_args()

     # build the model from a config file and a checkpoint file
-    model = init_segmentor(args.config, args.checkpoint, device=args.device)
+    model = init_model(args.config, args.checkpoint, device=args.device)
     # test a single image
-    result = inference_segmentor(model, args.img)
+    result = inference_model(model, args.img)
     # show the results
     show_result_pyplot(
         model,
@@ -20,7 +20,7 @@
    },
    "outputs": [],
    "source": [
-    "from mmseg.apis import init_segmentor, inference_segmentor, show_result_pyplot\n",
+    "from mmseg.apis import init_model, inference_model, show_result_pyplot\n",
     "from mmseg.core.evaluation import get_palette"
    ]
   },
@@ -45,7 +45,7 @@
    "outputs": [],
    "source": [
     "# build the model from a config file and a checkpoint file\n",
-    "model = init_segmentor(config_file, checkpoint_file, device='cuda:0')"
+    "model = init_model(config_file, checkpoint_file, device='cuda:0')"
    ]
   },
   {
@@ -56,7 +56,7 @@
    "source": [
     "# test a single image\n",
     "img = 'demo.png'\n",
-    "result = inference_segmentor(model, img)"
+    "result = inference_model(model, img)"
    ]
   },
   {
@@ -3,7 +3,7 @@ from argparse import ArgumentParser

 import cv2

-from mmseg.apis import inference_segmentor, init_segmentor
+from mmseg.apis import inference_model, init_model
 from mmseg.core.evaluation import get_palette


@@ -52,7 +52,7 @@ def main():
         'At least one output should be enabled.'

     # build the model from a config file and a checkpoint file
-    model = init_segmentor(args.config, args.checkpoint, device=args.device)
+    model = init_model(args.config, args.checkpoint, device=args.device)

     # build input video
     cap = cv2.VideoCapture(args.video)
@@ -83,7 +83,7 @@ def main():
             break

         # test a single image
-        result = inference_segmentor(model, frame)
+        result = inference_model(model, frame)

         # blend raw image and prediction
         draw_img = model.show_result(
@@ -203,18 +203,18 @@ PYTHONPATH="$(dirname $0)/..":$PYTHONPATH
 To verify whether MMSegmentation and the required environment are installed correctly, we can run sample python codes to initialize a segmentor and inference a demo image:

 ```python
-from mmseg.apis import inference_segmentor, init_segmentor
+from mmseg.apis import inference_model, init_model
 import mmcv

 config_file = 'configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py'
 checkpoint_file = 'checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth'

 # build the model from a config file and a checkpoint file
-model = init_segmentor(config_file, checkpoint_file, device='cuda:0')
+model = init_model(config_file, checkpoint_file, device='cuda:0')

 # test a single image and show the results
 img = 'test.jpg'  # or img = mmcv.imread(img), which will only load it once
-result = inference_segmentor(model, img)
+result = inference_model(model, img)
 # visualize the results in a new window
 model.show_result(img, result, show=True)
 # or save the visualization results to image files
@@ -224,7 +224,7 @@ model.show_result(img, result, out_file='result.jpg', opacity=0.5)
 # test a video and show the results
 video = mmcv.VideoReader('video.mp4')
 for frame in video:
-    result = inference_segmentor(model, frame)
+    result = inference_model(model, frame)
     model.show_result(frame, result, wait_time=1)
 ```

@@ -203,18 +203,18 @@ PYTHONPATH="$(dirname $0)/..":$PYTHONPATH
 To verify whether MMSegmentation and its required environment are installed correctly, we can use sample Python code to initialize a segmentor and run inference on a demo image.

 ```python
-from mmseg.apis import inference_segmentor, init_segmentor
+from mmseg.apis import inference_model, init_model
 import mmcv

 config_file = 'configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py'
 checkpoint_file = 'checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth'

 # build the segmentation model from a config file and a checkpoint file
-model = init_segmentor(config_file, checkpoint_file, device='cuda:0')
+model = init_model(config_file, checkpoint_file, device='cuda:0')

 # test a sample image and get the result
 img = 'test.jpg'  # or img = mmcv.imread(img), which will only load the image once
-result = inference_segmentor(model, img)
+result = inference_model(model, img)
 # visualize the result in a new window
 model.show_result(img, result, show=True)
 # or save the visualization result to an image file
@@ -224,7 +224,7 @@ model.show_result(img, result, out_file='result.jpg', opacity=0.5)
 # test a video and get the segmentation results
 video = mmcv.VideoReader('video.mp4')
 for frame in video:
-    result = inference_segmentor(model, frame)
+    result = inference_model(model, frame)
     model.show_result(frame, result, wait_time=1)
 ```

@@ -1,4 +1,4 @@
 # Copyright (c) OpenMMLab. All rights reserved.
-from .inference import inference_segmentor, init_segmentor, show_result_pyplot
+from .inference import inference_model, init_model, show_result_pyplot

-__all__ = ['init_segmentor', 'inference_segmentor', 'show_result_pyplot']
+__all__ = ['init_model', 'inference_model', 'show_result_pyplot']
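Because `__all__` now exports only the new names, downstream code that still imports `init_segmentor` or `inference_segmentor` will fail with an ImportError after this change. A minimal compatibility sketch for such callers (purely illustrative, not part of this MR):

```python
# Hypothetical transition shim for downstream code; not part of this merge request.
try:
    # old names, removed by this MR
    from mmseg.apis import inference_segmentor as inference_model
    from mmseg.apis import init_segmentor as init_model
except ImportError:
    # new names introduced by this MR
    from mmseg.apis import inference_model, init_model
```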
@@ -9,7 +9,7 @@ from mmseg.datasets.pipelines import Compose
 from mmseg.models import build_segmentor


-def init_segmentor(config, checkpoint=None, device='cuda:0'):
+def init_model(config, checkpoint=None, device='cuda:0'):
     """Initialize a segmentor from config file.

     Args:
@@ -67,7 +67,7 @@ class LoadImage:
         return results


-def inference_segmentor(model, img):
+def inference_model(model, img):
     """Inference image(s) with the segmentor.

     Args:
@@ -4,7 +4,7 @@ import warnings

 from mmcv import Config, DictAction

-from mmseg.apis import init_segmentor
+from mmseg.apis import init_model


 def parse_args():
@@ -59,7 +59,7 @@ def main():
         cfg.dump('example.py')
     # dump models graph
     if args.graph:
-        model = init_segmentor(args.config, device='cpu')
+        model = init_model(args.config, device='cpu')
         print(f'Model graph:\n{str(model)}')
         with open('example-graph.txt', 'w') as f:
             f.writelines(str(model))
@@ -8,7 +8,7 @@ import torch
 from mmcv.cnn.utils.sync_bn import revert_sync_batchnorm
 from ts.torch_handler.base_handler import BaseHandler

-from mmseg.apis import inference_segmentor, init_segmentor
+from mmseg.apis import inference_model, init_model


 class MMsegHandler(BaseHandler):
@@ -26,7 +26,7 @@ class MMsegHandler(BaseHandler):
         checkpoint = os.path.join(model_dir, serialized_file)
         self.config_file = os.path.join(model_dir, 'config.py')

-        self.model = init_segmentor(self.config_file, checkpoint, self.device)
+        self.model = init_model(self.config_file, checkpoint, self.device)
         self.model = revert_sync_batchnorm(self.model)
         self.initialized = True

@@ -43,7 +43,7 @@ class MMsegHandler(BaseHandler):
         return images

     def inference(self, data, *args, **kwargs):
-        results = [inference_segmentor(self.model, img) for img in data]
+        results = [inference_model(self.model, img) for img in data]
         return results

     def postprocess(self, data):
@@ -6,7 +6,7 @@ import matplotlib.pyplot as plt
 import mmcv
 import requests

-from mmseg.apis import inference_segmentor, init_segmentor
+from mmseg.apis import inference_model, init_model


 def parse_args():
@@ -46,9 +46,9 @@ def main(args):
     else:
         plt.imshow(plt.imread(BytesIO(content)))
         plt.show()
-    model = init_segmentor(args.config, args.checkpoint, args.device)
+    model = init_model(args.config, args.checkpoint, args.device)
     image = mmcv.imread(args.img)
-    result = inference_segmentor(model, image)
+    result = inference_model(model, image)
     plt.imshow(result[0])
     plt.show()
