[Feature] Support NCNN inference tests (#34)

* add inference test

* fix for review comments

* add inference test results table

* fix grammar errors

* fix typo
This commit is contained in:
hanrui1sensetime 2021-08-12 17:38:38 +08:00 committed by GitHub
parent 7dbc12d23f
commit f8b62ae4d0
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 41 additions and 23 deletions

View File

@ -66,3 +66,19 @@ Before starting this tutorial, you should make sure that the prerequisites menti
Undefined symbol: __cpu_model
```
This is a bug of gcc-5; you should upgrade to `gcc >= 6`.
## Performance Test
### MMCls
This table shows the performance of mmclassification models deployed on ncnn.
Dataset: ImageNet `val` dataset.
| Model | Top-1(%) | Top-5(%) |
|-------|----------|----------|
| MobileNetV2| 71.86 (71.86) | 90.42 (90.42) |
| ResNet | 69.88 (70.07) | 89.34 (89.44) |
| ResNeXt | 78.61 (78.71) | 94.17 (94.12) |
The values in parentheses are the inference results from PyTorch.
(According to: [mmcls model_zoo docs](https://github.com/open-mmlab/mmclassification/blob/master/docs/model_zoo.md))

View File

@ -107,20 +107,23 @@ class NCNNClassifier(DeployBaseClassifier):
def forward_test(self, imgs, *args, **kwargs):
    """Run CPU inference with the ncnn backend, one image at a time.

    Args:
        imgs: 4-D tensor of shape (batch, channel, height, width);
            each image is fed through its own ncnn extractor pass.

    Returns:
        list: one ``np.ndarray`` of raw 'output' blob values per image.

    Raises:
        NotImplementedError: if ``self.device_id`` selects a GPU
            (only CPU, ``device_id == -1``, is supported).
        AssertionError: if input is not 4-D or an ncnn layer fails.
    """
    import ncnn
    assert len(imgs.shape) == 4
    # GPU path is unimplemented; fail before doing any per-image work.
    if self.device_id != -1:
        raise NotImplementedError('GPU device is not implemented.')
    batch_size = imgs.shape[0]
    results_list = []
    for idx in range(batch_size):
        input_data = imgs[idx].cpu().numpy()
        input_data = ncnn.Mat(input_data)
        # A fresh extractor per image keeps inference passes independent.
        extractor = self.net.create_extractor()
        extractor.input('input', input_data)
        return_status, results = extractor.extract('output')
        results = np.array(results)
        # ncnn returns -100 specifically on layer memory-allocation failure.
        assert return_status != -100, \
            'Memory allocation failed in ncnn layers'
        assert return_status == 0
        results_list.append(results)
    return results_list
class PPLClassifier(DeployBaseClassifier): class PPLClassifier(DeployBaseClassifier):

View File

@ -14,7 +14,8 @@ def parse_args():
description='MMDeploy test (and eval) a backend.') description='MMDeploy test (and eval) a backend.')
parser.add_argument('deploy_cfg', help='Deploy config path') parser.add_argument('deploy_cfg', help='Deploy config path')
parser.add_argument('model_cfg', help='Model config path') parser.add_argument('model_cfg', help='Model config path')
parser.add_argument('model', help='Input model file.') parser.add_argument(
'--model', type=str, nargs='+', help='Input model files.')
parser.add_argument('--out', help='output result file in pickle format') parser.add_argument('--out', help='output result file in pickle format')
parser.add_argument( parser.add_argument(
'--format-only', '--format-only',
@ -66,7 +67,6 @@ def main():
args = parse_args() args = parse_args()
if args.out is not None and not args.out.endswith(('.pkl', '.pickle')): if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
raise ValueError('The output file must be a pkl file.') raise ValueError('The output file must be a pkl file.')
deploy_cfg_path = args.deploy_cfg deploy_cfg_path = args.deploy_cfg
model_cfg_path = args.model_cfg model_cfg_path = args.model_cfg
@ -82,13 +82,12 @@ def main():
# load the model of the backend # load the model of the backend
device_id = -1 if args.device == 'cpu' else 0 device_id = -1 if args.device == 'cpu' else 0
backend = deploy_cfg.get('backend', 'default') backend = deploy_cfg.get('backend', 'default')
model = init_backend_model([args.model], model = init_backend_model(
codebase=codebase, args.model,
backend=backend, codebase=codebase,
class_names=get_classes_from_config( backend=backend,
codebase, model_cfg), class_names=get_classes_from_config(codebase, model_cfg),
device_id=device_id) device_id=device_id)
model = MMDataParallel(model, device_ids=[0]) model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(codebase, model, data_loader, args.show, outputs = single_gpu_test(codebase, model, data_loader, args.show,
args.show_dir, args.show_score_thr) args.show_dir, args.show_score_thr)