[Fix] Avoid outputting empty tensor in NMS (#42)
* Remove slice op
* Fix tests
* Fix tests
* fix tests
parent de367b72bf
commit 1a040036cc
@@ -70,7 +70,7 @@ def select_nms_index(scores: torch.Tensor,
     batched_labels = batched_labels[topk_batch_inds, topk_inds, ...]
 
     # slice and recover the tensor
-    return batched_dets[:, 0:-1, :], batched_labels[:, 0:-1]
+    return batched_dets, batched_labels
 
 
 def _multiclass_nms(boxes: Tensor,
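Keeping the full padded tensor is what avoids the empty output: even when no box survives NMS, the exported graph returns the pad row instead of a (batch, 0, 5) tensor. A minimal consumer-side sketch with hypothetical values, assuming the pad is the last row (as the test slicing below suggests):

import torch

# Hypothetical padded NMS output: one real detection plus one pad row per image.
batched_dets = torch.tensor([[[0., 0., 10., 10., 0.9],
                              [0., 0., 0., 0., 0.]]])   # (1, 2, 5)
batched_labels = torch.tensor([[3, -1]])                # (1, 2), pad label assumed -1

# Callers that need only the real boxes can strip the trailing pad row themselves.
dets, labels = batched_dets[:, 0:-1, :], batched_labels[:, 0:-1]
assert dets.shape == (1, 1, 5) and labels.shape == (1, 1)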
@@ -218,7 +218,8 @@ def test_multiclass_nms_with_keep_top_k(pre_top_k):
     output = backend_model.output_to_list(output)
     dets = output[0]
 
-    assert dets.shape[1] < keep_top_k, \
+    # Subtract 1 dim since we pad the tensors
+    assert dets.shape[1] - 1 < keep_top_k, \
         'multiclass_nms returned more values than "keep_top_k"\n' \
         f'dets.shape: {dets.shape}\n' \
         f'keep_top_k: {keep_top_k}'
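With the pad row kept in the backend output, the visible row count is one larger than the number of real detections, hence the "- 1" in the assertion above. A small sketch with hypothetical numbers:

keep_top_k = 10
num_kept = 3                          # real detections surviving NMS (hypothetical)
dets_shape = (1, num_kept + 1, 5)     # (batch, kept + pad row, [x1, y1, x2, y2, score])
assert dets_shape[1] - 1 <= keep_top_k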
@@ -467,18 +467,6 @@ def test_cascade_roi_head(backend_type: Backend):
         'proposal_list': [proposals],
         'img_metas': [img_metas]
     }
-    model_outputs = get_model_outputs(cascade_roi_head, 'simple_test',
-                                      model_inputs)
-    processed_model_outputs = []
-    outputs = model_outputs[0]
-    for output in outputs:
-        if output.shape == (0, 5):
-            processed_model_outputs.append(np.zeros((1, 5)))
-        else:
-            processed_model_outputs.append(output)
-    processed_model_outputs = np.array(processed_model_outputs).squeeze()
-    processed_model_outputs = processed_model_outputs[None, :, :]
-
     output_names = ['results']
     deploy_cfg = mmcv.Config(
         dict(
@@ -502,17 +490,7 @@ def test_cascade_roi_head(backend_type: Backend):
         model_inputs=model_inputs,
         deploy_cfg=deploy_cfg)
 
-    if isinstance(backend_outputs, (list, tuple)) and \
-            backend_outputs[0].shape == (1, 0, 5):
-        processed_backend_outputs = torch.zeros((1, 80, 5))
-    else:
-        processed_backend_outputs = backend_outputs
-
-    model_output = processed_model_outputs
-    backend_output = [
-        out.detach().cpu().numpy() for out in processed_backend_outputs
-    ]
-    assert np.allclose(model_output, backend_output, rtol=1e-03, atol=1e-05)
+    assert backend_outputs is not None
 
 
 def get_fovea_head_model():
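Because the NMS output now always carries the pad row, an image with no surviving boxes no longer yields an empty (1, 0, 5) tensor, which is why the shape normalization above could be dropped. A minimal sketch of that guarantee, with hypothetical values:

import torch

# Hypothetical backend output for an image with no real detections:
# a single zero pad row instead of an empty (1, 0, 5) tensor.
backend_dets = torch.zeros(1, 1, 5)
backend_labels = -torch.ones(1, 1)    # pad label assumed -1
assert backend_dets.numel() > 0       # never empty once the pad row is kept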
@@ -653,14 +631,8 @@ def test_cascade_roi_head_with_mask(backend_type: Backend):
         deploy_cfg=deploy_cfg)
     bbox_results = backend_outputs[0]
     segm_results = backend_outputs[1]
-    expected_bbox_results = np.zeros((1, 80, 5))
-    expected_segm_results = -np.ones((1, 80))
-    assert np.allclose(
-        expected_bbox_results, bbox_results, rtol=1e-03,
-        atol=1e-05), 'bbox_results do not match.'
-    assert np.allclose(
-        expected_segm_results, segm_results, rtol=1e-03,
-        atol=1e-05), 'segm_results do not match.'
+    assert bbox_results is not None
+    assert segm_results is not None
 
 
 def get_yolov3_head_model():
@@ -302,6 +302,9 @@ def test_batched_nms(backend,
         score_threshold=score_threshold,
         pre_top_k=pre_topk + 1,
         keep_top_k=after_topk + 1)
+    expected_result = (expected_result[0][:,
+                                          0:-1, :], expected_result[1][:,
+                                                                       0:-1])
 
     boxes = nms_boxes.unsqueeze(2).tile(num_classes, 1)
 
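The reference result above is computed with pre_top_k and keep_top_k raised by one and the last entry sliced off, presumably to compensate for the extra pad row the op output carries. A minimal sketch of just the slicing pattern, with hypothetical shapes:

import torch

expected_dets = torch.rand(1, 6, 5)     # (batch, k + pad, [x1, y1, x2, y2, score])
expected_labels = torch.zeros(1, 6)     # (batch, k + pad)
expected_result = (expected_dets[:, 0:-1, :], expected_labels[:, 0:-1])
assert expected_result[0].shape == (1, 5, 5)
assert expected_result[1].shape == (1, 5)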