# Copyright (c) OpenMMLab. All rights reserved.
import logging
import tempfile
from unittest.mock import MagicMock, patch

import mmcv.runner
import pytest
import torch
import torch.nn as nn
from mmcv.runner import obj_from_dict
from torch.utils.data import DataLoader, Dataset

from mmseg.apis import single_gpu_test
from mmseg.core import DistEvalHook, EvalHook
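# These tests exercise the pre_eval-based evaluation workflow: EvalHook and
# DistEvalHook run inference through single_gpu_test / multi_gpu_test and pass
# the collected per-batch results to dataset.evaluate. In a full training
# config the same hook is typically configured through the `evaluation` field;
# a minimal sketch (values are illustrative only, not taken from this file):
#
#     evaluation = dict(interval=4000, metric='mIoU', pre_eval=True)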
class ExampleDataset(Dataset):

    def __getitem__(self, idx):
        results = dict(img=torch.tensor([1]), img_metas=dict())
        return results

    def __len__(self):
        return 1


class ExampleModel(nn.Module):

    def __init__(self):
        super(ExampleModel, self).__init__()
        self.test_cfg = None
        self.conv = nn.Conv2d(3, 3, 3)

    def forward(self, img, img_metas, test_mode=False, **kwargs):
        return img

    def train_step(self, data_batch, optimizer):
        loss = self.forward(**data_batch)
        return dict(loss=loss)
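# In the real datasets, `pre_eval` computes per-image intermediate results
# batch by batch (e.g. intersect-and-union tensors) and `evaluate` reduces
# them into metrics such as mIoU. The tests below replace both methods with
# MagicMock so they can assert on how the eval hooks call them.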
def test_iter_eval_hook():
    with pytest.raises(TypeError):
        # `EvalHook` only accepts a single DataLoader, not a list of loaders.
        test_dataset = ExampleModel()
        data_loader = [
            DataLoader(
                test_dataset,
                batch_size=1,
                sampler=None,
                num_workers=0,
                shuffle=False)
        ]
        EvalHook(data_loader)

    test_dataset = ExampleDataset()
    test_dataset.pre_eval = MagicMock(return_value=[torch.tensor([1])])
    test_dataset.evaluate = MagicMock(return_value=dict(test='success'))
    loader = DataLoader(test_dataset, batch_size=1)
    model = ExampleModel()
    data_loader = DataLoader(
        test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False)
    optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
    optimizer = obj_from_dict(optim_cfg, torch.optim,
                              dict(params=model.parameters()))

    # test EvalHook
    with tempfile.TemporaryDirectory() as tmpdir:
        eval_hook = EvalHook(data_loader, by_epoch=False, efficient_test=True)
        runner = mmcv.runner.IterBasedRunner(
            model=model,
            optimizer=optimizer,
            work_dir=tmpdir,
            logger=logging.getLogger())
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 1)
        # The hook should forward the collected pre_eval results to evaluate.
        test_dataset.evaluate.assert_called_with([torch.tensor([1])],
                                                 logger=runner.logger)
def test_epoch_eval_hook():
    with pytest.raises(TypeError):
        # `EvalHook` only accepts a single DataLoader, not a list of loaders.
        test_dataset = ExampleModel()
        data_loader = [
            DataLoader(
                test_dataset,
                batch_size=1,
                sampler=None,
                num_workers=0,
                shuffle=False)
        ]
        EvalHook(data_loader, by_epoch=True)

    test_dataset = ExampleDataset()
    test_dataset.pre_eval = MagicMock(return_value=[torch.tensor([1])])
    test_dataset.evaluate = MagicMock(return_value=dict(test='success'))
    loader = DataLoader(test_dataset, batch_size=1)
    model = ExampleModel()
    data_loader = DataLoader(
        test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False)
    optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
    optimizer = obj_from_dict(optim_cfg, torch.optim,
                              dict(params=model.parameters()))

    # test EvalHook with interval
    with tempfile.TemporaryDirectory() as tmpdir:
        eval_hook = EvalHook(data_loader, by_epoch=True, interval=2)
        runner = mmcv.runner.EpochBasedRunner(
            model=model,
            optimizer=optimizer,
            work_dir=tmpdir,
            logger=logging.getLogger())
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 2)
        # With interval=2 and 2 epochs, evaluation runs exactly once.
        test_dataset.evaluate.assert_called_once_with([torch.tensor([1])],
                                                      logger=runner.logger)
def multi_gpu_test(model,
                   data_loader,
                   tmpdir=None,
                   gpu_collect=False,
                   pre_eval=False):
    # Single-process stand-in for the real `mmseg.apis.multi_gpu_test`.
    # Pre eval is set by default when training.
    results = single_gpu_test(model, data_loader, pre_eval=True)
    return results
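# Patching `mmseg.apis.multi_gpu_test` with the stand-in above lets the
# DistEvalHook tests below run in a single process, without setting up a
# distributed environment; the real API additionally supports `tmpdir` and
# `gpu_collect` for gathering results across GPUs.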
@patch('mmseg.apis.multi_gpu_test', multi_gpu_test)
def test_dist_eval_hook():
    with pytest.raises(TypeError):
        # `DistEvalHook` only accepts a single DataLoader, not a list.
        test_dataset = ExampleModel()
        data_loader = [
            DataLoader(
                test_dataset,
                batch_size=1,
                sampler=None,
                num_workers=0,
                shuffle=False)
        ]
        DistEvalHook(data_loader)

    test_dataset = ExampleDataset()
    test_dataset.pre_eval = MagicMock(return_value=[torch.tensor([1])])
    test_dataset.evaluate = MagicMock(return_value=dict(test='success'))
    loader = DataLoader(test_dataset, batch_size=1)
    model = ExampleModel()
    data_loader = DataLoader(
        test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False)
    optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
    optimizer = obj_from_dict(optim_cfg, torch.optim,
                              dict(params=model.parameters()))

    # test DistEvalHook
    with tempfile.TemporaryDirectory() as tmpdir:
        eval_hook = DistEvalHook(
            data_loader, by_epoch=False, efficient_test=True)
        runner = mmcv.runner.IterBasedRunner(
            model=model,
            optimizer=optimizer,
            work_dir=tmpdir,
            logger=logging.getLogger())
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 1)
        test_dataset.evaluate.assert_called_with([torch.tensor([1])],
                                                 logger=runner.logger)
@patch('mmseg.apis.multi_gpu_test', multi_gpu_test)
def test_dist_eval_hook_epoch():
    with pytest.raises(TypeError):
        # `DistEvalHook` only accepts a single DataLoader, not a list.
        test_dataset = ExampleModel()
        data_loader = [
            DataLoader(
                test_dataset,
                batch_size=1,
                sampler=None,
                num_workers=0,
                shuffle=False)
        ]
        DistEvalHook(data_loader)

    test_dataset = ExampleDataset()
    test_dataset.pre_eval = MagicMock(return_value=[torch.tensor([1])])
    test_dataset.evaluate = MagicMock(return_value=dict(test='success'))
    loader = DataLoader(test_dataset, batch_size=1)
    model = ExampleModel()
    data_loader = DataLoader(
        test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False)
    optim_cfg = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
    optimizer = obj_from_dict(optim_cfg, torch.optim,
                              dict(params=model.parameters()))

    # test DistEvalHook with interval
    with tempfile.TemporaryDirectory() as tmpdir:
        eval_hook = DistEvalHook(data_loader, by_epoch=True, interval=2)
        runner = mmcv.runner.EpochBasedRunner(
            model=model,
            optimizer=optimizer,
            work_dir=tmpdir,
            logger=logging.getLogger())
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 2)
        # With interval=2 and 2 epochs, evaluation runs exactly once.
        test_dataset.evaluate.assert_called_with([torch.tensor([1])],
                                                 logger=runner.logger)