Add test tutorial (#9)

* add test tutorial

* remove torch/torchvision from requirements

* update getting started

* rename drop_out_ratio -> dropout_ratio
Jerry Jiarui XU authored 2020-07-12 23:53:56 +08:00, committed by GitHub
parent b72a6d00ef
commit b975d3b72a
34 changed files with 81 additions and 70 deletions
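
Most of the 34 files carry the same mechanical keyword rename in decode-head configs. A minimal sketch of an affected section after the change; the head `type`, `in_channels`, and the SyncBN `norm_cfg` are illustrative assumptions, the remaining keys mirror the hunks below:

```python
# Hypothetical decode-head snippet after the rename; only `dropout_ratio`
# (formerly `drop_out_ratio`) actually changed in this commit.
norm_cfg = dict(type='SyncBN', requires_grad=True)  # assumed; not shown in the hunks
model = dict(
    decode_head=dict(
        type='FCNHead',          # assumed head type, for illustration only
        in_channels=2048,        # assumed input channels
        in_index=3,
        channels=512,
        dropout_ratio=0.1,       # renamed from drop_out_ratio=0.1
        num_classes=19,
        norm_cfg=norm_cfg,
        align_corners=False))
```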

View File

@@ -22,7 +22,7 @@ model = dict(
         project_channels=256,
         query_scales=(1, ),
         key_pool_scales=(1, 3, 6, 8),
-        drop_out_ratio=0.1,
+        dropout_ratio=0.1,
         num_classes=19,
         norm_cfg=norm_cfg,
         align_corners=False,
@@ -35,7 +35,7 @@ model = dict(
         channels=256,
         num_convs=1,
         concat_input=False,
-        drop_out_ratio=0.1,
+        dropout_ratio=0.1,
         num_classes=19,
         norm_cfg=norm_cfg,
         align_corners=False,

View File

@@ -20,7 +20,7 @@ model = dict(
         in_index=3,
         channels=512,
         recurrence=2,
-        drop_out_ratio=0.1,
+        dropout_ratio=0.1,
         num_classes=19,
         norm_cfg=norm_cfg,
         align_corners=False,
@@ -33,7 +33,7 @@ model = dict(
         channels=256,
         num_convs=1,
         concat_input=False,
-        drop_out_ratio=0.1,
+        dropout_ratio=0.1,
         num_classes=19,
         norm_cfg=norm_cfg,
         align_corners=False,

View File

@@ -20,7 +20,7 @@ model = dict(
         in_index=3,
         channels=512,
         pam_channels=64,
-        drop_out_ratio=0.1,
+        dropout_ratio=0.1,
         num_classes=19,
         norm_cfg=norm_cfg,
         align_corners=False,
@@ -33,7 +33,7 @@ model = dict(
         channels=256,
         num_convs=1,
         concat_input=False,
-        drop_out_ratio=0.1,
+        dropout_ratio=0.1,
         num_classes=19,
         norm_cfg=norm_cfg,
         align_corners=False,

View File

@@ -20,7 +20,7 @@ model = dict(
         in_index=3,
         channels=512,
         dilations=(1, 12, 24, 36),
-        drop_out_ratio=0.1,
+        dropout_ratio=0.1,
         num_classes=19,
         norm_cfg=norm_cfg,
         align_corners=False,
@@ -33,7 +33,7 @@ model = dict(
         channels=256,
         num_convs=1,
         concat_input=False,
-        drop_out_ratio=0.1,
+        dropout_ratio=0.1,
         num_classes=19,
         norm_cfg=norm_cfg,
         align_corners=False,

View File

@@ -22,7 +22,7 @@ model = dict(
         dilations=(1, 12, 24, 36),
         c1_in_channels=256,
         c1_channels=48,
-        drop_out_ratio=0.1,
+        dropout_ratio=0.1,
         num_classes=19,
         norm_cfg=norm_cfg,
         align_corners=False,
@@ -35,7 +35,7 @@ model = dict(
         channels=256,
         num_convs=1,
         concat_input=False,
-        drop_out_ratio=0.1,
+        dropout_ratio=0.1,
         num_classes=19,
         norm_cfg=norm_cfg,
         align_corners=False,

View File

@@ -22,7 +22,7 @@ model = dict(
         num_codes=32,
         use_se_loss=True,
         add_lateral=False,
-        drop_out_ratio=0.1,
+        dropout_ratio=0.1,
         num_classes=19,
         norm_cfg=norm_cfg,
         align_corners=False,
@@ -37,7 +37,7 @@ model = dict(
         channels=256,
         num_convs=1,
         concat_input=False,
-        drop_out_ratio=0.1,
+        dropout_ratio=0.1,
         num_classes=19,
         norm_cfg=norm_cfg,
         align_corners=False,

View File

@@ -41,7 +41,7 @@ model = dict(
         kernel_size=1,
         num_convs=1,
         concat_input=False,
-        drop_out_ratio=-1,
+        dropout_ratio=-1,
         num_classes=19,
         norm_cfg=norm_cfg,
         align_corners=False,

View File

@@ -21,7 +21,7 @@ model = dict(
         channels=512,
         num_convs=2,
         concat_input=True,
-        drop_out_ratio=0.1,
+        dropout_ratio=0.1,
         num_classes=19,
         norm_cfg=norm_cfg,
         align_corners=False,
@@ -34,7 +34,7 @@ model = dict(
         channels=256,
         num_convs=1,
         concat_input=False,
-        drop_out_ratio=0.1,
+        dropout_ratio=0.1,
         num_classes=19,
         norm_cfg=norm_cfg,
         align_corners=False,

View File

@@ -22,7 +22,7 @@ model = dict(
         ratio=1 / 4.,
         pooling_type='att',
         fusion_types=('channel_add', ),
-        drop_out_ratio=0.1,
+        dropout_ratio=0.1,
         num_classes=19,
         norm_cfg=norm_cfg,
         align_corners=False,
@@ -35,7 +35,7 @@ model = dict(
         channels=256,
         num_convs=1,
         concat_input=False,
-        drop_out_ratio=0.1,
+        dropout_ratio=0.1,
         num_classes=19,
         norm_cfg=norm_cfg,
         align_corners=False,

View File

@@ -19,7 +19,7 @@ model = dict(
         in_channels=2048,
         in_index=3,
         channels=512,
-        drop_out_ratio=0.1,
+        dropout_ratio=0.1,
         reduction=2,
         use_scale=True,
         mode='embedded_gaussian',
@@ -35,7 +35,7 @@ model = dict(
         channels=256,
         num_convs=1,
         concat_input=False,
-        drop_out_ratio=0.1,
+        dropout_ratio=0.1,
         num_classes=19,
         norm_cfg=norm_cfg,
         align_corners=False,

View File

@@ -43,7 +43,7 @@ model = dict(
         kernel_size=1,
         num_convs=1,
         concat_input=False,
-        drop_out_ratio=-1,
+        dropout_ratio=-1,
         num_classes=19,
         norm_cfg=norm_cfg,
         align_corners=False,
@@ -56,7 +56,7 @@ model = dict(
         input_transform='resize_concat',
         channels=512,
         ocr_channels=256,
-        drop_out_ratio=-1,
+        dropout_ratio=-1,
         num_classes=19,
         norm_cfg=norm_cfg,
         align_corners=False,

View File

@@ -25,7 +25,7 @@ model = dict(
         shrink_factor=2,
         normalization_factor=1.0,
         psa_softmax=True,
-        drop_out_ratio=0.1,
+        dropout_ratio=0.1,
         num_classes=19,
         norm_cfg=norm_cfg,
         align_corners=False,
@@ -38,7 +38,7 @@ model = dict(
         channels=256,
         num_convs=1,
         concat_input=False,
-        drop_out_ratio=0.1,
+        dropout_ratio=0.1,
         num_classes=19,
         norm_cfg=norm_cfg,
         align_corners=False,

View File

@@ -20,7 +20,7 @@ model = dict(
         in_index=3,
         channels=512,
         pool_scales=(1, 2, 3, 6),
-        drop_out_ratio=0.1,
+        dropout_ratio=0.1,
         num_classes=19,
         norm_cfg=norm_cfg,
         align_corners=False,
@@ -33,7 +33,7 @@ model = dict(
         channels=256,
         num_convs=1,
         concat_input=False,
-        drop_out_ratio=0.1,
+        dropout_ratio=0.1,
         num_classes=19,
         norm_cfg=norm_cfg,
         align_corners=False,

View File

@@ -20,7 +20,7 @@ model = dict(
         in_index=[0, 1, 2, 3],
         pool_scales=(1, 2, 3, 6),
         channels=512,
-        drop_out_ratio=0.1,
+        dropout_ratio=0.1,
         num_classes=19,
         norm_cfg=norm_cfg,
         align_corners=False,
@@ -33,7 +33,7 @@ model = dict(
         channels=256,
         num_convs=1,
         concat_input=False,
-        drop_out_ratio=0.1,
+        dropout_ratio=0.1,
         num_classes=19,
         norm_cfg=norm_cfg,
         align_corners=False,

View File

@@ -13,7 +13,7 @@ model = dict(decode_head=[
         kernel_size=1,
         num_convs=1,
         concat_input=False,
-        drop_out_ratio=-1,
+        dropout_ratio=-1,
         num_classes=150,
         norm_cfg=norm_cfg,
         align_corners=False,
@@ -26,7 +26,7 @@ model = dict(decode_head=[
         input_transform='resize_concat',
         channels=512,
         ocr_channels=256,
-        drop_out_ratio=-1,
+        dropout_ratio=-1,
         num_classes=150,
         norm_cfg=norm_cfg,
         align_corners=False,

View File

@@ -14,7 +14,7 @@ model = dict(decode_head=[
         kernel_size=1,
         num_convs=1,
         concat_input=False,
-        drop_out_ratio=-1,
+        dropout_ratio=-1,
         num_classes=21,
         norm_cfg=norm_cfg,
         align_corners=False,
@@ -27,7 +27,7 @@ model = dict(decode_head=[
         input_transform='resize_concat',
         channels=512,
         ocr_channels=256,
-        drop_out_ratio=-1,
+        dropout_ratio=-1,
         num_classes=21,
         norm_cfg=norm_cfg,
         align_corners=False,

View File

@@ -14,7 +14,7 @@ model = dict(decode_head=[
         kernel_size=1,
         num_convs=1,
         concat_input=False,
-        drop_out_ratio=-1,
+        dropout_ratio=-1,
         num_classes=21,
         norm_cfg=norm_cfg,
         align_corners=False,
@@ -27,7 +27,7 @@ model = dict(decode_head=[
         input_transform='resize_concat',
         channels=512,
         ocr_channels=256,
-        drop_out_ratio=-1,
+        dropout_ratio=-1,
         num_classes=21,
         norm_cfg=norm_cfg,
         align_corners=False,

View File

@@ -13,7 +13,7 @@ model = dict(decode_head=[
         kernel_size=1,
         num_convs=1,
         concat_input=False,
-        drop_out_ratio=-1,
+        dropout_ratio=-1,
         num_classes=150,
         norm_cfg=norm_cfg,
         align_corners=False,
@@ -26,7 +26,7 @@ model = dict(decode_head=[
         input_transform='resize_concat',
         channels=512,
         ocr_channels=256,
-        drop_out_ratio=-1,
+        dropout_ratio=-1,
         num_classes=150,
         norm_cfg=norm_cfg,
         align_corners=False,

View File

@@ -18,7 +18,7 @@ model = dict(
         num_convs=1,
         norm_cfg=norm_cfg,
         concat_input=False,
-        drop_out_ratio=-1,
+        dropout_ratio=-1,
         num_classes=19,
         align_corners=False,
         loss_decode=dict(
@@ -31,7 +31,7 @@ model = dict(
         input_transform='resize_concat',
         in_index=(0, 1, 2, 3),
         norm_cfg=norm_cfg,
-        drop_out_ratio=-1,
+        dropout_ratio=-1,
         num_classes=19,
         align_corners=False,
         loss_decode=dict(

View File

@@ -18,7 +18,7 @@ model = dict(
         num_convs=1,
         norm_cfg=norm_cfg,
         concat_input=False,
-        drop_out_ratio=-1,
+        dropout_ratio=-1,
         num_classes=19,
         align_corners=False,
         loss_decode=dict(
@@ -31,7 +31,7 @@ model = dict(
         input_transform='resize_concat',
         in_index=(0, 1, 2, 3),
         norm_cfg=norm_cfg,
-        drop_out_ratio=-1,
+        dropout_ratio=-1,
         num_classes=19,
         align_corners=False,
         loss_decode=dict(

View File

@@ -18,7 +18,7 @@ model = dict(
         num_convs=1,
         norm_cfg=norm_cfg,
         concat_input=False,
-        drop_out_ratio=-1,
+        dropout_ratio=-1,
         num_classes=19,
         align_corners=False,
         loss_decode=dict(
@@ -31,7 +31,7 @@ model = dict(
         input_transform='resize_concat',
         in_index=(0, 1, 2, 3),
         norm_cfg=norm_cfg,
-        drop_out_ratio=-1,
+        dropout_ratio=-1,
         num_classes=19,
         align_corners=False,
         loss_decode=dict(

View File

@@ -18,7 +18,7 @@ model = dict(
         num_convs=1,
         norm_cfg=norm_cfg,
         concat_input=False,
-        drop_out_ratio=-1,
+        dropout_ratio=-1,
         num_classes=150,
         align_corners=False,
         loss_decode=dict(
@@ -31,7 +31,7 @@ model = dict(
         input_transform='resize_concat',
         in_index=(0, 1, 2, 3),
         norm_cfg=norm_cfg,
-        drop_out_ratio=-1,
+        dropout_ratio=-1,
         num_classes=150,
         align_corners=False,
         loss_decode=dict(

View File

@@ -18,7 +18,7 @@ model = dict(
         num_convs=1,
         norm_cfg=norm_cfg,
         concat_input=False,
-        drop_out_ratio=-1,
+        dropout_ratio=-1,
         num_classes=21,
         align_corners=False,
         loss_decode=dict(
@@ -31,7 +31,7 @@ model = dict(
         input_transform='resize_concat',
         in_index=(0, 1, 2, 3),
         norm_cfg=norm_cfg,
-        drop_out_ratio=-1,
+        dropout_ratio=-1,
         num_classes=21,
         align_corners=False,
         loss_decode=dict(

View File

@@ -18,7 +18,7 @@ model = dict(
         num_convs=1,
         norm_cfg=norm_cfg,
         concat_input=False,
-        drop_out_ratio=-1,
+        dropout_ratio=-1,
         num_classes=21,
         align_corners=False,
         loss_decode=dict(
@@ -31,7 +31,7 @@ model = dict(
         input_transform='resize_concat',
         in_index=(0, 1, 2, 3),
         norm_cfg=norm_cfg,
-        drop_out_ratio=-1,
+        dropout_ratio=-1,
         num_classes=21,
         align_corners=False,
         loss_decode=dict(

View File

@@ -18,7 +18,7 @@ model = dict(
         num_convs=1,
         norm_cfg=norm_cfg,
         concat_input=False,
-        drop_out_ratio=-1,
+        dropout_ratio=-1,
         num_classes=150,
         align_corners=False,
         loss_decode=dict(
@@ -31,7 +31,7 @@ model = dict(
         input_transform='resize_concat',
         in_index=(0, 1, 2, 3),
         norm_cfg=norm_cfg,
-        drop_out_ratio=-1,
+        dropout_ratio=-1,
         num_classes=150,
         align_corners=False,
         loss_decode=dict(

View File

@@ -224,8 +224,8 @@ log_config = dict( # config to register logger hook
 dist_params = dict(backend='nccl') # Parameters to setup distributed training, the port can also be set.
 log_level = 'INFO' # The level of logging.
 load_from = None # load models as a pre-trained model from a given path. This will not resume training.
-resume_from = None # Resume checkpoints from a given path, the training will be resumed from the epoch when the checkpoint's is saved.
-workflow = [('train', 1)] # Workflow for runner. [('train', 1)] means there is only one workflow and the workflow named 'train' is executed once. The workflow trains the model by 12 epochs according to the total_epochs.
+resume_from = None # Resume checkpoints from a given path, the training will be resumed from the iteration when the checkpoint's is saved.
+workflow = [('train', 1)] # Workflow for runner. [('train', 1)] means there is only one workflow and the workflow named 'train' is executed once. The workflow trains the model by 40000 iterations according to the total_iters.
 cudnn_benchmark = True # Whether use cudnn_benchmark to speed up, which is fast for fixed input size.
 optimizer = dict( # Config used to build optimizer, support all the optimizers in PyTorch whose arguments are also the same as those in PyTorch
     type='SGD', # Type of optimizers, refer to https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/optimizer/default_constructor.py#L13 for more details
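
Read back as a config file, the runtime block documented in this hunk amounts to roughly the following. The keys come from the hunk itself; the SGD hyperparameter values are illustrative placeholders, not values taken from this commit:

```python
# Runtime settings described above, for an iteration-based schedule.
# lr / momentum / weight_decay below are illustrative placeholders.
dist_params = dict(backend='nccl')  # distributed setup; the port can also be set
log_level = 'INFO'
load_from = None           # load a model as pre-training; does not resume training
resume_from = None         # resume from a checkpoint at the iteration it was saved
workflow = [('train', 1)]  # one 'train' workflow, run for total_iters iterations
cudnn_benchmark = True     # speedup for fixed input sizes
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
```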

View File

@@ -125,23 +125,34 @@ Assume that you have already downloaded the checkpoints to the directory `checkp
       --eval mAP
   ```
-4. Test PSPNet with 8 GPUs, and evaluate the standard mIoU and cityscapes metric.
+4. Test PSPNet with 4 GPUs, and evaluate the standard mIoU and cityscapes metric.
   ```shell
   ./tools/dist_test.sh configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py \
       checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth \
-      8 --out results.pkl --eval mIoU cityscapes
+      4 --out results.pkl --eval mIoU cityscapes
   ```
-5. Test PSPNet on cityscapes test split with 8 GPUs, and generate the png files to be submit to the official evaluation server.
+5. Test PSPNet on cityscapes test split with 4 GPUs, and generate the png files to be submit to the official evaluation server.
+  First, add following to config file `configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py`,
+  ```python
+  data = dict(
+      test=dict(
+          img_dir='leftImg8bit/test',
+          ann_dir='gtFine/test'))
+  ```
+  Then run test.
   ```shell
   ./tools/dist_test.sh configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py \
       checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth \
-      8 --format-only --options "imgfile_prefix=./pspnet_test_results"
+      4 --format-only --options "imgfile_prefix=./pspnet_test_results"
   ```
   You will get png files under `./pspnet_test_results` directory.
+  You may run `zip -r results.zip pspnet_test_results/` and submit the zip file to [evaluation server](https://www.cityscapes-dataset.com/submit/).

 ### Image demo
@@ -205,8 +216,10 @@ By default we evaluate the model on the validation set after some iterations, yo
 evaluation = dict(interval=4000) # This evaluate the model per 4000 iterations.
 ```
-**\*Important\***: The default learning rate in config files is for 8 GPUs and 1 img/gpu (batch size = 8x1 = 8).
-Equivalently, you may also use 4 GPUs and 2 imgs/gpu since all models using cross-GPU SyncBN.
+**\*Important\***: The default learning rate in config files is for 4 GPUs and 2 img/gpu (batch size = 4x2 = 8).
+Equivalently, you may also use 8 GPUs and 1 imgs/gpu since all models using cross-GPU SyncBN.
+To trade speed with GPU memory, you may pass in `--options model.backbone.with_cp=True` to enable checkpoint in backbone.

 ### Train with a single GPU
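
The learning-rate note in the last hunk assumes a total batch size of 8. A hedged config-side sketch of the same setup follows; `samples_per_gpu`/`workers_per_gpu` are the usual MMSegmentation data keys, but the exact values and the `with_cp` override are illustrative:

```python
# 4 GPUs x 2 imgs/gpu keeps the total batch size at 8, matching the default LR.
data = dict(samples_per_gpu=2, workers_per_gpu=2)

# Config-side equivalent of `--options model.backbone.with_cp=True`:
# gradient checkpointing in the backbone trades compute for GPU memory.
model = dict(backbone=dict(with_cp=True))
```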

View File

@@ -61,6 +61,7 @@ class CityscapesDataset(CustomDataset):
             list[str: str]: result txt files which contains corresponding
                 semantic segmentation images.
         """
+        mmcv.mkdir_or_exist(imgfile_prefix)
         result_files = []
         prog_bar = mmcv.ProgressBar(len(self))
         for idx in range(len(self)):
@@ -135,9 +136,9 @@ class CityscapesDataset(CustomDataset):
                 the prefix of filename, e.g., "a/b/prefix".
                 If results are evaluated with cityscapes protocol, it would be
                 the prefix of output png files. The output files would be
-                png images under folder "a/b/prefix/xxx/", where "xxx" is the
-                video name of cityscapes. If not specified, a temp file will
-                be created.
+                png images under folder "a/b/prefix/xxx.png", where "xxx" is
+                the image name of cityscapes. If not specified, a temp file
+                will be created for evaluation.
                 Default: None.
         Returns:
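
A small sketch of the behaviour the updated docstring describes, one png per image written under `imgfile_prefix`. Only `mmcv.mkdir_or_exist` comes from this hunk; the image name and the dummy prediction are made up for illustration:

```python
import os.path as osp

import mmcv
import numpy as np

# One png per image is written under imgfile_prefix ("a/b/prefix/xxx.png").
imgfile_prefix = './pspnet_test_results'
mmcv.mkdir_or_exist(imgfile_prefix)                   # the line added in this commit
seg_map = np.zeros((1024, 2048), dtype=np.uint8)      # dummy prediction map
out_file = osp.join(imgfile_prefix, 'frankfurt_000000_000294.png')  # hypothetical name
mmcv.imwrite(seg_map, out_file)
```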

View File

@@ -17,7 +17,7 @@ class BaseDecodeHead(nn.Module, metaclass=ABCMeta):
         in_channels (int|Sequence[int]): Input channels.
         channels (int): Channels after modules, before conv_seg.
         num_classes (int): Number of classes.
-        drop_out_ratio (float): Ratio of dropout layer. Default: 0.1.
+        dropout_ratio (float): Ratio of dropout layer. Default: 0.1.
         conv_cfg (dict|None): Config of conv layers. Default: None.
         norm_cfg (dict|None): Config of norm layers. Default: None.
         act_cfg (dict): Config of activation layers.
@@ -46,7 +46,7 @@ class BaseDecodeHead(nn.Module, metaclass=ABCMeta):
                  channels,
                  *,
                  num_classes,
-                 drop_out_ratio=0.1,
+                 dropout_ratio=0.1,
                  conv_cfg=None,
                  norm_cfg=None,
                  act_cfg=dict(type='ReLU'),
@@ -63,7 +63,7 @@ class BaseDecodeHead(nn.Module, metaclass=ABCMeta):
         self._init_inputs(in_channels, in_index, input_transform)
         self.channels = channels
         self.num_classes = num_classes
-        self.drop_out_ratio = drop_out_ratio
+        self.dropout_ratio = dropout_ratio
         self.conv_cfg = conv_cfg
         self.norm_cfg = norm_cfg
         self.act_cfg = act_cfg
@@ -77,8 +77,8 @@ class BaseDecodeHead(nn.Module, metaclass=ABCMeta):
         self.sampler = None

         self.conv_seg = nn.Conv2d(channels, num_classes, kernel_size=1)
-        if drop_out_ratio > 0:
-            self.dropout = nn.Dropout2d(drop_out_ratio)
+        if dropout_ratio > 0:
+            self.dropout = nn.Dropout2d(dropout_ratio)
         else:
             self.dropout = None
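
Isolated from the last hunk, the dropout handling after the rename reduces to the sketch below; `build_dropout` is a throwaway helper for this example, not part of the codebase:

```python
import torch
import torch.nn as nn


def build_dropout(dropout_ratio):
    # Mirrors the logic in BaseDecodeHead.__init__: a non-positive ratio
    # (e.g. the -1 used in several configs above) disables dropout entirely.
    return nn.Dropout2d(dropout_ratio) if dropout_ratio > 0 else None


assert build_dropout(0.1).p == 0.1   # same check as the unit test below
assert build_dropout(-1) is None

feat = torch.randn(2, 512, 64, 64)   # dummy feature map before conv_seg
dropout = build_dropout(0.1)
if dropout is not None:
    feat = dropout(feat)
```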

View File

@@ -1,3 +1,2 @@
 # These must be installed before building mmsegmentation
 numpy
-# torch

View File

@@ -1,4 +1,2 @@
 matplotlib
 numpy
-# torch
-# torchvision

View File

@@ -71,7 +71,7 @@ def test_decode_head():
     assert hasattr(head, 'dropout') and head.dropout.p == 0.1

     # test set dropout
-    head = BaseDecodeHead(32, 16, num_classes=19, drop_out_ratio=0.2)
+    head = BaseDecodeHead(32, 16, num_classes=19, dropout_ratio=0.2)
     assert hasattr(head, 'dropout') and head.dropout.p == 0.2

     # test no input_transform

View File

@@ -4,7 +4,6 @@ CONFIG=$1
 CHECKPOINT=$2
 GPUS=$3
 PORT=${PORT:-29500}
-$CONFIG\/$GPUS/
 PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
 python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \
     $(dirname "$0")/test.py $CONFIG $CHECKPOINT --launcher pytorch ${@:4}

View File

@@ -31,6 +31,7 @@ def main():
         raise ValueError('invalid input shape')

     cfg = Config.fromfile(args.config)
+    cfg.model.pretrained = None
     model = build_segmentor(
         cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg).cuda()
     model.eval()
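
For context, the pattern added in this hunk, clearing `pretrained` before building the model for analysis, looks roughly like the following when used standalone. The config path is the one referenced earlier in these docs and assumes a repository checkout; `.cuda()` is dropped here so the sketch also runs on CPU:

```python
from mmcv import Config

from mmseg.models import build_segmentor

# Build the segmentor only for analysis (e.g. FLOPs counting):
# clearing `pretrained` avoids fetching backbone weights.
cfg = Config.fromfile('configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py')
cfg.model.pretrained = None
model = build_segmentor(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
model.eval()
```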