# dataset settings for Pascal VOC2007 multi-label classification
dataset_type = 'VOC'
# normalization with ImageNet mean/std (RGB order)
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='RandomResizedCrop', size=224),
    dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='ToTensor', keys=['gt_label']),
    dict(type='Collect', keys=['img', 'gt_label'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    # resize the short edge to 256 while keeping the aspect ratio
    dict(type='Resize', size=(256, -1)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='Collect', keys=['img'])
]
data = dict(
    samples_per_gpu=16,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        data_prefix='data/VOCdevkit/VOC2007/',
        ann_file='data/VOCdevkit/VOC2007/ImageSets/Main/trainval.txt',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_prefix='data/VOCdevkit/VOC2007/',
        ann_file='data/VOCdevkit/VOC2007/ImageSets/Main/test.txt',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_prefix='data/VOCdevkit/VOC2007/',
        ann_file='data/VOCdevkit/VOC2007/ImageSets/Main/test.txt',
        pipeline=test_pipeline))
# multi-label metrics: mAP plus class-wise/overall precision, recall and F1
evaluation = dict(
    interval=1, metric=['mAP', 'CP', 'OP', 'CR', 'OR', 'CF1', 'OF1'])
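
# --- Usage sketch (illustrative only, not part of the config) ---------------
# A minimal example of how a config like this is typically consumed with the
# MMClassification 0.x / mmcv API. The config path below is a placeholder and
# the snippet assumes VOC2007 has been prepared under data/VOCdevkit/; adjust
# both to match your setup.
if __name__ == '__main__':
    from mmcv import Config
    from mmcls.datasets import build_dataset

    # Load this file as an mmcv Config (placeholder path).
    cfg = Config.fromfile('configs/_base_/datasets/voc_bs16.py')

    # Build the training dataset from the `data.train` dict above.
    train_set = build_dataset(cfg.data.train)
    print(f'{len(train_set)} training samples')

    # Each sample is a dict with the keys collected by `train_pipeline`:
    # an 'img' tensor (C, H, W) and a multi-hot 'gt_label' tensor.
    sample = train_set[0]
    print(sample['img'].shape, sample['gt_label'])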