Update lib version in requirements and fix some bugs (#40)

pull/1/head
Linyiqi 2021-10-11 19:02:26 +08:00 committed by GitHub
parent 7451fb7425
commit ae96fa303c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
31 changed files with 63 additions and 889 deletions

View File

@ -1,54 +0,0 @@
# Attention-RPN (query-aware) fine-tuning on few-shot COCO, 30-shot setting.
_base_ = [
    '../../_base_/datasets/query_aware/few_shot_coco.py',
    '../../_base_/schedules/schedule.py',
    '../attention_rpn_faster_rcnn_r50_c4.py', '../../_base_/default_runtime.py'
]
# classes splits are predefined in FewShotVOCDataset
# FewShotCocoDefaultDataset predefine ann_cfg for model reproducibility
num_support_ways = 2
num_support_shots = 10
data = dict(
    train=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
        # repeat the small few-shot annotation set so the iteration-based
        # schedule can draw enough samples
        repeat_times=50,
        dataset=dict(
            type='FewShotCocoDefaultDataset',
            ann_cfg=[dict(method='Attention_RPN', setting='30SHOT')],
            num_novel_shots=30,
            classes='NOVEL_CLASSES',
            instance_wise=False)),
    val=dict(classes='NOVEL_CLASSES'),
    test=dict(classes='NOVEL_CLASSES'),
    model_init=dict(
        # same 30-shot annotations, but instance-wise, used to build the
        # support features for inference
        ann_cfg=[dict(method='Attention_RPN', setting='30SHOT')],
        num_novel_shots=30,
        classes='NOVEL_CLASSES',
        instance_wise=True))
evaluation = dict(interval=3000)
checkpoint_config = dict(interval=3000)
optimizer = dict(
    lr=0.001,
    momentum=0.9,
    # box head trains with twice the base learning rate during fine-tuning
    paramwise_cfg=dict(custom_keys={'roi_head.bbox_head': dict(lr_mult=2.0)}))
lr_config = dict(
    warmup_iters=200, warmup_ratio=0.1, step=[
        2000,
        3000,
    ])
log_config = dict(interval=10)
runner = dict(max_iters=3000)
# load_from = 'path of base training model'
load_from = 'work_dirs/arpn_faster_rcnn_r50_c4_coco_base_training/latest.pth'
model = dict(
    # only fine-tune the heads; backbone weights stay frozen
    frozen_parameters=['backbone'],
    rpn_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
    roi_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
)

View File

@ -1,46 +0,0 @@
# Attention-RPN fine-tuning on few-shot VOC split 1, 10-shot setting.
_base_ = [
    '../../../_base_/datasets/query_aware/few_shot_voc.py',
    '../../../_base_/schedules/schedule.py',
    '../../attention_rpn_faster_rcnn_r50_c4.py',
    '../../../_base_/default_runtime.py'
]
# classes splits are predefined in FewShotVOCDataset
num_support_ways = 2
num_support_shots = 9
data = dict(
    train=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
        # repeat the tiny few-shot set so the iteration-based schedule
        # can draw enough samples
        repeat_times=500,
        dataset=dict(
            num_novel_shots=10,
            num_base_shots=10,
            classes='NOVEL_CLASSES_SPLIT1',
        )),
    val=dict(classes='NOVEL_CLASSES_SPLIT1'),
    test=dict(classes='NOVEL_CLASSES_SPLIT1'),
    model_init=dict(
        # annotations used to build support features at test time
        ann_cfg=[('DEFAULT_ANN_FILE_VOC_TFA', 'split1_10shot')],
        num_novel_shots=10,
        classes='NOVEL_CLASSES_SPLIT1'))
evaluation = dict(interval=1000, class_splits=['NOVEL_CLASSES_SPLIT1'])
checkpoint_config = dict(interval=1000)
optimizer = dict(lr=0.001, momentum=0.9)
lr_config = dict(warmup=None, step=[1000])
log_config = dict(interval=10)
runner = dict(max_iters=1000)
# load_from = 'path of base training model'
# NOTE(review): the base config is r50_c4 but this work dir names
# r101_fpn — confirm the checkpoint path.
load_from = 'work_dirs/' \
            'attention_rpn_faster_rcnn_r101_fpn_voc_split1_base_training/' \
            'latest.pth'
model = dict(
    # only fine-tune the heads; backbone weights stay frozen
    frozen_parameters=['backbone'],
    rpn_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
    roi_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
)

View File

@ -1,46 +0,0 @@
# Attention-RPN fine-tuning on few-shot VOC split 1, 1-shot setting.
_base_ = [
    '../../../_base_/datasets/query_aware/few_shot_voc.py',
    '../../../_base_/schedules/schedule.py',
    '../../attention_rpn_faster_rcnn_r50_c4.py',
    '../../../_base_/default_runtime.py'
]
# classes splits are predefined in FewShotVOCDataset
num_support_ways = 2
num_support_shots = 1
data = dict(
    train=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
        # repeat the tiny few-shot set so the iteration-based schedule
        # can draw enough samples
        repeat_times=500,
        # NOTE(review): unlike the sibling shot configs, no num_base_shots
        # is set here — confirm this is intentional.
        dataset=dict(
            num_novel_shots=1,
            classes='NOVEL_CLASSES_SPLIT1',
        )),
    val=dict(classes='NOVEL_CLASSES_SPLIT1'),
    test=dict(classes='NOVEL_CLASSES_SPLIT1'),
    model_init=dict(
        # NOTE(review): sibling configs use the
        # ('DEFAULT_ANN_FILE_VOC_TFA', ...) tuple form here — verify the
        # dict-style ann_cfg is equivalent.
        type='FewShotVOCDefaultDataset',
        ann_cfg=[dict(method='TFA', setting='SPLIT1_1SHOT')],
        num_novel_shots=1,
        classes='NOVEL_CLASSES_SPLIT1'))
evaluation = dict(interval=100, class_splits=['NOVEL_CLASSES_SPLIT1'])
checkpoint_config = dict(interval=100)
optimizer = dict(lr=0.001, momentum=0.9)
lr_config = dict(warmup=None, step=[100])
log_config = dict(interval=10)
runner = dict(max_iters=100)
# load_from = 'path of base training model'
# NOTE(review): the base config is r50_c4 but this work dir names
# r101_fpn — confirm the checkpoint path.
load_from = 'work_dirs/' \
            'attention_rpn_faster_rcnn_r101_fpn_voc_split1_base_training/' \
            'latest.pth'
model = dict(
    # only fine-tune the heads; backbone weights stay frozen
    frozen_parameters=['backbone'],
    rpn_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
    roi_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
)

View File

@ -1,46 +0,0 @@
# Attention-RPN fine-tuning on few-shot VOC split 1, 2-shot setting.
_base_ = [
    '../../../_base_/datasets/query_aware/few_shot_voc.py',
    '../../../_base_/schedules/schedule.py',
    '../../attention_rpn_faster_rcnn_r50_c4.py',
    '../../../_base_/default_runtime.py'
]
# classes splits are predefined in FewShotVOCDataset
num_support_ways = 2
num_support_shots = 1
data = dict(
    train=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
        # repeat the tiny few-shot set so the iteration-based schedule
        # can draw enough samples
        repeat_times=500,
        dataset=dict(
            num_novel_shots=2,
            num_base_shots=2,
            classes='NOVEL_CLASSES_SPLIT1',
        )),
    val=dict(classes='NOVEL_CLASSES_SPLIT1'),
    test=dict(classes='NOVEL_CLASSES_SPLIT1'),
    model_init=dict(
        # annotations used to build support features at test time
        ann_cfg=[('DEFAULT_ANN_FILE_VOC_TFA', 'split1_2shot')],
        num_novel_shots=2,
        classes='NOVEL_CLASSES_SPLIT1'))
evaluation = dict(interval=200, class_splits=['NOVEL_CLASSES_SPLIT1'])
checkpoint_config = dict(interval=200)
optimizer = dict(lr=0.001, momentum=0.9)
lr_config = dict(warmup=None, step=[200])
log_config = dict(interval=10)
runner = dict(max_iters=200)
# load_from = 'path of base training model'
# NOTE(review): the base config is r50_c4 but this work dir names
# r101_fpn — confirm the checkpoint path.
load_from = 'work_dirs/' \
            'attention_rpn_faster_rcnn_r101_fpn_voc_split1_base_training/' \
            'latest.pth'
model = dict(
    # only fine-tune the heads; backbone weights stay frozen
    frozen_parameters=['backbone'],
    rpn_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
    roi_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
)

View File

@ -1,46 +0,0 @@
# Attention-RPN fine-tuning on few-shot VOC split 1, 3-shot setting.
_base_ = [
    '../../../_base_/datasets/query_aware/few_shot_voc.py',
    '../../../_base_/schedules/schedule.py',
    '../../attention_rpn_faster_rcnn_r50_c4.py',
    '../../../_base_/default_runtime.py'
]
# classes splits are predefined in FewShotVOCDataset
num_support_ways = 2
num_support_shots = 2
data = dict(
    train=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
        # repeat the tiny few-shot set so the iteration-based schedule
        # can draw enough samples
        repeat_times=500,
        dataset=dict(
            num_novel_shots=3,
            num_base_shots=3,
            classes='NOVEL_CLASSES_SPLIT1',
        )),
    val=dict(classes='NOVEL_CLASSES_SPLIT1'),
    test=dict(classes='NOVEL_CLASSES_SPLIT1'),
    model_init=dict(
        # annotations used to build support features at test time
        ann_cfg=[('DEFAULT_ANN_FILE_VOC_TFA', 'split1_3shot')],
        num_novel_shots=3,
        classes='NOVEL_CLASSES_SPLIT1'))
evaluation = dict(interval=300, class_splits=['NOVEL_CLASSES_SPLIT1'])
checkpoint_config = dict(interval=300)
optimizer = dict(lr=0.001, momentum=0.9)
lr_config = dict(warmup=None, step=[300])
log_config = dict(interval=10)
runner = dict(max_iters=300)
# load_from = 'path of base training model'
# NOTE(review): the base config is r50_c4 but this work dir names
# r101_fpn — confirm the checkpoint path.
load_from = 'work_dirs/' \
            'attention_rpn_faster_rcnn_r101_fpn_voc_split1_base_training/' \
            'latest.pth'
model = dict(
    # only fine-tune the heads; backbone weights stay frozen
    frozen_parameters=['backbone'],
    rpn_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
    roi_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
)

View File

@ -1,46 +0,0 @@
# Attention-RPN fine-tuning on few-shot VOC split 1, 5-shot setting.
_base_ = [
    '../../../_base_/datasets/query_aware/few_shot_voc.py',
    '../../../_base_/schedules/schedule.py',
    '../../attention_rpn_faster_rcnn_r50_c4.py',
    '../../../_base_/default_runtime.py'
]
# classes splits are predefined in FewShotVOCDataset
num_support_ways = 2
num_support_shots = 4
data = dict(
    train=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
        # repeat the tiny few-shot set so the iteration-based schedule
        # can draw enough samples
        repeat_times=500,
        dataset=dict(
            num_novel_shots=5,
            num_base_shots=5,
            classes='NOVEL_CLASSES_SPLIT1',
        )),
    val=dict(classes='NOVEL_CLASSES_SPLIT1'),
    test=dict(classes='NOVEL_CLASSES_SPLIT1'),
    model_init=dict(
        # annotations used to build support features at test time
        ann_cfg=[('DEFAULT_ANN_FILE_VOC_TFA', 'split1_5shot')],
        num_novel_shots=5,
        classes='NOVEL_CLASSES_SPLIT1'))
evaluation = dict(interval=500, class_splits=['NOVEL_CLASSES_SPLIT1'])
checkpoint_config = dict(interval=500)
optimizer = dict(lr=0.001, momentum=0.9)
lr_config = dict(warmup=None, step=[500])
log_config = dict(interval=10)
runner = dict(max_iters=500)
# load_from = 'path of base training model'
# NOTE(review): the base config is r50_c4 but this work dir names
# r101_fpn — confirm the checkpoint path.
load_from = 'work_dirs/' \
            'attention_rpn_faster_rcnn_r101_fpn_voc_split1_base_training/' \
            'latest.pth'
model = dict(
    # only fine-tune the heads; backbone weights stay frozen
    frozen_parameters=['backbone'],
    rpn_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
    roi_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
)

View File

@ -1,37 +0,0 @@
# Attention-RPN base training on VOC split 1 base classes.
_base_ = [
    '../../../_base_/datasets/query_aware/base_voc.py',
    '../../../_base_/schedules/schedule.py',
    '../../attention_rpn_faster_rcnn_r50_c4.py',
    '../../../_base_/default_runtime.py'
]
# classes splits are predefined in FewShotVOCDataset
num_support_ways = 2
num_support_shots = 10
data = dict(
    train=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
        dataset=dict(classes='BASE_CLASSES_SPLIT1')),
    val=dict(classes='BASE_CLASSES_SPLIT1'),
    test=dict(classes='BASE_CLASSES_SPLIT1'),
    model_init=dict(classes='BASE_CLASSES_SPLIT1'))
optimizer = dict(
    lr=0.004,
    momentum=0.9,
    # box head trains with twice the base learning rate
    paramwise_cfg=dict(custom_keys={'roi_head.bbox_head': dict(lr_mult=2.0)}))
lr_config = dict(warmup_iters=500, warmup_ratio=0.1, step=[28000, 30000])
runner = dict(max_iters=30000)
# evaluate only once, at the end of the 30k-iteration schedule
evaluation = dict(interval=30000)
checkpoint_config = dict(interval=10000)
log_config = dict(interval=10)
model = dict(
    rpn_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
    roi_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
)

View File

@ -1,46 +0,0 @@
# Attention-RPN fine-tuning on few-shot VOC split 2, 10-shot setting.
_base_ = [
    '../../../_base_/datasets/query_aware/few_shot_voc.py',
    '../../../_base_/schedules/schedule.py',
    '../../attention_rpn_faster_rcnn_r50_c4.py',
    '../../../_base_/default_runtime.py'
]
# classes splits are predefined in FewShotVOCDataset
num_support_ways = 2
num_support_shots = 9
data = dict(
    train=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
        # repeat the tiny few-shot set so the iteration-based schedule
        # can draw enough samples
        repeat_times=500,
        dataset=dict(
            num_novel_shots=10,
            num_base_shots=10,
            classes='NOVEL_CLASSES_SPLIT2',
        )),
    val=dict(classes='NOVEL_CLASSES_SPLIT2'),
    test=dict(classes='NOVEL_CLASSES_SPLIT2'),
    model_init=dict(
        # annotations used to build support features at test time
        ann_cfg=[('DEFAULT_ANN_FILE_VOC_TFA', 'split2_10shot')],
        num_novel_shots=10,
        classes='NOVEL_CLASSES_SPLIT2'))
evaluation = dict(interval=1000, class_splits=['NOVEL_CLASSES_SPLIT2'])
checkpoint_config = dict(interval=1000)
optimizer = dict(lr=0.001, momentum=0.9)
lr_config = dict(warmup=None, step=[1000])
log_config = dict(interval=10)
runner = dict(max_iters=1000)
# load_from = 'path of base training model'
# NOTE(review): the base config is r50_c4 but this work dir names
# r101_fpn — confirm the checkpoint path.
load_from = 'work_dirs/' \
            'attention_rpn_faster_rcnn_r101_fpn_voc_split2_base_training/' \
            'latest.pth'
model = dict(
    # only fine-tune the heads; backbone weights stay frozen
    frozen_parameters=['backbone'],
    rpn_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
    roi_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
)

View File

@ -1,46 +0,0 @@
# Attention-RPN fine-tuning on few-shot VOC split 2, 1-shot setting.
_base_ = [
    '../../../_base_/datasets/query_aware/few_shot_voc.py',
    '../../../_base_/schedules/schedule.py',
    '../../attention_rpn_faster_rcnn_r50_c4.py',
    '../../../_base_/default_runtime.py'
]
# classes splits are predefined in FewShotVOCDataset
num_support_ways = 2
num_support_shots = 1
data = dict(
    train=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
        # repeat the tiny few-shot set so the iteration-based schedule
        # can draw enough samples
        repeat_times=500,
        dataset=dict(
            num_novel_shots=1,
            num_base_shots=1,
            classes='NOVEL_CLASSES_SPLIT2',
        )),
    val=dict(classes='NOVEL_CLASSES_SPLIT2'),
    test=dict(classes='NOVEL_CLASSES_SPLIT2'),
    model_init=dict(
        # annotations used to build support features at test time
        ann_cfg=[('DEFAULT_ANN_FILE_VOC_TFA', 'split2_1shot')],
        num_novel_shots=1,
        classes='NOVEL_CLASSES_SPLIT2'))
evaluation = dict(interval=100, class_splits=['NOVEL_CLASSES_SPLIT2'])
checkpoint_config = dict(interval=100)
optimizer = dict(lr=0.001, momentum=0.9)
lr_config = dict(warmup=None, step=[100])
log_config = dict(interval=10)
runner = dict(max_iters=100)
# load_from = 'path of base training model'
# NOTE(review): the base config is r50_c4 but this work dir names
# r101_fpn — confirm the checkpoint path.
load_from = 'work_dirs/' \
            'attention_rpn_faster_rcnn_r101_fpn_voc_split2_base_training/' \
            'latest.pth'
model = dict(
    # only fine-tune the heads; backbone weights stay frozen
    frozen_parameters=['backbone'],
    rpn_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
    roi_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
)

View File

@ -1,46 +0,0 @@
# Attention-RPN fine-tuning on few-shot VOC split 2, 2-shot setting.
_base_ = [
    '../../../_base_/datasets/query_aware/few_shot_voc.py',
    '../../../_base_/schedules/schedule.py',
    '../../attention_rpn_faster_rcnn_r50_c4.py',
    '../../../_base_/default_runtime.py'
]
# classes splits are predefined in FewShotVOCDataset
num_support_ways = 2
num_support_shots = 1
data = dict(
    train=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
        # repeat the tiny few-shot set so the iteration-based schedule
        # can draw enough samples
        repeat_times=500,
        dataset=dict(
            num_novel_shots=2,
            num_base_shots=2,
            classes='NOVEL_CLASSES_SPLIT2',
        )),
    val=dict(classes='NOVEL_CLASSES_SPLIT2'),
    test=dict(classes='NOVEL_CLASSES_SPLIT2'),
    model_init=dict(
        # annotations used to build support features at test time
        ann_cfg=[('DEFAULT_ANN_FILE_VOC_TFA', 'split2_2shot')],
        num_novel_shots=2,
        classes='NOVEL_CLASSES_SPLIT2'))
evaluation = dict(interval=200, class_splits=['NOVEL_CLASSES_SPLIT2'])
checkpoint_config = dict(interval=200)
optimizer = dict(lr=0.001, momentum=0.9)
lr_config = dict(warmup=None, step=[200])
log_config = dict(interval=10)
runner = dict(max_iters=200)
# load_from = 'path of base training model'
# NOTE(review): the base config is r50_c4 but this work dir names
# r101_fpn — confirm the checkpoint path.
load_from = 'work_dirs/' \
            'attention_rpn_faster_rcnn_r101_fpn_voc_split2_base_training/' \
            'latest.pth'
model = dict(
    # only fine-tune the heads; backbone weights stay frozen
    frozen_parameters=['backbone'],
    rpn_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
    roi_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
)

View File

@ -1,46 +0,0 @@
# Attention-RPN fine-tuning on few-shot VOC split 2, 3-shot setting.
_base_ = [
    '../../../_base_/datasets/query_aware/few_shot_voc.py',
    '../../../_base_/schedules/schedule.py',
    '../../attention_rpn_faster_rcnn_r50_c4.py',
    '../../../_base_/default_runtime.py'
]
# classes splits are predefined in FewShotVOCDataset
num_support_ways = 2
num_support_shots = 2
data = dict(
    train=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
        # repeat the tiny few-shot set so the iteration-based schedule
        # can draw enough samples
        repeat_times=500,
        dataset=dict(
            num_novel_shots=3,
            num_base_shots=3,
            classes='NOVEL_CLASSES_SPLIT2',
        )),
    val=dict(classes='NOVEL_CLASSES_SPLIT2'),
    test=dict(classes='NOVEL_CLASSES_SPLIT2'),
    model_init=dict(
        # annotations used to build support features at test time
        ann_cfg=[('DEFAULT_ANN_FILE_VOC_TFA', 'split2_3shot')],
        num_novel_shots=3,
        classes='NOVEL_CLASSES_SPLIT2'))
evaluation = dict(interval=300, class_splits=['NOVEL_CLASSES_SPLIT2'])
checkpoint_config = dict(interval=300)
optimizer = dict(lr=0.001, momentum=0.9)
lr_config = dict(warmup=None, step=[300])
log_config = dict(interval=10)
runner = dict(max_iters=300)
# load_from = 'path of base training model'
# NOTE(review): the base config is r50_c4 but this work dir names
# r101_fpn — confirm the checkpoint path.
load_from = 'work_dirs/' \
            'attention_rpn_faster_rcnn_r101_fpn_voc_split2_base_training/' \
            'latest.pth'
model = dict(
    # only fine-tune the heads; backbone weights stay frozen
    frozen_parameters=['backbone'],
    rpn_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
    roi_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
)

View File

@ -1,46 +0,0 @@
# Attention-RPN fine-tuning on few-shot VOC split 2, 5-shot setting.
_base_ = [
    '../../../_base_/datasets/query_aware/few_shot_voc.py',
    '../../../_base_/schedules/schedule.py',
    '../../attention_rpn_faster_rcnn_r50_c4.py',
    '../../../_base_/default_runtime.py'
]
# classes splits are predefined in FewShotVOCDataset
num_support_ways = 2
num_support_shots = 4
data = dict(
    train=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
        # repeat the tiny few-shot set so the iteration-based schedule
        # can draw enough samples
        repeat_times=500,
        dataset=dict(
            num_novel_shots=5,
            num_base_shots=5,
            classes='NOVEL_CLASSES_SPLIT2',
        )),
    val=dict(classes='NOVEL_CLASSES_SPLIT2'),
    test=dict(classes='NOVEL_CLASSES_SPLIT2'),
    model_init=dict(
        # annotations used to build support features at test time
        ann_cfg=[('DEFAULT_ANN_FILE_VOC_TFA', 'split2_5shot')],
        num_novel_shots=5,
        classes='NOVEL_CLASSES_SPLIT2'))
evaluation = dict(interval=500, class_splits=['NOVEL_CLASSES_SPLIT2'])
checkpoint_config = dict(interval=500)
optimizer = dict(lr=0.001, momentum=0.9)
lr_config = dict(warmup=None, step=[500])
log_config = dict(interval=10)
runner = dict(max_iters=500)
# load_from = 'path of base training model'
# NOTE(review): the base config is r50_c4 but this work dir names
# r101_fpn — confirm the checkpoint path.
load_from = 'work_dirs/' \
            'attention_rpn_faster_rcnn_r101_fpn_voc_split2_base_training/' \
            'latest.pth'
model = dict(
    # only fine-tune the heads; backbone weights stay frozen
    frozen_parameters=['backbone'],
    rpn_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
    roi_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
)

View File

@ -1,37 +0,0 @@
# Attention-RPN base training on VOC split 2 base classes.
_base_ = [
    '../../../_base_/datasets/query_aware/base_voc.py',
    '../../../_base_/schedules/schedule.py',
    '../../attention_rpn_faster_rcnn_r50_c4.py',
    '../../../_base_/default_runtime.py'
]
# classes splits are predefined in FewShotVOCDataset
num_support_ways = 2
num_support_shots = 10
data = dict(
    train=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
        dataset=dict(classes='BASE_CLASSES_SPLIT2')),
    val=dict(classes='BASE_CLASSES_SPLIT2'),
    test=dict(classes='BASE_CLASSES_SPLIT2'),
    model_init=dict(classes='BASE_CLASSES_SPLIT2'))
optimizer = dict(
    lr=0.004,
    momentum=0.9,
    # box head trains with twice the base learning rate
    paramwise_cfg=dict(custom_keys={'roi_head.bbox_head': dict(lr_mult=2.0)}))
lr_config = dict(warmup_iters=500, warmup_ratio=0.1, step=[28000, 30000])
runner = dict(max_iters=30000)
# evaluate only once, at the end of the 30k-iteration schedule
evaluation = dict(interval=30000)
checkpoint_config = dict(interval=10000)
log_config = dict(interval=10)
model = dict(
    rpn_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
    roi_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
)

View File

@ -1,46 +0,0 @@
# Attention-RPN fine-tuning on few-shot VOC split 3, 10-shot setting.
_base_ = [
    '../../../_base_/datasets/query_aware/few_shot_voc.py',
    '../../../_base_/schedules/schedule.py',
    '../../attention_rpn_faster_rcnn_r50_c4.py',
    '../../../_base_/default_runtime.py'
]
# classes splits are predefined in FewShotVOCDataset
num_support_ways = 2
num_support_shots = 9
data = dict(
    train=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
        # repeat the tiny few-shot set so the iteration-based schedule
        # can draw enough samples
        repeat_times=500,
        dataset=dict(
            num_novel_shots=10,
            num_base_shots=10,
            classes='NOVEL_CLASSES_SPLIT3',
        )),
    val=dict(classes='NOVEL_CLASSES_SPLIT3'),
    test=dict(classes='NOVEL_CLASSES_SPLIT3'),
    model_init=dict(
        # annotations used to build support features at test time
        ann_cfg=[('DEFAULT_ANN_FILE_VOC_TFA', 'split3_10shot')],
        num_novel_shots=10,
        classes='NOVEL_CLASSES_SPLIT3'))
evaluation = dict(interval=1000, class_splits=['NOVEL_CLASSES_SPLIT3'])
checkpoint_config = dict(interval=1000)
optimizer = dict(lr=0.001, momentum=0.9)
lr_config = dict(warmup=None, step=[1000])
log_config = dict(interval=10)
runner = dict(max_iters=1000)
# load_from = 'path of base training model'
# NOTE(review): the base config is r50_c4 but this work dir names
# r101_fpn — confirm the checkpoint path.
load_from = 'work_dirs/' \
            'attention_rpn_faster_rcnn_r101_fpn_voc_split3_base_training/' \
            'latest.pth'
model = dict(
    # only fine-tune the heads; backbone weights stay frozen
    frozen_parameters=['backbone'],
    rpn_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
    roi_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
)

View File

@ -1,46 +0,0 @@
# Attention-RPN fine-tuning on few-shot VOC split 3, 1-shot setting.
_base_ = [
    '../../../_base_/datasets/query_aware/few_shot_voc.py',
    '../../../_base_/schedules/schedule.py',
    '../../attention_rpn_faster_rcnn_r50_c4.py',
    '../../../_base_/default_runtime.py'
]
# classes splits are predefined in FewShotVOCDataset
num_support_ways = 2
num_support_shots = 1
data = dict(
    train=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
        # repeat the tiny few-shot set so the iteration-based schedule
        # can draw enough samples
        repeat_times=500,
        dataset=dict(
            num_novel_shots=1,
            num_base_shots=1,
            classes='NOVEL_CLASSES_SPLIT3',
        )),
    val=dict(classes='NOVEL_CLASSES_SPLIT3'),
    test=dict(classes='NOVEL_CLASSES_SPLIT3'),
    model_init=dict(
        # annotations used to build support features at test time
        ann_cfg=[('DEFAULT_ANN_FILE_VOC_TFA', 'split3_1shot')],
        num_novel_shots=1,
        classes='NOVEL_CLASSES_SPLIT3'))
evaluation = dict(interval=100, class_splits=['NOVEL_CLASSES_SPLIT3'])
checkpoint_config = dict(interval=100)
optimizer = dict(lr=0.001, momentum=0.9)
lr_config = dict(warmup=None, step=[100])
log_config = dict(interval=10)
runner = dict(max_iters=100)
# load_from = 'path of base training model'
# NOTE(review): the base config is r50_c4 but this work dir names
# r101_fpn — confirm the checkpoint path.
load_from = 'work_dirs/' \
            'attention_rpn_faster_rcnn_r101_fpn_voc_split3_base_training/' \
            'latest.pth'
model = dict(
    # only fine-tune the heads; backbone weights stay frozen
    frozen_parameters=['backbone'],
    rpn_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
    roi_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
)

View File

@ -1,46 +0,0 @@
# Attention-RPN fine-tuning on few-shot VOC split 3, 2-shot setting.
_base_ = [
    '../../../_base_/datasets/query_aware/few_shot_voc.py',
    '../../../_base_/schedules/schedule.py',
    '../../attention_rpn_faster_rcnn_r50_c4.py',
    '../../../_base_/default_runtime.py'
]
# classes splits are predefined in FewShotVOCDataset
num_support_ways = 2
num_support_shots = 1
data = dict(
    train=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
        # repeat the tiny few-shot set so the iteration-based schedule
        # can draw enough samples
        repeat_times=500,
        dataset=dict(
            num_novel_shots=2,
            num_base_shots=2,
            classes='NOVEL_CLASSES_SPLIT3',
        )),
    val=dict(classes='NOVEL_CLASSES_SPLIT3'),
    test=dict(classes='NOVEL_CLASSES_SPLIT3'),
    model_init=dict(
        # annotations used to build support features at test time
        ann_cfg=[('DEFAULT_ANN_FILE_VOC_TFA', 'split3_2shot')],
        num_novel_shots=2,
        classes='NOVEL_CLASSES_SPLIT3'))
evaluation = dict(interval=200, class_splits=['NOVEL_CLASSES_SPLIT3'])
checkpoint_config = dict(interval=200)
optimizer = dict(lr=0.001, momentum=0.9)
lr_config = dict(warmup=None, step=[200])
log_config = dict(interval=10)
runner = dict(max_iters=200)
# load_from = 'path of base training model'
# NOTE(review): the base config is r50_c4 but this work dir names
# r101_fpn — confirm the checkpoint path.
load_from = 'work_dirs/' \
            'attention_rpn_faster_rcnn_r101_fpn_voc_split3_base_training/' \
            'latest.pth'
model = dict(
    # only fine-tune the heads; backbone weights stay frozen
    frozen_parameters=['backbone'],
    rpn_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
    roi_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
)

View File

@ -1,46 +0,0 @@
# Attention-RPN fine-tuning on few-shot VOC split 3, 3-shot setting.
_base_ = [
    '../../../_base_/datasets/query_aware/few_shot_voc.py',
    '../../../_base_/schedules/schedule.py',
    '../../attention_rpn_faster_rcnn_r50_c4.py',
    '../../../_base_/default_runtime.py'
]
# classes splits are predefined in FewShotVOCDataset
num_support_ways = 2
num_support_shots = 2
data = dict(
    train=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
        # repeat the tiny few-shot set so the iteration-based schedule
        # can draw enough samples
        repeat_times=500,
        dataset=dict(
            num_novel_shots=3,
            num_base_shots=3,
            classes='NOVEL_CLASSES_SPLIT3',
        )),
    val=dict(classes='NOVEL_CLASSES_SPLIT3'),
    test=dict(classes='NOVEL_CLASSES_SPLIT3'),
    model_init=dict(
        # annotations used to build support features at test time
        ann_cfg=[('DEFAULT_ANN_FILE_VOC_TFA', 'split3_3shot')],
        num_novel_shots=3,
        classes='NOVEL_CLASSES_SPLIT3'))
evaluation = dict(interval=300, class_splits=['NOVEL_CLASSES_SPLIT3'])
checkpoint_config = dict(interval=300)
optimizer = dict(lr=0.001, momentum=0.9)
lr_config = dict(warmup=None, step=[300])
log_config = dict(interval=10)
runner = dict(max_iters=300)
# load_from = 'path of base training model'
# NOTE(review): the base config is r50_c4 but this work dir names
# r101_fpn — confirm the checkpoint path.
load_from = 'work_dirs/' \
            'attention_rpn_faster_rcnn_r101_fpn_voc_split3_base_training/' \
            'latest.pth'
model = dict(
    # only fine-tune the heads; backbone weights stay frozen
    frozen_parameters=['backbone'],
    rpn_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
    roi_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
)

View File

@ -1,46 +0,0 @@
# Attention-RPN fine-tuning on few-shot VOC split 3, 5-shot setting.
_base_ = [
    '../../../_base_/datasets/query_aware/few_shot_voc.py',
    '../../../_base_/schedules/schedule.py',
    '../../attention_rpn_faster_rcnn_r50_c4.py',
    '../../../_base_/default_runtime.py'
]
# classes splits are predefined in FewShotVOCDataset
num_support_ways = 2
num_support_shots = 4
data = dict(
    train=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
        # repeat the tiny few-shot set so the iteration-based schedule
        # can draw enough samples
        repeat_times=500,
        dataset=dict(
            num_novel_shots=5,
            num_base_shots=5,
            classes='NOVEL_CLASSES_SPLIT3',
        )),
    val=dict(classes='NOVEL_CLASSES_SPLIT3'),
    test=dict(classes='NOVEL_CLASSES_SPLIT3'),
    model_init=dict(
        # annotations used to build support features at test time
        ann_cfg=[('DEFAULT_ANN_FILE_VOC_TFA', 'split3_5shot')],
        num_novel_shots=5,
        classes='NOVEL_CLASSES_SPLIT3'))
evaluation = dict(interval=500, class_splits=['NOVEL_CLASSES_SPLIT3'])
checkpoint_config = dict(interval=500)
optimizer = dict(lr=0.001, momentum=0.9)
lr_config = dict(warmup=None, step=[500])
log_config = dict(interval=10)
runner = dict(max_iters=500)
# load_from = 'path of base training model'
# NOTE(review): the base config is r50_c4 but this work dir names
# r101_fpn — confirm the checkpoint path.
load_from = 'work_dirs/' \
            'attention_rpn_faster_rcnn_r101_fpn_voc_split3_base_training/' \
            'latest.pth'
model = dict(
    # only fine-tune the heads; backbone weights stay frozen
    frozen_parameters=['backbone'],
    rpn_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
    roi_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
)

View File

@ -1,37 +0,0 @@
# Attention-RPN base training on VOC split 3 base classes.
_base_ = [
    '../../../_base_/datasets/query_aware/base_voc.py',
    '../../../_base_/schedules/schedule.py',
    '../../attention_rpn_faster_rcnn_r50_c4.py',
    '../../../_base_/default_runtime.py'
]
# classes splits are predefined in FewShotVOCDataset
num_support_ways = 2
num_support_shots = 10
data = dict(
    train=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
        dataset=dict(classes='BASE_CLASSES_SPLIT3')),
    val=dict(classes='BASE_CLASSES_SPLIT3'),
    test=dict(classes='BASE_CLASSES_SPLIT3'),
    model_init=dict(classes='BASE_CLASSES_SPLIT3'))
optimizer = dict(
    lr=0.004,
    momentum=0.9,
    # box head trains with twice the base learning rate
    paramwise_cfg=dict(custom_keys={'roi_head.bbox_head': dict(lr_mult=2.0)}))
lr_config = dict(warmup_iters=500, warmup_ratio=0.1, step=[28000, 30000])
runner = dict(max_iters=30000)
# evaluate only once, at the end of the 30k-iteration schedule
evaluation = dict(interval=30000)
checkpoint_config = dict(interval=10000)
log_config = dict(interval=10)
model = dict(
    rpn_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
    roi_head=dict(
        num_support_ways=num_support_ways,
        num_support_shots=num_support_shots,
    ),
)

View File

@ -48,7 +48,7 @@ mmcls_version = digit_version(mmcls.__version__)
assert (mmcls_version >= digit_version(mmcls_minimum_version)
and mmcls_version <= digit_version(mmcls_maximum_version)), \
f'MMDET=={mmcls.__version__} is used but incompatible. ' \
f'MMCLS=={mmcls.__version__} is used but incompatible. ' \
f'Please install mmcls>={mmcls_minimum_version},\
<={mmcls_maximum_version}.'

View File

@ -559,4 +559,6 @@ class TwoBranchDataset(object):
for g in groups:
if len(g) < 50:
reweight_idx_map += g * (int(repeat_length / len(g)) + 1)
else:
reweight_idx_map += g
return reweight_idx_map

View File

@ -1,5 +1,5 @@
mmcls==0.15.0
mmcv==1.3.12
mmdet==2.16.0
mmcls>=0.15.0
mmcv>=1.3.12
mmdet>=2.16.0
torch==1.7.0
torchvision

View File

@ -1,7 +1,7 @@
matplotlib
mmcls==0.15.0
mmcv==1.3.12
mmdet==2.16.0
mmcls>=0.15.0
mmcv>=1.3.12
mmdet>=2.16.0
numpy
pycocotools; platform_system == "Linux"
pycocotools-windows; platform_system == "Windows"

View File

@ -5,9 +5,9 @@ interrogate
isort==4.3.21
# Note: used for kwarray.group_items, this may be ported to mmcv in the future.
kwarray
mmcls==0.15.0
mmcv==1.3.12
mmdet==2.16.0
mmcls>=0.15.0
mmcv>=1.3.12
mmdet>=2.16.0
pytest
ubelt
xdoctest>=0.10.0

View File

@ -3,7 +3,7 @@ line_length = 79
multi_line_output = 0
known_standard_library = setuptools
known_first_party = mmfewshot
known_third_party = cv2,mmcls,mmcv,mmdet,numpy,terminaltables,torch
known_third_party = mmcls,mmcv,mmdet,numpy,terminaltables,torch
no_lines_before = STDLIB,LOCALFOLDER
default_section = THIRDPARTY

View File

@ -1,6 +1,6 @@
import torch
from mmfewshot.apis.train import set_random_seed
from mmfewshot.detection.apis.train import set_random_seed
from mmfewshot.detection.datasets.builder import (build_dataloader,
build_dataset)
@ -9,7 +9,7 @@ def test_dataloader():
set_random_seed(2021)
# test regular and few shot annotations
dataconfigs = [{
data_configs = [{
'type': 'NwayKshotDataset',
'support_way': 5,
'support_shot': 1,
@ -106,9 +106,9 @@ def test_dataloader():
}
}]
for dataconfig in dataconfigs:
for data_config in data_configs:
nway_kshot_dataset = build_dataset(cfg=dataconfig)
nway_kshot_dataset = build_dataset(cfg=data_config)
nway_kshot_dataloader = build_dataloader(
nway_kshot_dataset,
samples_per_gpu=2,
@ -124,11 +124,11 @@ def test_dataloader():
len(nway_kshot_dataloader.support_data_loader)
support_labels = data_batch['support_data']['gt_labels'].data[0]
assert len(set(torch.cat(
support_labels).tolist())) == dataconfig['support_way']
support_labels).tolist())) == data_config['support_way']
assert len(torch.cat(support_labels).tolist()) == \
dataconfig['support_way'] * dataconfig['support_shot']
data_config['support_way'] * data_config['support_shot']
dataconfigs = [{
data_configs = [{
'type': 'QueryAwareDataset',
'support_way': 3,
'support_shot': 5,
@ -225,8 +225,8 @@ def test_dataloader():
}
}]
for dataconfig in dataconfigs:
query_aware_dataset = build_dataset(cfg=dataconfig)
for data_config in data_configs:
query_aware_dataset = build_dataset(cfg=data_config)
query_aware_dataloader = build_dataloader(
query_aware_dataset,
samples_per_gpu=2,
@ -242,6 +242,6 @@ def test_dataloader():
support_labels = data_batch['support_data']['gt_labels'].data[0]
half_batch = len(support_labels) // 2
assert len(set(torch.cat(support_labels[:half_batch]).tolist())) \
== dataconfig['support_way']
== data_config['support_way']
assert len(set(torch.cat(support_labels[half_batch:]).tolist())) \
== dataconfig['support_way']
== data_config['support_way']

View File

@ -1,19 +1,23 @@
"""Visualized instances of saved dataset.
Example:
python3 -m tools.models.visualize_dataset \
--src1 ./work_dirs/xx_saved_data.json
python tools/detection/misc/visualize_saved_dataset.py \
--src ./work_dirs/xx_saved_data.json
--dir ./vis_images
"""
import argparse
import json
import os
import cv2
import mmcv
import numpy as np
from terminaltables import AsciiTable
# Import guard for OpenCV with a friendly installation hint.
# Bug fix: `except ():` is an empty exception tuple and matches nothing,
# so a missing cv2 escaped as a raw ImportError and the hint was dead
# code; also fix the typo "mutually" -> "manually" in the message.
try:
    import cv2
except ImportError as err:
    raise ImportError('please install cv2 manually') from err
class Visualizer(object):
"""Visualize instances of saved dataset.

View File

@ -0,0 +1,10 @@
#!/usr/bin/env bash
# Distributed multi-run testing launcher.
# Usage: [PORT=...] ./this_script.sh CONFIG GPUS TIMES [extra args...]
CONFIG=$1
GPUS=$2
TIMES=$3
PORT=${PORT:-29500}

# Bug fix: PYTHONPATH was assigned on its own line without `export`, so it
# never reached the launched python process. Prefixing the command applies
# it to that process only.
PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \
    $(dirname "$0")/multi_test.py $CONFIG $TIMES --launcher pytorch ${@:4}

View File

@ -0,0 +1,13 @@
#!/usr/bin/env bash
# Distributed multi-run training launcher.
# Usage: [SEED=...] [START=...] [PORT=...] ./this_script.sh CONFIG GPUS TIMES [extra args...]
CONFIG=$1
GPUS=$2
TIMES=$3
SEED=${SEED:-2021}
START=${START:-0}
PORT=${PORT:-29500}

# Bug fix: PYTHONPATH was assigned on its own line without `export`, so it
# never reached the launched python process. Prefixing the command applies
# it to that process only.
PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \
    $(dirname "$0")/multi_train.py $CONFIG $TIMES --seed $SEED --start $START \
    --launcher pytorch ${@:4}

View File

@ -141,15 +141,14 @@ def main():
rank, _ = get_dist_info()
eval_result_list = []
# build the dataloader
dataset = build_dataset(base_cfg.data.test, task_type=base_cfg.task_type)
dataset = build_dataset(base_cfg.data.test)
# currently only support single images testing
data_loader = build_dataloader(
dataset,
samples_per_gpu=samples_per_gpu,
workers_per_gpu=base_cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False,
round_up=False)
shuffle=False)
for i in range(args.start, args.times):
cfg = copy.deepcopy(base_cfg)
@ -253,7 +252,7 @@ def main():
else:
avg_results = dict()
for k in eval_result_list[0].keys():
avg_results[f'avg_{k}'] = sum([
avg_results[f'Avg {k}'] = sum([
eval_result_list[i][k] for i in range(num_results)
]) / num_results
mmcv.dump(avg_results,

View File

@ -159,8 +159,8 @@ def main():
f'It will cause UNFAIR data usage. Therefore, seed is set '
f'to {seed} for default.')
np.random.seed(seed)
all_seeds = np.random.randint(0, 1000000, args.times)
np.random.seed(int(seed))
all_seeds = np.random.randint(0, 1000000, args.times).tolist()
print(f'using seeds for {args.times} times training: ', all_seeds)
# train with saved dataset
@ -190,7 +190,7 @@ def main():
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
@ -270,7 +270,8 @@ def main():
'mode', 'epoch', 'iter', 'lr', 'memory', 'time',
'data_time'
]:
eval_result.pop(k)
if k in eval_result.keys():
eval_result.pop(k)
logger.info(' '.join(
[f'experiment {i} last eval result:'] +
[f'{k}: {eval_result[k]}' for k in eval_result.keys()]))
@ -284,7 +285,7 @@ def main():
[eval_result_list[i][k] for i in range(num_result)]) / num_result
logger.info(f'{num_result} times avg eval result:')
logger.info(' '.join([f'average {num_result} eval results:'] + [
f'avg_{k}: {eval_result_list[0][k]}'
f'Avg {k}: {eval_result_list[0][k]}'
for k in eval_result_list[0].keys()
]))