From ae96fa303ca5ebe44553bba5fa08e4b0897bf521 Mon Sep 17 00:00:00 2001 From: Linyiqi Date: Mon, 11 Oct 2021 19:02:26 +0800 Subject: [PATCH] Update lib version in requeriments and fix some bugs (#40) --- ...n_rpn_faster_rcnn_r50_c4_coco_30shot_ft.py | 54 ------------------- ...ster_rcnn_r101_fpn_voc_split1_10shot_ft.py | 46 ---------------- ...aster_rcnn_r101_fpn_voc_split1_1shot_ft.py | 46 ---------------- ...aster_rcnn_r101_fpn_voc_split1_2shot_ft.py | 46 ---------------- ...aster_rcnn_r101_fpn_voc_split1_3shot_ft.py | 46 ---------------- ...aster_rcnn_r101_fpn_voc_split1_5shot_ft.py | 46 ---------------- ..._rcnn_r101_fpn_voc_split1_base_training.py | 37 ------------- ...ster_rcnn_r101_fpn_voc_split2_10shot_ft.py | 46 ---------------- ...aster_rcnn_r101_fpn_voc_split2_1shot_ft.py | 46 ---------------- ...aster_rcnn_r101_fpn_voc_split2_2shot_ft.py | 46 ---------------- ...aster_rcnn_r101_fpn_voc_split2_3shot_ft.py | 46 ---------------- ...aster_rcnn_r101_fpn_voc_split2_5shot_ft.py | 46 ---------------- ..._rcnn_r101_fpn_voc_split2_base_training.py | 37 ------------- ...ster_rcnn_r101_fpn_voc_split3_10shot_ft.py | 46 ---------------- ...aster_rcnn_r101_fpn_voc_split3_1shot_ft.py | 46 ---------------- ...aster_rcnn_r101_fpn_voc_split3_2shot_ft.py | 46 ---------------- ...aster_rcnn_r101_fpn_voc_split3_3shot_ft.py | 46 ---------------- ...aster_rcnn_r101_fpn_voc_split3_5shot_ft.py | 46 ---------------- ..._rcnn_r101_fpn_voc_split3_base_training.py | 37 ------------- mmfewshot/__init__.py | 2 +- .../detection/datasets/dataset_wrappers.py | 2 + requirements/readthedocs.txt | 6 +-- requirements/runtime.txt | 6 +-- requirements/tests.txt | 6 +-- setup.cfg | 2 +- .../test_dataloader/test_dataloader.py | 22 ++++---- .../detection/misc/visualize_saved_dataset.py | 10 ++-- tools/detection/multi_dist_test.sh | 10 ++++ tools/detection/multi_dist_train.sh | 13 +++++ tools/detection/multi_test.py | 7 ++- tools/detection/multi_train.py | 11 ++-- 31 files changed, 63 insertions(+), 889 deletions(-) delete mode 100644 configs/detection/attention_rpn/coco/attention_rpn_faster_rcnn_r50_c4_coco_30shot_ft.py delete mode 100644 configs/detection/attention_rpn/voc/split1/attention_rpn_faster_rcnn_r101_fpn_voc_split1_10shot_ft.py delete mode 100644 configs/detection/attention_rpn/voc/split1/attention_rpn_faster_rcnn_r101_fpn_voc_split1_1shot_ft.py delete mode 100644 configs/detection/attention_rpn/voc/split1/attention_rpn_faster_rcnn_r101_fpn_voc_split1_2shot_ft.py delete mode 100644 configs/detection/attention_rpn/voc/split1/attention_rpn_faster_rcnn_r101_fpn_voc_split1_3shot_ft.py delete mode 100644 configs/detection/attention_rpn/voc/split1/attention_rpn_faster_rcnn_r101_fpn_voc_split1_5shot_ft.py delete mode 100644 configs/detection/attention_rpn/voc/split1/attention_rpn_faster_rcnn_r101_fpn_voc_split1_base_training.py delete mode 100644 configs/detection/attention_rpn/voc/split2/attention_rpn_faster_rcnn_r101_fpn_voc_split2_10shot_ft.py delete mode 100644 configs/detection/attention_rpn/voc/split2/attention_rpn_faster_rcnn_r101_fpn_voc_split2_1shot_ft.py delete mode 100644 configs/detection/attention_rpn/voc/split2/attention_rpn_faster_rcnn_r101_fpn_voc_split2_2shot_ft.py delete mode 100644 configs/detection/attention_rpn/voc/split2/attention_rpn_faster_rcnn_r101_fpn_voc_split2_3shot_ft.py delete mode 100644 configs/detection/attention_rpn/voc/split2/attention_rpn_faster_rcnn_r101_fpn_voc_split2_5shot_ft.py delete mode 100644 
configs/detection/attention_rpn/voc/split2/attention_rpn_faster_rcnn_r101_fpn_voc_split2_base_training.py delete mode 100644 configs/detection/attention_rpn/voc/split3/attention_rpn_faster_rcnn_r101_fpn_voc_split3_10shot_ft.py delete mode 100644 configs/detection/attention_rpn/voc/split3/attention_rpn_faster_rcnn_r101_fpn_voc_split3_1shot_ft.py delete mode 100644 configs/detection/attention_rpn/voc/split3/attention_rpn_faster_rcnn_r101_fpn_voc_split3_2shot_ft.py delete mode 100644 configs/detection/attention_rpn/voc/split3/attention_rpn_faster_rcnn_r101_fpn_voc_split3_3shot_ft.py delete mode 100644 configs/detection/attention_rpn/voc/split3/attention_rpn_faster_rcnn_r101_fpn_voc_split3_5shot_ft.py delete mode 100644 configs/detection/attention_rpn/voc/split3/attention_rpn_faster_rcnn_r101_fpn_voc_split3_base_training.py create mode 100644 tools/detection/multi_dist_test.sh create mode 100644 tools/detection/multi_dist_train.sh diff --git a/configs/detection/attention_rpn/coco/attention_rpn_faster_rcnn_r50_c4_coco_30shot_ft.py b/configs/detection/attention_rpn/coco/attention_rpn_faster_rcnn_r50_c4_coco_30shot_ft.py deleted file mode 100644 index d08132c..0000000 --- a/configs/detection/attention_rpn/coco/attention_rpn_faster_rcnn_r50_c4_coco_30shot_ft.py +++ /dev/null @@ -1,54 +0,0 @@ -_base_ = [ - '../../_base_/datasets/query_aware/few_shot_coco.py', - '../../_base_/schedules/schedule.py', - '../attention_rpn_faster_rcnn_r50_c4.py', '../../_base_/default_runtime.py' -] -# classes splits are predefined in FewShotVOCDataset -# FewShotCocoDefaultDataset predefine ann_cfg for model reproducibility -num_support_ways = 2 -num_support_shots = 10 -data = dict( - train=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - repeat_times=50, - dataset=dict( - type='FewShotCocoDefaultDataset', - ann_cfg=[dict(method='Attention_RPN', setting='30SHOT')], - num_novel_shots=30, - classes='NOVEL_CLASSES', - instance_wise=False)), - val=dict(classes='NOVEL_CLASSES'), - test=dict(classes='NOVEL_CLASSES'), - model_init=dict( - ann_cfg=[dict(method='Attention_RPN', setting='30SHOT')], - num_novel_shots=30, - classes='NOVEL_CLASSES', - instance_wise=True)) -evaluation = dict(interval=3000) -checkpoint_config = dict(interval=3000) -optimizer = dict( - lr=0.001, - momentum=0.9, - paramwise_cfg=dict(custom_keys={'roi_head.bbox_head': dict(lr_mult=2.0)})) -lr_config = dict( - warmup_iters=200, warmup_ratio=0.1, step=[ - 2000, - 3000, - ]) -log_config = dict(interval=10) -runner = dict(max_iters=3000) -# load_from = 'path of base training model' -load_from = 'work_dirs/arpn_faster_rcnn_r50_c4_coco_base_training/latest.pth' - -model = dict( - frozen_parameters=['backbone'], - rpn_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), - roi_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), -) diff --git a/configs/detection/attention_rpn/voc/split1/attention_rpn_faster_rcnn_r101_fpn_voc_split1_10shot_ft.py b/configs/detection/attention_rpn/voc/split1/attention_rpn_faster_rcnn_r101_fpn_voc_split1_10shot_ft.py deleted file mode 100644 index eb89ef4..0000000 --- a/configs/detection/attention_rpn/voc/split1/attention_rpn_faster_rcnn_r101_fpn_voc_split1_10shot_ft.py +++ /dev/null @@ -1,46 +0,0 @@ -_base_ = [ - '../../../_base_/datasets/query_aware/few_shot_voc.py', - '../../../_base_/schedules/schedule.py', - '../../attention_rpn_faster_rcnn_r50_c4.py', - '../../../_base_/default_runtime.py' -] -# classes splits are 
predefined in FewShotVOCDataset -num_support_ways = 2 -num_support_shots = 9 -data = dict( - train=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - repeat_times=500, - dataset=dict( - num_novel_shots=10, - num_base_shots=10, - classes='NOVEL_CLASSES_SPLIT1', - )), - val=dict(classes='NOVEL_CLASSES_SPLIT1'), - test=dict(classes='NOVEL_CLASSES_SPLIT1'), - model_init=dict( - ann_cfg=[('DEFAULT_ANN_FILE_VOC_TFA', 'split1_10shot')], - num_novel_shots=10, - classes='NOVEL_CLASSES_SPLIT1')) -evaluation = dict(interval=1000, class_splits=['NOVEL_CLASSES_SPLIT1']) -checkpoint_config = dict(interval=1000) -optimizer = dict(lr=0.001, momentum=0.9) -lr_config = dict(warmup=None, step=[1000]) -log_config = dict(interval=10) -runner = dict(max_iters=1000) -# load_from = 'path of base training model' -load_from = 'work_dirs/' \ - 'attention_rpn_faster_rcnn_r101_fpn_voc_split1_base_training/' \ - 'latest.pth' -model = dict( - frozen_parameters=['backbone'], - rpn_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), - roi_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), -) diff --git a/configs/detection/attention_rpn/voc/split1/attention_rpn_faster_rcnn_r101_fpn_voc_split1_1shot_ft.py b/configs/detection/attention_rpn/voc/split1/attention_rpn_faster_rcnn_r101_fpn_voc_split1_1shot_ft.py deleted file mode 100644 index ae40d6f..0000000 --- a/configs/detection/attention_rpn/voc/split1/attention_rpn_faster_rcnn_r101_fpn_voc_split1_1shot_ft.py +++ /dev/null @@ -1,46 +0,0 @@ -_base_ = [ - '../../../_base_/datasets/query_aware/few_shot_voc.py', - '../../../_base_/schedules/schedule.py', - '../../attention_rpn_faster_rcnn_r50_c4.py', - '../../../_base_/default_runtime.py' -] -# classes splits are predefined in FewShotVOCDataset -num_support_ways = 2 -num_support_shots = 1 -data = dict( - train=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - repeat_times=500, - dataset=dict( - num_novel_shots=1, - classes='NOVEL_CLASSES_SPLIT1', - )), - val=dict(classes='NOVEL_CLASSES_SPLIT1'), - test=dict(classes='NOVEL_CLASSES_SPLIT1'), - model_init=dict( - type='FewShotVOCDefaultDataset', - ann_cfg=[dict(method='TFA', setting='SPLIT1_1SHOT')], - num_novel_shots=1, - classes='NOVEL_CLASSES_SPLIT1')) -evaluation = dict(interval=100, class_splits=['NOVEL_CLASSES_SPLIT1']) -checkpoint_config = dict(interval=100) -optimizer = dict(lr=0.001, momentum=0.9) -lr_config = dict(warmup=None, step=[100]) -log_config = dict(interval=10) -runner = dict(max_iters=100) -# load_from = 'path of base training model' -load_from = 'work_dirs/' \ - 'attention_rpn_faster_rcnn_r101_fpn_voc_split1_base_training/' \ - 'latest.pth' -model = dict( - frozen_parameters=['backbone'], - rpn_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), - roi_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), -) diff --git a/configs/detection/attention_rpn/voc/split1/attention_rpn_faster_rcnn_r101_fpn_voc_split1_2shot_ft.py b/configs/detection/attention_rpn/voc/split1/attention_rpn_faster_rcnn_r101_fpn_voc_split1_2shot_ft.py deleted file mode 100644 index 56adbe4..0000000 --- a/configs/detection/attention_rpn/voc/split1/attention_rpn_faster_rcnn_r101_fpn_voc_split1_2shot_ft.py +++ /dev/null @@ -1,46 +0,0 @@ -_base_ = [ - '../../../_base_/datasets/query_aware/few_shot_voc.py', - '../../../_base_/schedules/schedule.py', - 
'../../attention_rpn_faster_rcnn_r50_c4.py', - '../../../_base_/default_runtime.py' -] -# classes splits are predefined in FewShotVOCDataset -num_support_ways = 2 -num_support_shots = 1 -data = dict( - train=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - repeat_times=500, - dataset=dict( - num_novel_shots=2, - num_base_shots=2, - classes='NOVEL_CLASSES_SPLIT1', - )), - val=dict(classes='NOVEL_CLASSES_SPLIT1'), - test=dict(classes='NOVEL_CLASSES_SPLIT1'), - model_init=dict( - ann_cfg=[('DEFAULT_ANN_FILE_VOC_TFA', 'split1_2shot')], - num_novel_shots=2, - classes='NOVEL_CLASSES_SPLIT1')) -evaluation = dict(interval=200, class_splits=['NOVEL_CLASSES_SPLIT1']) -checkpoint_config = dict(interval=200) -optimizer = dict(lr=0.001, momentum=0.9) -lr_config = dict(warmup=None, step=[200]) -log_config = dict(interval=10) -runner = dict(max_iters=200) -# load_from = 'path of base training model' -load_from = 'work_dirs/' \ - 'attention_rpn_faster_rcnn_r101_fpn_voc_split1_base_training/' \ - 'latest.pth' -model = dict( - frozen_parameters=['backbone'], - rpn_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), - roi_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), -) diff --git a/configs/detection/attention_rpn/voc/split1/attention_rpn_faster_rcnn_r101_fpn_voc_split1_3shot_ft.py b/configs/detection/attention_rpn/voc/split1/attention_rpn_faster_rcnn_r101_fpn_voc_split1_3shot_ft.py deleted file mode 100644 index cc8417b..0000000 --- a/configs/detection/attention_rpn/voc/split1/attention_rpn_faster_rcnn_r101_fpn_voc_split1_3shot_ft.py +++ /dev/null @@ -1,46 +0,0 @@ -_base_ = [ - '../../../_base_/datasets/query_aware/few_shot_voc.py', - '../../../_base_/schedules/schedule.py', - '../../attention_rpn_faster_rcnn_r50_c4.py', - '../../../_base_/default_runtime.py' -] -# classes splits are predefined in FewShotVOCDataset -num_support_ways = 2 -num_support_shots = 2 -data = dict( - train=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - repeat_times=500, - dataset=dict( - num_novel_shots=3, - num_base_shots=3, - classes='NOVEL_CLASSES_SPLIT1', - )), - val=dict(classes='NOVEL_CLASSES_SPLIT1'), - test=dict(classes='NOVEL_CLASSES_SPLIT1'), - model_init=dict( - ann_cfg=[('DEFAULT_ANN_FILE_VOC_TFA', 'split1_3shot')], - num_novel_shots=3, - classes='NOVEL_CLASSES_SPLIT1')) -evaluation = dict(interval=300, class_splits=['NOVEL_CLASSES_SPLIT1']) -checkpoint_config = dict(interval=300) -optimizer = dict(lr=0.001, momentum=0.9) -lr_config = dict(warmup=None, step=[300]) -log_config = dict(interval=10) -runner = dict(max_iters=300) -# load_from = 'path of base training model' -load_from = 'work_dirs/' \ - 'attention_rpn_faster_rcnn_r101_fpn_voc_split1_base_training/' \ - 'latest.pth' -model = dict( - frozen_parameters=['backbone'], - rpn_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), - roi_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), -) diff --git a/configs/detection/attention_rpn/voc/split1/attention_rpn_faster_rcnn_r101_fpn_voc_split1_5shot_ft.py b/configs/detection/attention_rpn/voc/split1/attention_rpn_faster_rcnn_r101_fpn_voc_split1_5shot_ft.py deleted file mode 100644 index 77ca828..0000000 --- a/configs/detection/attention_rpn/voc/split1/attention_rpn_faster_rcnn_r101_fpn_voc_split1_5shot_ft.py +++ /dev/null @@ -1,46 +0,0 @@ -_base_ = [ - 
'../../../_base_/datasets/query_aware/few_shot_voc.py', - '../../../_base_/schedules/schedule.py', - '../../attention_rpn_faster_rcnn_r50_c4.py', - '../../../_base_/default_runtime.py' -] -# classes splits are predefined in FewShotVOCDataset -num_support_ways = 2 -num_support_shots = 4 -data = dict( - train=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - repeat_times=500, - dataset=dict( - num_novel_shots=5, - num_base_shots=5, - classes='NOVEL_CLASSES_SPLIT1', - )), - val=dict(classes='NOVEL_CLASSES_SPLIT1'), - test=dict(classes='NOVEL_CLASSES_SPLIT1'), - model_init=dict( - ann_cfg=[('DEFAULT_ANN_FILE_VOC_TFA', 'split1_5shot')], - num_novel_shots=5, - classes='NOVEL_CLASSES_SPLIT1')) -evaluation = dict(interval=500, class_splits=['NOVEL_CLASSES_SPLIT1']) -checkpoint_config = dict(interval=500) -optimizer = dict(lr=0.001, momentum=0.9) -lr_config = dict(warmup=None, step=[500]) -log_config = dict(interval=10) -runner = dict(max_iters=500) -# load_from = 'path of base training model' -load_from = 'work_dirs/' \ - 'attention_rpn_faster_rcnn_r101_fpn_voc_split1_base_training/' \ - 'latest.pth' -model = dict( - frozen_parameters=['backbone'], - rpn_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), - roi_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), -) diff --git a/configs/detection/attention_rpn/voc/split1/attention_rpn_faster_rcnn_r101_fpn_voc_split1_base_training.py b/configs/detection/attention_rpn/voc/split1/attention_rpn_faster_rcnn_r101_fpn_voc_split1_base_training.py deleted file mode 100644 index 2e7201d..0000000 --- a/configs/detection/attention_rpn/voc/split1/attention_rpn_faster_rcnn_r101_fpn_voc_split1_base_training.py +++ /dev/null @@ -1,37 +0,0 @@ -_base_ = [ - '../../../_base_/datasets/query_aware/base_voc.py', - '../../../_base_/schedules/schedule.py', - '../../attention_rpn_faster_rcnn_r50_c4.py', - '../../../_base_/default_runtime.py' -] -# classes splits are predefined in FewShotVOCDataset -num_support_ways = 2 -num_support_shots = 10 -data = dict( - train=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - dataset=dict(classes='BASE_CLASSES_SPLIT1')), - val=dict(classes='BASE_CLASSES_SPLIT1'), - test=dict(classes='BASE_CLASSES_SPLIT1'), - model_init=dict(classes='BASE_CLASSES_SPLIT1')) -optimizer = dict( - lr=0.004, - momentum=0.9, - paramwise_cfg=dict(custom_keys={'roi_head.bbox_head': dict(lr_mult=2.0)})) -lr_config = dict(warmup_iters=500, warmup_ratio=0.1, step=[28000, 30000]) -runner = dict(max_iters=30000) -evaluation = dict(interval=30000) -checkpoint_config = dict(interval=10000) -log_config = dict(interval=10) - -model = dict( - rpn_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), - roi_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), -) diff --git a/configs/detection/attention_rpn/voc/split2/attention_rpn_faster_rcnn_r101_fpn_voc_split2_10shot_ft.py b/configs/detection/attention_rpn/voc/split2/attention_rpn_faster_rcnn_r101_fpn_voc_split2_10shot_ft.py deleted file mode 100644 index 2140895..0000000 --- a/configs/detection/attention_rpn/voc/split2/attention_rpn_faster_rcnn_r101_fpn_voc_split2_10shot_ft.py +++ /dev/null @@ -1,46 +0,0 @@ -_base_ = [ - '../../../_base_/datasets/query_aware/few_shot_voc.py', - '../../../_base_/schedules/schedule.py', - '../../attention_rpn_faster_rcnn_r50_c4.py', - 
'../../../_base_/default_runtime.py' -] -# classes splits are predefined in FewShotVOCDataset -num_support_ways = 2 -num_support_shots = 9 -data = dict( - train=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - repeat_times=500, - dataset=dict( - num_novel_shots=10, - num_base_shots=10, - classes='NOVEL_CLASSES_SPLIT2', - )), - val=dict(classes='NOVEL_CLASSES_SPLIT2'), - test=dict(classes='NOVEL_CLASSES_SPLIT2'), - model_init=dict( - ann_cfg=[('DEFAULT_ANN_FILE_VOC_TFA', 'split2_10shot')], - num_novel_shots=10, - classes='NOVEL_CLASSES_SPLIT2')) -evaluation = dict(interval=1000, class_splits=['NOVEL_CLASSES_SPLIT2']) -checkpoint_config = dict(interval=1000) -optimizer = dict(lr=0.001, momentum=0.9) -lr_config = dict(warmup=None, step=[1000]) -log_config = dict(interval=10) -runner = dict(max_iters=1000) -# load_from = 'path of base training model' -load_from = 'work_dirs/' \ - 'attention_rpn_faster_rcnn_r101_fpn_voc_split2_base_training/' \ - 'latest.pth' -model = dict( - frozen_parameters=['backbone'], - rpn_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), - roi_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), -) diff --git a/configs/detection/attention_rpn/voc/split2/attention_rpn_faster_rcnn_r101_fpn_voc_split2_1shot_ft.py b/configs/detection/attention_rpn/voc/split2/attention_rpn_faster_rcnn_r101_fpn_voc_split2_1shot_ft.py deleted file mode 100644 index 626bb37..0000000 --- a/configs/detection/attention_rpn/voc/split2/attention_rpn_faster_rcnn_r101_fpn_voc_split2_1shot_ft.py +++ /dev/null @@ -1,46 +0,0 @@ -_base_ = [ - '../../../_base_/datasets/query_aware/few_shot_voc.py', - '../../../_base_/schedules/schedule.py', - '../../attention_rpn_faster_rcnn_r50_c4.py', - '../../../_base_/default_runtime.py' -] -# classes splits are predefined in FewShotVOCDataset -num_support_ways = 2 -num_support_shots = 1 -data = dict( - train=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - repeat_times=500, - dataset=dict( - num_novel_shots=1, - num_base_shots=1, - classes='NOVEL_CLASSES_SPLIT2', - )), - val=dict(classes='NOVEL_CLASSES_SPLIT2'), - test=dict(classes='NOVEL_CLASSES_SPLIT2'), - model_init=dict( - ann_cfg=[('DEFAULT_ANN_FILE_VOC_TFA', 'split2_1shot')], - num_novel_shots=1, - classes='NOVEL_CLASSES_SPLIT2')) -evaluation = dict(interval=100, class_splits=['NOVEL_CLASSES_SPLIT2']) -checkpoint_config = dict(interval=100) -optimizer = dict(lr=0.001, momentum=0.9) -lr_config = dict(warmup=None, step=[100]) -log_config = dict(interval=10) -runner = dict(max_iters=100) -# load_from = 'path of base training model' -load_from = 'work_dirs/' \ - 'attention_rpn_faster_rcnn_r101_fpn_voc_split2_base_training/' \ - 'latest.pth' -model = dict( - frozen_parameters=['backbone'], - rpn_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), - roi_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), -) diff --git a/configs/detection/attention_rpn/voc/split2/attention_rpn_faster_rcnn_r101_fpn_voc_split2_2shot_ft.py b/configs/detection/attention_rpn/voc/split2/attention_rpn_faster_rcnn_r101_fpn_voc_split2_2shot_ft.py deleted file mode 100644 index 29f17e0..0000000 --- a/configs/detection/attention_rpn/voc/split2/attention_rpn_faster_rcnn_r101_fpn_voc_split2_2shot_ft.py +++ /dev/null @@ -1,46 +0,0 @@ -_base_ = [ - '../../../_base_/datasets/query_aware/few_shot_voc.py', - 
'../../../_base_/schedules/schedule.py', - '../../attention_rpn_faster_rcnn_r50_c4.py', - '../../../_base_/default_runtime.py' -] -# classes splits are predefined in FewShotVOCDataset -num_support_ways = 2 -num_support_shots = 1 -data = dict( - train=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - repeat_times=500, - dataset=dict( - num_novel_shots=2, - num_base_shots=2, - classes='NOVEL_CLASSES_SPLIT2', - )), - val=dict(classes='NOVEL_CLASSES_SPLIT2'), - test=dict(classes='NOVEL_CLASSES_SPLIT2'), - model_init=dict( - ann_cfg=[('DEFAULT_ANN_FILE_VOC_TFA', 'split2_2shot')], - num_novel_shots=2, - classes='NOVEL_CLASSES_SPLIT2')) -evaluation = dict(interval=200, class_splits=['NOVEL_CLASSES_SPLIT2']) -checkpoint_config = dict(interval=200) -optimizer = dict(lr=0.001, momentum=0.9) -lr_config = dict(warmup=None, step=[200]) -log_config = dict(interval=10) -runner = dict(max_iters=200) -# load_from = 'path of base training model' -load_from = 'work_dirs/' \ - 'attention_rpn_faster_rcnn_r101_fpn_voc_split2_base_training/' \ - 'latest.pth' -model = dict( - frozen_parameters=['backbone'], - rpn_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), - roi_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), -) diff --git a/configs/detection/attention_rpn/voc/split2/attention_rpn_faster_rcnn_r101_fpn_voc_split2_3shot_ft.py b/configs/detection/attention_rpn/voc/split2/attention_rpn_faster_rcnn_r101_fpn_voc_split2_3shot_ft.py deleted file mode 100644 index 4731705..0000000 --- a/configs/detection/attention_rpn/voc/split2/attention_rpn_faster_rcnn_r101_fpn_voc_split2_3shot_ft.py +++ /dev/null @@ -1,46 +0,0 @@ -_base_ = [ - '../../../_base_/datasets/query_aware/few_shot_voc.py', - '../../../_base_/schedules/schedule.py', - '../../attention_rpn_faster_rcnn_r50_c4.py', - '../../../_base_/default_runtime.py' -] -# classes splits are predefined in FewShotVOCDataset -num_support_ways = 2 -num_support_shots = 2 -data = dict( - train=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - repeat_times=500, - dataset=dict( - num_novel_shots=3, - num_base_shots=3, - classes='NOVEL_CLASSES_SPLIT2', - )), - val=dict(classes='NOVEL_CLASSES_SPLIT2'), - test=dict(classes='NOVEL_CLASSES_SPLIT2'), - model_init=dict( - ann_cfg=[('DEFAULT_ANN_FILE_VOC_TFA', 'split2_3shot')], - num_novel_shots=3, - classes='NOVEL_CLASSES_SPLIT2')) -evaluation = dict(interval=300, class_splits=['NOVEL_CLASSES_SPLIT2']) -checkpoint_config = dict(interval=300) -optimizer = dict(lr=0.001, momentum=0.9) -lr_config = dict(warmup=None, step=[300]) -log_config = dict(interval=10) -runner = dict(max_iters=300) -# load_from = 'path of base training model' -load_from = 'work_dirs/' \ - 'attention_rpn_faster_rcnn_r101_fpn_voc_split2_base_training/' \ - 'latest.pth' -model = dict( - frozen_parameters=['backbone'], - rpn_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), - roi_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), -) diff --git a/configs/detection/attention_rpn/voc/split2/attention_rpn_faster_rcnn_r101_fpn_voc_split2_5shot_ft.py b/configs/detection/attention_rpn/voc/split2/attention_rpn_faster_rcnn_r101_fpn_voc_split2_5shot_ft.py deleted file mode 100644 index e4505b6..0000000 --- a/configs/detection/attention_rpn/voc/split2/attention_rpn_faster_rcnn_r101_fpn_voc_split2_5shot_ft.py +++ /dev/null @@ -1,46 +0,0 @@ -_base_ 
= [ - '../../../_base_/datasets/query_aware/few_shot_voc.py', - '../../../_base_/schedules/schedule.py', - '../../attention_rpn_faster_rcnn_r50_c4.py', - '../../../_base_/default_runtime.py' -] -# classes splits are predefined in FewShotVOCDataset -num_support_ways = 2 -num_support_shots = 4 -data = dict( - train=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - repeat_times=500, - dataset=dict( - num_novel_shots=5, - num_base_shots=5, - classes='NOVEL_CLASSES_SPLIT2', - )), - val=dict(classes='NOVEL_CLASSES_SPLIT2'), - test=dict(classes='NOVEL_CLASSES_SPLIT2'), - model_init=dict( - ann_cfg=[('DEFAULT_ANN_FILE_VOC_TFA', 'split2_5shot')], - num_novel_shots=5, - classes='NOVEL_CLASSES_SPLIT2')) -evaluation = dict(interval=500, class_splits=['NOVEL_CLASSES_SPLIT2']) -checkpoint_config = dict(interval=500) -optimizer = dict(lr=0.001, momentum=0.9) -lr_config = dict(warmup=None, step=[500]) -log_config = dict(interval=10) -runner = dict(max_iters=500) -# load_from = 'path of base training model' -load_from = 'work_dirs/' \ - 'attention_rpn_faster_rcnn_r101_fpn_voc_split2_base_training/' \ - 'latest.pth' -model = dict( - frozen_parameters=['backbone'], - rpn_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), - roi_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), -) diff --git a/configs/detection/attention_rpn/voc/split2/attention_rpn_faster_rcnn_r101_fpn_voc_split2_base_training.py b/configs/detection/attention_rpn/voc/split2/attention_rpn_faster_rcnn_r101_fpn_voc_split2_base_training.py deleted file mode 100644 index a99b4d5..0000000 --- a/configs/detection/attention_rpn/voc/split2/attention_rpn_faster_rcnn_r101_fpn_voc_split2_base_training.py +++ /dev/null @@ -1,37 +0,0 @@ -_base_ = [ - '../../../_base_/datasets/query_aware/base_voc.py', - '../../../_base_/schedules/schedule.py', - '../../attention_rpn_faster_rcnn_r50_c4.py', - '../../../_base_/default_runtime.py' -] -# classes splits are predefined in FewShotVOCDataset -num_support_ways = 2 -num_support_shots = 10 -data = dict( - train=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - dataset=dict(classes='BASE_CLASSES_SPLIT2')), - val=dict(classes='BASE_CLASSES_SPLIT2'), - test=dict(classes='BASE_CLASSES_SPLIT2'), - model_init=dict(classes='BASE_CLASSES_SPLIT2')) -optimizer = dict( - lr=0.004, - momentum=0.9, - paramwise_cfg=dict(custom_keys={'roi_head.bbox_head': dict(lr_mult=2.0)})) -lr_config = dict(warmup_iters=500, warmup_ratio=0.1, step=[28000, 30000]) -runner = dict(max_iters=30000) -evaluation = dict(interval=30000) -checkpoint_config = dict(interval=10000) -log_config = dict(interval=10) - -model = dict( - rpn_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), - roi_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), -) diff --git a/configs/detection/attention_rpn/voc/split3/attention_rpn_faster_rcnn_r101_fpn_voc_split3_10shot_ft.py b/configs/detection/attention_rpn/voc/split3/attention_rpn_faster_rcnn_r101_fpn_voc_split3_10shot_ft.py deleted file mode 100644 index 20a8675..0000000 --- a/configs/detection/attention_rpn/voc/split3/attention_rpn_faster_rcnn_r101_fpn_voc_split3_10shot_ft.py +++ /dev/null @@ -1,46 +0,0 @@ -_base_ = [ - '../../../_base_/datasets/query_aware/few_shot_voc.py', - '../../../_base_/schedules/schedule.py', - '../../attention_rpn_faster_rcnn_r50_c4.py', - 
'../../../_base_/default_runtime.py' -] -# classes splits are predefined in FewShotVOCDataset -num_support_ways = 2 -num_support_shots = 9 -data = dict( - train=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - repeat_times=500, - dataset=dict( - num_novel_shots=10, - num_base_shots=10, - classes='NOVEL_CLASSES_SPLIT3', - )), - val=dict(classes='NOVEL_CLASSES_SPLIT3'), - test=dict(classes='NOVEL_CLASSES_SPLIT3'), - model_init=dict( - ann_cfg=[('DEFAULT_ANN_FILE_VOC_TFA', 'split3_10shot')], - num_novel_shots=10, - classes='NOVEL_CLASSES_SPLIT3')) -evaluation = dict(interval=1000, class_splits=['NOVEL_CLASSES_SPLIT3']) -checkpoint_config = dict(interval=1000) -optimizer = dict(lr=0.001, momentum=0.9) -lr_config = dict(warmup=None, step=[1000]) -log_config = dict(interval=10) -runner = dict(max_iters=1000) -# load_from = 'path of base training model' -load_from = 'work_dirs/' \ - 'attention_rpn_faster_rcnn_r101_fpn_voc_split3_base_training/' \ - 'latest.pth' -model = dict( - frozen_parameters=['backbone'], - rpn_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), - roi_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), -) diff --git a/configs/detection/attention_rpn/voc/split3/attention_rpn_faster_rcnn_r101_fpn_voc_split3_1shot_ft.py b/configs/detection/attention_rpn/voc/split3/attention_rpn_faster_rcnn_r101_fpn_voc_split3_1shot_ft.py deleted file mode 100644 index 99d3f3d..0000000 --- a/configs/detection/attention_rpn/voc/split3/attention_rpn_faster_rcnn_r101_fpn_voc_split3_1shot_ft.py +++ /dev/null @@ -1,46 +0,0 @@ -_base_ = [ - '../../../_base_/datasets/query_aware/few_shot_voc.py', - '../../../_base_/schedules/schedule.py', - '../../attention_rpn_faster_rcnn_r50_c4.py', - '../../../_base_/default_runtime.py' -] -# classes splits are predefined in FewShotVOCDataset -num_support_ways = 2 -num_support_shots = 1 -data = dict( - train=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - repeat_times=500, - dataset=dict( - num_novel_shots=1, - num_base_shots=1, - classes='NOVEL_CLASSES_SPLIT3', - )), - val=dict(classes='NOVEL_CLASSES_SPLIT3'), - test=dict(classes='NOVEL_CLASSES_SPLIT3'), - model_init=dict( - ann_cfg=[('DEFAULT_ANN_FILE_VOC_TFA', 'split3_1shot')], - num_novel_shots=1, - classes='NOVEL_CLASSES_SPLIT3')) -evaluation = dict(interval=100, class_splits=['NOVEL_CLASSES_SPLIT3']) -checkpoint_config = dict(interval=100) -optimizer = dict(lr=0.001, momentum=0.9) -lr_config = dict(warmup=None, step=[100]) -log_config = dict(interval=10) -runner = dict(max_iters=100) -# load_from = 'path of base training model' -load_from = 'work_dirs/' \ - 'attention_rpn_faster_rcnn_r101_fpn_voc_split3_base_training/' \ - 'latest.pth' -model = dict( - frozen_parameters=['backbone'], - rpn_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), - roi_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), -) diff --git a/configs/detection/attention_rpn/voc/split3/attention_rpn_faster_rcnn_r101_fpn_voc_split3_2shot_ft.py b/configs/detection/attention_rpn/voc/split3/attention_rpn_faster_rcnn_r101_fpn_voc_split3_2shot_ft.py deleted file mode 100644 index ac89b42..0000000 --- a/configs/detection/attention_rpn/voc/split3/attention_rpn_faster_rcnn_r101_fpn_voc_split3_2shot_ft.py +++ /dev/null @@ -1,46 +0,0 @@ -_base_ = [ - '../../../_base_/datasets/query_aware/few_shot_voc.py', - 
'../../../_base_/schedules/schedule.py', - '../../attention_rpn_faster_rcnn_r50_c4.py', - '../../../_base_/default_runtime.py' -] -# classes splits are predefined in FewShotVOCDataset -num_support_ways = 2 -num_support_shots = 1 -data = dict( - train=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - repeat_times=500, - dataset=dict( - num_novel_shots=2, - num_base_shots=2, - classes='NOVEL_CLASSES_SPLIT3', - )), - val=dict(classes='NOVEL_CLASSES_SPLIT3'), - test=dict(classes='NOVEL_CLASSES_SPLIT3'), - model_init=dict( - ann_cfg=[('DEFAULT_ANN_FILE_VOC_TFA', 'split3_2shot')], - num_novel_shots=2, - classes='NOVEL_CLASSES_SPLIT3')) -evaluation = dict(interval=200, class_splits=['NOVEL_CLASSES_SPLIT3']) -checkpoint_config = dict(interval=200) -optimizer = dict(lr=0.001, momentum=0.9) -lr_config = dict(warmup=None, step=[200]) -log_config = dict(interval=10) -runner = dict(max_iters=200) -# load_from = 'path of base training model' -load_from = 'work_dirs/' \ - 'attention_rpn_faster_rcnn_r101_fpn_voc_split3_base_training/' \ - 'latest.pth' -model = dict( - frozen_parameters=['backbone'], - rpn_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), - roi_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), -) diff --git a/configs/detection/attention_rpn/voc/split3/attention_rpn_faster_rcnn_r101_fpn_voc_split3_3shot_ft.py b/configs/detection/attention_rpn/voc/split3/attention_rpn_faster_rcnn_r101_fpn_voc_split3_3shot_ft.py deleted file mode 100644 index eac64d7..0000000 --- a/configs/detection/attention_rpn/voc/split3/attention_rpn_faster_rcnn_r101_fpn_voc_split3_3shot_ft.py +++ /dev/null @@ -1,46 +0,0 @@ -_base_ = [ - '../../../_base_/datasets/query_aware/few_shot_voc.py', - '../../../_base_/schedules/schedule.py', - '../../attention_rpn_faster_rcnn_r50_c4.py', - '../../../_base_/default_runtime.py' -] -# classes splits are predefined in FewShotVOCDataset -num_support_ways = 2 -num_support_shots = 2 -data = dict( - train=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - repeat_times=500, - dataset=dict( - num_novel_shots=3, - num_base_shots=3, - classes='NOVEL_CLASSES_SPLIT3', - )), - val=dict(classes='NOVEL_CLASSES_SPLIT3'), - test=dict(classes='NOVEL_CLASSES_SPLIT3'), - model_init=dict( - ann_cfg=[('DEFAULT_ANN_FILE_VOC_TFA', 'split3_3shot')], - num_novel_shots=3, - classes='NOVEL_CLASSES_SPLIT3')) -evaluation = dict(interval=300, class_splits=['NOVEL_CLASSES_SPLIT3']) -checkpoint_config = dict(interval=300) -optimizer = dict(lr=0.001, momentum=0.9) -lr_config = dict(warmup=None, step=[300]) -log_config = dict(interval=10) -runner = dict(max_iters=300) -# load_from = 'path of base training model' -load_from = 'work_dirs/' \ - 'attention_rpn_faster_rcnn_r101_fpn_voc_split3_base_training/' \ - 'latest.pth' -model = dict( - frozen_parameters=['backbone'], - rpn_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), - roi_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), -) diff --git a/configs/detection/attention_rpn/voc/split3/attention_rpn_faster_rcnn_r101_fpn_voc_split3_5shot_ft.py b/configs/detection/attention_rpn/voc/split3/attention_rpn_faster_rcnn_r101_fpn_voc_split3_5shot_ft.py deleted file mode 100644 index 0d93bce..0000000 --- a/configs/detection/attention_rpn/voc/split3/attention_rpn_faster_rcnn_r101_fpn_voc_split3_5shot_ft.py +++ /dev/null @@ -1,46 +0,0 @@ -_base_ 
= [ - '../../../_base_/datasets/query_aware/few_shot_voc.py', - '../../../_base_/schedules/schedule.py', - '../../attention_rpn_faster_rcnn_r50_c4.py', - '../../../_base_/default_runtime.py' -] -# classes splits are predefined in FewShotVOCDataset -num_support_ways = 2 -num_support_shots = 4 -data = dict( - train=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - repeat_times=500, - dataset=dict( - num_novel_shots=5, - num_base_shots=5, - classes='NOVEL_CLASSES_SPLIT3', - )), - val=dict(classes='NOVEL_CLASSES_SPLIT3'), - test=dict(classes='NOVEL_CLASSES_SPLIT3'), - model_init=dict( - ann_cfg=[('DEFAULT_ANN_FILE_VOC_TFA', 'split3_5shot')], - num_novel_shots=5, - classes='NOVEL_CLASSES_SPLIT3')) -evaluation = dict(interval=500, class_splits=['NOVEL_CLASSES_SPLIT3']) -checkpoint_config = dict(interval=500) -optimizer = dict(lr=0.001, momentum=0.9) -lr_config = dict(warmup=None, step=[500]) -log_config = dict(interval=10) -runner = dict(max_iters=500) -# load_from = 'path of base training model' -load_from = 'work_dirs/' \ - 'attention_rpn_faster_rcnn_r101_fpn_voc_split3_base_training/' \ - 'latest.pth' -model = dict( - frozen_parameters=['backbone'], - rpn_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), - roi_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), -) diff --git a/configs/detection/attention_rpn/voc/split3/attention_rpn_faster_rcnn_r101_fpn_voc_split3_base_training.py b/configs/detection/attention_rpn/voc/split3/attention_rpn_faster_rcnn_r101_fpn_voc_split3_base_training.py deleted file mode 100644 index 4006a5f..0000000 --- a/configs/detection/attention_rpn/voc/split3/attention_rpn_faster_rcnn_r101_fpn_voc_split3_base_training.py +++ /dev/null @@ -1,37 +0,0 @@ -_base_ = [ - '../../../_base_/datasets/query_aware/base_voc.py', - '../../../_base_/schedules/schedule.py', - '../../attention_rpn_faster_rcnn_r50_c4.py', - '../../../_base_/default_runtime.py' -] -# classes splits are predefined in FewShotVOCDataset -num_support_ways = 2 -num_support_shots = 10 -data = dict( - train=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - dataset=dict(classes='BASE_CLASSES_SPLIT3')), - val=dict(classes='BASE_CLASSES_SPLIT3'), - test=dict(classes='BASE_CLASSES_SPLIT3'), - model_init=dict(classes='BASE_CLASSES_SPLIT3')) -optimizer = dict( - lr=0.004, - momentum=0.9, - paramwise_cfg=dict(custom_keys={'roi_head.bbox_head': dict(lr_mult=2.0)})) -lr_config = dict(warmup_iters=500, warmup_ratio=0.1, step=[28000, 30000]) -runner = dict(max_iters=30000) -evaluation = dict(interval=30000) -checkpoint_config = dict(interval=10000) -log_config = dict(interval=10) - -model = dict( - rpn_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), - roi_head=dict( - num_support_ways=num_support_ways, - num_support_shots=num_support_shots, - ), -) diff --git a/mmfewshot/__init__.py b/mmfewshot/__init__.py index dbe64c0..8dcb005 100644 --- a/mmfewshot/__init__.py +++ b/mmfewshot/__init__.py @@ -48,7 +48,7 @@ mmcls_version = digit_version(mmcls.__version__) assert (mmcls_version >= digit_version(mmcls_minimum_version) and mmcls_version <= digit_version(mmcls_maximum_version)), \ - f'MMDET=={mmcls.__version__} is used but incompatible. ' \ + f'MMCLS=={mmcls.__version__} is used but incompatible. ' \ f'Please install mmcls>={mmcls_minimum_version},\ <={mmcls_maximum_version}.' 
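Note on the TwoBranchDataset fix in the next hunk: before this patch, groups holding 50 or more indices were simply skipped when building the reweight index map, so their indices never entered the map; the hunk keeps them by appending such groups once, unrepeated. A toy sketch of the corrected loop (group contents and repeat_length are hypothetical, not taken from the library):

    # toy reconstruction of the patched loop, not the library code itself
    repeat_length = 100
    groups = [[0, 1, 2], list(range(3, 60))]  # one small group, one group with >= 50 indices
    reweight_idx_map = []
    for g in groups:
        if len(g) < 50:
            # small groups are oversampled until they roughly cover repeat_length
            reweight_idx_map += g * (int(repeat_length / len(g)) + 1)
        else:
            # large groups were dropped before the fix; now they are kept as-is
            reweight_idx_map += g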
diff --git a/mmfewshot/detection/datasets/dataset_wrappers.py b/mmfewshot/detection/datasets/dataset_wrappers.py
index 30083ed..e670675 100644
--- a/mmfewshot/detection/datasets/dataset_wrappers.py
+++ b/mmfewshot/detection/datasets/dataset_wrappers.py
@@ -559,4 +559,6 @@ class TwoBranchDataset(object):
         for g in groups:
             if len(g) < 50:
                 reweight_idx_map += g * (int(repeat_length / len(g)) + 1)
+            else:
+                reweight_idx_map += g
         return reweight_idx_map
diff --git a/requirements/readthedocs.txt b/requirements/readthedocs.txt
index 6a1a504..d2f69a1 100644
--- a/requirements/readthedocs.txt
+++ b/requirements/readthedocs.txt
@@ -1,5 +1,5 @@
-mmcls==0.15.0
-mmcv==1.3.12
-mmdet==2.16.0
+mmcls>=0.15.0
+mmcv>=1.3.12
+mmdet>=2.16.0
 torch==1.7.0
 torchvision
diff --git a/requirements/runtime.txt b/requirements/runtime.txt
index 205db03..f2c5af8 100644
--- a/requirements/runtime.txt
+++ b/requirements/runtime.txt
@@ -1,7 +1,7 @@
 matplotlib
-mmcls==0.15.0
-mmcv==1.3.12
-mmdet==2.16.0
+mmcls>=0.15.0
+mmcv>=1.3.12
+mmdet>=2.16.0
 numpy
 pycocotools; platform_system == "Linux"
 pycocotools-windows; platform_system == "Windows"
diff --git a/requirements/tests.txt b/requirements/tests.txt
index 654ab6e..ec95ecb 100644
--- a/requirements/tests.txt
+++ b/requirements/tests.txt
@@ -5,9 +5,9 @@ interrogate
 isort==4.3.21
 # Note: used for kwarray.group_items, this may be ported to mmcv in the future.
 kwarray
-mmcls==0.15.0
-mmcv==1.3.12
-mmdet==2.16.0
+mmcls>=0.15.0
+mmcv>=1.3.12
+mmdet>=2.16.0
 pytest
 ubelt
 xdoctest>=0.10.0
diff --git a/setup.cfg b/setup.cfg
index 7ffa5ab..fecc9cc 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -3,7 +3,7 @@ line_length = 79
 multi_line_output = 0
 known_standard_library = setuptools
 known_first_party = mmfewshot
-known_third_party = cv2,mmcls,mmcv,mmdet,numpy,terminaltables,torch
+known_third_party = mmcls,mmcv,mmdet,numpy,terminaltables,torch
 no_lines_before = STDLIB,LOCALFOLDER
 default_section = THIRDPARTY
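Note on the relaxed pins above: mmcls, mmcv and mmdet move from exact versions (==) to minimum versions (>=), while the import-time checks in mmfewshot/__init__.py still enforce both a lower and an upper bound. A minimal sketch of checking an installation against the new floors (the floor values are copied from the requirement files; the authoritative bounds remain the ones in mmfewshot/__init__.py):

    # sketch: verify installed versions satisfy the relaxed lower bounds
    from mmcv.utils import digit_version

    import mmcls
    import mmcv
    import mmdet

    floors = {'mmcls': '0.15.0', 'mmcv': '1.3.12', 'mmdet': '2.16.0'}
    for name, mod in [('mmcls', mmcls), ('mmcv', mmcv), ('mmdet', mmdet)]:
        assert digit_version(mod.__version__) >= digit_version(floors[name]), \
            f'{name}=={mod.__version__} is older than the required {floors[name]}'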
diff --git a/tests/test_detection_data/test_dataloader/test_dataloader.py b/tests/test_detection_data/test_dataloader/test_dataloader.py
index 1f9f135..ea23c51 100644
--- a/tests/test_detection_data/test_dataloader/test_dataloader.py
+++ b/tests/test_detection_data/test_dataloader/test_dataloader.py
@@ -1,6 +1,6 @@
 import torch
 
-from mmfewshot.apis.train import set_random_seed
+from mmfewshot.detection.apis.train import set_random_seed
 from mmfewshot.detection.datasets.builder import (build_dataloader,
                                                   build_dataset)
 
@@ -9,7 +9,7 @@ def test_dataloader():
     set_random_seed(2021)
 
     # test regular and few shot annotations
-    dataconfigs = [{
+    data_configs = [{
         'type': 'NwayKshotDataset',
         'support_way': 5,
         'support_shot': 1,
@@ -106,9 +106,9 @@ def test_dataloader():
         }
     }]
 
-    for dataconfig in dataconfigs:
-
-        nway_kshot_dataset = build_dataset(cfg=dataconfig)
+    for data_config in data_configs:
+
+        nway_kshot_dataset = build_dataset(cfg=data_config)
         nway_kshot_dataloader = build_dataloader(
             nway_kshot_dataset,
             samples_per_gpu=2,
@@ -124,11 +124,11 @@ def test_dataloader():
             len(nway_kshot_dataloader.support_data_loader)
         support_labels = data_batch['support_data']['gt_labels'].data[0]
         assert len(set(torch.cat(
-            support_labels).tolist())) == dataconfig['support_way']
+            support_labels).tolist())) == data_config['support_way']
         assert len(torch.cat(support_labels).tolist()) == \
-            dataconfig['support_way'] * dataconfig['support_shot']
+            data_config['support_way'] * data_config['support_shot']
 
-    dataconfigs = [{
+    data_configs = [{
         'type': 'QueryAwareDataset',
         'support_way': 3,
         'support_shot': 5,
@@ -225,8 +225,8 @@ def test_dataloader():
         }
     }]
 
-    for dataconfig in dataconfigs:
-        query_aware_dataset = build_dataset(cfg=dataconfig)
+    for data_config in data_configs:
+        query_aware_dataset = build_dataset(cfg=data_config)
         query_aware_dataloader = build_dataloader(
             query_aware_dataset,
             samples_per_gpu=2,
@@ -242,6 +242,6 @@ def test_dataloader():
         support_labels = data_batch['support_data']['gt_labels'].data[0]
         half_batch = len(support_labels) // 2
         assert len(set(torch.cat(support_labels[:half_batch]).tolist())) \
-            == dataconfig['support_way']
+            == data_config['support_way']
         assert len(set(torch.cat(support_labels[half_batch:]).tolist())) \
-            == dataconfig['support_way']
+            == data_config['support_way']
diff --git a/tools/detection/misc/visualize_saved_dataset.py b/tools/detection/misc/visualize_saved_dataset.py
index e373a4b..7d43378 100644
--- a/tools/detection/misc/visualize_saved_dataset.py
+++ b/tools/detection/misc/visualize_saved_dataset.py
@@ -1,19 +1,23 @@
 """Visualized instances of saved dataset.
 
 Example:
-    python3 -m tools.models.visualize_dataset \
-        --src1 ./work_dirs/xx_saved_data.json
+    python tools/detection/misc/visualize_saved_dataset.py \
+        --src ./work_dirs/xx_saved_data.json
         --dir ./vis_images
 """
 import argparse
 import json
 import os
 
-import cv2
 import mmcv
 import numpy as np
 from terminaltables import AsciiTable
 
+try:
+    import cv2
+except ImportError:
+    raise ImportError('please install cv2 manually')
+
 
 class Visualizer(object):
     """Visualize instances of saved dataset.
diff --git a/tools/detection/multi_dist_test.sh b/tools/detection/multi_dist_test.sh
new file mode 100644
index 0000000..72dd430
--- /dev/null
+++ b/tools/detection/multi_dist_test.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+
+CONFIG=$1
+GPUS=$2
+TIMES=$3
+PORT=${PORT:-29500}
+
+PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
+python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \
+    $(dirname "$0")/multi_test.py $CONFIG $TIMES --launcher pytorch ${@:4}
diff --git a/tools/detection/multi_dist_train.sh b/tools/detection/multi_dist_train.sh
new file mode 100644
index 0000000..e74674c
--- /dev/null
+++ b/tools/detection/multi_dist_train.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+
+CONFIG=$1
+GPUS=$2
+TIMES=$3
+SEED=${SEED:-2021}
+START=${START:-0}
+PORT=${PORT:-29500}
+
+PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
+python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \
+    $(dirname "$0")/multi_train.py $CONFIG $TIMES --seed $SEED --start $START \
+    --launcher pytorch ${@:4}
diff --git a/tools/detection/multi_test.py b/tools/detection/multi_test.py
index 0568b3a..57184c6 100644
--- a/tools/detection/multi_test.py
+++ b/tools/detection/multi_test.py
@@ -141,15 +141,14 @@ def main():
     rank, _ = get_dist_info()
     eval_result_list = []
     # build the dataloader
-    dataset = build_dataset(base_cfg.data.test, task_type=base_cfg.task_type)
+    dataset = build_dataset(base_cfg.data.test)
     # currently only support single images testing
     data_loader = build_dataloader(
         dataset,
         samples_per_gpu=samples_per_gpu,
         workers_per_gpu=base_cfg.data.workers_per_gpu,
         dist=distributed,
-        shuffle=False,
-        round_up=False)
+        shuffle=False)
 
     for i in range(args.start, args.times):
         cfg = copy.deepcopy(base_cfg)
@@ -253,7 +252,7 @@ def main():
     else:
         avg_results = dict()
         for k in eval_result_list[0].keys():
-            avg_results[f'avg_{k}'] = sum([
+            avg_results[f'Avg {k}'] = sum([
                 eval_result_list[i][k] for i in range(num_results)
             ]) / num_results
         mmcv.dump(avg_results,
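Note on the two launcher scripts added above: multi_dist_test.sh and multi_dist_train.sh wrap multi_test.py and multi_train.py with torch.distributed.launch. Both take CONFIG, GPUS and TIMES as positional arguments and read PORT (and, for training, SEED and START) from the environment; any further arguments are forwarded to the underlying Python script. A hypothetical invocation (the config path and counts are placeholders) would be `PORT=29501 bash tools/detection/multi_dist_train.sh configs/detection/some_config.py 8 5`.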
diff --git a/tools/detection/multi_train.py b/tools/detection/multi_train.py
index d4d0286..9fa4030 100644
--- a/tools/detection/multi_train.py
+++ b/tools/detection/multi_train.py
@@ -159,8 +159,8 @@ def main():
             f'It will cause UNFAIR data usage. Therefore, seed is set '
             f'to {seed} for default.')
 
-    np.random.seed(seed)
-    all_seeds = np.random.randint(0, 1000000, args.times)
+    np.random.seed(int(seed))
+    all_seeds = np.random.randint(0, 1000000, args.times).tolist()
     print(f'using seeds for {args.times} times training: ', all_seeds)
 
     # train with saved dataset
@@ -190,7 +190,7 @@ def main():
         meta = dict()
         # log env info
         env_info_dict = collect_env()
-        env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
+        env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
         dash_line = '-' * 60 + '\n'
         logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                     dash_line)
@@ -270,7 +270,8 @@ def main():
                     'mode', 'epoch', 'iter', 'lr', 'memory', 'time',
                     'data_time'
             ]:
-                eval_result.pop(k)
+                if k in eval_result.keys():
+                    eval_result.pop(k)
             logger.info(' '.join(
                 [f'experiment {i} last eval result:'] +
                 [f'{k}: {eval_result[k]}' for k in eval_result.keys()]))
@@ -284,7 +285,7 @@ def main():
                 [eval_result_list[i][k] for i in range(num_result)]) / num_result
         logger.info(f'{num_result} times avg eval result:')
         logger.info(' '.join([f'average {num_result} eval results:'] + [
-            f'avg_{k}: {eval_result_list[0][k]}'
+            f'Avg {k}: {eval_result_list[0][k]}'
            for k in eval_result_list[0].keys()
        ]))
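Note on the seed handling in multi_train.py above: the base seed is now cast to int before seeding NumPy, and the derived per-experiment seeds are converted with .tolist(), presumably so that downstream consumers receive plain Python ints rather than numpy integers. A minimal sketch of the derivation, assuming each experiment i simply uses all_seeds[i] (the surrounding training loop is not shown in the hunk):

    import numpy as np

    def derive_seeds(base_seed, times):
        # reproduce one fixed list of per-experiment seeds from a base seed
        np.random.seed(int(base_seed))  # cast guards against non-int seed values
        return np.random.randint(0, 1000000, times).tolist()  # plain Python ints

    seeds = derive_seeds(2021, times=5)  # e.g. SEED=2021, TIMES=5 as in multi_dist_train.sh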