mirror of https://github.com/open-mmlab/mmyolo.git
parent 8973096d04 · commit a321cfae80
@@ -0,0 +1,70 @@
_base_ = 'yolov5_s-v61_fast_1xb12-40e_cat.py'

# This configuration is used to provide non-square training examples
# Must be a multiple of 32
img_scale = (608, 352)  # w h

anchors = [
    [(65, 35), (159, 45), (119, 80)],  # P3/8
    [(215, 77), (224, 116), (170, 166)],  # P4/16
    [(376, 108), (339, 176), (483, 190)]  # P5/32
]

# ===============================Unmodified in most cases====================
_base_.model.bbox_head.loss_obj.loss_weight = 1.0 * ((img_scale[1] / 640)**2)
_base_.model.bbox_head.prior_generator.base_sizes = anchors

train_pipeline = [
    *_base_.pre_transform,
    dict(
        type='Mosaic',
        img_scale=img_scale,
        pad_val=114.0,
        pre_transform=_base_.pre_transform),
    dict(
        type='YOLOv5RandomAffine',
        max_rotate_degree=0.0,
        max_shear_degree=0.0,
        scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale),
        # img_scale is (width, height)
        border=(-img_scale[0] // 2, -img_scale[1] // 2),
        border_val=(114, 114, 114)),
    dict(
        type='mmdet.Albu',
        transforms=_base_.albu_train_transforms,
        bbox_params=dict(
            type='BboxParams',
            format='pascal_voc',
            label_fields=['gt_bboxes_labels', 'gt_ignore_flags']),
        keymap={
            'img': 'image',
            'gt_bboxes': 'bboxes'
        }),
    dict(type='YOLOv5HSVRandomAug'),
    dict(type='mmdet.RandomFlip', prob=0.5),
    dict(
        type='mmdet.PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip',
                   'flip_direction'))
]

_base_.train_dataloader.dataset.pipeline = train_pipeline

test_pipeline = [
    dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args),
    dict(type='YOLOv5KeepRatioResize', scale=img_scale),
    dict(
        type='LetterResize',
        scale=img_scale,
        allow_scale_up=False,
        pad_val=dict(img=114)),
    dict(type='mmdet.LoadAnnotations', with_bbox=True),
    dict(
        type='mmdet.PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor', 'pad_param'))
]

val_dataloader = dict(
    dataset=dict(pipeline=test_pipeline, batch_shapes_cfg=None))
test_dataloader = val_dataloader
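As a quick sanity check of the objectness-loss scaling above (this just evaluates the expression from the config; 640 is the input height of the square base config):

```python
img_scale = (608, 352)  # (w, h)
loss_weight = 1.0 * ((img_scale[1] / 640) ** 2)
print(loss_weight)  # 0.3025: the obj loss shrinks with the input height
```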
@@ -46,6 +46,9 @@ IGNORE_LOSS_PARAMS = {
    'yolov6': ['loss_cls'],
    'yolox': ['loss_obj'],
    'rtmdet': ['loss_cls'],
    'yolov7': ['loss_obj'],
    'yolov8': ['loss_cls'],
    'ppyoloe': ['loss_cls'],
}

# This parameter is required in some algorithms
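A hypothetical sketch of how such a mapping can be consumed (the dict entries come from the hunk above; the filtering loop itself is an illustrative assumption, not code from this diff):

```python
# Illustrative only: drop the loss terms listed for the chosen detector
# before aggregating what remains.
losses = {'loss_cls': 0.0, 'loss_bbox': 1.2, 'loss_obj': 0.8}
ignored = IGNORE_LOSS_PARAMS.get('yolox', [])  # ['loss_obj']
total = sum(v for k, v in losses.items() if k not in ignored)  # 1.2
```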
@@ -105,3 +105,7 @@ unset PYTHONPATH
7. `coco/bbox_mAP_l` will use `bbox_mAP_l` as the filtering criterion.

In addition, users can choose the comparison logic by setting `default_hooks.checkpoint.rule` in the config; for example, `default_hooks.checkpoint.rule=greater` means that a larger metric is better. See [checkpoint_hook](https://github.com/open-mmlab/mmengine/blob/main/mmengine/hooks/checkpoint_hook.py) for more detailed usage.
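For instance, a minimal sketch of the relevant config fragment (the metric name and interval below are illustrative, not taken from this diff):

```python
# Track the best checkpoint by COCO bbox mAP; 'greater' means larger is better.
default_hooks = dict(
    checkpoint=dict(
        type='CheckpointHook',
        interval=1,
        save_best='coco/bbox_mAP',
        rule='greater'))
```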
## How to train and test with a non-square input size?

The default configs of the YOLO series algorithms are almost all trained with square inputs of 640x640 or 1280x1280. If you want to train with a non-square scale, modify the `img_scale` parameter in the config and update the other places that depend on it. You can refer to the [yolov5_s-v61_fast_1xb12-40e_608x352_cat.py](https://github.com/open-mmlab/mmyolo/tree/dev/configs/yolov5/yolov5_s-v61_fast_1xb12-40e_608x352_cat.py) config we provide.
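Both sides of `img_scale` must be divisible by 32, the largest feature stride (P5/32); a quick check, as a sketch:

```python
img_scale = (608, 352)  # (w, h)
assert all(s % 32 == 0 for s in img_scale), \
    'each side of img_scale must be a multiple of 32'
```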
@@ -203,6 +203,7 @@ class BoxAMDetectorWrapper(nn.Module):
            # Maybe this is a direction that can be optimized
            # self.detector.init_weights()

            self.detector.bbox_head.head_module.training = True
            if hasattr(self.detector.bbox_head, 'featmap_sizes'):
                # Clear cached feature-map sizes to prevent errors in the
                # model when calculating the loss
                self.detector.bbox_head.featmap_sizes = None
@@ -218,6 +219,7 @@ class BoxAMDetectorWrapper(nn.Module):

            return [loss]
        else:
            self.detector.bbox_head.head_module.training = False
            with torch.no_grad():
                results = self.detector.test_step(self.input_data)
            return results