mirror of https://github.com/JDAI-CV/fast-reid.git
fix(engine/defaults): fix precise bn bug
Fix a problem in precise BN: the PreciseBN dataset was not used and errors were thrown.
pull/45/head
parent 18fd7faff7
commit e38a799b63
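The gist of the fix, as a minimal sketch (assuming a yacs-style config as returned by fastreid's get_cfg; make_precise_bn_cfg is a hypothetical helper, not part of fastreid): clone the training config, defrost the clone, and point DATASETS.NAMES at TEST.PRECISE_BN.DATASET so the PreciseBN data loader is built from the intended dataset.

def make_precise_bn_cfg(cfg):
    # Hypothetical helper illustrating the fix; not fastreid API.
    prebn_cfg = cfg.clone()    # leave the training config untouched
    prebn_cfg.defrost()        # the clone inherits the frozen state, so unfreeze it first
    prebn_cfg.DATALOADER.NUM_WORKERS = 0  # save some memory and time for PreciseBN
    prebn_cfg.DATASETS.NAMES = tuple([cfg.TEST.PRECISE_BN.DATASET])  # use the PreciseBN dataset
    return prebn_cfg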
@@ -253,13 +253,13 @@ class DefaultTrainer(SimpleTrainer):
        cfg = self.cfg.clone()
        cfg.defrost()
        cfg.DATALOADER.NUM_WORKERS = 0  # save some memory and time for PreciseBN
        cfg.DATASETS.NAMES = tuple([cfg.TEST.PRECISE_BN.DATASET])  # set dataset name for PreciseBN

        ret = [
            hooks.IterationTimer(),
            hooks.LRScheduler(self.optimizer, self.scheduler),
            hooks.PreciseBN(
                # Run at the same freq as (but before) evaluation.
                cfg.TEST.EVAL_PERIOD,
                self.model,
                # Build a new data loader to not affect training
                self.build_train_loader(cfg),
@@ -365,7 +365,7 @@ class DefaultTrainer(SimpleTrainer):
        """
        Returns:
            iterable
-        It now calls :func:`detectron2.data.build_detection_train_loader`.
+        It now calls :func:`fastreid.data.build_detection_train_loader`.
        Overwrite it if you'd like a different data loader.
        """
        return build_reid_train_loader(cfg)
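Since the docstring invites overriding, here is a brief sketch of plugging in a different loader (MyTrainer is a hypothetical subclass; the default implementation simply delegates to fastreid's build_reid_train_loader, imported here the same way defaults.py imports it):

from fastreid.data import build_reid_train_loader
from fastreid.engine import DefaultTrainer


class MyTrainer(DefaultTrainer):
    @classmethod
    def build_train_loader(cls, cfg):
        # Return any iterable of training batches; swap in a custom
        # loader here if the default one does not fit.
        return build_reid_train_loader(cfg)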
@@ -0,0 +1,73 @@
# encoding: utf-8
"""
@author: sherlock
@contact: sherlockliao01@gmail.com
"""

import os
import sys
sys.path.append('.')

from torch import nn

from fastreid.config import get_cfg
from fastreid.engine import DefaultTrainer, default_argument_parser, default_setup
from fastreid.utils.checkpoint import Checkpointer
from fastreid.engine import hooks
from fastreid.evaluation import ReidEvaluator


class Trainer(DefaultTrainer):
    @classmethod
    def build_evaluator(cls, cfg, num_query, output_folder=None):
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        return ReidEvaluator(cfg, num_query)


def setup(args):
    """
    Create configs and perform basic setups.
    """
    cfg = get_cfg()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    default_setup(cfg, args)
    return cfg


def main(args):
    cfg = setup(args)

    if args.eval_only:
        cfg.defrost()
        cfg.MODEL.BACKBONE.PRETRAIN = False
        model = Trainer.build_model(cfg)
        model = nn.DataParallel(model)
        model = model.cuda()

        Checkpointer(model, save_dir=cfg.OUTPUT_DIR).load(cfg.MODEL.WEIGHTS)  # load trained model
        if cfg.TEST.PRECISE_BN.ENABLED and hooks.get_bn_modules(model):
            prebn_cfg = cfg.clone()
            prebn_cfg.DATALOADER.NUM_WORKERS = 0  # save some memory and time for PreciseBN
            prebn_cfg.DATASETS.NAMES = tuple([cfg.TEST.PRECISE_BN.DATASET])  # set dataset name for PreciseBN
            hooks.PreciseBN(
                # Run at the same freq as (but before) evaluation.
                model,
                # Build a new data loader to not affect training
                Trainer.build_train_loader(prebn_cfg),
                cfg.TEST.PRECISE_BN.NUM_ITER,
            ).update_stats()
        res = Trainer.test(cfg, model)
        return res

    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()


if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    print("Command Line Args:", args)
    main(args)
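For context, a rough sketch of what the PreciseBN update does conceptually (this is not fastreid's implementation; recompute_bn_stats is an illustrative name, and the loader is assumed to yield inputs the model's forward accepts): run a fixed number of batches in train mode so the BatchNorm running statistics are re-estimated from the PreciseBN dataset.

import itertools

import torch


@torch.no_grad()
def recompute_bn_stats(model, data_loader, num_iter):
    # Illustrative only: re-estimate BN running_mean/running_var from data.
    bn_layers = [m for m in model.modules()
                 if isinstance(m, torch.nn.modules.batchnorm._BatchNorm)]
    if not bn_layers:
        return
    was_training = model.training
    model.train()  # BN layers update running stats only in train mode
    for bn in bn_layers:
        bn.reset_running_stats()
        bn.momentum = None  # None -> cumulative moving average over all seen batches
    for inputs in itertools.islice(data_loader, num_iter):
        model(inputs)  # forward pass only; @torch.no_grad() disables autograd
    model.train(was_training)

In the script above, hooks.PreciseBN(...).update_stats() is invoked directly in the --eval-only path rather than registered as a training hook, which is why the freshly built prebn_cfg loader is passed to it explicitly.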