update README

pull/43/head
xieenze 2020-09-23 19:30:19 +08:00
parent c3eceef9cd
commit 371f9b0e51
2 changed files with 3 additions and 2 deletions

@@ -2,6 +2,7 @@
 # OpenSelfSup
 **News**
+* OpenSelfSup now supports [Mixed Precision Training (apex AMP)](https://github.com/NVIDIA/apex)!
 * A bug of MoCo v2 has been fixed and now the results are reproducible.
 * OpenSelfSup now supports [BYOL](https://arxiv.org/pdf/2006.07733.pdf)!
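In the training code changed below, mixed precision is switched on by a `use_fp16` flag read from the config. A minimal sketch of how that flag might sit in an OpenSelfSup-style Python config; every field except `use_fp16` is an illustrative placeholder, not taken from this commit:

```python
# Hypothetical config fragment. Only `use_fp16` is grounded in the diff below
# (checked as: 'use_fp16' in cfg and cfg.use_fp16); the rest is illustrative.
use_fp16 = True
optimizer = dict(type='SGD', lr=0.03, weight_decay=1e-4, momentum=0.9)
```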

@@ -184,13 +184,13 @@ def _dist_train(model, dataset, cfg, logger=None, timestamp=None, meta=None):
             drop_last=getattr(cfg.data, 'drop_last', False)) for ds in dataset
     ]
     optimizer = build_optimizer(model, cfg.optimizer)
-    if 'use_fp16' in cfg and cfg.use_fp16 == True:
+    if 'use_fp16' in cfg and cfg.use_fp16:
         model, optimizer = apex.amp.initialize(model.cuda(), optimizer, opt_level="O1")
         print_log('**** Initializing mixed precision done. ****')
     # put model on gpus
     model = MMDistributedDataParallel(
-        model.cuda(),
+        model if next(model.parameters()).is_cuda else model.cuda(),
         device_ids=[torch.cuda.current_device()],
         broadcast_buffers=False)
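The pattern in the hunk above is: initialize apex AMP on the GPU-resident model first, then wrap the (already moved) model in DistributedDataParallel, guarding against a second `.cuda()` call. A self-contained sketch of that ordering, assuming plain PyTorch DDP in place of mmcv's MMDistributedDataParallel; `wrap_model_for_training` is a hypothetical helper, not an OpenSelfSup API:

```python
import torch

try:
    import apex
except ImportError:
    apex = None


def wrap_model_for_training(model, optimizer, use_fp16=False):
    if use_fp16:
        if apex is None:
            raise RuntimeError('use_fp16=True requires NVIDIA apex to be installed')
        # apex.amp.initialize expects the model to already live on the GPU,
        # which is why the diff calls model.cuda() here rather than later.
        model, optimizer = apex.amp.initialize(
            model.cuda(), optimizer, opt_level='O1')
    # Move the model only if AMP initialization has not already done so;
    # this mirrors the `next(model.parameters()).is_cuda` guard in the diff.
    if not next(model.parameters()).is_cuda:
        model = model.cuda()
    model = torch.nn.parallel.DistributedDataParallel(
        model,
        device_ids=[torch.cuda.current_device()],
        broadcast_buffers=False)
    return model, optimizer
```

Keeping the `is_cuda` check avoids redundantly moving a model that AMP initialization has already placed on the GPU; without it, the original code called `.cuda()` twice when `use_fp16` was enabled.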