From f67ef9c128eb2b643beaed8eb518c9fa09eb0912 Mon Sep 17 00:00:00 2001
From: xuuyangg <41278676+xuuyangg@users.noreply.github.com>
Date: Sat, 22 Jul 2023 14:07:31 +0800
Subject: [PATCH] [Fix] Fix mix training on Ascend NPU (#3215)

## Motivation

Address an issue where mixed precision training was not enabled on Ascend NPU when `optimizer_config` was already present in the config.

## Modification

Previously, when `optimizer_config` was present in the configuration on Ascend NPU, the existing value was kept unchanged, so the `Fp16OptimizerHook` was never applied. This commit ensures that the hook type and dynamic loss scale are always set in that case, while preserving any other keys in `optimizer_config`.

## Use cases

Validated with the knet_s3_upernet_swin-l_8x2_640x640_adamw_80k_ade20k.py config.

## Checklist

1. Pre-commit or other linting tools are used to fix the potential lint issues.
2. The modification is covered by complete unit tests. If not, please add more unit tests to ensure the correctness.
3. If the modification has a potential influence on downstream projects, this PR should be tested with downstream projects, like MMDet or MMDet3D.
4. The documentation has been modified accordingly, like docstring or example tutorials.
---
 mmseg/apis/train.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/mmseg/apis/train.py b/mmseg/apis/train.py
index d823412db..69e4d27bc 100644
--- a/mmseg/apis/train.py
+++ b/mmseg/apis/train.py
@@ -137,9 +137,9 @@ def train_segmentor(model,
             meta=meta))
 
     if cfg.device == 'npu' and not is_npu_support_full_precision():
-        optimiter_config = dict(type='Fp16OptimizerHook', loss_scale='dynamic')
-        cfg.optimizer_config = optimiter_config if \
-            not cfg.optimizer_config else cfg.optimizer_config
+        cfg.optimizer_config = cfg.optimizer_config or {}
+        cfg.optimizer_config['type'] = 'Fp16OptimizerHook'
+        cfg.optimizer_config['loss_scale'] = 'dynamic'
 
     # register hooks
     runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config,
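
For illustration, here is a minimal standalone sketch (not part of the diff) contrasting the old and new derivation of `cfg.optimizer_config` on an NPU without full-precision support. The helper names `old_behavior`/`new_behavior` and the `grad_clip` example key are hypothetical, and a plain dict stands in for the mmcv Config object:

```python
# Sketch of the behavior change; a plain dict replaces the mmcv Config object.

def old_behavior(optimizer_config):
    """Old logic: a pre-existing optimizer_config is returned untouched, so the
    Fp16OptimizerHook is silently skipped whenever the config already sets
    anything (e.g. grad_clip)."""
    fp16_config = dict(type='Fp16OptimizerHook', loss_scale='dynamic')
    return fp16_config if not optimizer_config else optimizer_config


def new_behavior(optimizer_config):
    """New logic: always force the Fp16OptimizerHook type and dynamic loss
    scale while keeping any other keys (such as grad_clip)."""
    optimizer_config = dict(optimizer_config or {})
    optimizer_config['type'] = 'Fp16OptimizerHook'
    optimizer_config['loss_scale'] = 'dynamic'
    return optimizer_config


if __name__ == '__main__':
    existing = dict(grad_clip=dict(max_norm=1.0))
    print(old_behavior(existing))  # grad_clip only -> FP16 hook lost
    print(new_behavior(existing))  # FP16 hook enforced, grad_clip preserved
```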