zero-mAP fix 3 (#9058)
* zero-mAP fix 3
Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
* Update torch_utils.py
Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
* [pre-commit.ci] auto fixes from pre-commit.com hooks — for more information, see https://pre-commit.ci
* Update torch_utils.py
Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
pull/9057/head
parent
841f312f93
commit
27fb6fd8fc
|
@ -412,7 +412,6 @@ class ModelEMA:
|
|||
for p in self.ema.parameters():
|
||||
p.requires_grad_(False)
|
||||
|
||||
@smart_inference_mode()
|
||||
def update(self, model):
|
||||
# Update EMA parameters
|
||||
self.updates += 1
|
||||
|
@ -423,7 +422,7 @@ class ModelEMA:
|
|||
if v.dtype.is_floating_point: # true for FP16 and FP32
|
||||
v *= d
|
||||
v += (1 - d) * msd[k].detach()
|
||||
assert v.dtype == msd[k].dtype == torch.float32, f'EMA {v.dtype} and model {msd[k]} must be updated in FP32'
|
||||
assert v.dtype == msd[k].detach().dtype == torch.float32, f'EMA {v.dtype} and model {msd[k]} must both be FP32'
|
||||
|
||||
def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):
|
||||
# Update EMA attributes
|
||||
|
|
Loading…
Reference in New Issue