# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch

from mmcv.device._functions import Scatter, scatter
from mmcv.utils import IS_MLU_AVAILABLE, IS_MPS_AVAILABLE, IS_NPU_AVAILABLE


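# A note on the expected contract (inferred from the assertions below, not
# from separate documentation): `scatter` returns the input unchanged when
# the device list is [-1] (CPU) and otherwise copies each tensor to the
# target device, while `Scatter.forward` additionally wraps its result in a
# tuple.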
def test_scatter():
    # if the device is CPU, just return the input
    input = torch.zeros([1, 3, 3, 3])
    output = scatter(input=input, devices=[-1])
    assert torch.allclose(input, output)

    inputs = [torch.zeros([1, 3, 3, 3]), torch.zeros([1, 4, 4, 4])]
    outputs = scatter(input=inputs, devices=[-1])
    for input, output in zip(inputs, outputs):
        assert torch.allclose(input, output)

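    # The branches below are guarded by availability flags from mmcv.utils,
    # so each one runs only on a host that actually has that accelerator;
    # on a CPU-only machine all of them are skipped.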
    # if the device is MLU, copy the input from CPU to MLU
    if IS_MLU_AVAILABLE:
        input = torch.zeros([1, 3, 3, 3])
        output = scatter(input=input, devices=[0])
        assert torch.allclose(input.to('mlu'), output)

        inputs = [torch.zeros([1, 3, 3, 3]), torch.zeros([1, 4, 4, 4])]
        outputs = scatter(input=inputs, devices=[0])
        for input, output in zip(inputs, outputs):
            assert torch.allclose(input.to('mlu'), output)

    # if the device is NPU, copy the input from CPU to NPU
    if IS_NPU_AVAILABLE:
        input = torch.zeros([1, 3, 3, 3])
        output = scatter(input=input, devices=[0])
        assert torch.allclose(input.to('npu'), output)

        inputs = [torch.zeros([1, 3, 3, 3]), torch.zeros([1, 4, 4, 4])]
        outputs = scatter(input=inputs, devices=[0])
        for input, output in zip(inputs, outputs):
            assert torch.allclose(input.to('npu'), output)

    # if the device is MPS, copy the input from CPU to MPS
    if IS_MPS_AVAILABLE:
        input = torch.zeros([1, 3, 3, 3])
        output = scatter(input=input, devices=[0])
        assert torch.allclose(input.to('mps'), output)

        inputs = [torch.zeros([1, 3, 3, 3]), torch.zeros([1, 4, 4, 4])]
        outputs = scatter(input=inputs, devices=[0])
        for input, output in zip(inputs, outputs):
            assert torch.allclose(input.to('mps'), output)

    # the input should be a tensor or a list of tensors
    with pytest.raises(Exception):
        scatter(5, [-1])


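# `Scatter.forward(target_devices, input)` takes the device list first and,
# unlike the bare `scatter` call above, always returns a tuple, even for a
# single input tensor; the `isinstance(outputs, tuple)` checks below pin
# that down.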
def test_Scatter():
    # if the device is CPU, just return the input
    target_devices = [-1]
    input = torch.zeros([1, 3, 3, 3])
    outputs = Scatter.forward(target_devices, input)
    assert isinstance(outputs, tuple)
    assert torch.allclose(input, outputs[0])

    target_devices = [-1]
    inputs = [torch.zeros([1, 3, 3, 3]), torch.zeros([1, 4, 4, 4])]
    outputs = Scatter.forward(target_devices, inputs)
    assert isinstance(outputs, tuple)
    for input, output in zip(inputs, outputs):
        assert torch.allclose(input, output)

    # if the device is MLU, copy the input from CPU to MLU
    if IS_MLU_AVAILABLE:
        target_devices = [0]
        input = torch.zeros([1, 3, 3, 3])
        outputs = Scatter.forward(target_devices, input)
        assert isinstance(outputs, tuple)
        assert torch.allclose(input.to('mlu'), outputs[0])

        target_devices = [0]
        inputs = [torch.zeros([1, 3, 3, 3]), torch.zeros([1, 4, 4, 4])]
        outputs = Scatter.forward(target_devices, inputs)
        assert isinstance(outputs, tuple)
        for input, output in zip(inputs, outputs):
            assert torch.allclose(input.to('mlu'), output[0])

    # if the device is MPS, copy the input from CPU to MPS
    if IS_MPS_AVAILABLE:
        target_devices = [0]
        input = torch.zeros([1, 3, 3, 3])
        outputs = Scatter.forward(target_devices, input)
        assert isinstance(outputs, tuple)
        assert torch.allclose(input.to('mps'), outputs[0])

        target_devices = [0]
        inputs = [torch.zeros([1, 3, 3, 3]), torch.zeros([1, 4, 4, 4])]
        outputs = Scatter.forward(target_devices, inputs)
        assert isinstance(outputs, tuple)
        for input, output in zip(inputs, outputs):
            assert torch.allclose(input.to('mps'), output[0])
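

# Usage note: these are plain pytest tests; run them with e.g.
# `python -m pytest tests/test_device/test_functions.py -v` (the path is an
# assumption about where this file lives in the mmcv repo).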