[Fix] Fix the deprecation of np.float and fix CI configuration (#2451)
## Motivation

1. NumPy 1.24 removed the deprecated aliases np.object, np.bool, np.float, np.complex, np.str, and np.int (https://numpy.org/devdocs/release/1.24.0-notes.html).
2. timm (from v0.6.11) requires pytorch>=1.7, so the timm unit tests are skipped on pytorch 1.5 and 1.6.
3. Remove the Pillow installation step, since torchvision < 0.5 is no longer tested.

## Modification

1. np.float -> np.float32
2. if: ${{matrix.torch >= '1.7.0'}} -> if: ${{matrix.torch != '1.5.1+cu101' && matrix.torch != '1.6.0+cu101'}} (and the analogous check without the +cu101 suffix elsewhere in the workflow), because version strings do not compare numerically ('1.10' sorts before '1.7'); see the sketch below.
3. Remove the Pillow installation step.
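For reference, a minimal Python sketch of both pitfalls (illustrative only, not part of the diff below; the array shape and version strings are made up):

```python
import numpy as np

# 1. The np.float alias: accessing it on NumPy >= 1.24 raises AttributeError,
#    which is why the tests switch to the concrete np.float32 dtype.
try:
    total_mat = np.zeros((2, 2), dtype=np.float)    # fails on NumPy >= 1.24
except AttributeError:
    total_mat = np.zeros((2, 2), dtype=np.float32)  # concrete dtype used in this PR

# 2. Plain string comparison orders versions lexicographically, which is the
#    pitfall the Modification note refers to: '1.10' sorts before '1.7', so a
#    check like torch >= '1.7.0' mis-handles newer torch versions.
assert '1.10.0' < '1.7.0'          # True as strings, wrong as versions
assert not ('1.10.0' >= '1.7.0')   # such a condition would wrongly skip timm tests
```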
parent c0515a1be5 · commit 632fb9b651
@@ -71,13 +71,13 @@ jobs:
  coverage xml
  coverage report -m
  # timm from v0.6.11 requires torch>=1.7
- if: ${{matrix.torch >= '1.7.0'}}
+ if: ${{matrix.torch != '1.5.1' && matrix.torch != '1.6.0'}}
  - name: Skip timm unittests and generate coverage report
  run: |
  coverage run --branch --source mmseg -m pytest tests/ --ignore tests/test_models/test_backbones/test_timm_backbone.py
  coverage xml
  coverage report -m
- if: ${{matrix.torch < '1.7.0'}}
+ if: ${{matrix.torch == '1.5.1' || matrix.torch == '1.6.0'}}

  build_cuda101:
  runs-on: ubuntu-18.04
@@ -123,15 +123,13 @@ jobs:
  apt-get update && apt-get install -y libgl1-mesa-glx ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 python${{matrix.python-version}}-dev
  apt-get clean
  rm -rf /var/lib/apt/lists/*
- - name: Install Pillow
- run: python -m pip install Pillow==6.2.2
- if: ${{matrix.torchvision < 0.5}}
  - name: Install PyTorch
  run: python -m pip install torch==${{matrix.torch}} torchvision==${{matrix.torchvision}} -f https://download.pytorch.org/whl/torch_stable.html
  - name: Install mmseg dependencies
  run: |
  python -V
- python -m pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu101/${{matrix.torch_version}}/index.html
+ python -m pip install -U openmim
+ mim install mmcv-full
  python -m pip install -r requirements.txt
  python -c 'import mmcv; print(mmcv.__version__)'
  - name: Build and install
@@ -146,13 +144,13 @@ jobs:
  coverage xml
  coverage report -m
  # timm from v0.6.11 requires torch>=1.7
- if: ${{matrix.torch >= '1.7.0'}}
+ if: ${{matrix.torch != '1.5.1+cu101' && matrix.torch != '1.6.0+cu101'}}
  - name: Skip timm unittests and generate coverage report
  run: |
  coverage run --branch --source mmseg -m pytest tests/ --ignore tests/test_models/test_backbones/test_timm_backbone.py
  coverage xml
  coverage report -m
- if: ${{matrix.torch < '1.7.0'}}
+ if: ${{matrix.torch == '1.5.1+cu101' || matrix.torch == '1.6.0+cu101'}}
  - name: Upload coverage to Codecov
  uses: codecov/codecov-action@v1.0.10
  with:
@@ -163,10 +161,12 @@ jobs:
  fail_ci_if_error: false

  build_cuda102:
+ env:
+ LC_ALL: C.UTF-8
+ LANG: C.UTF-8
  runs-on: ubuntu-18.04
  container:
  image: pytorch/pytorch:1.9.0-cuda10.2-cudnn7-devel

  strategy:
  matrix:
  python-version: [3.6, 3.7, 3.8, 3.9]
@@ -175,7 +175,6 @@ jobs:
  - torch: 1.9.0+cu102
  torch_version: torch1.9
  torchvision: 0.10.0+cu102

  steps:
  - uses: actions/checkout@v2
  - name: Set up Python ${{ matrix.python-version }}
@@ -191,15 +190,13 @@ jobs:
  apt-get update && apt-get install -y libgl1-mesa-glx ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6
  apt-get clean
  rm -rf /var/lib/apt/lists/*
- - name: Install Pillow
- run: python -m pip install Pillow==6.2.2
- if: ${{matrix.torchvision < 0.5}}
  - name: Install PyTorch
  run: python -m pip install torch==${{matrix.torch}} torchvision==${{matrix.torchvision}} -f https://download.pytorch.org/whl/torch_stable.html
  - name: Install mmseg dependencies
  run: |
  python -V
- python -m pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu102/${{matrix.torch_version}}/index.html
+ python -m pip install openmim
+ mim install mmcv-full
  python -m pip install -r requirements.txt
  python -c 'import mmcv; print(mmcv.__version__)'
  - name: Build and install
@@ -256,15 +253,13 @@ jobs:
  run: python -m pip install torch==${{matrix.torch}} torchvision==${{matrix.torchvision}} -f https://download.pytorch.org/whl/torch_stable.html
  - name: Install system dependencies
  run: apt-get update && apt-get install -y ffmpeg libturbojpeg ninja-build
- - name: Install Pillow
- run: python -m pip install Pillow==6.2.2
- if: ${{matrix.torchvision < 0.5}}
  - name: Install PyTorch
  run: python -m pip install torch==${{matrix.torch}} torchvision==${{matrix.torchvision}} -f https://download.pytorch.org/whl/torch_stable.html
  - name: Install mmseg dependencies
  run: |
  python -V
- python -m pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu116/${{matrix.torch_version}}/index.html
+ python -m pip install openmim
+ mim install mmcv-full
  python -m pip install -r requirements.txt
  python -c 'import mmcv; print(mmcv.__version__)'
  - name: Build and install
@@ -301,14 +296,15 @@ jobs:
  run: pip install torch==1.8.2+${{ matrix.platform }} torchvision==0.9.2+${{ matrix.platform }} -f https://download.pytorch.org/whl/lts/1.8/torch_lts.html
  - name: Install MMCV
  run: |
- pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cpu/torch1.8/index.html --only-binary mmcv-full
+ pip install -U openmim
+ mim install mmcv-full
  - name: Install unittest dependencies
  run: pip install -r requirements/tests.txt -r requirements/optional.txt
  - name: Build and install
  run: pip install -e .
  - name: Run unittests
  run: |
- python -m pip install 'timm<0.6.11'
+ python -m pip install timm
  coverage run --branch --source mmseg -m pytest tests/
  - name: Generate coverage report
  run: |

@@ -31,7 +31,7 @@ def get_confusion_matrix(pred_label, label, num_classes, ignore_index):
  def legacy_mean_iou(results, gt_seg_maps, num_classes, ignore_index):
  num_imgs = len(results)
  assert len(gt_seg_maps) == num_imgs
- total_mat = np.zeros((num_classes, num_classes), dtype=np.float)
+ total_mat = np.zeros((num_classes, num_classes), dtype=np.float32)
  for i in range(num_imgs):
  mat = get_confusion_matrix(
  results[i], gt_seg_maps[i], num_classes, ignore_index=ignore_index)
@@ -48,7 +48,7 @@ def legacy_mean_iou(results, gt_seg_maps, num_classes, ignore_index):
  def legacy_mean_dice(results, gt_seg_maps, num_classes, ignore_index):
  num_imgs = len(results)
  assert len(gt_seg_maps) == num_imgs
- total_mat = np.zeros((num_classes, num_classes), dtype=np.float)
+ total_mat = np.zeros((num_classes, num_classes), dtype=np.float32)
  for i in range(num_imgs):
  mat = get_confusion_matrix(
  results[i], gt_seg_maps[i], num_classes, ignore_index=ignore_index)
@@ -69,7 +69,7 @@ def legacy_mean_fscore(results,
  beta=1):
  num_imgs = len(results)
  assert len(gt_seg_maps) == num_imgs
- total_mat = np.zeros((num_classes, num_classes), dtype=np.float)
+ total_mat = np.zeros((num_classes, num_classes), dtype=np.float32)
  for i in range(num_imgs):
  mat = get_confusion_matrix(
  results[i], gt_seg_maps[i], num_classes, ignore_index=ignore_index)
@@ -100,7 +100,7 @@ def test_metrics():
  'IoU']
  all_acc_l, acc_l, iou_l = legacy_mean_iou(results, label, num_classes,
  ignore_index)
- assert all_acc == all_acc_l
+ assert np.allclose(all_acc, all_acc_l)
  assert np.allclose(acc, acc_l)
  assert np.allclose(iou, iou_l)
  # Test the correctness of the implementation of mDice calculation.
@@ -110,7 +110,7 @@ def test_metrics():
  'Dice']
  all_acc_l, acc_l, dice_l = legacy_mean_dice(results, label, num_classes,
  ignore_index)
- assert all_acc == all_acc_l
+ assert np.allclose(all_acc, all_acc_l)
  assert np.allclose(acc, acc_l)
  assert np.allclose(dice, dice_l)
  # Test the correctness of the implementation of mDice calculation.
@@ -120,7 +120,7 @@ def test_metrics():
  'Recall'], ret_metrics['Precision'], ret_metrics['Fscore']
  all_acc_l, recall_l, precision_l, fscore_l = legacy_mean_fscore(
  results, label, num_classes, ignore_index)
- assert all_acc == all_acc_l
+ assert np.allclose(all_acc, all_acc_l)
  assert np.allclose(recall, recall_l)
  assert np.allclose(precision, precision_l)
  assert np.allclose(fscore, fscore_l)
@@ -135,7 +135,7 @@ def test_metrics():
  'aAcc'], ret_metrics['Acc'], ret_metrics['IoU'], ret_metrics[
  'Dice'], ret_metrics['Precision'], ret_metrics[
  'Recall'], ret_metrics['Fscore']
- assert all_acc == all_acc_l
+ assert np.allclose(all_acc, all_acc_l)
  assert np.allclose(acc, acc_l)
  assert np.allclose(iou, iou_l)
  assert np.allclose(dice, dice_l)
@@ -228,7 +228,7 @@ def test_mean_iou():
  'IoU']
  all_acc_l, acc_l, iou_l = legacy_mean_iou(results, label, num_classes,
  ignore_index)
- assert all_acc == all_acc_l
+ assert np.allclose(all_acc, all_acc_l)
  assert np.allclose(acc, acc_l)
  assert np.allclose(iou, iou_l)

@@ -254,7 +254,7 @@ def test_mean_dice():
  'Dice']
  all_acc_l, acc_l, dice_l = legacy_mean_dice(results, label, num_classes,
  ignore_index)
- assert all_acc == all_acc_l
+ assert np.allclose(all_acc, all_acc_l)
  assert np.allclose(acc, acc_l)
  assert np.allclose(iou, dice_l)

@@ -280,7 +280,7 @@ def test_mean_fscore():
  'Recall'], ret_metrics['Precision'], ret_metrics['Fscore']
  all_acc_l, recall_l, precision_l, fscore_l = legacy_mean_fscore(
  results, label, num_classes, ignore_index)
- assert all_acc == all_acc_l
+ assert np.allclose(all_acc, all_acc_l)
  assert np.allclose(recall, recall_l)
  assert np.allclose(precision, precision_l)
  assert np.allclose(fscore, fscore_l)
@@ -291,7 +291,7 @@ def test_mean_fscore():
  'Recall'], ret_metrics['Precision'], ret_metrics['Fscore']
  all_acc_l, recall_l, precision_l, fscore_l = legacy_mean_fscore(
  results, label, num_classes, ignore_index, beta=2)
- assert all_acc == all_acc_l
+ assert np.allclose(all_acc, all_acc_l)
  assert np.allclose(recall, recall_l)
  assert np.allclose(precision, precision_l)
  assert np.allclose(fscore, fscore_l)
@@ -346,6 +346,6 @@ def test_filename_inputs():
  'Acc'], ret_metrics['IoU']
  all_acc_l, acc_l, iou_l = legacy_mean_iou(results, labels, num_classes,
  ignore_index)
- assert all_acc == all_acc_l
+ assert np.allclose(all_acc, all_acc_l)
  assert np.allclose(acc, acc_l)
  assert np.allclose(iou, iou_l)