Disable CircleCI builds (#3477)

Summary:
Pull Request resolved: https://github.com/facebookresearch/faiss/pull/3477

AVX-512 builds must remain on CircleCI until GitHub provides runners with AVX-512 support (ETA: Q1 2025).
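For reference, a CI job could guard the AVX-512 build with a quick CPU-flag check before dispatching it. This is a sketch only; the guard step is not part of this change, and the exit behavior is an assumption:

    # Hypothetical pre-flight check on a Linux runner: avx512f is the
    # foundation flag that any AVX-512-capable CPU reports in /proc/cpuinfo.
    if grep -q -m1 avx512f /proc/cpuinfo; then
        echo "AVX-512 available; the avx512 opt_level build can run on this runner."
    else
        echo "No AVX-512 support on this runner; skipping the avx512 build." >&2
        exit 1
    fi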

Reviewed By: algoriddle

Differential Revision: D57707621

fbshipit-source-id: e8a0885f8363cf8f20854cccca3ec0adc946362b
Ramil Bakhshyiev 2024-05-23 06:42:48 -07:00 committed by Facebook GitHub Bot
parent 93bc9b6470
commit eec4cba025
1 changed file with 12 additions and 421 deletions


@@ -5,190 +5,8 @@ executors:
docker:
- image: continuumio/miniconda3
resource_class: large
linux-x86_64-gpu:
environment:
CONDA_ARCH: Linux-x86_64
machine:
image: linux-cuda-12:default
resource_class: gpu.nvidia.medium
linux-arm64-cpu:
environment:
CONDA_ARCH: Linux-aarch64
machine:
image: ubuntu-2204:current
resource_class: arm.medium
macosx-arm64-cpu:
environment:
CONDA_ARCH: MacOSX-arm64
macos:
xcode: 14.2.0 # minimum supported for M1
resource_class: macos.m1.large.gen1
windows-x86_64-cpu:
machine:
image: windows-server-2019-vs2019:2023.04.1
shell: bash.exe
resource_class: windows.medium
jobs:
format:
docker:
- image: ubuntu:22.04
steps:
- checkout
- run:
name: Install clang-format
command: |
apt-get update -y
apt-get install -y wget
apt install -y lsb-release wget software-properties-common gnupg
wget https://apt.llvm.org/llvm.sh
chmod u+x llvm.sh
./llvm.sh 18
apt-get install -y git-core clang-format-18
- run:
name: Verify clang-format
command: |
git ls-files | grep -E '\.(cpp|h|cu|cuh)$' | xargs clang-format-18 -i
if git diff --quiet; then
echo "Formatting OK!"
else
echo "Formatting not OK!"
echo "------------------"
git --no-pager diff --color
exit 1
fi
build_conda:
parameters:
label:
type: string
default: ""
cuda:
type: string
default: ""
raft:
type: string
default: ""
cuda_archs:
type: string
default: ""
compiler_version:
type: string
default: ""
exec:
type: executor
executor: << parameters.exec >>
environment:
OMP_NUM_THREADS: 10
PACKAGE_TYPE: <<parameters.label>>
CUDA_ARCHS: <<parameters.cuda_archs>>
steps:
- checkout
- run:
name: Install conda
command: |
if [ -n "${CONDA_ARCH}" ]
then
curl https://repo.anaconda.com/miniconda/Miniconda3-latest-${CONDA_ARCH}.sh --output miniconda.sh
bash miniconda.sh -b -p $HOME/miniconda
~/miniconda/bin/conda init
fi
- run:
name: Install conda build tools
command: |
# conda config --set solver libmamba
# conda config --set verbosity 3
conda update -y -q conda
conda install -y -q conda-build
- when:
condition: << parameters.label >>
steps:
- run:
name: Enable anaconda uploads
command: |
conda install -y -q anaconda-client
conda config --set anaconda_upload yes
- when:
condition:
and:
- not: << parameters.label >>
- not: << parameters.cuda >>
steps:
- run:
name: Conda build (CPU)
no_output_timeout: 30m
command: |
cd conda
conda build faiss --python 3.11 -c pytorch
- when:
condition:
and:
- << parameters.label >>
- not: << parameters.cuda >>
steps:
- run:
name: Conda build (CPU) w/ anaconda upload
no_output_timeout: 30m
command: |
cd conda
conda build faiss --user pytorch --label <<parameters.label>> -c pytorch
- when:
condition:
and:
- not: << parameters.label >>
- << parameters.cuda >>
- not: << parameters.raft >>
steps:
- run:
name: Conda build (GPU)
no_output_timeout: 60m
command: |
cd conda
conda build faiss-gpu --variants '{ "cudatoolkit": "<<parameters.cuda>>", "c_compiler_version": "<<parameters.compiler_version>>", "cxx_compiler_version": "<<parameters.compiler_version>>" }' \
-c pytorch -c nvidia/label/cuda-<<parameters.cuda>> -c nvidia
- when:
condition:
and:
- << parameters.label >>
- << parameters.cuda >>
- not: << parameters.raft >>
steps:
- run:
name: Conda build (GPU) w/ anaconda upload
no_output_timeout: 60m
command: |
cd conda
conda build faiss-gpu --variants '{ "cudatoolkit": "<<parameters.cuda>>", "c_compiler_version": "<<parameters.compiler_version>>", "cxx_compiler_version": "<<parameters.compiler_version>>" }' \
--user pytorch --label <<parameters.label>> -c pytorch -c nvidia/label/cuda-<<parameters.cuda>> -c nvidia
- when:
condition:
and:
- not: << parameters.label >>
- << parameters.cuda >>
- << parameters.raft >>
steps:
- run:
name: Conda build (GPU w/ RAFT)
no_output_timeout: 60m
command: |
cd conda
conda build faiss-gpu-raft --variants '{ "cudatoolkit": "<<parameters.cuda>>", "c_compiler_version": "<<parameters.compiler_version>>", "cxx_compiler_version": "<<parameters.compiler_version>>" }' \
-c pytorch -c nvidia/label/cuda-<<parameters.cuda>> -c nvidia -c rapidsai -c rapidsai-nightly -c conda-forge
- when:
condition:
and:
- << parameters.label >>
- << parameters.cuda >>
- << parameters.raft >>
steps:
- run:
name: Conda build (GPU w/ RAFT) w/ anaconda upload
no_output_timeout: 60m
command: |
cd conda
conda build faiss-gpu-raft --variants '{ "cudatoolkit": "<<parameters.cuda>>", "c_compiler_version": "<<parameters.compiler_version>>", "cxx_compiler_version": "<<parameters.compiler_version>>" }' \
--user pytorch --label <<parameters.label>> -c pytorch -c nvidia/label/cuda-<<parameters.cuda>> -c nvidia -c rapidsai -c rapidsai-nightly -c conda-forge
build_cmake:
parameters:
exec:
@@ -196,12 +14,6 @@ jobs:
opt_level:
type: string
default: generic
gpu:
type: string
default: "OFF"
raft:
type: string
default: "OFF"
executor: << parameters.exec >>
environment:
OMP_NUM_THREADS: 10
@@ -222,32 +34,10 @@ jobs:
command: |
conda config --set solver libmamba
conda update -y -q conda
- when:
condition:
equal: [ "OFF", << parameters.raft >> ]
steps:
- run:
name: Install env using main channel
command: |
conda install -y -q python=3.11 cmake make swig mkl=2023 mkl-devel=2023 numpy scipy pytest gxx_linux-64=11.2 sysroot_linux-64
- when:
condition:
equal: [ "ON", << parameters.raft >> ]
steps:
- run:
name: Install env using conda-forge channel
command: |
conda install -y -q python=3.11 cmake make swig mkl=2023 mkl-devel=2023 numpy scipy pytest gxx_linux-64=11.2 sysroot_linux-64=2.28 libraft cuda-version=11.8 cuda-toolkit -c rapidsai-nightly -c "nvidia/label/cuda-11.8.0" -c conda-forge
- when:
condition:
and:
- equal: [ "ON", << parameters.gpu >> ]
- equal: [ "OFF", << parameters.raft >> ]
steps:
- run:
name: Install CUDA
command: |
conda install -y -q cuda-toolkit -c "nvidia/label/cuda-11.8.0"
- run:
name: Install env using main channel
command: |
conda install -y -q python=3.11 cmake make swig mkl=2023 mkl-devel=2023 numpy scipy pytest gxx_linux-64=11.2 sysroot_linux-64
- run:
name: Build all targets
no_output_timeout: 30m
@@ -257,8 +47,8 @@ jobs:
cmake -B build \
-DBUILD_TESTING=ON \
-DBUILD_SHARED_LIBS=ON \
-DFAISS_ENABLE_GPU=<< parameters.gpu >> \
-DFAISS_ENABLE_RAFT=<< parameters.raft >> \
-DFAISS_ENABLE_GPU=OFF \
-DFAISS_ENABLE_RAFT=OFF \
-DFAISS_OPT_LEVEL=<< parameters.opt_level >> \
-DFAISS_ENABLE_C_API=ON \
-DPYTHON_EXECUTABLE=$(which python) \
@@ -277,38 +67,12 @@ jobs:
command: |
cd build/faiss/python
python setup.py install
- when:
condition:
equal: [ "OFF", << parameters.gpu >> ]
steps:
- run:
name: Python tests (CPU only)
command: |
conda install -y -q pytorch -c pytorch
pytest --junitxml=test-results/pytest/results.xml tests/test_*.py
pytest --junitxml=test-results/pytest/results-torch.xml tests/torch_*.py
- when:
condition:
equal: [ "ON", << parameters.gpu >> ]
steps:
- run:
name: Python tests (CPU + GPU)
command: |
conda install -y -q pytorch pytorch-cuda=11.8 -c pytorch -c nvidia/label/cuda-11.8.0
pytest --junitxml=test-results/pytest/results.xml tests/test_*.py
pytest --junitxml=test-results/pytest/results-torch.xml tests/torch_*.py
cp tests/common_faiss_tests.py faiss/gpu/test
pytest --junitxml=test-results/pytest/results-gpu.xml faiss/gpu/test/test_*.py
pytest --junitxml=test-results/pytest/results-gpu-torch.xml faiss/gpu/test/torch_*.py
- when:
condition:
equal: [ "avx2", << parameters.opt_level >> ]
steps:
- run:
name: Test avx2 loading
command: |
FAISS_DISABLE_CPU_FEATURES=AVX2 LD_DEBUG=libs python -c "import faiss" 2>&1 | grep faiss.so
LD_DEBUG=libs python -c "import faiss" 2>&1 | grep faiss_avx2.so
- run:
name: Python tests (CPU only)
command: |
conda install -y -q pytorch -c pytorch
pytest --junitxml=test-results/pytest/results.xml tests/test_*.py
pytest --junitxml=test-results/pytest/results-torch.xml tests/torch_*.py
- store_test_results:
path: test-results
@@ -316,180 +80,7 @@ workflows:
version: 2
build:
jobs:
- format:
name: Format
- build_cmake:
name: Linux x86_64 (cmake)
exec: linux-x86_64-cpu
- build_cmake:
name: Linux x86_64 AVX2 (cmake)
exec: linux-x86_64-cpu
opt_level: "avx2"
- build_cmake:
name: Linux x86_64 AVX512 (cmake)
exec: linux-x86_64-cpu
opt_level: "avx512"
- build_cmake:
name: Linux x86_64 GPU (cmake)
exec: linux-x86_64-gpu
gpu: "ON"
requires:
- Linux x86_64 AVX2 (cmake)
- build_cmake:
name: Linux x86_64 GPU w/ RAFT (cmake)
exec: linux-x86_64-gpu
gpu: "ON"
raft: "ON"
requires:
- Linux x86_64 GPU (cmake)
- build_conda:
name: Linux x86_64 (conda)
exec: linux-x86_64-cpu
- build_conda:
name: Windows x86_64 (conda)
exec: windows-x86_64-cpu
- build_conda:
name: Linux arm64 (conda)
exec: linux-arm64-cpu
- build_conda:
name: Linux x86_64 packages
exec: linux-x86_64-cpu
label: main
filters:
tags:
only: /^v.*/
branches:
ignore: /.*/
- build_conda:
name: Linux x86_64 GPU packages (CUDA 11.4.4)
exec: linux-x86_64-gpu
label: main
cuda: "11.4.4"
cuda_archs: "60-real;61-real;62-real;70-real;72-real;75-real;80;86-real"
compiler_version: "11.2"
filters:
tags:
only: /^v.*/
branches:
ignore: /.*/
- build_conda:
name: Linux x86_64 GPU w/ RAFT packages (CUDA 11.8.0)
exec: linux-x86_64-gpu
label: main
raft: "ON"
cuda: "11.8.0"
cuda_archs: "70-real;72-real;75-real;80;86-real"
compiler_version: "11.2"
filters:
tags:
only: /^v.*/
branches:
ignore: /.*/
- build_conda:
name: Linux x86_64 GPU packages (CUDA 12.1.1)
exec: linux-x86_64-gpu
label: main
cuda: "12.1.1"
cuda_archs: "70-real;72-real;75-real;80;86-real"
compiler_version: "11.2"
filters:
tags:
only: /^v.*/
branches:
ignore: /.*/
- build_conda:
name: Linux x86_64 GPU w/ RAFT packages (CUDA 12.1.1)
exec: linux-x86_64-gpu
label: main
raft: "ON"
cuda: "12.1.1"
cuda_archs: "70-real;72-real;75-real;80;86-real"
compiler_version: "11.2"
filters:
tags:
only: /^v.*/
branches:
ignore: /.*/
- build_conda:
name: Windows x86_64 packages
exec: windows-x86_64-cpu
label: main
filters:
tags:
only: /^v.*/
branches:
ignore: /.*/
- build_conda:
name: OSX arm64 packages
exec: macosx-arm64-cpu
label: main
filters:
tags:
only: /^v.*/
branches:
ignore: /.*/
- build_conda:
name: Linux arm64 packages
exec: linux-arm64-cpu
label: main
filters:
tags:
only: /^v.*/
branches:
ignore: /.*/
nightly:
triggers:
- schedule:
cron: "0 0 * * *"
filters:
branches:
only:
- main
jobs:
- build_conda:
name: Linux x86_64 nightlies
exec: linux-x86_64-cpu
label: nightly
- build_conda:
name: Linux x86_64 GPU nightlies (CUDA 11.4.4)
exec: linux-x86_64-gpu
label: nightly
cuda: "11.4.4"
cuda_archs: "60-real;61-real;62-real;70-real;72-real;75-real;80;86-real"
compiler_version: "11.2"
- build_conda:
name: Linux x86_64 GPU w/ RAFT nightlies (CUDA 11.8.0)
exec: linux-x86_64-gpu
label: nightly
raft: "ON"
cuda: "11.8.0"
cuda_archs: "70-real;72-real;75-real;80;86-real"
compiler_version: "11.2"
- build_conda:
name: Linux x86_64 GPU nightlies (CUDA 12.1.1)
exec: linux-x86_64-gpu
label: nightly
cuda: "12.1.1"
cuda_archs: "70-real;72-real;75-real;80;86-real"
compiler_version: "11.2"
- build_conda:
name: Linux x86_64 GPU w/ RAFT nightlies (CUDA 12.1.1)
exec: linux-x86_64-gpu
label: nightly
raft: "ON"
cuda: "12.1.1"
cuda_archs: "70-real;72-real;75-real;80;86-real"
compiler_version: "11.2"
- build_conda:
name: Windows x86_64 nightlies
exec: windows-x86_64-cpu
label: nightly
- build_conda:
name: OSX arm64 nightlies
exec: macosx-arm64-cpu
label: nightly
- build_conda:
name: Linux arm64 nightlies
exec: linux-arm64-cpu
label: nightly
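Note: the removed "Test avx2 loading" step above verifies that the generic faiss.so loads when AVX2 is masked via FAISS_DISABLE_CPU_FEATURES and that faiss_avx2.so is picked up otherwise. An analogous check for the AVX-512 build that stays on CircleCI could look like the sketch below; the AVX512 feature token and the faiss_avx512.so library name are assumptions modeled on the AVX2 variant, not part of this diff:

    # Hypothetical AVX-512 loading check, mirroring the removed AVX2 one.
    # With nothing masked on an AVX-512 machine, the avx512 variant should load.
    LD_DEBUG=libs python -c "import faiss" 2>&1 | grep faiss_avx512.so
    # Masking AVX-512 should make the loader fall back to the AVX2 build.
    FAISS_DISABLE_CPU_FEATURES=AVX512 LD_DEBUG=libs python -c "import faiss" 2>&1 | grep faiss_avx2.so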