Mirror of https://github.com/open-mmlab/mmclassification.git, synced 2025-06-03 21:53:55 +08:00
* [Feat] Migrate blip caption to mmpretrain. (#50)
* Migrate blip caption to mmpretrain
* minor fix
* support train
* [Feature] Support OFA caption task. (#51)
* [Feature] Support OFA caption task.
* Remove duplicated files.
* [Feature] Support OFA vqa task. (#58)
* [Feature] Support OFA vqa task.
* Fix lint.
* [Feat] Add BLIP retrieval to mmpretrain. (#55)
* init
* minor fix for train
* fix according to comments
* refactor
* Update Blip retrieval. (#62)
* [Feature] Support OFA visual grounding task. (#59)
* [Feature] Support OFA visual grounding task.
* minor add TODO
---------
Co-authored-by: yingfhu <yingfhu@gmail.com>
* [Feat] Add flamingos coco caption and vqa. (#60)
* first init
* init flamingo coco
* add vqa
* minor fix
* remove unnecessary modules
* Update config
* Use `ApplyToList`.
---------
Co-authored-by: mzr1996 <mzr1996@163.com>
* [Feature]: BLIP2 coco retrieval (#53)
* [Feature]: Add blip2 retriever
* [Feature]: Add blip2 all modules
* [Feature]: Refine model
* [Feature]: x1
* [Feature]: Runnable coco ret
* [Feature]: Runnable version
* [Feature]: Fix lint
* [Fix]: Fix lint
* [Feature]: Use 364 img size
* [Feature]: Refactor blip2
* [Fix]: Fix lint
* refactor files
* minor fix
* minor fix
---------
Co-authored-by: yingfhu <yingfhu@gmail.com>
* Remove
* fix blip caption inputs (#68)
* [Feat] Add BLIP NLVR support. (#67)
* first init
* init flamingo coco
* add vqa
* add nlvr
* refactor nlvr
* minor fix
* minor fix
* Update dataset
---------
Co-authored-by: mzr1996 <mzr1996@163.com>
* [Feature]: BLIP2 Caption (#70)
* [Feature]: Add language model
* [Feature]: blip2 caption forward
* [Feature]: Reproduce the results
* [Feature]: Refactor caption
* refine config
---------
Co-authored-by: yingfhu <yingfhu@gmail.com>
* [Feat] Migrate BLIP VQA to mmpretrain (#69)
* reformat
* change
* change
* change
* change
* change
* change
* change
* change
* change
* change
* change
* change
* change
* change
* change
* change
* change
* change
* change
* refactor code
---------
Co-authored-by: yingfhu <yingfhu@gmail.com>
* Update RefCOCO dataset
* [Fix] fix lint
* [Feature] Implement inference APIs for multi-modal tasks. (#65)
* [Feature] Implement inference APIs for multi-modal tasks.
* [Project] Add gradio demo.
* [Improve] Update requirements
* Update flamingo
* Update blip
* Add NLVR inferencer
* Update flamingo
* Update hugging face model register
* Update ofa vqa
* Update BLIP-vqa (#71)
* Update blip-vqa docstring (#72)
* Refine flamingo docstring (#73)
* [Feature]: BLIP2 VQA (#61)
* [Feature]: VQA forward
* [Feature]: Reproduce accuracy
* [Fix]: Fix lint
* [Fix]: Add blank line
* minor fix
---------
Co-authored-by: yingfhu <yingfhu@gmail.com>
* [Feature]: BLIP2 docstring (#74)
* [Feature]: Add caption docstring
* [Feature]: Add docstring to blip2 vqa
* [Feature]: Add docstring to retrieval
* Update BLIP-2 metafile and README (#75)
* [Feature]: Add readme and docstring
* Update blip2 results
---------
Co-authored-by: mzr1996 <mzr1996@163.com>
* [Feature] BLIP Visual Grounding on MMPretrain Branch (#66)
* blip grounding merge with mmpretrain
* remove commit
* blip grounding test and inference api
* refcoco dataset
* refcoco dataset refine config
* rebasing
* gitignore
* rebasing
* minor edit
* minor edit
* Update blip-vqa docstring (#72)
* rebasing
* Revert "minor edit"
  This reverts commit 639cec757c215e654625ed0979319e60f0be9044.
* blip grounding final
* precommit
* refine config
* refine config
* Update blip visual grounding
---------
Co-authored-by: Yiqin Wang 王逸钦 <wyq1217@outlook.com>
Co-authored-by: mzr1996 <mzr1996@163.com>
* Update visual grounding metric
* Update OFA docstring, README and metafiles. (#76)
* [Docs] Update installation docs and gradio demo docs. (#77)
* Update OFA name
* Update Visual Grounding Visualizer
* Integrate accelerate support
* Fix imports.
* Fix timm backbone
* Update imports
* Update README
* Update circle ci
* Update flamingo config
* Add gradio demo README
* [Feature]: Add scienceqa (#1571)
* [Feature]: Add scienceqa
* [Feature]: Change param name
* Update docs
* Update video
---------
Co-authored-by: Hubert <42952108+yingfhu@users.noreply.github.com>
Co-authored-by: yingfhu <yingfhu@gmail.com>
Co-authored-by: Yuan Liu <30762564+YuanLiuuuuuu@users.noreply.github.com>
Co-authored-by: Yiqin Wang 王逸钦 <wyq1217@outlook.com>
Co-authored-by: Rongjie Li <limo97@163.com>
251 lines · 7.6 KiB · YAML
# Use the latest 2.1 version of CircleCI pipeline process engine.
# See: https://circleci.com/docs/2.0/configuration-reference
version: 2.1

# the default pipeline parameters, which will be updated according to
# the results of the path-filtering orb
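# When lint_only is true (the default), only the pr_stage_lint workflow runs;
# the test workflows (pr_stage_test and merge_stage_test) are gated on
# `not lint_only`.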
parameters:
  lint_only:
    type: boolean
    default: true

# Define a job to be invoked later in a workflow.
# See: https://circleci.com/docs/2.0/configuration-reference/#jobs
jobs:
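  # lint: run the pre-commit hooks over the whole repo and check docstring
  # coverage of the mmpretrain package with interrogate (fails below 60%).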
  lint:
    docker:
      - image: cimg/python:3.7.4
    # Add steps to the job
    # See: https://circleci.com/docs/2.0/configuration-reference/#steps
    steps:
      - checkout
      - run:
          name: Install pre-commit hook
          command: |
            pip install pre-commit
            pre-commit install
      - run:
          name: Linting
          command: pre-commit run --all-files
      - run:
          name: Check docstring coverage
          command: |
            pip install interrogate
            interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --ignore-magic --ignore-regex "__repr__" --fail-under 60 mmpretrain
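
  # build_cpu_with_3rdparty: CPU tests that additionally install the optional
  # third-party dependencies timm and transformers.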
  build_cpu_with_3rdparty:
    parameters:
      # The python version must match available image tags in
      # https://circleci.com/developer/images/image/cimg/python
      python:
        type: string
      torch:
        type: string
      torchvision:
        type: string
    docker:
      - image: cimg/python:<< parameters.python >>
    resource_class: large
    steps:
      - checkout
      - run:
          name: Install Libraries
          command: |
            sudo apt-get update
            sudo apt-get install -y libjpeg8-dev zlib1g-dev
      - run:
          name: Configure Python & pip
          command: |
            pip install --upgrade pip
            pip install wheel
      - run:
          name: Install PyTorch
          command: |
            python -V
            pip install torch==<< parameters.torch >>+cpu torchvision==<< parameters.torchvision >>+cpu -f https://download.pytorch.org/whl/torch_stable.html
      - run:
          name: Install mmpretrain dependencies
          command: |
            pip install git+https://github.com/open-mmlab/mmengine.git@main
            pip install -U openmim
            mim install 'mmcv >= 2.0.0rc4'
            pip install timm
            pip install transformers
            pip install -r requirements.txt
            python -c 'import mmcv; print(mmcv.__version__)'
      - run:
          name: Build and install
          command: |
            pip install -e .
      - run:
          name: Run unittests
          command: |
            coverage run --branch --source mmpretrain -m pytest tests/
            coverage xml
            coverage report -m
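
  # build_cpu: CPU tests against only the core requirements (no timm or
  # transformers).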
  build_cpu:
    parameters:
      # The python version must match available image tags in
      # https://circleci.com/developer/images/image/cimg/python
      python:
        type: string
      torch:
        type: string
      torchvision:
        type: string
    docker:
      - image: cimg/python:<< parameters.python >>
    resource_class: large
    steps:
      - checkout
      - run:
          name: Install Libraries
          command: |
            sudo apt-get update
            sudo apt-get install -y libjpeg8-dev zlib1g-dev
      - run:
          name: Configure Python & pip
          command: |
            pip install --upgrade pip
            pip install wheel
      - run:
          name: Install PyTorch
          command: |
            python -V
            pip install torch==<< parameters.torch >>+cpu torchvision==<< parameters.torchvision >>+cpu -f https://download.pytorch.org/whl/torch_stable.html
      - run:
          name: Install mmpretrain dependencies
          command: |
            pip install git+https://github.com/open-mmlab/mmengine.git@main
            pip install -U openmim
            mim install 'mmcv >= 2.0.0rc4'
            pip install -r requirements.txt
            python -c 'import mmcv; print(mmcv.__version__)'
      - run:
          name: Build and install
          command: |
            pip install -e .
      - run:
          name: Run unittests
          command: |
            coverage run --branch --source mmpretrain -m pytest tests/
            coverage xml
            coverage report -m
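
  # build_cuda: GPU tests on a CUDA machine executor; a Docker image is built
  # from .circleci/docker and the unit tests run inside that container.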
  build_cuda:
    machine:
      image: ubuntu-2004-cuda-11.4:202110-01
    resource_class: gpu.nvidia.small
    environment:
      MKL_SERVICE_FORCE_INTEL: 1
    parameters:
      torch:
        type: string
      cuda:
        type: enum
        enum: ["11.1", "11.7"]
      cudnn:
        type: integer
        default: 8
    steps:
      - checkout
      - run:
          # Cloning repos in VM since Docker doesn't have access to the private key
          name: Clone Repos
          command: |
            git clone -b main --depth 1 https://github.com/open-mmlab/mmengine.git /home/circleci/mmengine
      - run:
          name: Build Docker image
          command: |
            docker build .circleci/docker -t mmpretrain:gpu --build-arg PYTORCH=<< parameters.torch >> --build-arg CUDA=<< parameters.cuda >> --build-arg CUDNN=<< parameters.cudnn >>
            docker run --gpus all -t -d -v /home/circleci/project:/mmpretrain -v /home/circleci/mmengine:/mmengine -w /mmpretrain --name mmpretrain mmpretrain:gpu
      - run:
          name: Install mmpretrain dependencies
          command: |
            docker exec mmpretrain pip install -e /mmengine
            docker exec mmpretrain pip install -U openmim
            docker exec mmpretrain mim install 'mmcv >= 2.0.0rc4'
            docker exec mmpretrain pip install -r requirements.txt
            docker exec mmpretrain python -c 'import mmcv; print(mmcv.__version__)'
      - run:
          name: Build and install
          command: |
            docker exec mmpretrain pip install -e .
      - run:
          name: Run unittests
          command: |
            docker exec mmpretrain python -m pytest tests/

# Invoke jobs via workflows
# See: https://circleci.com/docs/2.0/configuration-reference/#workflows
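# Three workflows are defined: pr_stage_lint runs lint only; pr_stage_test
# chains the CPU jobs, a manual "hold" approval, and then the GPU jobs;
# merge_stage_test runs the minimum GPU job on the pretrain branch only.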
workflows:
  pr_stage_lint:
    when: << pipeline.parameters.lint_only >>
    jobs:
      - lint:
          name: lint
          filters:
            branches:
              ignore:
                - dev
                - main
  pr_stage_test:
    when:
      not:
        << pipeline.parameters.lint_only >>
    jobs:
      - lint:
          name: lint
          filters:
            branches:
              ignore:
                - dev
      - build_cpu:
          name: minimum_version_cpu
          torch: 1.8.0
          torchvision: 0.9.0
          python: 3.7.16
          requires:
            - lint
      - build_cpu_with_3rdparty:
          name: maximum_version_cpu
          torch: 2.0.0
          torchvision: 0.15.1
          python: 3.10.0
          requires:
            - minimum_version_cpu
      - hold:
          type: approval
          requires:
            - maximum_version_cpu
      - build_cuda:
          name: mainstream_version_gpu
          torch: 1.8.1
          # Use double quotation mark to explicitly specify its type
          # as string instead of number
          cuda: "11.1"
          requires:
            - hold
      - build_cuda:
          name: maximum_version_gpu
          torch: 2.0.0
          cuda: "11.7"
          cudnn: 8
          requires:
            - hold
  merge_stage_test:
    when:
      not:
        << pipeline.parameters.lint_only >>
    jobs:
      - build_cuda:
          name: minimum_version_gpu
          torch: 1.8.0
          # Use double quotation mark to explicitly specify its type
          # as string instead of number
          cuda: "11.1"
          filters:
            branches:
              only:
                - pretrain
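
# To sanity-check changes to this file locally, the CircleCI CLI can be used
# (assuming it is installed), e.g.:
#   circleci config validate .circleci/config.yml
#   circleci config process .circleci/config.yml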