add github action (manually squash)

pull/2/head
yl-1993 2020-07-10 13:36:44 +08:00 committed by yl-1993
parent 4e6875d44e
commit 71e3b86ed3
8 changed files with 169 additions and 109 deletions

.github/workflows/build.yml vendored 100644
@@ -0,0 +1,69 @@
# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions

name: build

on: [push, pull_request]

jobs:
  build:
    runs-on: ubuntu-latest
    env:
      CUDA: 10.1.105-1
      CUDA_SHORT: 10.1
      UBUNTU_VERSION: ubuntu1804
      FORCE_CUDA: 1
      CUDA_ARCH: ${{matrix.cuda_arch}}
    strategy:
      matrix:
        python-version: [3.6, 3.7]
        torch: [1.3.0, 1.5.0]
        include:
          - torch: 1.3.0
            torchvision: 0.4.2
            cuda_arch: "6.0"
          - torch: 1.5.0
            torchvision: 0.6.0
            cuda_arch: "7.0"
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install Pillow
        # torchvision < 0.5 is incompatible with Pillow >= 7
        run: pip install Pillow==6.2.2
        if: ${{matrix.torchvision == '0.4.2'}}
      - name: Install PyTorch
        run: pip install torch==${{matrix.torch}} torchvision==${{matrix.torchvision}} -f https://download.pytorch.org/whl/torch_stable.html
      - name: Install mmcls dependencies
        run: pip install -r requirements.txt
      - name: Lint with flake8
        run: flake8 .
      - name: Lint with isort
        run: isort --recursive --check-only --diff mmcls/ tools/ configs/ tests/
      - name: Format python code with yapf
        run: yapf -r -d mmcls/ tools/ configs/ tests/
      - name: Build and install
        run: |
          rm -rf .eggs
          python setup.py check -m -s
          TORCH_CUDA_ARCH_LIST=${CUDA_ARCH} python setup.py build_ext --inplace
      - name: Run unittests and generate coverage report
        run: |
          coverage run --branch --source mmcls -m pytest tests/
          coverage xml
          coverage report -m --omit="mmcls/utils/*","mmcls/apis/*"
      # Only upload the coverage report for python 3.7 && pytorch 1.5.0
      - name: Upload coverage to Codecov
        if: ${{matrix.torch == '1.5.0' && matrix.python-version == '3.7'}}
        uses: codecov/codecov-action@v1.0.10
        with:
          file: ./coverage.xml
          flags: unittests
          env_vars: OS,PYTHON
          name: codecov-umbrella
          fail_ci_if_error: false
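
For reference, the strategy above expands to four jobs: GitHub Actions takes the cross product of the python-version and torch axes, and each include entry merges its extra keys (torchvision, cuda_arch) into the combinations whose torch value matches. A rough Python sketch of that expansion, purely illustrative and not part of the commit:

# Illustrative only: how the matrix above expands into jobs.
import itertools

python_versions = ['3.6', '3.7']
extras = {'1.3.0': {'torchvision': '0.4.2', 'cuda_arch': '6.0'},
          '1.5.0': {'torchvision': '0.6.0', 'cuda_arch': '7.0'}}

for py, torch in itertools.product(python_versions, extras):
    job = {'python-version': py, 'torch': torch, **extras[torch]}
    print(job)
# four jobs in total; only the python 3.7 / torch 1.5.0 one uploads coverage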

(deleted config file: ImageNet ResNet-50 with batch size 256; filename not shown in this view)
@@ -1,98 +0,0 @@
import os.path as osp

# run with 8 GPUs
# model settings
model = dict(
    type='ImageClassifier',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(3, ),
        style='pytorch'),
    neck=dict(type='GlobalAveragePooling'),
    head=dict(
        type='LinearHead',
        num_classes=1000,
        in_channels=2048,
        loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
    ))
# dataset settings
dataset_type = 'ImageNet'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
memcached_root = '/mnt/lustre/share/memcached_client/'
train_pipeline = [
    dict(
        type='LoadImageFromFile',
        file_client_args=dict(
            backend='memcached',
            server_list_cfg=osp.join(memcached_root, 'server_list.conf'),
            client_cfg=osp.join(memcached_root, 'client.conf'))),
    dict(type='ToPIL'),
    dict(type='RandomResizedCrop', size=224),
    dict(type='RandomHorizontalFlip', p=0.5),
    dict(type='ToNumpy'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='ToTensor', keys=['gt_labels']),
    dict(type='Collect', keys=['img', 'gt_labels'])
]
test_pipeline = [
    dict(
        type='LoadImageFromFile',
        file_client_args=dict(
            backend='memcached',
            server_list_cfg=osp.join(memcached_root, 'server_list.conf'),
            client_cfg=osp.join(memcached_root, 'client.conf'))),
    dict(type='ToPIL'),
    dict(type='Resize', size=256),
    dict(type='CenterCrop', size=224),
    dict(type='ToNumpy'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='ToTensor', keys=['gt_labels']),
    dict(type='Collect', keys=['img', 'gt_labels'])
]
data = dict(
    samples_per_gpu=64,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        data_prefix='data/imagenet/train',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_prefix='data/imagenet/val',
        ann_file='data/imagenet/meta/val.txt',
        pipeline=test_pipeline),
    test=dict(
        # replace `data/val` with `data/test` for standard test
        type=dataset_type,
        data_prefix='data/imagenet/val',
        ann_file='data/imagenet/meta/val.txt',
        pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='step', step=[30, 60, 90])
# checkpoint saving
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
    interval=100,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
# runtime settings
total_epochs = 100
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/imagenet_resnet50_batch256'
load_from = None
resume_from = None
workflow = [('train', 1)]
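
The deleted file above is a standard mmcv-style config: a plain Python module that mmcv evaluates into a nested dict. A minimal loading sketch, assuming mmcv is installed and the config is saved locally under the hypothetical name resnet50_batch256.py:

# Sketch only; `resnet50_batch256.py` is a hypothetical local copy of the
# deleted config. mmcv.Config supports attribute-style access to the dict.
from mmcv import Config

cfg = Config.fromfile('resnet50_batch256.py')
print(cfg.model.backbone.depth)   # 50
print(cfg.data.samples_per_gpu)   # 64
print(cfg.optimizer.lr)           # 0.1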

requirements.txt
@@ -1,2 +1,2 @@
-mmcv
-numpy
+-r requirements/runtime.txt
+-r requirements/tests.txt
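
With this change the root requirements.txt becomes a pure aggregator: pip, and the parse_requirements helper added to setup.py below, follow each -r line into the runtime and test lists. A simplified sketch of that resolution (resolve_requirements is a hypothetical name; the real logic lives in parse_line/parse_require_file below):

# Simplified, hypothetical sketch of how `-r` includes resolve.
import os

def resolve_requirements(fpath):
    base = os.path.dirname(fpath)
    for raw in open(fpath):
        line = raw.strip()
        if not line or line.startswith('#'):
            continue
        if line.startswith('-r '):
            # Recurse into the included file, relative to the current one.
            included = line.split(' ', 1)[1]
            yield from resolve_requirements(os.path.join(base, included))
        else:
            yield line

print(list(resolve_requirements('requirements.txt')))
# ['mmcv', 'numpy', 'asynctest', 'codecov', ...]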

requirements/build.txt
@@ -0,0 +1 @@
numpy

requirements/runtime.txt
@@ -0,0 +1,2 @@
mmcv
numpy

requirements/tests.txt
@@ -0,0 +1,8 @@
asynctest
codecov
flake8
interrogate
isort==4.3.21
pytest
xdoctest >= 0.10.0
yapf

setup.py
@@ -79,11 +79,80 @@ def get_version():
     return locals()['__version__']
 
 
-def get_requirements(filename='requirements.txt'):
-    here = os.path.dirname(os.path.realpath(__file__))
-    with open(os.path.join(here, filename), 'r') as f:
-        requires = [line.replace('\n', '') for line in f.readlines()]
-    return requires
+def parse_requirements(fname='requirements.txt', with_version=True):
+    """Parse the package dependencies listed in a requirements file but
+    strip specific versioning information.
+
+    Args:
+        fname (str): path to requirements file
+        with_version (bool, default=True): if True include version specs
+
+    Returns:
+        List[str]: list of requirements items
+
+    CommandLine:
+        python -c "import setup; print(setup.parse_requirements())"
+    """
+    import re
+    import sys
+    from os.path import exists
+    require_fpath = fname
+
+    def parse_line(line):
+        """Parse information from a line in a requirements text file."""
+        if line.startswith('-r '):
+            # Allow specifying requirements in other files
+            target = line.split(' ')[1]
+            for info in parse_require_file(target):
+                yield info
+        else:
+            info = {'line': line}
+            if line.startswith('-e '):
+                info['package'] = line.split('#egg=')[1]
+            else:
+                # Remove versioning from the package
+                pat = '(' + '|'.join(['>=', '==', '>']) + ')'
+                parts = re.split(pat, line, maxsplit=1)
+                parts = [p.strip() for p in parts]
+
+                info['package'] = parts[0]
+                if len(parts) > 1:
+                    op, rest = parts[1:]
+                    if ';' in rest:
+                        # Handle platform specific dependencies
+                        # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
+                        version, platform_deps = map(str.strip,
+                                                     rest.split(';'))
+                        info['platform_deps'] = platform_deps
+                    else:
+                        version = rest  # NOQA
+                    info['version'] = (op, version)
+            yield info
+
+    def parse_require_file(fpath):
+        with open(fpath, 'r') as f:
+            for line in f.readlines():
+                line = line.strip()
+                if line and not line.startswith('#'):
+                    for info in parse_line(line):
+                        yield info
+
+    def gen_packages_items():
+        if exists(require_fpath):
+            for info in parse_require_file(require_fpath):
+                parts = [info['package']]
+                if with_version and 'version' in info:
+                    parts.extend(info['version'])
+                if not sys.version.startswith('3.4'):
+                    # apparently package_deps are broken in 3.4
+                    platform_deps = info.get('platform_deps')
+                    if platform_deps is not None:
+                        parts.append(';' + platform_deps)
+                item = ''.join(parts)
+                yield item
+
+    packages = list(gen_packages_items())
+    return packages
 
 
 if __name__ == '__main__':
@@ -91,8 +160,12 @@ if __name__ == '__main__':
     setup(
         name='mmcls',
         version=get_version(),
-        description='A template for pytorch projects.',
+        description='OpenMMLab Image Classification Toolbox and Benchmark',
         long_description=readme(),
+        author='OpenMMLab',
+        author_email='yangleidev@gmail.com',
+        keywords='computer vision, image classification',
+        url='https://github.com/open-mmlab/mmclassification',
         packages=find_packages(exclude=('configs', 'tools', 'demo')),
         package_data={'mmcls.ops': ['*/*.so']},
         classifiers=[
@@ -105,7 +178,12 @@ if __name__ == '__main__':
             'Programming Language :: Python :: 3.7',
         ],
         license='Apache License 2.0',
-        setup_requires=['pytest-runner', 'cython', 'numpy'],
-        tests_require=['pytest', 'xdoctest'],
-        install_requires=get_requirements(),
+        setup_requires=parse_requirements('requirements/build.txt'),
+        tests_require=parse_requirements('requirements/tests.txt'),
+        install_requires=parse_requirements('requirements/runtime.txt'),
+        extras_require={
+            'all': parse_requirements('requirements.txt'),
+            'tests': parse_requirements('requirements/tests.txt'),
+            'build': parse_requirements('requirements/build.txt'),
+        },
         zip_safe=False)
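
The docstring's CommandLine hint is the easiest way to exercise the new helper; run it from the repository root so the relative -r includes in requirements.txt resolve. A hedged example of the expected behavior:

# Run from the repo root. Importing setup is safe because setup() is
# guarded by the __main__ check above.
import setup

# Follows both `-r` includes and keeps specifiers (with_version=True), so
# the list should contain e.g. 'mmcv', 'isort==4.3.21', 'xdoctest>=0.10.0'.
print(setup.parse_requirements())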