Mirror of https://github.com/huggingface/pytorch-image-models.git
Synced 2025-06-03 15:01:08 +08:00
Merge pull request #2292 from huggingface/inference_tests
Do full inference test against test vectors for test_* models
This commit is contained in:
commit 14d55a7cf3

.github/workflows/tests.yml (8 changes)
@@ -16,11 +16,11 @@ jobs:
     strategy:
       matrix:
         os: [ubuntu-latest]
-        python: ['3.10', '3.11']
-        torch: [{base: '1.13.0', vision: '0.14.0'}, {base: '2.1.0', vision: '0.16.0'}]
+        python: ['3.10', '3.12']
+        torch: [{base: '1.13.0', vision: '0.14.0'}, {base: '2.4.1', vision: '0.19.1'}]
         testmarker: ['-k "not test_models"', '-m base', '-m cfg', '-m torchscript', '-m features', '-m fxforward', '-m fxbackward']
         exclude:
-          - python: '3.11'
+          - python: '3.12'
            torch: {base: '1.13.0', vision: '0.14.0'}
     runs-on: ${{ matrix.os }}

@@ -46,7 +46,7 @@ jobs:
         sudo sed -i 's/azure\.//' /etc/apt/sources.list
         sudo apt update
         sudo apt install -y google-perftools
-        pip install --no-cache-dir torch==${{ matrix.torch.base }}+cpu torchvision==${{ matrix.torch.vision }}+cpu -f https://download.pytorch.org/whl/torch_stable.html
+        pip install --no-cache-dir torch==${{ matrix.torch.base }}+cpu torchvision==${{ matrix.torch.vision }}+cpu --index-url https://download.pytorch.org/whl/cpu
     - name: Install requirements
       run: |
         pip install -r requirements.txt
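A quick aside on the matrix change above: the workflow now pairs Python 3.10/3.12 with torch 1.13.0/2.4.1 and excludes the 3.12 + 1.13.0 combination, since torch 1.13.0 ships no Python 3.12 wheels. The sketch below only illustrates how that expansion works; the variable names are made up and the exclude check is simplified to an exact match (GitHub Actions actually matches partial keys):

from itertools import product

pythons = ['3.10', '3.12']
torches = [
    {'base': '1.13.0', 'vision': '0.14.0'},
    {'base': '2.4.1', 'vision': '0.19.1'},
]
excluded = [('3.12', '1.13.0')]  # torch 1.13.0 has no cp312 wheels

# Expand the cross product and drop excluded pairs, mirroring the matrix + exclude keys.
jobs = [
    (py, t) for py, t in product(pythons, torches)
    if (py, t['base']) not in excluded
]
print(len(jobs))  # 3 python/torch combinations remain per OS and test marker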
tests/test_models.py

@@ -26,7 +26,7 @@ except ImportError:
     has_fx_feature_extraction = False

 import timm
-from timm import list_models, create_model, set_scriptable, get_pretrained_cfg_value
+from timm import list_models, list_pretrained, create_model, set_scriptable, get_pretrained_cfg_value
 from timm.layers import Format, get_spatial_dim, get_channel_dim
 from timm.models import get_notrace_modules, get_notrace_functions

@@ -39,7 +39,8 @@ if torch_backend is not None:
 torch_device = os.environ.get('TORCH_DEVICE', 'cpu')
 timeout = os.environ.get('TIMEOUT')
 timeout120 = int(timeout) if timeout else 120
-timeout300 = int(timeout) if timeout else 300
+timeout240 = int(timeout) if timeout else 240
+timeout360 = int(timeout) if timeout else 360

 if hasattr(torch._C, '_jit_set_profiling_executor'):
     # legacy executor is too slow to compile large models for unit tests
@@ -118,6 +119,50 @@ def _get_input_size(model=None, model_name='', target=None):
     return input_size


+@pytest.mark.base
+@pytest.mark.timeout(timeout240)
+@pytest.mark.parametrize('model_name', list_pretrained('test_*'))
+@pytest.mark.parametrize('batch_size', [1])
+def test_model_inference(model_name, batch_size):
+    """Run a single forward pass with each model"""
+    from PIL import Image
+    from huggingface_hub import snapshot_download
+    import tempfile
+    import safetensors
+
+    model = create_model(model_name, pretrained=True)
+    model.eval()
+    pp = timm.data.create_transform(**timm.data.resolve_data_config(model=model))
+
+    with tempfile.TemporaryDirectory() as temp_dir:
+        snapshot_download(
+            repo_id='timm/' + model_name, repo_type='model', local_dir=temp_dir, allow_patterns='test/*'
+        )
+        rand_tensors = safetensors.torch.load_file(os.path.join(temp_dir, 'test', 'rand_tensors.safetensors'))
+        owl_tensors = safetensors.torch.load_file(os.path.join(temp_dir, 'test', 'owl_tensors.safetensors'))
+        test_owl = Image.open(os.path.join(temp_dir, 'test', 'test_owl.jpg'))
+
+        with torch.no_grad():
+            rand_output = model(rand_tensors['input'])
+            rand_features = model.forward_features(rand_tensors['input'])
+            rand_pre_logits = model.forward_head(rand_features, pre_logits=True)
+            assert torch.allclose(rand_output, rand_tensors['output'], rtol=1e-3, atol=1e-4)
+            assert torch.allclose(rand_features, rand_tensors['features'], rtol=1e-3, atol=1e-4)
+            assert torch.allclose(rand_pre_logits, rand_tensors['pre_logits'], rtol=1e-3, atol=1e-4)
+
+            def _test_owl(owl_input):
+                owl_output = model(owl_input)
+                owl_features = model.forward_features(owl_input)
+                owl_pre_logits = model.forward_head(owl_features.clone(), pre_logits=True)
+                assert owl_output.softmax(1).argmax(1) == 24  # owl
+                assert torch.allclose(owl_output, owl_tensors['output'], rtol=1e-3, atol=1e-4)
+                assert torch.allclose(owl_features, owl_tensors['features'], rtol=1e-3, atol=1e-4)
+                assert torch.allclose(owl_pre_logits, owl_tensors['pre_logits'], rtol=1e-3, atol=1e-4)
+
+            _test_owl(owl_tensors['input'])  # test with original pp owl tensor
+            _test_owl(pp(test_owl).unsqueeze(0))  # re-process from original jpg
+
+
 @pytest.mark.base
 @pytest.mark.timeout(timeout120)
 @pytest.mark.parametrize('model_name', list_models(exclude_filters=EXCLUDE_FILTERS))
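The test/rand_tensors.safetensors, test/owl_tensors.safetensors, and test/test_owl.jpg artifacts that test_model_inference downloads live in each test_* model's repo on the Hub; this commit only consumes them. As a rough, hypothetical sketch of how a rand_tensors.safetensors with the same key layout could be produced (the model name and output path are assumptions, not part of the commit):

import torch
import safetensors.torch
import timm

# Hypothetical generator, not part of this commit; it only reuses the key names
# ('input', 'output', 'features', 'pre_logits') that test_model_inference loads back.
model = timm.create_model('test_vit.r160_in1k', pretrained=True).eval()  # assumed test_* model
cfg = timm.data.resolve_data_config(model=model)
x = torch.randn(1, *cfg['input_size'])  # random input at the model's native resolution

with torch.no_grad():
    features = model.forward_features(x)
    tensors = {
        'input': x,
        'output': model(x),
        'features': features,
        'pre_logits': model.forward_head(features, pre_logits=True),
    }

safetensors.torch.save_file(tensors, 'rand_tensors.safetensors')  # assumed output path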
@@ -182,7 +227,7 @@ EARLY_POOL_MODELS = (
 )

 @pytest.mark.cfg
-@pytest.mark.timeout(timeout300)
+@pytest.mark.timeout(timeout360)
 @pytest.mark.parametrize('model_name', list_models(
     exclude_filters=EXCLUDE_FILTERS + NON_STD_FILTERS, include_tags=True))
 @pytest.mark.parametrize('batch_size', [1])
@@ -260,7 +305,7 @@ def test_model_default_cfgs(model_name, batch_size):


 @pytest.mark.cfg
-@pytest.mark.timeout(timeout300)
+@pytest.mark.timeout(timeout360)
 @pytest.mark.parametrize('model_name', list_models(filter=NON_STD_FILTERS, exclude_filters=NON_STD_EXCLUDE_FILTERS, include_tags=True))
 @pytest.mark.parametrize('batch_size', [1])
 def test_model_default_cfgs_non_std(model_name, batch_size):
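To reproduce the new check locally, something along these lines should work; the tests/test_models.py path and the extras the workflow installs (pytest-timeout, huggingface_hub, safetensors) are assumptions about the local setup rather than anything this commit specifies:

import sys
import pytest

# Run only the new inference test, using the same '-m base' marker group as the CI matrix.
sys.exit(pytest.main([
    '-m', 'base',
    '-k', 'test_model_inference',
    'tests/test_models.py',  # assumed test file location
]))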