create engine SDK
parent 81fa81ad74
commit 37f92b7cc3
@@ -1,6 +1,6 @@
 <p align="center">
-  <img src="imgs/deep-person-reid-logo.png" alt="logo" width="260">
+  <img src="docs/figures/deep-person-reid-logo.png" alt="logo" width="260">
 </p>

 This project aims to provide an efficient framework for training and evaluating deep person re-identification models in [Pytorch](http://pytorch.org/).
@@ -164,7 +164,7 @@ Use `--eval-freq` to control the evaluation frequency and `--start-eval` to indi
 To visualize the ranked results, you can use `--visualize-ranks`, which works along with `--evaluate`. The ranked images will be saved in `save_dir/ranked_results/dataset_name` where `save_dir` is the directory you specify with `--save-dir`. This function is implemented in [torchreid/utils/reidtools.py](torchreid/utils/reidtools.py).

 <p align="center">
-  <img src="imgs/ranked_results.jpg" alt="ranked_results" width="600">
+  <img src="docs/figures/ranked_results.jpg" alt="ranked_results" width="600">
 </p>
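
For instance, a visualization-only run could look like the following (the script name and weights file are placeholders, not part of this diff; the flags are those documented above):

    python train.py -s market1501 -t market1501 --evaluate --load-weights model.pth.tar --visualize-ranks --save-dir log/eval
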
@@ -0,0 +1,196 @@
from __future__ import absolute_import
from __future__ import print_function

import argparse


def init_parser():
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # ************************************************************
    # Method
    # ************************************************************
    parser.add_argument('--application', type=str, default='image', choices=['image', 'video'],
                        help='image-reid or video-reid')
    parser.add_argument('--method', type=str, default='softmax',
                        help='methodology')

    # ************************************************************
    # Datasets
    # ************************************************************
    parser.add_argument('--root', type=str, default='data',
                        help='root path to data directory')
    parser.add_argument('-s', '--source-names', type=str, required=True, nargs='+',
                        help='source datasets (delimited by space)')
    parser.add_argument('-t', '--target-names', type=str, required=True, nargs='+',
                        help='target datasets (delimited by space)')
    parser.add_argument('-j', '--workers', type=int, default=4,
                        help='number of data loading workers (tips: 4 or 8 times number of gpus)')
    parser.add_argument('--split-id', type=int, default=0,
                        help='split index (note: 0-based)')
    parser.add_argument('--height', type=int, default=256,
                        help='height of an image')
    parser.add_argument('--width', type=int, default=128,
                        help='width of an image')
    parser.add_argument('--train-sampler', type=str, default='RandomSampler',
                        help='sampler for trainloader')
    parser.add_argument('--combineall', action='store_true',
                        help='combine all data in a dataset (train+query+gallery) for training')

    # ************************************************************
    # Data augmentation
    # ************************************************************
    parser.add_argument('--random-erase', action='store_true',
                        help='use random erasing for data augmentation')
    parser.add_argument('--color-jitter', action='store_true',
                        help='randomly change the brightness, contrast and saturation')
    parser.add_argument('--color-aug', action='store_true',
                        help='randomly alter the intensities of RGB channels')

    # ************************************************************
    # Video datasets
    # ************************************************************
    parser.add_argument('--seq-len', type=int, default=15,
                        help='number of images to sample in a tracklet')
    parser.add_argument('--sample-method', type=str, default='evenly',
                        help='how to sample images from a tracklet')
    parser.add_argument('--pooling-method', type=str, default='avg', choices=['avg', 'max'],
                        help='how to pool features over a tracklet (for video reid)')

    # ************************************************************
    # Dataset-specific setting
    # ************************************************************
    parser.add_argument('--cuhk03-labeled', action='store_true',
                        help='use labeled images, if false, use detected images')
    parser.add_argument('--cuhk03-classic-split', action='store_true',
                        help='use classic split by Li et al. CVPR\'14')
    parser.add_argument('--use-metric-cuhk03', action='store_true',
                        help='use cuhk03\'s metric for evaluation')

    parser.add_argument('--market1501-500k', action='store_true',
                        help='add 500k distractors to the gallery set for market1501')

    # ************************************************************
    # Optimization options
    # ************************************************************
    parser.add_argument('--optim', type=str, default='adam',
                        help='optimization algorithm (see optimizers.py)')
    parser.add_argument('--lr', type=float, default=0.0003,
                        help='initial learning rate')
    parser.add_argument('--weight-decay', type=float, default=5e-04,
                        help='weight decay')
    # sgd
    parser.add_argument('--momentum', type=float, default=0.9,
                        help='momentum factor for sgd and rmsprop')
    parser.add_argument('--sgd-dampening', type=float, default=0,
                        help='sgd\'s dampening for momentum')
    parser.add_argument('--sgd-nesterov', action='store_true',
                        help='whether to enable sgd\'s Nesterov momentum')
    # rmsprop
    parser.add_argument('--rmsprop-alpha', type=float, default=0.99,
                        help='rmsprop\'s smoothing constant')
    # adam/amsgrad
    parser.add_argument('--adam-beta1', type=float, default=0.9,
                        help='exponential decay rate for adam\'s first moment')
    parser.add_argument('--adam-beta2', type=float, default=0.999,
                        help='exponential decay rate for adam\'s second moment')

    # ************************************************************
    # Training hyperparameters
    # ************************************************************
    parser.add_argument('--max-epoch', type=int, default=60,
                        help='maximum epochs to run')
    parser.add_argument('--start-epoch', type=int, default=0,
                        help='manual epoch number (useful when restart)')

    parser.add_argument('--train-batch-size', type=int, default=32,
                        help='training batch size')
    parser.add_argument('--test-batch-size', type=int, default=100,
                        help='test batch size')

    parser.add_argument('--always-fixbase', action='store_true',
                        help='always fix base network and only train specified layers')
    parser.add_argument('--fixbase-epoch', type=int, default=0,
                        help='how many epochs to fix base network (only train randomly initialized classifier)')
    parser.add_argument('--open-layers', type=str, nargs='+', default=['classifier'],
                        help='open specified layers for training while keeping others frozen')

    parser.add_argument('--staged-lr', action='store_true',
                        help='set different lr to different layers')
    parser.add_argument('--new-layers', type=str, nargs='+', default=['classifier'],
                        help='newly added layers with default lr')
    parser.add_argument('--base-lr-mult', type=float, default=0.1,
                        help='learning rate multiplier for base layers')

    # ************************************************************
    # Learning rate scheduler options
    # ************************************************************
    parser.add_argument('--lr-scheduler', type=str, default='multi_step',
                        help='learning rate scheduler (see lr_schedulers.py)')
    parser.add_argument('--stepsize', type=int, default=[20, 40], nargs='+',
                        help='stepsize to decay learning rate')
    parser.add_argument('--gamma', type=float, default=0.1,
                        help='learning rate decay')

    # ************************************************************
    # Cross entropy loss-specific setting
    # ************************************************************
    parser.add_argument('--label-smooth', action='store_true',
                        help='use label smoothing regularizer in cross entropy loss')

    # ************************************************************
    # Hard triplet loss-specific setting
    # ************************************************************
    parser.add_argument('--margin', type=float, default=0.3,
                        help='margin for triplet loss')
    parser.add_argument('--num-instances', type=int, default=4,
                        help='number of instances per identity')
    parser.add_argument('--weight-t', type=float, default=1,
                        help='weight to balance hard triplet loss')
    parser.add_argument('--weight-x', type=float, default=1,
                        help='weight to balance cross entropy loss')

    # ************************************************************
    # Architecture
    # ************************************************************
    parser.add_argument('-a', '--arch', type=str, default='resnet50')
    parser.add_argument('--no-pretrained', action='store_true',
                        help='do not load pretrained weights')

    # ************************************************************
    # Test settings
    # ************************************************************
    parser.add_argument('--load-weights', type=str, default='',
                        help='load pretrained weights but ignore layers that do not match in size')
    parser.add_argument('--evaluate', action='store_true',
                        help='evaluate only')
    parser.add_argument('--eval-freq', type=int, default=-1,
                        help='evaluation frequency (set to -1 to test only in the end)')
    parser.add_argument('--start-eval', type=int, default=0,
                        help='start to evaluate after a specific epoch')
    parser.add_argument('--dist-metric', type=str, default='euclidean',
                        help='distance metric')

    # ************************************************************
    # Miscs
    # ************************************************************
    parser.add_argument('--print-freq', type=int, default=10,
                        help='print frequency')
    parser.add_argument('--seed', type=int, default=1,
                        help='manual seed')
    parser.add_argument('--resume', type=str, default='', metavar='PATH',
                        help='resume from a checkpoint')
    parser.add_argument('--save-dir', type=str, default='log',
                        help='path to save log and model weights')
    parser.add_argument('--use-cpu', action='store_true',
                        help='use cpu')
    parser.add_argument('--gpu-devices', type=str, default='0',
                        help='gpu device ids for CUDA_VISIBLE_DEVICES')
    parser.add_argument('--use-avai-gpus', action='store_true',
                        help='use available gpus instead of specified devices (useful when using managed clusters)')
    parser.add_argument('--visrank', action='store_true',
                        help='visualize ranked results, only available in evaluation mode')
    parser.add_argument('--visrank-topk', type=int, default=20,
                        help='visualize topk ranks')

    return parser
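
A minimal sketch of how init_parser would be consumed by a wrapper script (assuming the file above is saved as args.py; the wrapper itself is not part of this commit):

    from args import init_parser

    parser = init_parser()
    args = parser.parse_args(['-s', 'market1501', '-t', 'market1501'])
    print(args.height, args.width, args.optim)  # 256 128 adam (the defaults above)
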
(two binary image files relocated; sizes unchanged: 97 KiB and 92 KiB)
@@ -0,0 +1,17 @@
import torchreid

dataset = torchreid.data.ImageDatasetManager(
    root='data',
    source_names='grid',
    height=128,
    width=64
)
model = torchreid.models.build_model(
    name='squeezenet1_0',
    num_classes=dataset.num_train_pids,
    loss='softmax'
)
optimizer = torchreid.optim.build_optimizer(model)
scheduler = torchreid.optim.build_lr_scheduler(optimizer, lr_scheduler='multi_step', stepsize=[10, 20])
engine = torchreid.engine.ImageSoftmaxEngine(dataset, model, optimizer, scheduler=scheduler)
engine.run(max_epoch=3, print_freq=1, fixbase_epoch=3, open_layers='classifier')
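
Note on the run call above: per the option descriptions in the new parser, fixbase_epoch=3 with open_layers='classifier' keeps the base network frozen and trains only the randomly initialized classifier for the first three epochs; since max_epoch=3 as well, this toy run never unfreezes the backbone.
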
@@ -1,7 +0,0 @@
Cython
h5py
numpy
Pillow
scipy>=1.0.0
torch>=0.4.1
torchvision>=0.2.1
@@ -0,0 +1,42 @@
from setuptools import find_packages, setup


def readme():
    with open('README.md') as f:
        content = f.read()
    return content


def find_version():
    version_file = 'torchreid/__init__.py'
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'))
    return locals()['__version__']


setup(
    name='torchreid',
    version=find_version(),
    description='Pytorch framework for deep-learning person re-identification',
    author='Kaiyang Zhou',
    author_email='k.zhou.vision@gmail.com',
    license='MIT',
    long_description=readme(),
    url='https://github.com/KaiyangZhou/deep-person-reid',
    packages=find_packages(),
    install_requires=[
        'numpy',
        'Cython',
        'h5py',
        'Pillow',
        'six',
        'scipy>=1.0.0',
        'torch>=0.4.1',
        'torchvision>=0.2.1'
    ],
    keywords=[
        'Person Re-Identification',
        'Deep Learning',
        'Computer Vision'
    ]
)
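
With this setup.py in place the standard setuptools workflow applies; for development, an editable install from the repository root would be:

    pip install -e .
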
@@ -1,11 +1,16 @@
"""
deep-person-reid
==

Description: PyTorch implementation of deep person re-identification models.

Github page: https://github.com/KaiyangZhou/deep-person-reid
"""
from __future__ import absolute_import
from __future__ import print_function

__version__ = '0.7.0'
__author__ = 'Kaiyang Zhou'
__homepage__ = 'https://kaiyangzhou.github.io/'
__description__ = 'Pytorch framework for deep-learning person re-identification'

from torchreid import (
    engine,
    models,
    losses,
    metrics,
    data,
    optim,
    utils
)
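
A quick sanity check of the new package metadata (assuming the package imports cleanly):

    import torchreid
    print(torchreid.__version__)  # '0.7.0'
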
@@ -1,306 +0,0 @@
from __future__ import absolute_import
from __future__ import print_function

from torch.utils.data import DataLoader

from .dataset_loader import ImageDataset, VideoDataset
from .datasets import init_imgreid_dataset, init_vidreid_dataset
from .transforms import build_transforms
from .samplers import build_train_sampler


class BaseDataManager(object):

    def __init__(
        self,
        use_gpu,
        source_names,
        target_names,
        root='data',
        split_id=0,
        height=256,
        width=128,
        combineall=False,  # combine all data in a dataset for training
        train_batch_size=32,
        test_batch_size=100,
        workers=4,
        train_sampler='',
        random_erase=False,  # use random erasing for data augmentation
        color_jitter=False,  # randomly change the brightness, contrast and saturation
        color_aug=False,  # randomly alter the intensities of RGB channels
        num_instances=4,  # number of instances per identity (for RandomIdentitySampler)
        **kwargs
    ):
        self.use_gpu = use_gpu
        self.source_names = source_names
        self.target_names = target_names
        self.root = root
        self.split_id = split_id
        self.height = height
        self.width = width
        self.combineall = combineall
        self.train_batch_size = train_batch_size
        self.test_batch_size = test_batch_size
        self.workers = workers
        self.train_sampler = train_sampler
        self.random_erase = random_erase
        self.color_jitter = color_jitter
        self.color_aug = color_aug
        self.num_instances = num_instances

        transform_train, transform_test = build_transforms(
            self.height, self.width,
            random_erase=self.random_erase,
            color_jitter=self.color_jitter,
            color_aug=self.color_aug
        )
        self.transform_train = transform_train
        self.transform_test = transform_test

    @property
    def num_train_pids(self):
        return self._num_train_pids

    @property
    def num_train_cams(self):
        return self._num_train_cams

    def return_dataloaders(self):
        """Return trainloader and testloader dictionary"""
        return self.trainloader, self.testloader_dict

    def return_testdataset_by_name(self, name):
        """Return query and gallery, each containing a list of (img_path, pid, camid)"""
        return self.testdataset_dict[name]['query'], self.testdataset_dict[name]['gallery']


class ImageDataManager(BaseDataManager):

    def __init__(
        self,
        use_gpu,
        source_names,
        target_names,
        cuhk03_labeled=False,  # use cuhk03's labeled or detected images
        cuhk03_classic_split=False,  # use cuhk03's classic split or 767/700 split
        market1501_500k=False,  # add 500k distractors to the gallery set for market1501
        **kwargs
    ):
        super(ImageDataManager, self).__init__(use_gpu, source_names, target_names, **kwargs)
        self.cuhk03_labeled = cuhk03_labeled
        self.cuhk03_classic_split = cuhk03_classic_split
        self.market1501_500k = market1501_500k

        print('=> Initializing train (source) datasets')
        train = []
        self._num_train_pids = 0
        self._num_train_cams = 0

        for name in self.source_names:
            dataset = init_imgreid_dataset(
                root=self.root,
                name=name,
                split_id=self.split_id,
                combineall=self.combineall,
                cuhk03_labeled=self.cuhk03_labeled,
                cuhk03_classic_split=self.cuhk03_classic_split,
                market1501_500k=self.market1501_500k
            )

            for img_path, pid, camid in dataset.train:
                pid += self._num_train_pids
                camid += self._num_train_cams
                train.append((img_path, pid, camid))

            self._num_train_pids += dataset.num_train_pids
            self._num_train_cams += dataset.num_train_cams

        self.train_sampler = build_train_sampler(
            train,
            self.train_sampler,
            train_batch_size=self.train_batch_size,
            num_instances=self.num_instances,
        )

        self.trainloader = DataLoader(
            ImageDataset(train, transform=self.transform_train),
            sampler=self.train_sampler,
            batch_size=self.train_batch_size,
            shuffle=False,
            num_workers=self.workers,
            pin_memory=self.use_gpu,
            drop_last=True
        )

        print('=> Initializing test (target) datasets')
        self.testloader_dict = {name: {'query': None, 'gallery': None} for name in target_names}
        self.testdataset_dict = {name: {'query': None, 'gallery': None} for name in target_names}

        for name in self.target_names:
            dataset = init_imgreid_dataset(
                root=self.root,
                name=name,
                split_id=self.split_id,
                combineall=self.combineall,
                cuhk03_labeled=self.cuhk03_labeled,
                cuhk03_classic_split=self.cuhk03_classic_split,
                market1501_500k=self.market1501_500k
            )

            self.testloader_dict[name]['query'] = DataLoader(
                ImageDataset(dataset.query, transform=self.transform_test),
                batch_size=self.test_batch_size,
                shuffle=False,
                num_workers=self.workers,
                pin_memory=self.use_gpu,
                drop_last=False
            )

            self.testloader_dict[name]['gallery'] = DataLoader(
                ImageDataset(dataset.gallery, transform=self.transform_test),
                batch_size=self.test_batch_size,
                shuffle=False,
                num_workers=self.workers,
                pin_memory=self.use_gpu,
                drop_last=False
            )

            self.testdataset_dict[name]['query'] = dataset.query
            self.testdataset_dict[name]['gallery'] = dataset.gallery

        print('\n')
        print(' **************** Summary ****************')
        print(' train names : {}'.format(self.source_names))
        print(' # train datasets : {}'.format(len(self.source_names)))
        print(' # train ids : {}'.format(self.num_train_pids))
        print(' # train images : {}'.format(len(train)))
        print(' # train cameras : {}'.format(self.num_train_cams))
        print(' test names : {}'.format(self.target_names))
        print(' *****************************************')
        print('\n')


class VideoDataManager(BaseDataManager):

    def __init__(
        self,
        use_gpu,
        source_names,
        target_names,
        seq_len=15,
        sample_method='evenly',
        image_training=True,  # train the video-reid model with images rather than tracklets
        **kwargs
    ):
        super(VideoDataManager, self).__init__(use_gpu, source_names, target_names, **kwargs)
        self.seq_len = seq_len
        self.sample_method = sample_method
        self.image_training = image_training

        print('=> Initializing train (source) datasets')
        train = []
        self._num_train_pids = 0
        self._num_train_cams = 0

        for name in self.source_names:
            dataset = init_vidreid_dataset(root=self.root, name=name, split_id=self.split_id, combineall=self.combineall)

            for img_paths, pid, camid in dataset.train:
                pid += self._num_train_pids
                camid += self._num_train_cams
                if image_training:
                    # decompose tracklets into images
                    for img_path in img_paths:
                        train.append((img_path, pid, camid))
                else:
                    train.append((img_paths, pid, camid))

            self._num_train_pids += dataset.num_train_pids
            self._num_train_cams += dataset.num_train_cams

        self.train_sampler = build_train_sampler(
            train, self.train_sampler,
            train_batch_size=self.train_batch_size,
            num_instances=self.num_instances,
        )

        if image_training:
            # each batch has image data of shape (batch, channel, height, width)
            self.trainloader = DataLoader(
                ImageDataset(train, transform=self.transform_train),
                sampler=self.train_sampler,
                batch_size=self.train_batch_size,
                shuffle=False,
                num_workers=self.workers,
                pin_memory=self.use_gpu,
                drop_last=True
            )

        else:
            # each batch has image data of shape (batch, seq_len, channel, height, width)
            self.trainloader = DataLoader(
                VideoDataset(
                    train,
                    seq_len=self.seq_len,
                    sample_method=self.sample_method,
                    transform=self.transform_train
                ),
                batch_size=self.train_batch_size,
                shuffle=True,
                num_workers=self.workers,
                pin_memory=self.use_gpu,
                drop_last=True
            )
            raise NotImplementedError('This requires a new trainer')

        print('=> Initializing test (target) datasets')
        self.testloader_dict = {name: {'query': None, 'gallery': None} for name in target_names}
        self.testdataset_dict = {name: {'query': None, 'gallery': None} for name in target_names}

        for name in self.target_names:
            dataset = init_vidreid_dataset(root=self.root, name=name, split_id=self.split_id, combineall=self.combineall)

            self.testloader_dict[name]['query'] = DataLoader(
                VideoDataset(
                    dataset.query,
                    seq_len=self.seq_len,
                    sample_method=self.sample_method,
                    transform=self.transform_test
                ),
                batch_size=self.test_batch_size,
                shuffle=False,
                num_workers=self.workers,
                pin_memory=self.use_gpu,
                drop_last=False
            )

            self.testloader_dict[name]['gallery'] = DataLoader(
                VideoDataset(
                    dataset.gallery,
                    seq_len=self.seq_len,
                    sample_method=self.sample_method,
                    transform=self.transform_test
                ),
                batch_size=self.test_batch_size,
                shuffle=False,
                num_workers=self.workers,
                pin_memory=self.use_gpu,
                drop_last=False
            )

            self.testdataset_dict[name]['query'] = dataset.query
            self.testdataset_dict[name]['gallery'] = dataset.gallery

        print('\n')
        print(' **************** Summary ****************')
        print(' train names : {}'.format(self.source_names))
        print(' # train datasets : {}'.format(len(self.source_names)))
        print(' # train ids : {}'.format(self.num_train_pids))
        if self.image_training:
            print(' # train images : {}'.format(len(train)))
        else:
            print(' # train tracklets: {}'.format(len(train)))
        print(' # train cameras : {}'.format(self.num_train_cams))
        print(' test names : {}'.format(self.target_names))
        print(' *****************************************')
        print('\n')
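
For reference, the removed managers were driven directly, roughly as follows (argument values are illustrative; this API is deleted by this commit):

    dm = ImageDataManager(
        use_gpu=True,
        source_names=['market1501'],
        target_names=['market1501']
    )
    trainloader, testloader_dict = dm.return_dataloaders()
    queryloader = testloader_dict['market1501']['query']
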
@@ -1,115 +0,0 @@
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division

import os
from PIL import Image
import numpy as np
import os.path as osp
import io

import torch
from torch.utils.data import Dataset


def read_image(img_path):
    """Keep reading image until succeed.
    This can avoid IOError incurred by heavy IO process."""
    got_img = False
    if not osp.exists(img_path):
        raise IOError('{} does not exist'.format(img_path))
    while not got_img:
        try:
            img = Image.open(img_path).convert('RGB')
            got_img = True
        except IOError:
            print('IOError incurred when reading "{}". Will redo. Don\'t worry. Just chill.'.format(img_path))
            pass
    return img


class ImageDataset(Dataset):
    """Image Person ReID Dataset"""
    def __init__(self, dataset, transform=None):
        self.dataset = dataset
        self.transform = transform

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        img_path, pid, camid = self.dataset[index]
        img = read_image(img_path)

        if self.transform is not None:
            img = self.transform(img)

        return img, pid, camid, img_path


class VideoDataset(Dataset):
    """Video Person ReID Dataset.
    Note batch data has shape (batch, seq_len, channel, height, width).
    """
    _sample_methods = ['evenly', 'random', 'all']

    def __init__(self, dataset, seq_len=15, sample_method='evenly', transform=None):
        self.dataset = dataset
        self.seq_len = seq_len
        self.sample_method = sample_method
        self.transform = transform

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        img_paths, pid, camid = self.dataset[index]
        num = len(img_paths)

        if self.sample_method == 'random':
            """
            Randomly sample seq_len items from num items,
            if num is smaller than seq_len, then replicate items
            """
            indices = np.arange(num)
            replace = False if num >= self.seq_len else True
            indices = np.random.choice(indices, size=self.seq_len, replace=replace)
            # sort indices to keep temporal order (comment it to be order-agnostic)
            indices = np.sort(indices)

        elif self.sample_method == 'evenly':
            """
            Evenly sample seq_len items from num items.
            """
            if num >= self.seq_len:
                num -= num % self.seq_len
                indices = np.arange(0, num, num/self.seq_len)
            else:
                # if num is smaller than seq_len, simply replicate the last image
                # until the seq_len requirement is satisfied
                indices = np.arange(0, num)
                num_pads = self.seq_len - num
                indices = np.concatenate([indices, np.ones(num_pads).astype(np.int32)*(num-1)])
            assert len(indices) == self.seq_len

        elif self.sample_method == 'all':
            """
            Sample all items, seq_len is useless now and batch_size needs
            to be set to 1.
            """
            indices = np.arange(num)

        else:
            raise ValueError('Unknown sample method: {}. Expected one of {}'.format(self.sample_method, self._sample_methods))

        imgs = []
        for index in indices:
            img_path = img_paths[int(index)]
            img = read_image(img_path)
            if self.transform is not None:
                img = self.transform(img)
            img = img.unsqueeze(0)
            imgs.append(img)
        imgs = torch.cat(imgs, dim=0)

        return imgs, pid, camid
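
To trace the 'evenly' branch with concrete numbers: for num=32 and seq_len=15, num is first trimmed to 30, the step becomes 30/15 = 2.0, and np.arange produces 15 float indices that are cast to int when indexing img_paths:

    import numpy as np

    num, seq_len = 32, 15               # illustrative tracklet length and target length
    num -= num % seq_len                # 30
    indices = np.arange(0, num, num / seq_len)
    print(len(indices), indices[:3])    # 15 [0. 2. 4.]
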
@@ -1,57 +0,0 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from .market1501 import Market1501
from .cuhk03 import CUHK03
from .dukemtmcreid import DukeMTMCreID
from .msmt17 import MSMT17
from .viper import VIPeR
from .grid import GRID
from .cuhk01 import CUHK01
from .prid450s import PRID450S
from .ilids import iLIDS
from .sensereid import SenseReID
from .prid import PRID

from .mars import Mars
from .ilidsvid import iLIDSVID
from .prid2011 import PRID2011
from .dukemtmcvidreid import DukeMTMCVidReID


__imgreid_factory = {
    'market1501': Market1501,
    'cuhk03': CUHK03,
    'dukemtmcreid': DukeMTMCreID,
    'msmt17': MSMT17,
    'viper': VIPeR,
    'grid': GRID,
    'cuhk01': CUHK01,
    'prid450s': PRID450S,
    'ilids': iLIDS,
    'sensereid': SenseReID,
    'prid': PRID
}


__vidreid_factory = {
    'mars': Mars,
    'ilidsvid': iLIDSVID,
    'prid2011': PRID2011,
    'dukemtmcvidreid': DukeMTMCVidReID
}


def init_imgreid_dataset(name, **kwargs):
    avai_datasets = list(__imgreid_factory.keys())
    if name not in avai_datasets:
        raise KeyError('Invalid dataset name. Received "{}", but expected to be one of {}'.format(name, avai_datasets))
    return __imgreid_factory[name](**kwargs)


def init_vidreid_dataset(name, **kwargs):
    avai_datasets = list(__vidreid_factory.keys())
    if name not in avai_datasets:
        raise KeyError('Invalid dataset name. Received "{}", but expected to be one of {}'.format(name, avai_datasets))
    return __vidreid_factory[name](**kwargs)
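
The removed factories were looked up by a registered key, with dataset-specific options passed through as kwargs (usage reconstructed from the old data_manager.py above, not an API that survives this commit):

    dataset = init_imgreid_dataset(name='market1501', root='data', split_id=0, combineall=False)
    print(dataset.num_train_pids)
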
@@ -1,192 +0,0 @@
from __future__ import absolute_import
from __future__ import print_function

import os
import os.path as osp
import numpy as np
import copy


class BaseDataset(object):
    """Base class of reid dataset"""

    def __init__(self, root):
        self.root = osp.expanduser(root)

    def check_before_run(self, required_files):
        """Check if required files exist before going deeper"""
        for f in required_files:
            if not osp.exists(f):
                raise RuntimeError('"{}" is not found'.format(f))

    def extract_data_info(self, data):
        """Extract info from data list

        Args:
            data (list): contains a list of (img_path, pid, camid)
        """
        raise NotImplementedError

    def get_num_pids(self, data):
        return self.extract_data_info(data)[0]

    def get_num_cams(self, data):
        return self.extract_data_info(data)[2]

    def init_attributes(self, train, query, gallery, combineall=False, **kwargs):
        """Initialize class attributes

        Args:
            train (list): contains a list of (img_path, pid, camid)
            query (list): contains a list of (img_path, pid, camid)
            gallery (list): contains a list of (img_path, pid, camid)
            combineall (bool): if set to True, combine all data for training, default is False

        Notes:
            This method has to be called (at the end) in each dataset class.
        """
        self._train = train
        self._query = query
        self._gallery = gallery
        self._num_train_pids = self.get_num_pids(train)
        self._num_train_cams = self.get_num_cams(train)

        if combineall:
            self._train = self.combine_all(train, query, gallery)
            self._num_train_pids = self.get_num_pids(self.train)

    def combine_all(self, train, query, gallery):
        """Combine all data for training

        Notes:
            1. In general, we assume that all query identities appear in gallery set.
            2. All pids in train have been relabeled (starts from 0)
            3. pid=0 (background) and pid=-1 (junk) are discarded.
            4. Camera views remain the same across train, query and gallery.
        """
        raise NotImplementedError

    @property
    def train(self):
        # train list containing (img_path, pid, camid)
        return self._train

    @property
    def query(self):
        # query list containing (img_path, pid, camid)
        return self._query

    @property
    def gallery(self):
        # gallery list containing (img_path, pid, camid)
        return self._gallery

    @property
    def num_train_pids(self):
        # number of train identities
        return self._num_train_pids

    @property
    def num_train_cams(self):
        # number of train camera views
        return self._num_train_cams

    def print_dataset_statistics(self):
        raise NotImplementedError


class BaseImageDataset(BaseDataset):
    """Base class of image-reid dataset"""

    def extract_data_info(self, data):
        pids = set()
        cams = set()
        for _, pid, camid in data:
            pids.add(pid)
            cams.add(camid)
        num_pids = len(pids)
        num_cams = len(cams)
        num_imgs = len(data)
        return num_pids, num_imgs, num_cams

    def combine_all(self, train, query, gallery):
        combined = copy.deepcopy(train)

        # relabel pids in gallery
        g_pids = set()
        for _, pid, _ in gallery:
            if pid==0 or pid==-1:
                continue
            g_pids.add(pid)
        pid2label = {pid: i for i, pid in enumerate(g_pids)}

        def _combine_data(data):
            for img_path, pid, camid in data:
                if pid==0 or pid==-1:
                    continue
                pid = pid2label[pid] + self.num_train_pids
                combined.append((img_path, pid, camid))

        _combine_data(query)
        _combine_data(gallery)

        return combined

    def print_dataset_statistics(self, train, query, gallery):
        num_train_pids, num_train_imgs, num_train_cams = self.extract_data_info(train)
        num_query_pids, num_query_imgs, num_query_cams = self.extract_data_info(query)
        num_gallery_pids, num_gallery_imgs, num_gallery_cams = self.extract_data_info(gallery)

        print('=> Loaded {}'.format(self.__class__.__name__))
        print(' ----------------------------------------')
        print(' subset | # ids | # images | # cameras')
        print(' ----------------------------------------')
        print(' train | {:5d} | {:8d} | {:9d}'.format(num_train_pids, num_train_imgs, num_train_cams))
        print(' query | {:5d} | {:8d} | {:9d}'.format(num_query_pids, num_query_imgs, num_query_cams))
        print(' gallery | {:5d} | {:8d} | {:9d}'.format(num_gallery_pids, num_gallery_imgs, num_gallery_cams))
        print(' ----------------------------------------')


class BaseVideoDataset(BaseDataset):
    """Base class of video-reid dataset"""

    def extract_data_info(self, data, return_tracklet_stats=False):
        pids = set()
        cams = set()
        tracklet_stats = []
        for img_paths, pid, camid in data:
            pids.add(pid)
            cams.add(camid)
            tracklet_stats += [len(img_paths)]
        num_pids = len(pids)
        num_cams = len(cams)
        num_tracklets = len(data)
        if return_tracklet_stats:
            return num_pids, num_tracklets, num_cams, tracklet_stats
        return num_pids, num_tracklets, num_cams

    def print_dataset_statistics(self, train, query, gallery):
        num_train_pids, num_train_tracklets, num_train_cams, train_tracklet_stats = \
            self.extract_data_info(train, return_tracklet_stats=True)

        num_query_pids, num_query_tracklets, num_query_cams, query_tracklet_stats = \
            self.extract_data_info(query, return_tracklet_stats=True)

        num_gallery_pids, num_gallery_tracklets, num_gallery_cams, gallery_tracklet_stats = \
            self.extract_data_info(gallery, return_tracklet_stats=True)

        tracklet_stats = train_tracklet_stats + query_tracklet_stats + gallery_tracklet_stats
        min_num = np.min(tracklet_stats)
        max_num = np.max(tracklet_stats)
        avg_num = np.mean(tracklet_stats)

        print('=> Loaded {}'.format(self.__class__.__name__))
        print(' -------------------------------------------')
        print(' subset | # ids | # tracklets | # cameras')
        print(' -------------------------------------------')
        print(' train | {:5d} | {:11d} | {:9d}'.format(num_train_pids, num_train_tracklets, num_train_cams))
        print(' query | {:5d} | {:11d} | {:9d}'.format(num_query_pids, num_query_tracklets, num_query_cams))
        print(' gallery | {:5d} | {:11d} | {:9d}'.format(num_gallery_pids, num_gallery_tracklets, num_gallery_cams))
        print(' -------------------------------------------')
        print(' number of images per tracklet: {} ~ {}, average {:.2f}'.format(min_num, max_num, avg_num))
        print(' -------------------------------------------')
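
A toy trace of the combine_all relabeling in BaseImageDataset: if the gallery pids (after discarding 0 and -1) are {2, 5, 9} and num_train_pids is 100, then pid2label maps them to {2: 0, 5: 1, 9: 2} (up to set ordering) and the query/gallery images are appended with pids 100, 101 and 102, extending the already 0-based train labels without collision.
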
@@ -1,141 +0,0 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import glob
import re
import sys
import urllib
import tarfile
import zipfile
import os.path as osp
from scipy.io import loadmat
import numpy as np
import h5py
from scipy.misc import imsave

from torchreid.utils.iotools import mkdir_if_missing, write_json, read_json
from .bases import BaseImageDataset


class CUHK01(BaseImageDataset):
    """CUHK01

    Reference:
        Li et al. Human Reidentification with Transferred Metric Learning. ACCV 2012.

    URL: http://www.ee.cuhk.edu.hk/~xgwang/CUHK_identification.html

    Dataset statistics:
        # identities: 971
        # images: 3884
        # cameras: 4
    """
    dataset_dir = 'cuhk01'

    def __init__(self, root='data', split_id=0, verbose=True, **kwargs):
        super(CUHK01, self).__init__(root)
        self.dataset_dir = osp.join(self.root, self.dataset_dir)
        self.zip_path = osp.join(self.dataset_dir, 'CUHK01.zip')
        self.campus_dir = osp.join(self.dataset_dir, 'campus')
        self.split_path = osp.join(self.dataset_dir, 'splits.json')

        self.extract_file()

        required_files = [
            self.dataset_dir,
            self.campus_dir
        ]
        self.check_before_run(required_files)

        self.prepare_split()
        splits = read_json(self.split_path)
        if split_id >= len(splits):
            raise ValueError('split_id exceeds range, received {}, but expected between 0 and {}'.format(split_id, len(splits)-1))
        split = splits[split_id]

        train = split['train']
        query = split['query']
        gallery = split['gallery']

        train = [tuple(item) for item in train]
        query = [tuple(item) for item in query]
        gallery = [tuple(item) for item in gallery]

        self.init_attributes(train, query, gallery, **kwargs)

        if verbose:
            self.print_dataset_statistics(self.train, self.query, self.gallery)

    def extract_file(self):
        if not osp.exists(self.campus_dir):
            print('Extracting files')
            zip_ref = zipfile.ZipFile(self.zip_path, 'r')
            zip_ref.extractall(self.dataset_dir)
            zip_ref.close()

    def prepare_split(self):
        """
        Image name format: 0001001.png, where the first four digits represent
        identity and the last three digits represent cameras. Cameras 1&2 are
        considered the same view and cameras 3&4 are considered the same view.
        """
        if not osp.exists(self.split_path):
            print('Creating 10 random splits of train ids and test ids')
            img_paths = sorted(glob.glob(osp.join(self.campus_dir, '*.png')))
            img_list = []
            pid_container = set()
            for img_path in img_paths:
                img_name = osp.basename(img_path)
                pid = int(img_name[:4]) - 1
                camid = (int(img_name[4:7]) - 1) // 2  # result is either 0 or 1
                img_list.append((img_path, pid, camid))
                pid_container.add(pid)

            num_pids = len(pid_container)
            num_train_pids = num_pids // 2

            splits = []
            for _ in range(10):
                order = np.arange(num_pids)
                np.random.shuffle(order)
                train_idxs = order[:num_train_pids]
                train_idxs = np.sort(train_idxs)
                idx2label = {idx: label for label, idx in enumerate(train_idxs)}

                train, test_a, test_b = [], [], []
                for img_path, pid, camid in img_list:
                    if pid in train_idxs:
                        train.append((img_path, idx2label[pid], camid))
                    else:
                        if camid == 0:
                            test_a.append((img_path, pid, camid))
                        else:
                            test_b.append((img_path, pid, camid))

                # use cameraA as query and cameraB as gallery
                split = {
                    'train': train,
                    'query': test_a,
                    'gallery': test_b,
                    'num_train_pids': num_train_pids,
                    'num_query_pids': num_pids - num_train_pids,
                    'num_gallery_pids': num_pids - num_train_pids
                }
                splits.append(split)

                # use cameraB as query and cameraA as gallery
                split = {
                    'train': train,
                    'query': test_b,
                    'gallery': test_a,
                    'num_train_pids': num_train_pids,
                    'num_query_pids': num_pids - num_train_pids,
                    'num_gallery_pids': num_pids - num_train_pids
                }
                splits.append(split)

            print('Totally {} splits are created'.format(len(splits)))
            write_json(splits, self.split_path)
            print('Split file saved to {}'.format(self.split_path))
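
Concretely, for an image named 0001003.png the parsing above yields pid = int('0001') - 1 = 0 and camid = (int('003') - 1) // 2 = 1, i.e. cameras 1 and 2 fold into view 0 and cameras 3 and 4 into view 1.
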
@@ -1,258 +0,0 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import glob
import re
import sys
import urllib
import tarfile
import zipfile
import os.path as osp
from scipy.io import loadmat
import numpy as np
import h5py
from scipy.misc import imsave

from torchreid.utils.iotools import mkdir_if_missing, write_json, read_json
from .bases import BaseImageDataset


class CUHK03(BaseImageDataset):
    """CUHK03

    Reference:
        Li et al. DeepReID: Deep Filter Pairing Neural Network for Person Re-identification. CVPR 2014.

    URL: http://www.ee.cuhk.edu.hk/~xgwang/CUHK_identification.html#!

    Dataset statistics:
        # identities: 1360
        # images: 13164
        # cameras: 6
        # splits: 20 (classic)

    Args:
        split_id (int): split index (default: 0)
        cuhk03_labeled (bool): whether to load labeled images; if false, detected images are loaded (default: False)
    """
    dataset_dir = 'cuhk03'

    def __init__(self, root='data', split_id=0, cuhk03_labeled=False, cuhk03_classic_split=False, verbose=True, **kwargs):
        super(CUHK03, self).__init__(root)
        self.dataset_dir = osp.join(self.root, self.dataset_dir)
        self.data_dir = osp.join(self.dataset_dir, 'cuhk03_release')
        self.raw_mat_path = osp.join(self.data_dir, 'cuhk-03.mat')

        self.imgs_detected_dir = osp.join(self.dataset_dir, 'images_detected')
        self.imgs_labeled_dir = osp.join(self.dataset_dir, 'images_labeled')

        self.split_classic_det_json_path = osp.join(self.dataset_dir, 'splits_classic_detected.json')
        self.split_classic_lab_json_path = osp.join(self.dataset_dir, 'splits_classic_labeled.json')

        self.split_new_det_json_path = osp.join(self.dataset_dir, 'splits_new_detected.json')
        self.split_new_lab_json_path = osp.join(self.dataset_dir, 'splits_new_labeled.json')

        self.split_new_det_mat_path = osp.join(self.dataset_dir, 'cuhk03_new_protocol_config_detected.mat')
        self.split_new_lab_mat_path = osp.join(self.dataset_dir, 'cuhk03_new_protocol_config_labeled.mat')

        required_files = [
            self.dataset_dir,
            self.data_dir,
            self.raw_mat_path,
            self.split_new_det_mat_path,
            self.split_new_lab_mat_path
        ]
        self.check_before_run(required_files)

        self.preprocess_split()

        if cuhk03_labeled:
            split_path = self.split_classic_lab_json_path if cuhk03_classic_split else self.split_new_lab_json_path
        else:
            split_path = self.split_classic_det_json_path if cuhk03_classic_split else self.split_new_det_json_path

        splits = read_json(split_path)
        assert split_id < len(splits), 'Condition split_id ({}) < len(splits) ({}) is false'.format(split_id, len(splits))
        split = splits[split_id]

        train = split['train']
        query = split['query']
        gallery = split['gallery']

        self.init_attributes(train, query, gallery, **kwargs)

        if verbose:
            self.print_dataset_statistics(self.train, self.query, self.gallery)

    def preprocess_split(self):
        """
        This function is a bit complex and ugly, what it does is
        1. extract data from cuhk-03.mat and save as png images
        2. create 20 classic splits (Li et al. CVPR'14)
        3. create new split (Zhong et al. CVPR'17)
        """
        if osp.exists(self.imgs_labeled_dir) \
           and osp.exists(self.imgs_detected_dir) \
           and osp.exists(self.split_classic_det_json_path) \
           and osp.exists(self.split_classic_lab_json_path) \
           and osp.exists(self.split_new_det_json_path) \
           and osp.exists(self.split_new_lab_json_path):
            return

        mkdir_if_missing(self.imgs_detected_dir)
        mkdir_if_missing(self.imgs_labeled_dir)

        print('Extract image data from "{}" and save as png'.format(self.raw_mat_path))
        mat = h5py.File(self.raw_mat_path, 'r')

        def _deref(ref):
            return mat[ref][:].T

        def _process_images(img_refs, campid, pid, save_dir):
            img_paths = []  # Note: some persons only have images for one view
            for imgid, img_ref in enumerate(img_refs):
                img = _deref(img_ref)
                if img.size==0 or img.ndim<3:
                    continue  # skip empty cell
                # images are saved with the following format, index-1 (ensure uniqueness)
                # campid: index of camera pair (1-5)
                # pid: index of person in 'campid'-th camera pair
                # viewid: index of view, {1, 2}
                # imgid: index of image, (1-10)
                viewid = 1 if imgid<5 else 2
                img_name = '{:01d}_{:03d}_{:01d}_{:02d}.png'.format(campid+1, pid+1, viewid, imgid+1)
                img_path = osp.join(save_dir, img_name)
                if not osp.isfile(img_path):
                    imsave(img_path, img)
                img_paths.append(img_path)
            return img_paths

        def _extract_img(image_type):
            print('Processing {} images ...'.format(image_type))
            meta_data = []
            imgs_dir = self.imgs_detected_dir if image_type=='detected' else self.imgs_labeled_dir
            for campid, camp_ref in enumerate(mat[image_type][0]):
                camp = _deref(camp_ref)
                num_pids = camp.shape[0]
                for pid in range(num_pids):
                    img_paths = _process_images(camp[pid,:], campid, pid, imgs_dir)
                    assert len(img_paths) > 0, 'campid{}-pid{} has no images'.format(campid, pid)
                    meta_data.append((campid+1, pid+1, img_paths))
                print('- done camera pair {} with {} identities'.format(campid+1, num_pids))
            return meta_data

        meta_detected = _extract_img('detected')
        meta_labeled = _extract_img('labeled')

        def _extract_classic_split(meta_data, test_split):
            train, test = [], []
            num_train_pids, num_test_pids = 0, 0
            num_train_imgs, num_test_imgs = 0, 0
            for i, (campid, pid, img_paths) in enumerate(meta_data):

                if [campid, pid] in test_split:
                    for img_path in img_paths:
                        camid = int(osp.basename(img_path).split('_')[2]) - 1  # make it 0-based
                        test.append((img_path, num_test_pids, camid))
                    num_test_pids += 1
                    num_test_imgs += len(img_paths)
                else:
                    for img_path in img_paths:
                        camid = int(osp.basename(img_path).split('_')[2]) - 1  # make it 0-based
                        train.append((img_path, num_train_pids, camid))
                    num_train_pids += 1
                    num_train_imgs += len(img_paths)
            return train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs

        print('Creating classic splits (# = 20) ...')
        splits_classic_det, splits_classic_lab = [], []
        for split_ref in mat['testsets'][0]:
            test_split = _deref(split_ref).tolist()

            # create split for detected images
            train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs = \
                _extract_classic_split(meta_detected, test_split)
            splits_classic_det.append({
                'train': train, 'query': test, 'gallery': test,
                'num_train_pids': num_train_pids, 'num_train_imgs': num_train_imgs,
                'num_query_pids': num_test_pids, 'num_query_imgs': num_test_imgs,
                'num_gallery_pids': num_test_pids, 'num_gallery_imgs': num_test_imgs,
            })

            # create split for labeled images
            train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs = \
                _extract_classic_split(meta_labeled, test_split)
            splits_classic_lab.append({
                'train': train, 'query': test, 'gallery': test,
                'num_train_pids': num_train_pids, 'num_train_imgs': num_train_imgs,
                'num_query_pids': num_test_pids, 'num_query_imgs': num_test_imgs,
                'num_gallery_pids': num_test_pids, 'num_gallery_imgs': num_test_imgs,
            })

        write_json(splits_classic_det, self.split_classic_det_json_path)
        write_json(splits_classic_lab, self.split_classic_lab_json_path)

        def _extract_set(filelist, pids, pid2label, idxs, img_dir, relabel):
            tmp_set = []
            unique_pids = set()
            for idx in idxs:
                img_name = filelist[idx][0]
                camid = int(img_name.split('_')[2]) - 1  # make it 0-based
                pid = pids[idx]
                if relabel:
                    pid = pid2label[pid]
                img_path = osp.join(img_dir, img_name)
                tmp_set.append((img_path, int(pid), camid))
                unique_pids.add(pid)
            return tmp_set, len(unique_pids), len(idxs)

        def _extract_new_split(split_dict, img_dir):
            train_idxs = split_dict['train_idx'].flatten() - 1  # index-0
            pids = split_dict['labels'].flatten()
            train_pids = set(pids[train_idxs])
            pid2label = {pid: label for label, pid in enumerate(train_pids)}
            query_idxs = split_dict['query_idx'].flatten() - 1
            gallery_idxs = split_dict['gallery_idx'].flatten() - 1
            filelist = split_dict['filelist'].flatten()
            train_info = _extract_set(filelist, pids, pid2label, train_idxs, img_dir, relabel=True)
            query_info = _extract_set(filelist, pids, pid2label, query_idxs, img_dir, relabel=False)
            gallery_info = _extract_set(filelist, pids, pid2label, gallery_idxs, img_dir, relabel=False)
            return train_info, query_info, gallery_info

        print('Creating new split for detected images (767/700) ...')
        train_info, query_info, gallery_info = _extract_new_split(
            loadmat(self.split_new_det_mat_path),
            self.imgs_detected_dir,
        )
        split = [{
            'train': train_info[0],
            'query': query_info[0],
            'gallery': gallery_info[0],
            'num_train_pids': train_info[1],
            'num_train_imgs': train_info[2],
            'num_query_pids': query_info[1],
            'num_query_imgs': query_info[2],
            'num_gallery_pids': gallery_info[1],
            'num_gallery_imgs': gallery_info[2],
        }]
        write_json(split, self.split_new_det_json_path)

        print('Creating new split for labeled images (767/700) ...')
        train_info, query_info, gallery_info = _extract_new_split(
            loadmat(self.split_new_lab_mat_path),
            self.imgs_labeled_dir,
        )
        split = [{
            'train': train_info[0],
            'query': query_info[0],
            'gallery': gallery_info[0],
            'num_train_pids': train_info[1],
            'num_train_imgs': train_info[2],
            'num_query_pids': query_info[1],
            'num_query_imgs': query_info[2],
            'num_gallery_pids': gallery_info[1],
            'num_gallery_imgs': gallery_info[2],
        }]
        write_json(split, self.split_new_lab_json_path)
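
For orientation, the saved image names encode (campid, pid, viewid, imgid): a file such as 1_003_1_02.png comes from camera pair 1, person 3, view 1, image 2, and int(osp.basename(img_path).split('_')[2]) - 1 in _extract_classic_split recovers the 0-based camid (here 0).
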
@ -1,99 +0,0 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import glob
import re
import sys
import urllib
import tarfile
import zipfile
import os.path as osp
from scipy.io import loadmat
import numpy as np
import h5py
from scipy.misc import imsave

from torchreid.utils.iotools import mkdir_if_missing
from .bases import BaseImageDataset


class DukeMTMCreID(BaseImageDataset):
    """DukeMTMC-reID

    Reference:
    1. Ristani et al. Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking. ECCVW 2016.
    2. Zheng et al. Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in vitro. ICCV 2017.

    URL: https://github.com/layumi/DukeMTMC-reID_evaluation

    Dataset statistics:
    # identities: 1404 (train + query)
    # images: 16522 (train) + 2228 (query) + 17661 (gallery)
    # cameras: 8
    """
    dataset_dir = 'dukemtmc-reid'

    def __init__(self, root='data', verbose=True, **kwargs):
        super(DukeMTMCreID, self).__init__(root)
        self.dataset_dir = osp.join(self.root, self.dataset_dir)
        self.dataset_url = 'http://vision.cs.duke.edu/DukeMTMC/data/misc/DukeMTMC-reID.zip'
        self.train_dir = osp.join(self.dataset_dir, 'DukeMTMC-reID/bounding_box_train')
        self.query_dir = osp.join(self.dataset_dir, 'DukeMTMC-reID/query')
        self.gallery_dir = osp.join(self.dataset_dir, 'DukeMTMC-reID/bounding_box_test')

        self.download_data()

        required_files = [
            self.dataset_dir,
            self.train_dir,
            self.query_dir,
            self.gallery_dir
        ]
        self.check_before_run(required_files)

        train = self.process_dir(self.train_dir, relabel=True)
        query = self.process_dir(self.query_dir, relabel=False)
        gallery = self.process_dir(self.gallery_dir, relabel=False)

        self.init_attributes(train, query, gallery, **kwargs)

        if verbose:
            self.print_dataset_statistics(self.train, self.query, self.gallery)

    def download_data(self):
        if osp.exists(self.dataset_dir):
            return

        print('Creating directory {}'.format(self.dataset_dir))
        mkdir_if_missing(self.dataset_dir)
        fpath = osp.join(self.dataset_dir, osp.basename(self.dataset_url))

        print('Downloading DukeMTMC-reID dataset')
        urllib.urlretrieve(self.dataset_url, fpath)

        print('Extracting files')
        zip_ref = zipfile.ZipFile(fpath, 'r')
        zip_ref.extractall(self.dataset_dir)
        zip_ref.close()

    def process_dir(self, dir_path, relabel=False):
        img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
        pattern = re.compile(r'([-\d]+)_c(\d)')

        pid_container = set()
        for img_path in img_paths:
            pid, _ = map(int, pattern.search(img_path).groups())
            pid_container.add(pid)
        pid2label = {pid:label for label, pid in enumerate(pid_container)}

        dataset = []
        for img_path in img_paths:
            pid, camid = map(int, pattern.search(img_path).groups())
            assert 1 <= camid <= 8
            camid -= 1 # index starts from 0
            if relabel: pid = pid2label[pid]
            dataset.append((img_path, pid, camid))

        return dataset
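The '([-\d]+)_c(\d)' pattern above drives both passes of process_dir; as a minimal standalone sketch (the file name below is invented but follows the DukeMTMC-reID <pid>_c<camid>_f<frame>.jpg convention):

import re

pattern = re.compile(r'([-\d]+)_c(\d)')
sample = '0005_c3_f0051421.jpg'  # hypothetical DukeMTMC-reID file name
pid, camid = map(int, pattern.search(sample).groups())
assert (pid, camid) == (5, 3)
camid -= 1  # converted to a 0-based camera index, as in process_dir
print(pid, camid)  # -> 5 2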
@ -1,137 +0,0 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import glob
import re
import sys
import urllib
import tarfile
import zipfile
import os.path as osp
from scipy.io import loadmat
import numpy as np
import h5py
from scipy.misc import imsave

from torchreid.utils.iotools import mkdir_if_missing, write_json, read_json
from .bases import BaseVideoDataset


class DukeMTMCVidReID(BaseVideoDataset):
    """DukeMTMCVidReID

    Reference:
    Wu et al. Exploit the Unknown Gradually: One-Shot Video-Based Person
    Re-Identification by Stepwise Learning. CVPR 2018.

    URL: https://github.com/Yu-Wu/DukeMTMC-VideoReID

    Dataset statistics:
    # identities: 702 (train) + 702 (test)
    # tracklets: 2196 (train) + 2636 (test)
    """
    dataset_dir = 'dukemtmc-vidreid'

    def __init__(self, root='data', min_seq_len=0, verbose=True, **kwargs):
        super(DukeMTMCVidReID, self).__init__(root)
        self.dataset_dir = osp.join(self.root, self.dataset_dir)
        self.dataset_url = 'http://vision.cs.duke.edu/DukeMTMC/data/misc/DukeMTMC-VideoReID.zip'
        self.train_dir = osp.join(self.dataset_dir, 'DukeMTMC-VideoReID/train')
        self.query_dir = osp.join(self.dataset_dir, 'DukeMTMC-VideoReID/query')
        self.gallery_dir = osp.join(self.dataset_dir, 'DukeMTMC-VideoReID/gallery')
        self.split_train_json_path = osp.join(self.dataset_dir, 'split_train.json')
        self.split_query_json_path = osp.join(self.dataset_dir, 'split_query.json')
        self.split_gallery_json_path = osp.join(self.dataset_dir, 'split_gallery.json')
        self.min_seq_len = min_seq_len

        self.download_data()

        required_files = [
            self.dataset_dir,
            self.train_dir,
            self.query_dir,
            self.gallery_dir
        ]
        self.check_before_run(required_files)

        train = self.process_dir(self.train_dir, self.split_train_json_path, relabel=True)
        query = self.process_dir(self.query_dir, self.split_query_json_path, relabel=False)
        gallery = self.process_dir(self.gallery_dir, self.split_gallery_json_path, relabel=False)

        self.init_attributes(train, query, gallery, **kwargs)

        if verbose:
            self.print_dataset_statistics(self.train, self.query, self.gallery)

    def download_data(self):
        if osp.exists(self.dataset_dir):
            return

        print('Creating directory {}'.format(self.dataset_dir))
        mkdir_if_missing(self.dataset_dir)
        fpath = osp.join(self.dataset_dir, osp.basename(self.dataset_url))

        print('Downloading DukeMTMC-VideoReID dataset')
        urllib.urlretrieve(self.dataset_url, fpath)

        print('Extracting files')
        zip_ref = zipfile.ZipFile(fpath, 'r')
        zip_ref.extractall(self.dataset_dir)
        zip_ref.close()

    def process_dir(self, dir_path, json_path, relabel):
        if osp.exists(json_path):
            split = read_json(json_path)
            return split['tracklets']

        print('=> Generating split json file (** this might take a while **)')
        pdirs = glob.glob(osp.join(dir_path, '*')) # avoid .DS_Store
        print('Processing "{}" with {} person identities'.format(dir_path, len(pdirs)))

        pid_container = set()
        for pdir in pdirs:
            pid = int(osp.basename(pdir))
            pid_container.add(pid)
        pid2label = {pid:label for label, pid in enumerate(pid_container)}

        tracklets = []
        for pdir in pdirs:
            pid = int(osp.basename(pdir))
            if relabel:
                pid = pid2label[pid]
            tdirs = glob.glob(osp.join(pdir, '*'))
            for tdir in tdirs:
                raw_img_paths = glob.glob(osp.join(tdir, '*.jpg'))
                num_imgs = len(raw_img_paths)

                if num_imgs < self.min_seq_len:
                    continue

                img_paths = []
                for img_idx in range(num_imgs):
                    # some tracklets start from 0002 instead of 0001
                    img_idx_name = 'F' + str(img_idx+1).zfill(4)
                    res = glob.glob(osp.join(tdir, '*' + img_idx_name + '*.jpg'))
                    if len(res) == 0:
                        print('Warn: index name {} in {} is missing, jump to next'.format(img_idx_name, tdir))
                        continue
                    img_paths.append(res[0])
                img_name = osp.basename(img_paths[0])
                if img_name.find('_') == -1:
                    # old naming format: 0001C6F0099X30823.jpg
                    camid = int(img_name[5]) - 1
                else:
                    # new naming format: 0001_C6_F0099_X30823.jpg
                    camid = int(img_name[6]) - 1
                img_paths = tuple(img_paths)
                tracklets.append((img_paths, pid, camid))

        print('Saving split to {}'.format(json_path))
        split_dict = {
            'tracklets': tracklets,
        }
        write_json(split_dict, json_path)

        return tracklets
@ -1,139 +0,0 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import glob
import re
import sys
import urllib
import tarfile
import zipfile
import os.path as osp
from scipy.io import loadmat
import numpy as np
import h5py
from scipy.misc import imsave

from torchreid.utils.iotools import mkdir_if_missing, write_json, read_json
from .bases import BaseImageDataset


class GRID(BaseImageDataset):
    """GRID

    Reference:
    Loy et al. Multi-camera activity correlation analysis. CVPR 2009.

    URL: http://personal.ie.cuhk.edu.hk/~ccloy/downloads_qmul_underground_reid.html

    Dataset statistics:
    # identities: 250
    # images: 1275
    # cameras: 8
    """
    dataset_dir = 'grid'

    def __init__(self, root='data', split_id=0, verbose=True, **kwargs):
        super(GRID, self).__init__(root)
        self.dataset_dir = osp.join(self.root, self.dataset_dir)
        self.dataset_url = 'http://personal.ie.cuhk.edu.hk/~ccloy/files/datasets/underground_reid.zip'
        self.probe_path = osp.join(self.dataset_dir, 'underground_reid', 'probe')
        self.gallery_path = osp.join(self.dataset_dir, 'underground_reid', 'gallery')
        self.split_mat_path = osp.join(self.dataset_dir, 'underground_reid', 'features_and_partitions.mat')
        self.split_path = osp.join(self.dataset_dir, 'splits.json')

        self.download_data()

        required_files = [
            self.dataset_dir,
            self.probe_path,
            self.gallery_path,
            self.split_mat_path
        ]
        self.check_before_run(required_files)

        self.prepare_split()
        splits = read_json(self.split_path)
        if split_id >= len(splits):
            raise ValueError('split_id exceeds range, received {}, but expected between 0 and {}'.format(split_id, len(splits)-1))
        split = splits[split_id]

        train = split['train']
        query = split['query']
        gallery = split['gallery']

        train = [tuple(item) for item in train]
        query = [tuple(item) for item in query]
        gallery = [tuple(item) for item in gallery]

        self.init_attributes(train, query, gallery, **kwargs)

        if verbose:
            self.print_dataset_statistics(self.train, self.query, self.gallery)

    def download_data(self):
        if osp.exists(self.dataset_dir):
            return

        print('Creating directory {}'.format(self.dataset_dir))
        mkdir_if_missing(self.dataset_dir)
        fpath = osp.join(self.dataset_dir, osp.basename(self.dataset_url))

        print('Downloading GRID dataset')
        urllib.urlretrieve(self.dataset_url, fpath)

        print('Extracting files')
        zip_ref = zipfile.ZipFile(fpath, 'r')
        zip_ref.extractall(self.dataset_dir)
        zip_ref.close()

    def prepare_split(self):
        if not osp.exists(self.split_path):
            print('Creating 10 random splits')
            split_mat = loadmat(self.split_mat_path)
            trainIdxAll = split_mat['trainIdxAll'][0] # length = 10
            probe_img_paths = sorted(glob.glob(osp.join(self.probe_path, '*.jpeg')))
            gallery_img_paths = sorted(glob.glob(osp.join(self.gallery_path, '*.jpeg')))

            splits = []
            for split_idx in range(10):
                train_idxs = trainIdxAll[split_idx][0][0][2][0].tolist()
                assert len(train_idxs) == 125
                idx2label = {idx: label for label, idx in enumerate(train_idxs)}

                train, query, gallery = [], [], []

                # processing probe folder
                for img_path in probe_img_paths:
                    img_name = osp.basename(img_path)
                    img_idx = int(img_name.split('_')[0])
                    camid = int(img_name.split('_')[1]) - 1 # index starts from 0
                    if img_idx in train_idxs:
                        train.append((img_path, idx2label[img_idx], camid))
                    else:
                        query.append((img_path, img_idx, camid))

                # process gallery folder
                for img_path in gallery_img_paths:
                    img_name = osp.basename(img_path)
                    img_idx = int(img_name.split('_')[0])
                    camid = int(img_name.split('_')[1]) - 1 # index starts from 0
                    if img_idx in train_idxs:
                        train.append((img_path, idx2label[img_idx], camid))
                    else:
                        gallery.append((img_path, img_idx, camid))

                split = {
                    'train': train,
                    'query': query,
                    'gallery': gallery,
                    'num_train_pids': 125,
                    'num_query_pids': 125,
                    'num_gallery_pids': 900
                }
                splits.append(split)

            print('Totally {} splits are created'.format(len(splits)))
            write_json(splits, self.split_path)
            print('Split file saved to {}'.format(self.split_path))
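The routing rule in prepare_split (an image joins the training set when its identity index was sampled for that split; remaining probe images become queries and remaining gallery images stay in the gallery) can be exercised in isolation. The file names here are fabricated but follow the <idx>_<cam>_... pattern parsed above:

train_idxs = [1, 3]  # identity indices sampled for training (illustrative)
idx2label = {idx: label for label, idx in enumerate(train_idxs)}

train, query = [], []
for img_name in ['0001_1_25_100.jpeg', '0002_2_33_107.jpeg']:  # hypothetical probe files
    img_idx = int(img_name.split('_')[0])
    camid = int(img_name.split('_')[1]) - 1  # index starts from 0, as above
    if img_idx in train_idxs:
        train.append((img_name, idx2label[img_idx], camid))
    else:
        query.append((img_name, img_idx, camid))

print(train)  # [('0001_1_25_100.jpeg', 0, 0)]
print(query)  # [('0002_2_33_107.jpeg', 2, 1)]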
@ -1,162 +0,0 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import glob
import re
import sys
import urllib
import tarfile
import zipfile
import os.path as osp
from scipy.io import loadmat
import numpy as np
import h5py
from scipy.misc import imsave
from collections import defaultdict
import copy
import random

from torchreid.utils.iotools import mkdir_if_missing, write_json, read_json
from .bases import BaseImageDataset


class iLIDS(BaseImageDataset):
    """QMUL-iLIDS

    Reference:
    Zheng et al. Associating Groups of People. BMVC 2009.

    Dataset statistics:
    # identities: 119
    # images: 476
    # cameras: 8 (not explicitly provided)
    """
    dataset_dir = 'ilids'

    def __init__(self, root='data', split_id=0, verbose=True, **kwargs):
        super(iLIDS, self).__init__(root)
        self.dataset_dir = osp.join(self.root, self.dataset_dir)
        self.dataset_url = 'http://www.eecs.qmul.ac.uk/~jason/data/i-LIDS_Pedestrian.tgz'
        self.data_dir = osp.join(self.dataset_dir, 'i-LIDS_Pedestrian/Persons')
        self.split_path = osp.join(self.dataset_dir, 'splits.json')

        self.download_data()

        required_files = [
            self.dataset_dir,
            self.data_dir
        ]
        self.check_before_run(required_files)

        self.prepare_split()
        splits = read_json(self.split_path)
        if split_id >= len(splits):
            raise ValueError('split_id exceeds range, received {}, but expected between 0 and {}'.format(split_id, len(splits)-1))
        split = splits[split_id]

        train, query, gallery = self.process_split(split)

        self.init_attributes(train, query, gallery, **kwargs)

        if verbose:
            self.print_dataset_statistics(self.train, self.query, self.gallery)

    def download_data(self):
        if osp.exists(self.dataset_dir):
            return

        mkdir_if_missing(self.dataset_dir)
        fpath = osp.join(self.dataset_dir, osp.basename(self.dataset_url))

        print('Downloading QMUL-iLIDS dataset')
        urllib.urlretrieve(self.dataset_url, fpath)

        print('Extracting files')
        tar = tarfile.open(fpath)
        tar.extractall(path=self.dataset_dir)
        tar.close()

    def prepare_split(self):
        if not osp.exists(self.split_path):
            print('Creating splits ...')

            paths = glob.glob(osp.join(self.data_dir, '*.jpg'))
            img_names = [osp.basename(path) for path in paths]
            num_imgs = len(img_names)
            assert num_imgs == 476, 'There should be 476 images, but got {}, please check the data'.format(num_imgs)

            # store image names
            # image naming format:
            # the first four digits denote the person ID
            # the last four digits denote the sequence index
            pid_dict = defaultdict(list)
            for img_name in img_names:
                pid = int(img_name[:4])
                pid_dict[pid].append(img_name)
            pids = list(pid_dict.keys())
            num_pids = len(pids)
            assert num_pids == 119, 'There should be 119 identities, but got {}, please check the data'.format(num_pids)

            num_train_pids = int(num_pids * 0.5)
            num_test_pids = num_pids - num_train_pids # supposed to be 60

            splits = []
            for _ in range(10):
                # randomly choose num_train_pids train IDs and num_test_pids test IDs
                pids_copy = copy.deepcopy(pids)
                random.shuffle(pids_copy)
                train_pids = pids_copy[:num_train_pids]
                test_pids = pids_copy[num_train_pids:]

                train = []
                query = []
                gallery = []

                # for train IDs, all images are used in the train set.
                for pid in train_pids:
                    img_names = pid_dict[pid]
                    train.extend(img_names)

                # for each test ID, randomly choose two images, one for
                # query and the other one for gallery.
                for pid in test_pids:
                    img_names = pid_dict[pid]
                    samples = random.sample(img_names, 2)
                    query.append(samples[0])
                    gallery.append(samples[1])

                split = {'train': train, 'query': query, 'gallery': gallery}
                splits.append(split)

            print('Totally {} splits are created'.format(len(splits)))
            write_json(splits, self.split_path)
            print('Split file is saved to {}'.format(self.split_path))

    def get_pid2label(self, img_names):
        pid_container = set()
        for img_name in img_names:
            pid = int(img_name[:4])
            pid_container.add(pid)
        pid2label = {pid: label for label, pid in enumerate(pid_container)}
        return pid2label

    def parse_img_names(self, img_names, pid2label=None):
        output = []
        for img_name in img_names:
            pid = int(img_name[:4])
            if pid2label is not None:
                pid = pid2label[pid]
            camid = int(img_name[4:7]) - 1 # 0-based
            img_path = osp.join(self.data_dir, img_name)
            output.append((img_path, pid, camid))
        return output

    def process_split(self, split):
        train, query, gallery = [], [], []
        train_pid2label = self.get_pid2label(split['train'])
        train = self.parse_img_names(split['train'], train_pid2label)
        query = self.parse_img_names(split['query'])
        gallery = self.parse_img_names(split['gallery'])
        return train, query, gallery
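Under the naming convention noted in prepare_split (the first four digits of a file name are the person ID), get_pid2label and parse_img_names reduce to a few lines; the names below are invented:

img_names = ['00010042.jpg', '00030007.jpg', '00010051.jpg']  # hypothetical
pid_container = set(int(name[:4]) for name in img_names)
pid2label = {pid: label for label, pid in enumerate(pid_container)}
# sparse person IDs {1, 3} become contiguous training labels, e.g. {1: 0, 3: 1}

pid = int(img_names[0][:4])          # -> 1
camid = int(img_names[0][4:7]) - 1   # -> 3, the 0-based index used above
print(pid2label, pid, camid)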
@ -1,151 +0,0 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import glob
import re
import sys
import urllib
import tarfile
import zipfile
import os.path as osp
from scipy.io import loadmat
import numpy as np
import h5py
from scipy.misc import imsave

from torchreid.utils.iotools import mkdir_if_missing, write_json, read_json
from .bases import BaseVideoDataset


class iLIDSVID(BaseVideoDataset):
    """iLIDS-VID

    Reference:
    Wang et al. Person Re-Identification by Video Ranking. ECCV 2014.

    URL: http://www.eecs.qmul.ac.uk/~xiatian/downloads_qmul_iLIDS-VID_ReID_dataset.html

    Dataset statistics:
    # identities: 300
    # tracklets: 600
    # cameras: 2
    """
    dataset_dir = 'ilids-vid'

    def __init__(self, root='data', split_id=0, verbose=True, **kwargs):
        super(iLIDSVID, self).__init__(root)
        self.dataset_dir = osp.join(self.root, self.dataset_dir)
        self.dataset_url = 'http://www.eecs.qmul.ac.uk/~xiatian/iLIDS-VID/iLIDS-VID.tar'
        self.data_dir = osp.join(self.dataset_dir, 'i-LIDS-VID')
        self.split_dir = osp.join(self.dataset_dir, 'train-test people splits')
        self.split_mat_path = osp.join(self.split_dir, 'train_test_splits_ilidsvid.mat')
        self.split_path = osp.join(self.dataset_dir, 'splits.json')
        self.cam_1_path = osp.join(self.dataset_dir, 'i-LIDS-VID/sequences/cam1')
        self.cam_2_path = osp.join(self.dataset_dir, 'i-LIDS-VID/sequences/cam2')

        self.download_data()

        required_files = [
            self.dataset_dir,
            self.data_dir,
            self.split_dir
        ]
        self.check_before_run(required_files)

        self.prepare_split()
        splits = read_json(self.split_path)
        if split_id >= len(splits):
            raise ValueError('split_id exceeds range, received {}, but expected between 0 and {}'.format(split_id, len(splits)-1))
        split = splits[split_id]
        train_dirs, test_dirs = split['train'], split['test']

        train = self.process_data(train_dirs, cam1=True, cam2=True)
        query = self.process_data(test_dirs, cam1=True, cam2=False)
        gallery = self.process_data(test_dirs, cam1=False, cam2=True)

        self.init_attributes(train, query, gallery, **kwargs)

        if verbose:
            self.print_dataset_statistics(self.train, self.query, self.gallery)

    def download_data(self):
        if osp.exists(self.dataset_dir):
            return

        mkdir_if_missing(self.dataset_dir)
        fpath = osp.join(self.dataset_dir, osp.basename(self.dataset_url))

        print('Downloading iLIDS-VID dataset')
        urllib.urlretrieve(self.dataset_url, fpath)

        print('Extracting files')
        tar = tarfile.open(fpath)
        tar.extractall(path=self.dataset_dir)
        tar.close()

    def prepare_split(self):
        if not osp.exists(self.split_path):
            print('Creating splits ...')
            mat_split_data = loadmat(self.split_mat_path)['ls_set']

            num_splits = mat_split_data.shape[0]
            num_total_ids = mat_split_data.shape[1]
            assert num_splits == 10
            assert num_total_ids == 300
            num_ids_each = num_total_ids // 2

            # pids in mat_split_data are indices, so we need to transform them
            # to real pids
            person_cam1_dirs = sorted(glob.glob(osp.join(self.cam_1_path, '*')))
            person_cam2_dirs = sorted(glob.glob(osp.join(self.cam_2_path, '*')))

            person_cam1_dirs = [osp.basename(item) for item in person_cam1_dirs]
            person_cam2_dirs = [osp.basename(item) for item in person_cam2_dirs]

            # make sure persons in one camera view can be found in the other camera view
            assert set(person_cam1_dirs) == set(person_cam2_dirs)

            splits = []
            for i_split in range(num_splits):
                # first 50% for testing and the remaining for training, following Wang et al. ECCV'14.
                train_idxs = sorted(list(mat_split_data[i_split, num_ids_each:]))
                test_idxs = sorted(list(mat_split_data[i_split, :num_ids_each]))

                train_idxs = [int(i)-1 for i in train_idxs]
                test_idxs = [int(i)-1 for i in test_idxs]

                # transform pids to person dir names
                train_dirs = [person_cam1_dirs[i] for i in train_idxs]
                test_dirs = [person_cam1_dirs[i] for i in test_idxs]

                split = {'train': train_dirs, 'test': test_dirs}
                splits.append(split)

            print('Totally {} splits are created, following Wang et al. ECCV\'14'.format(len(splits)))
            print('Split file is saved to {}'.format(self.split_path))
            write_json(splits, self.split_path)

    def process_data(self, dirnames, cam1=True, cam2=True):
        tracklets = []
        dirname2pid = {dirname:i for i, dirname in enumerate(dirnames)}

        for dirname in dirnames:
            if cam1:
                person_dir = osp.join(self.cam_1_path, dirname)
                img_names = glob.glob(osp.join(person_dir, '*.png'))
                assert len(img_names) > 0
                img_names = tuple(img_names)
                pid = dirname2pid[dirname]
                tracklets.append((img_names, pid, 0))

            if cam2:
                person_dir = osp.join(self.cam_2_path, dirname)
                img_names = glob.glob(osp.join(person_dir, '*.png'))
                assert len(img_names) > 0
                img_names = tuple(img_names)
                pid = dirname2pid[dirname]
                tracklets.append((img_names, pid, 1))

        return tracklets
@ -1,89 +0,0 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import glob
import re
import sys
import urllib
import tarfile
import zipfile
import os.path as osp
from scipy.io import loadmat
import numpy as np
import h5py
from scipy.misc import imsave

from .bases import BaseImageDataset


class Market1501(BaseImageDataset):
    """Market1501

    Reference:
    Zheng et al. Scalable Person Re-identification: A Benchmark. ICCV 2015.

    URL: http://www.liangzheng.org/Project/project_reid.html

    Dataset statistics:
    # identities: 1501 (+1 for background)
    # images: 12936 (train) + 3368 (query) + 15913 (gallery)
    """
    dataset_dir = 'market1501'

    def __init__(self, root='data', verbose=True, market1501_500k=False, **kwargs):
        super(Market1501, self).__init__(root)
        self.dataset_dir = osp.join(self.root, self.dataset_dir)
        self.train_dir = osp.join(self.dataset_dir, 'bounding_box_train')
        self.query_dir = osp.join(self.dataset_dir, 'query')
        self.gallery_dir = osp.join(self.dataset_dir, 'bounding_box_test')
        self.extra_gallery_dir = osp.join(self.dataset_dir, 'images')
        self.market1501_500k = market1501_500k

        required_files = [
            self.dataset_dir,
            self.train_dir,
            self.query_dir,
            self.gallery_dir
        ]
        if self.market1501_500k:
            required_files.append(self.extra_gallery_dir)
        self.check_before_run(required_files)

        train = self.process_dir(self.train_dir, relabel=True)
        query = self.process_dir(self.query_dir, relabel=False)
        gallery = self.process_dir(self.gallery_dir, relabel=False)
        if self.market1501_500k:
            gallery += self.process_dir(self.extra_gallery_dir, relabel=False)

        self.init_attributes(train, query, gallery, **kwargs)

        if verbose:
            self.print_dataset_statistics(self.train, self.query, self.gallery)

    def process_dir(self, dir_path, relabel=False):
        img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
        pattern = re.compile(r'([-\d]+)_c(\d)')

        pid_container = set()
        for img_path in img_paths:
            pid, _ = map(int, pattern.search(img_path).groups())
            if pid == -1:
                continue # junk images are just ignored
            pid_container.add(pid)
        pid2label = {pid:label for label, pid in enumerate(pid_container)}

        dataset = []
        for img_path in img_paths:
            pid, camid = map(int, pattern.search(img_path).groups())
            if pid == -1:
                continue # junk images are just ignored
            assert 0 <= pid <= 1501 # pid == 0 means background
            assert 1 <= camid <= 6
            camid -= 1 # index starts from 0
            if relabel:
                pid = pid2label[pid]
            dataset.append((img_path, pid, camid))

        return dataset
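Market-1501 marks junk/distractor crops with pid -1 in the file name, and both loops above skip them; a short standalone sketch makes this concrete (sample names are invented but follow the Market-1501 convention):

import re

pattern = re.compile(r'([-\d]+)_c(\d)')
samples = ['0002_c1s1_000451_03.jpg', '-1_c3s2_000127_02.jpg']  # second is junk

dataset = []
for name in samples:
    pid, camid = map(int, pattern.search(name).groups())
    if pid == -1:
        continue  # junk images are just ignored, as in process_dir
    dataset.append((name, pid, camid - 1))

print(dataset)  # [('0002_c1s1_000451_03.jpg', 2, 0)]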
@ -1,115 +0,0 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import glob
import re
import sys
import urllib
import tarfile
import zipfile
import os.path as osp
from scipy.io import loadmat
import numpy as np
import h5py
from scipy.misc import imsave

from .bases import BaseVideoDataset


class Mars(BaseVideoDataset):
    """MARS

    Reference:
    Zheng et al. MARS: A Video Benchmark for Large-Scale Person Re-identification. ECCV 2016.

    URL: http://www.liangzheng.com.cn/Project/project_mars.html

    Dataset statistics:
    # identities: 1261
    # tracklets: 8298 (train) + 1980 (query) + 9330 (gallery)
    # cameras: 6
    """
    dataset_dir = 'mars'

    def __init__(self, root='data', min_seq_len=0, verbose=True, **kwargs):
        super(Mars, self).__init__(root)
        self.dataset_dir = osp.join(self.root, self.dataset_dir)
        self.train_name_path = osp.join(self.dataset_dir, 'info/train_name.txt')
        self.test_name_path = osp.join(self.dataset_dir, 'info/test_name.txt')
        self.track_train_info_path = osp.join(self.dataset_dir, 'info/tracks_train_info.mat')
        self.track_test_info_path = osp.join(self.dataset_dir, 'info/tracks_test_info.mat')
        self.query_IDX_path = osp.join(self.dataset_dir, 'info/query_IDX.mat')

        required_files = [
            self.dataset_dir,
            self.train_name_path,
            self.test_name_path,
            self.track_train_info_path,
            self.track_test_info_path,
            self.query_IDX_path
        ]
        self.check_before_run(required_files)

        train_names = self.get_names(self.train_name_path)
        test_names = self.get_names(self.test_name_path)
        track_train = loadmat(self.track_train_info_path)['track_train_info'] # numpy.ndarray (8298, 4)
        track_test = loadmat(self.track_test_info_path)['track_test_info'] # numpy.ndarray (12180, 4)
        query_IDX = loadmat(self.query_IDX_path)['query_IDX'].squeeze() # numpy.ndarray (1980,)
        query_IDX -= 1 # index from 0
        track_query = track_test[query_IDX,:]
        gallery_IDX = [i for i in range(track_test.shape[0]) if i not in query_IDX]
        track_gallery = track_test[gallery_IDX,:]

        train = self.process_data(train_names, track_train, home_dir='bbox_train', relabel=True, min_seq_len=min_seq_len)
        query = self.process_data(test_names, track_query, home_dir='bbox_test', relabel=False, min_seq_len=min_seq_len)
        gallery = self.process_data(test_names, track_gallery, home_dir='bbox_test', relabel=False, min_seq_len=min_seq_len)

        self.init_attributes(train, query, gallery, **kwargs)

        if verbose:
            self.print_dataset_statistics(self.train, self.query, self.gallery)

    def get_names(self, fpath):
        names = []
        with open(fpath, 'r') as f:
            for line in f:
                new_line = line.rstrip()
                names.append(new_line)
        return names

    def process_data(self, names, meta_data, home_dir=None, relabel=False, min_seq_len=0):
        assert home_dir in ['bbox_train', 'bbox_test']
        num_tracklets = meta_data.shape[0]
        pid_list = list(set(meta_data[:,2].tolist()))
        num_pids = len(pid_list)

        if relabel: pid2label = {pid:label for label, pid in enumerate(pid_list)}
        tracklets = []

        for tracklet_idx in range(num_tracklets):
            data = meta_data[tracklet_idx,...]
            start_index, end_index, pid, camid = data
            if pid == -1:
                continue # junk images are just ignored
            assert 1 <= camid <= 6
            if relabel: pid = pid2label[pid]
            camid -= 1 # index starts from 0
            img_names = names[start_index - 1:end_index]

            # make sure image names correspond to the same person
            pnames = [img_name[:4] for img_name in img_names]
            assert len(set(pnames)) == 1, 'Error: a single tracklet contains different person images'

            # make sure all images are captured under the same camera
            camnames = [img_name[5] for img_name in img_names]
            assert len(set(camnames)) == 1, 'Error: images are captured under different cameras!'

            # append image names with directory information
            img_paths = [osp.join(self.dataset_dir, home_dir, img_name[:4], img_name) for img_name in img_names]
            if len(img_paths) >= min_seq_len:
                img_paths = tuple(img_paths)
                tracklets.append((img_paths, pid, camid))

        return tracklets
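Each row of tracks_train_info.mat / tracks_test_info.mat is (start_index, end_index, pid, camid) with 1-based, inclusive indices into the name list; the slice and sanity checks in process_data behave as in this toy example (names fabricated in the MARS style, person-ID prefix at [:4], camera digit at [5]):

names = ['0001C1T0001F001.jpg', '0001C1T0001F002.jpg', '0001C1T0001F003.jpg']
start_index, end_index = 1, 3
img_names = names[start_index - 1:end_index]  # all three frames
assert len(img_names) == 3

pnames = [name[:4] for name in img_names]   # person-ID prefix
camnames = [name[5] for name in img_names]  # camera digit
assert len(set(pnames)) == 1 and len(set(camnames)) == 1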
@ -1,108 +0,0 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import glob
import re
import sys
import urllib
import tarfile
import zipfile
import os.path as osp
from scipy.io import loadmat
import numpy as np
import h5py
from scipy.misc import imsave

from .bases import BaseImageDataset


# To adapt to different versions
# Log:
# 22.01.2019: v1 and v2 only differ in dir names
TRAIN_DIR_KEY = 'train_dir'
TEST_DIR_KEY = 'test_dir'
VERSION_DICT = {
    'MSMT17_V1': {
        TRAIN_DIR_KEY: 'train',
        TEST_DIR_KEY: 'test',
    },
    'MSMT17_V2': {
        TRAIN_DIR_KEY: 'mask_train_v2',
        TEST_DIR_KEY: 'mask_test_v2',
    }
}


class MSMT17(BaseImageDataset):
    """MSMT17

    Reference:
    Wei et al. Person Transfer GAN to Bridge Domain Gap for Person Re-Identification. CVPR 2018.

    URL: http://www.pkuvmc.com/publications/msmt17.html

    Dataset statistics:
    # identities: 4101
    # images: 32621 (train) + 11659 (query) + 82161 (gallery)
    # cameras: 15
    """
    dataset_dir = 'msmt17'

    def __init__(self, root='data', verbose=True, **kwargs):
        super(MSMT17, self).__init__(root)
        self.dataset_dir = osp.join(self.root, self.dataset_dir)
        has_main_dir = False
        for main_dir in VERSION_DICT:
            if osp.exists(osp.join(self.dataset_dir, main_dir)):
                train_dir = VERSION_DICT[main_dir][TRAIN_DIR_KEY]
                test_dir = VERSION_DICT[main_dir][TEST_DIR_KEY]
                has_main_dir = True
                break
        assert has_main_dir, 'Dataset folder not found'
        self.train_dir = osp.join(self.dataset_dir, main_dir, train_dir)
        self.test_dir = osp.join(self.dataset_dir, main_dir, test_dir)
        self.list_train_path = osp.join(self.dataset_dir, main_dir, 'list_train.txt')
        self.list_val_path = osp.join(self.dataset_dir, main_dir, 'list_val.txt')
        self.list_query_path = osp.join(self.dataset_dir, main_dir, 'list_query.txt')
        self.list_gallery_path = osp.join(self.dataset_dir, main_dir, 'list_gallery.txt')

        required_files = [
            self.dataset_dir,
            self.train_dir,
            self.test_dir
        ]
        self.check_before_run(required_files)

        train = self.process_dir(self.train_dir, self.list_train_path)
        val = self.process_dir(self.train_dir, self.list_val_path)
        query = self.process_dir(self.test_dir, self.list_query_path)
        gallery = self.process_dir(self.test_dir, self.list_gallery_path)

        # Note: to fairly compare with published methods on the conventional ReID setting,
        # do not add val images to the training set.
        if 'combineall' in kwargs and kwargs['combineall']:
            train += val

        self.init_attributes(train, query, gallery, **kwargs)

        if verbose:
            self.print_dataset_statistics(self.train, self.query, self.gallery)

    def process_dir(self, dir_path, list_path):
        with open(list_path, 'r') as txt:
            lines = txt.readlines()
        dataset = []
        pid_container = set()
        for img_idx, img_info in enumerate(lines):
            img_path, pid = img_info.split(' ')
            pid = int(pid) # no need to relabel
            camid = int(img_path.split('_')[2]) - 1 # index starts from 0
            img_path = osp.join(dir_path, img_path)
            dataset.append((img_path, pid, camid))
            pid_container.add(pid)
        num_pids = len(pid_container)
        for idx, pid in enumerate(pid_container):
            if idx != pid:
                raise RuntimeError('pid does not start from 0 and increment by 1')
        return dataset
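Each line of the list files pairs a relative image path with an already 0-based pid, and the camera ID is the third '_'-separated field of the file name; parsed in isolation (the line below is invented in the MSMT17 style):

line = 'train/0000/0000_000_01_0303morning_0008_0.jpg 0'  # hypothetical list_train.txt entry
img_path, pid = line.split(' ')
pid = int(pid)                            # no relabeling needed
camid = int(img_path.split('_')[2]) - 1   # '01' -> 0
print(img_path, pid, camid)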
@ -1,117 +0,0 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import glob
import re
import sys
import urllib
import tarfile
import zipfile
import os.path as osp
from scipy.io import loadmat
import numpy as np
import h5py
from scipy.misc import imsave
from collections import defaultdict
import copy
import random

from torchreid.utils.iotools import mkdir_if_missing, write_json, read_json
from .bases import BaseImageDataset


class PRID(BaseImageDataset):
    """PRID (single-shot version of prid-2011)

    Reference:
    Hirzer et al. Person Re-Identification by Descriptive and Discriminative Classification. SCIA 2011.

    URL: https://www.tugraz.at/institute/icg/research/team-bischof/lrs/downloads/PRID11/

    Dataset statistics:
    - Two views
    - View A captures 385 identities
    - View B captures 749 identities
    - 200 identities appear in both views
    """
    dataset_dir = 'prid2011'

    def __init__(self, root='data', split_id=0, verbose=True, **kwargs):
        super(PRID, self).__init__(root)
        self.dataset_dir = osp.join(self.root, self.dataset_dir)
        self.cam_a_dir = osp.join(self.dataset_dir, 'prid_2011', 'single_shot', 'cam_a')
        self.cam_b_dir = osp.join(self.dataset_dir, 'prid_2011', 'single_shot', 'cam_b')
        self.split_path = osp.join(self.dataset_dir, 'splits_single_shot.json')

        required_files = [
            self.dataset_dir,
            self.cam_a_dir,
            self.cam_b_dir
        ]
        self.check_before_run(required_files)

        self.prepare_split()
        splits = read_json(self.split_path)
        if split_id >= len(splits):
            raise ValueError('split_id exceeds range, received {}, but expected between 0 and {}'.format(split_id, len(splits)-1))
        split = splits[split_id]

        train, query, gallery = self.process_split(split)

        self.init_attributes(train, query, gallery, **kwargs)

        if verbose:
            self.print_dataset_statistics(self.train, self.query, self.gallery)

    def prepare_split(self):
        if not osp.exists(self.split_path):
            print('Creating splits ...')

            splits = []
            for _ in range(10):
                # randomly sample 100 IDs for train and use the remaining 100 IDs for test
                # (note: there are only 200 IDs appearing in both views)
                pids = [i for i in range(1, 201)]
                train_pids = random.sample(pids, 100)
                train_pids.sort()
                test_pids = [i for i in pids if i not in train_pids]
                split = {'train': train_pids, 'test': test_pids}
                splits.append(split)

            print('Totally {} splits are created'.format(len(splits)))
            write_json(splits, self.split_path)
            print('Split file is saved to {}'.format(self.split_path))

    def process_split(self, split):
        train, query, gallery = [], [], []
        train_pids = split['train']
        test_pids = split['test']

        train_pid2label = {pid: label for label, pid in enumerate(train_pids)}

        # train
        train = []
        for pid in train_pids:
            img_name = 'person_' + str(pid).zfill(4) + '.png'
            pid = train_pid2label[pid]
            img_a_path = osp.join(self.cam_a_dir, img_name)
            train.append((img_a_path, pid, 0))
            img_b_path = osp.join(self.cam_b_dir, img_name)
            train.append((img_b_path, pid, 1))

        # query and gallery
        query, gallery = [], []
        for pid in test_pids:
            img_name = 'person_' + str(pid).zfill(4) + '.png'
            img_a_path = osp.join(self.cam_a_dir, img_name)
            query.append((img_a_path, pid, 0))
            img_b_path = osp.join(self.cam_b_dir, img_name)
            gallery.append((img_b_path, pid, 1))
        for pid in range(201, 750):
            img_name = 'person_' + str(pid).zfill(4) + '.png'
            img_b_path = osp.join(self.cam_b_dir, img_name)
            gallery.append((img_b_path, pid, 1))

        return train, query, gallery
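prepare_split above draws 10 independent halvings of the 200 identities visible in both views; one such draw, standalone:

import random

pids = list(range(1, 201))                 # identities present in both views
train_pids = sorted(random.sample(pids, 100))
test_pids = [pid for pid in pids if pid not in train_pids]
assert len(train_pids) == len(test_pids) == 100

img_name = 'person_' + str(train_pids[0]).zfill(4) + '.png'  # e.g. 'person_0002.png'
print(img_name)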
@ -1,87 +0,0 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import glob
import re
import sys
import urllib
import tarfile
import zipfile
import os.path as osp
from scipy.io import loadmat
import numpy as np
import h5py
from scipy.misc import imsave

from torchreid.utils.iotools import mkdir_if_missing, write_json, read_json
from .bases import BaseVideoDataset


class PRID2011(BaseVideoDataset):
    """PRID2011

    Reference:
    Hirzer et al. Person Re-Identification by Descriptive and Discriminative Classification. SCIA 2011.

    URL: https://www.tugraz.at/institute/icg/research/team-bischof/lrs/downloads/PRID11/

    Dataset statistics:
    # identities: 200
    # tracklets: 400
    # cameras: 2
    """
    dataset_dir = 'prid2011'

    def __init__(self, root='data', split_id=0, min_seq_len=0, verbose=True, **kwargs):
        super(PRID2011, self).__init__(root)
        self.dataset_dir = osp.join(self.root, self.dataset_dir)
        self.split_path = osp.join(self.dataset_dir, 'splits_prid2011.json')
        self.cam_a_dir = osp.join(self.dataset_dir, 'prid_2011', 'multi_shot', 'cam_a')
        self.cam_b_dir = osp.join(self.dataset_dir, 'prid_2011', 'multi_shot', 'cam_b')

        required_files = [
            self.dataset_dir,
            self.cam_a_dir,
            self.cam_b_dir
        ]
        self.check_before_run(required_files)

        splits = read_json(self.split_path)
        if split_id >= len(splits):
            raise ValueError('split_id exceeds range, received {}, but expected between 0 and {}'.format(split_id, len(splits)-1))
        split = splits[split_id]
        train_dirs, test_dirs = split['train'], split['test']

        train = self.process_dir(train_dirs, cam1=True, cam2=True)
        query = self.process_dir(test_dirs, cam1=True, cam2=False)
        gallery = self.process_dir(test_dirs, cam1=False, cam2=True)

        self.init_attributes(train, query, gallery, **kwargs)

        if verbose:
            self.print_dataset_statistics(self.train, self.query, self.gallery)

    def process_dir(self, dirnames, cam1=True, cam2=True):
        tracklets = []
        dirname2pid = {dirname:i for i, dirname in enumerate(dirnames)}

        for dirname in dirnames:
            if cam1:
                person_dir = osp.join(self.cam_a_dir, dirname)
                img_names = glob.glob(osp.join(person_dir, '*.png'))
                assert len(img_names) > 0
                img_names = tuple(img_names)
                pid = dirname2pid[dirname]
                tracklets.append((img_names, pid, 0))

            if cam2:
                person_dir = osp.join(self.cam_b_dir, dirname)
                img_names = glob.glob(osp.join(person_dir, '*.png'))
                assert len(img_names) > 0
                img_names = tuple(img_names)
                pid = dirname2pid[dirname]
                tracklets.append((img_names, pid, 1))

        return tracklets
@ -1,137 +0,0 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import glob
import re
import sys
import urllib
import tarfile
import zipfile
import os.path as osp
from scipy.io import loadmat
import numpy as np
import h5py
from scipy.misc import imsave

from torchreid.utils.iotools import mkdir_if_missing, write_json, read_json
from .bases import BaseImageDataset


class PRID450S(BaseImageDataset):
    """PRID450S

    Reference:
    Roth et al. Mahalanobis Distance Learning for Person Re-Identification. PR 2014.

    URL: https://www.tugraz.at/institute/icg/research/team-bischof/lrs/downloads/prid450s/

    Dataset statistics:
    # identities: 450
    # images: 900
    # cameras: 2
    """
    dataset_dir = 'prid450s'

    def __init__(self, root='data', split_id=0, min_seq_len=0, verbose=True, **kwargs):
        super(PRID450S, self).__init__(root)
        self.dataset_dir = osp.join(self.root, self.dataset_dir)
        self.dataset_url = 'https://files.icg.tugraz.at/f/8c709245bb/?raw=1'
        self.split_path = osp.join(self.dataset_dir, 'splits.json')
        self.cam_a_dir = osp.join(self.dataset_dir, 'cam_a')
        self.cam_b_dir = osp.join(self.dataset_dir, 'cam_b')

        self.download_data()

        required_files = [
            self.dataset_dir,
            self.cam_a_dir,
            self.cam_b_dir
        ]
        self.check_before_run(required_files)

        self.prepare_split()
        splits = read_json(self.split_path)
        if split_id >= len(splits):
            raise ValueError('split_id exceeds range, received {}, but expected between 0 and {}'.format(split_id, len(splits)-1))
        split = splits[split_id]

        train = split['train']
        query = split['query']
        gallery = split['gallery']

        train = [tuple(item) for item in train]
        query = [tuple(item) for item in query]
        gallery = [tuple(item) for item in gallery]

        self.init_attributes(train, query, gallery, **kwargs)

        if verbose:
            self.print_dataset_statistics(self.train, self.query, self.gallery)

    def download_data(self):
        if osp.exists(self.dataset_dir):
            return

        print('Creating directory {}'.format(self.dataset_dir))
        mkdir_if_missing(self.dataset_dir)
        fpath = osp.join(self.dataset_dir, 'prid_450s.zip')

        print('Downloading PRID450S dataset')
        urllib.urlretrieve(self.dataset_url, fpath)

        print('Extracting files')
        zip_ref = zipfile.ZipFile(fpath, 'r')
        zip_ref.extractall(self.dataset_dir)
        zip_ref.close()

    def prepare_split(self):
        if not osp.exists(self.split_path):
            cam_a_imgs = sorted(glob.glob(osp.join(self.cam_a_dir, 'img_*.png')))
            cam_b_imgs = sorted(glob.glob(osp.join(self.cam_b_dir, 'img_*.png')))
            assert len(cam_a_imgs) == len(cam_b_imgs)

            num_pids = len(cam_a_imgs)
            num_train_pids = num_pids // 2

            splits = []
            for _ in range(10):
                order = np.arange(num_pids)
                np.random.shuffle(order)
                train_idxs = np.sort(order[:num_train_pids])
                idx2label = {idx: label for label, idx in enumerate(train_idxs)}

                train, test = [], []

                # processing camera a
                for img_path in cam_a_imgs:
                    img_name = osp.basename(img_path)
                    img_idx = int(img_name.split('_')[1].split('.')[0])
                    if img_idx in train_idxs:
                        train.append((img_path, idx2label[img_idx], 0))
                    else:
                        test.append((img_path, img_idx, 0))

                # processing camera b
                for img_path in cam_b_imgs:
                    img_name = osp.basename(img_path)
                    img_idx = int(img_name.split('_')[1].split('.')[0])
                    if img_idx in train_idxs:
                        train.append((img_path, idx2label[img_idx], 1))
                    else:
                        test.append((img_path, img_idx, 1))

                split = {
                    'train': train,
                    'query': test,
                    'gallery': test,
                    'num_train_pids': num_train_pids,
                    'num_query_pids': num_pids - num_train_pids,
                    'num_gallery_pids': num_pids - num_train_pids
                }
                splits.append(split)

            print('Totally {} splits are created'.format(len(splits)))
            write_json(splits, self.split_path)
            print('Split file saved to {}'.format(self.split_path))
@ -1,72 +0,0 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import glob
import re
import sys
import urllib
import tarfile
import zipfile
import os.path as osp
from scipy.io import loadmat
import numpy as np
import h5py
from scipy.misc import imsave
import copy

from .bases import BaseImageDataset


class SenseReID(BaseImageDataset):
    """SenseReID

    This dataset is used for testing purposes only.

    Reference:
    Zhao et al. Spindle Net: Person Re-identification with Human Body
    Region Guided Feature Decomposition and Fusion. CVPR 2017.

    URL: https://drive.google.com/file/d/0B56OfSrVI8hubVJLTzkwV2VaOWM/view

    Dataset statistics:
    - train: 0 ids, 0 images
    - query: 522 ids, 1040 images
    - gallery: 1717 ids, 3388 images
    """
    dataset_dir = 'sensereid'

    def __init__(self, root='data', verbose=True, **kwargs):
        super(SenseReID, self).__init__(root)
        self.dataset_dir = osp.join(self.root, self.dataset_dir)
        self.query_dir = osp.join(self.dataset_dir, 'SenseReID', 'test_probe')
        self.gallery_dir = osp.join(self.dataset_dir, 'SenseReID', 'test_gallery')

        required_files = [
            self.dataset_dir,
            self.query_dir,
            self.gallery_dir
        ]
        self.check_before_run(required_files)

        query = self.process_dir(self.query_dir)
        gallery = self.process_dir(self.gallery_dir)
        train = copy.deepcopy(query) # dummy variable

        self.init_attributes(train, query, gallery, **kwargs)

        if verbose:
            self.print_dataset_statistics(self.train, self.query, self.gallery)

    def process_dir(self, dir_path):
        img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
        dataset = []

        for img_path in img_paths:
            img_name = osp.splitext(osp.basename(img_path))[0]
            pid, camid = img_name.split('_')
            pid, camid = int(pid), int(camid)
            dataset.append((img_path, pid, camid))

        return dataset
@ -1,155 +0,0 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import glob
import re
import sys
import urllib
import tarfile
import zipfile
import os.path as osp
from scipy.io import loadmat
import numpy as np
import h5py
from scipy.misc import imsave

from torchreid.utils.iotools import mkdir_if_missing, write_json, read_json
from .bases import BaseImageDataset


class VIPeR(BaseImageDataset):
    """VIPeR

    Reference:
    Gray et al. Evaluating appearance models for recognition, reacquisition, and tracking. PETS 2007.

    URL: https://vision.soe.ucsc.edu/node/178

    Dataset statistics:
    # identities: 632
    # images: 632 x 2 = 1264
    # cameras: 2
    """
    dataset_dir = 'viper'

    def __init__(self, root='data', split_id=0, verbose=True, **kwargs):
        super(VIPeR, self).__init__(root)
        self.dataset_dir = osp.join(self.root, self.dataset_dir)
        self.dataset_url = 'http://users.soe.ucsc.edu/~manduchi/VIPeR.v1.0.zip'
        self.cam_a_dir = osp.join(self.dataset_dir, 'VIPeR', 'cam_a')
        self.cam_b_dir = osp.join(self.dataset_dir, 'VIPeR', 'cam_b')
        self.split_path = osp.join(self.dataset_dir, 'splits.json')

        self.download_data()

        required_files = [
            self.dataset_dir,
            self.cam_a_dir,
            self.cam_b_dir
        ]
        self.check_before_run(required_files)

        self.prepare_split()
        splits = read_json(self.split_path)
        if split_id >= len(splits):
            raise ValueError('split_id exceeds range, received {}, but expected between 0 and {}'.format(split_id, len(splits)-1))
        split = splits[split_id]

        train = split['train']
        query = split['query'] # note: query and gallery share the same images
        gallery = split['gallery']

        train = [tuple(item) for item in train]
        query = [tuple(item) for item in query]
        gallery = [tuple(item) for item in gallery]

        self.init_attributes(train, query, gallery, **kwargs)

        if verbose:
            self.print_dataset_statistics(self.train, self.query, self.gallery)

    def download_data(self):
        if osp.exists(self.dataset_dir):
            return

        print('Creating directory {}'.format(self.dataset_dir))
        mkdir_if_missing(self.dataset_dir)
        fpath = osp.join(self.dataset_dir, osp.basename(self.dataset_url))

        print('Downloading VIPeR dataset')
        urllib.urlretrieve(self.dataset_url, fpath)

        print('Extracting files')
        zip_ref = zipfile.ZipFile(fpath, 'r')
        zip_ref.extractall(self.dataset_dir)
        zip_ref.close()

    def prepare_split(self):
        if not osp.exists(self.split_path):
            print('Creating 10 random splits of train ids and test ids')

            cam_a_imgs = sorted(glob.glob(osp.join(self.cam_a_dir, '*.bmp')))
            cam_b_imgs = sorted(glob.glob(osp.join(self.cam_b_dir, '*.bmp')))
            assert len(cam_a_imgs) == len(cam_b_imgs)
            num_pids = len(cam_a_imgs)
            print('Number of identities: {}'.format(num_pids))
            num_train_pids = num_pids // 2

            """
            In total, there will be 20 splits because each random split creates two
            sub-splits, one using cameraA as query and cameraB as gallery
            while the other uses cameraB as query and cameraA as gallery.
            Therefore, results should be averaged over 20 splits (split_id=0~19).

            In practice, a model trained on split_id=0 can be applied to split_id=0&1
            as split_id=0&1 share the same training data (and so forth).
            """
            splits = []
            for _ in range(10):
                order = np.arange(num_pids)
                np.random.shuffle(order)
                train_idxs = order[:num_train_pids]
                test_idxs = order[num_train_pids:]
                assert not bool(set(train_idxs) & set(test_idxs)), 'Error: train and test overlap'

                train = []
                for pid, idx in enumerate(train_idxs):
                    cam_a_img = cam_a_imgs[idx]
                    cam_b_img = cam_b_imgs[idx]
                    train.append((cam_a_img, pid, 0))
                    train.append((cam_b_img, pid, 1))

                test_a = []
                test_b = []
                for pid, idx in enumerate(test_idxs):
                    cam_a_img = cam_a_imgs[idx]
                    cam_b_img = cam_b_imgs[idx]
                    test_a.append((cam_a_img, pid, 0))
                    test_b.append((cam_b_img, pid, 1))

                # use cameraA as query and cameraB as gallery
                split = {
                    'train': train,
                    'query': test_a,
                    'gallery': test_b,
                    'num_train_pids': num_train_pids,
                    'num_query_pids': num_pids - num_train_pids,
                    'num_gallery_pids': num_pids - num_train_pids
                }
                splits.append(split)

                # use cameraB as query and cameraA as gallery
                split = {
                    'train': train,
                    'query': test_b,
                    'gallery': test_a,
                    'num_train_pids': num_train_pids,
                    'num_query_pids': num_pids - num_train_pids,
                    'num_gallery_pids': num_pids - num_train_pids
                }
                splits.append(split)

            print('Totally {} splits are created'.format(len(splits)))
            write_json(splits, self.split_path)
            print('Split file saved to {}'.format(self.split_path))
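The docstring in prepare_split is worth restating in code: every shuffle produces two consecutive split ids that share training identities and only swap the roles of the two cameras. A minimal sketch with placeholder triplets:

train = [('cam_a/000_45.bmp', 0, 0), ('cam_b/000_90.bmp', 0, 1)]  # hypothetical
test_a = [('cam_a/001_45.bmp', 0, 0)]
test_b = [('cam_b/001_90.bmp', 0, 1)]

splits = []
for query, gallery in [(test_a, test_b), (test_b, test_a)]:
    splits.append({'train': train, 'query': query, 'gallery': gallery})

# splits[0] and splits[1] share 'train' and mirror query/gallery, so results
# should be averaged over all 20 split ids (0-19).
print(len(splits))  # one shuffle -> 2 sub-splits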
@ -0,0 +1,8 @@
from __future__ import absolute_import
from __future__ import print_function

from .image import ImageSoftmaxEngine
from .image import ImageTripletEngine

from .video import VideoSoftmaxEngine
from .video import VideoTripletEngine
@ -0,0 +1,198 @@
|
|||
from __future__ import absolute_import
|
||||
from __future__ import print_function
|
||||
from __future__ import division
|
||||
|
||||
import os
|
||||
import os.path as osp
import time
import datetime
import numpy as np

import torch
import torch.nn as nn

import torchreid
from torchreid.utils import AverageMeter, visualize_ranked_results, save_checkpoint
from torchreid.losses import DeepSupervision
from torchreid import metrics


class Engine(object):

    def __init__(self, dataset, model, optimizer, scheduler=None, use_cpu=False):
        self.dataset = dataset
        self.model = model
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.use_gpu = (torch.cuda.is_available() and not use_cpu)

        # check attributes
        if not isinstance(self.model, nn.Module):
            raise TypeError('model must be an instance of nn.Module')
        if not isinstance(self.optimizer, torch.optim.Optimizer):
            raise TypeError('optimizer must be an instance of torch.optim.Optimizer')

    def run(self, max_epoch=0, start_epoch=0, fixbase_epoch=0, open_layers=None,
            start_eval=0, eval_freq=-1, save_dir='log', test_only=False, print_freq=10,
            dist_metric='euclidean', visrank=False, visrank_topk=20,
            use_metric_cuhk03=False, ranks=[1, 5, 10, 20]):
        trainloader, testloader = self.dataset.return_dataloaders()

        if test_only:
            self.test(testloader, dist_metric, visrank, visrank_topk,
                      save_dir, use_metric_cuhk03, ranks)
            return

        time_start = time.time()
        print('=> Start training')

        if fixbase_epoch > 0 and (open_layers is not None):
            print('Pretrain open layers ({}) for {} epochs'.format(open_layers, fixbase_epoch))
            for epoch in range(fixbase_epoch):
                self.train(epoch, trainloader, fixbase=True, open_layers=open_layers,
                           print_freq=print_freq)
            print('Done. From now on all layers are open to train for {} epochs'.format(max_epoch))

        for epoch in range(start_epoch, max_epoch):
            self.train(epoch, trainloader, print_freq=print_freq)

            if self.scheduler is not None:
                self.scheduler.step()

            if (epoch+1) > start_eval and eval_freq > 0 and (epoch+1) % eval_freq == 0 and (epoch+1) != max_epoch:
                rank1 = self.test(testloader, dist_metric, visrank, visrank_topk,
                                  save_dir, use_metric_cuhk03, ranks)
                self._save_checkpoint(epoch, rank1, save_dir)

        print('=> Final test')
        rank1 = self.test(
            testloader, dist_metric, visrank, visrank_topk,
            save_dir, use_metric_cuhk03, ranks
        )
        self._save_checkpoint(epoch, rank1, save_dir)

        elapsed = round(time.time() - time_start)
        elapsed = str(datetime.timedelta(seconds=elapsed))
        print('Elapsed {}'.format(elapsed))

    def train(self):
        raise NotImplementedError

    def test(self, testloader, dist_metric='euclidean', visrank=False, visrank_topk=20,
             save_dir='', use_metric_cuhk03=False, ranks=[1, 5, 10, 20]):
        target_names = list(testloader.keys())

        for name in target_names:
            print('Evaluate {}'.format(name))
            queryloader = testloader[name]['query']
            galleryloader = testloader[name]['gallery']
            rank1 = self._evaluate(
                name, queryloader, galleryloader, dist_metric, visrank,
                visrank_topk, save_dir, use_metric_cuhk03, ranks
            )

        return rank1

    @torch.no_grad()
    def _evaluate(self, dataset_name, queryloader, galleryloader, dist_metric,
                  visrank, visrank_topk, save_dir, use_metric_cuhk03, ranks):
        batch_time = AverageMeter()

        self.model.eval()

        print('Extracting features from query set ...')
        qf, q_pids, q_camids = [], [], []
        for batch_idx, data in enumerate(queryloader):
            imgs, pids, camids = self._parse_data_for_eval(data)
            if self.use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            features = self._extract_features(imgs)
            batch_time.update(time.time() - end)
            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)
        print('Done, obtained {}-by-{} matrix'.format(qf.size(0), qf.size(1)))

        print('Extracting features from gallery set ...')
        gf, g_pids, g_camids = [], [], []
        end = time.time()
        for batch_idx, data in enumerate(galleryloader):
            imgs, pids, camids = self._parse_data_for_eval(data)
            if self.use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            features = self._extract_features(imgs)
            batch_time.update(time.time() - end)
            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)
        print('Done, obtained {}-by-{} matrix'.format(gf.size(0), gf.size(1)))

        print('Speed: {:.4f} sec/batch'.format(batch_time.avg))

        distmat = metrics.compute_distance_matrix(qf, gf, dist_metric)

        print('Computing CMC and mAP ...')
        cmc, mAP = metrics.evaluate_rank(
            distmat,
            q_pids,
            g_pids,
            q_camids,
            g_camids,
            use_metric_cuhk03=use_metric_cuhk03
        )

        print('** Results')
        print('mAP: {:.1%}'.format(mAP))
        print('CMC curve')
        for r in ranks:
            print('Rank-{:<3}: {:.1%}'.format(r, cmc[r-1]))

        if visrank:
            visualize_ranked_results(
                distmat,
                self.dataset.return_testdataset_by_name(dataset_name),
                save_dir=osp.join(save_dir, 'ranked_results', dataset_name),
                topk=visrank_topk
            )

        return cmc[0]

    def _compute_loss(self, criterion, outputs, targets):
        if isinstance(outputs, (tuple, list)):
            loss = DeepSupervision(criterion, outputs, targets)
        else:
            loss = criterion(outputs, targets)
        return loss

    def _extract_features(self, input):
        self.model.eval()
        return self.model(input)

    def _parse_data_for_train(self, data):
        imgs = data[0]
        pids = data[1]
        return imgs, pids

    def _parse_data_for_eval(self, data):
        imgs = data[0]
        pids = data[1]
        camids = data[2]
        return imgs, pids, camids

    def _save_checkpoint(self, epoch, rank1, save_dir, is_best=False):
        save_checkpoint({
            'state_dict': self.model.state_dict(),
            'epoch': epoch + 1,
            'rank1': rank1,
            'optimizer': self.optimizer.state_dict(),
        }, save_dir, is_best=is_best)
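For orientation, here is a minimal sketch of how this Engine API is meant to be driven end to end. It is an assumption-laden example, not part of the commit: the dataset object is a placeholder for anything exposing return_dataloaders() and num_train_pids, which is all Engine relies on, and the factory helpers are the ones added later in this same commit.

    from torchreid.engine.image import ImageSoftmaxEngine
    from torchreid.models import build_model
    from torchreid.optim import build_optimizer, build_lr_scheduler

    dataset = ...  # placeholder: must provide return_dataloaders() and num_train_pids
    model = build_model('resnet50', num_classes=dataset.num_train_pids)
    optimizer = build_optimizer(model, optim='adam', lr=0.0003)
    scheduler = build_lr_scheduler(optimizer, 'single_step', stepsize=20)
    engine = ImageSoftmaxEngine(dataset, model, optimizer, scheduler=scheduler)
    engine.run(max_epoch=60, eval_freq=10, save_dir='log/resnet50')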
@@ -0,0 +1,4 @@
from __future__ import absolute_import

from .softmax import ImageSoftmaxEngine
from .triplet import ImageTripletEngine
@@ -0,0 +1,74 @@
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division

import time
import datetime

import torch

import torchreid
from torchreid.engine import engine
from torchreid.losses import CrossEntropyLoss
from torchreid.utils import AverageMeter, open_specified_layers, open_all_layers
from torchreid import metrics


class ImageSoftmaxEngine(engine.Engine):

    def __init__(self, dataset, model, optimizer, scheduler=None, use_cpu=False,
                 label_smooth=True):
        super(ImageSoftmaxEngine, self).__init__(dataset, model, optimizer, scheduler, use_cpu)

        self.criterion = CrossEntropyLoss(
            num_classes=self.dataset.num_train_pids,
            use_gpu=self.use_gpu,
            label_smooth=label_smooth
        )

    def train(self, epoch, trainloader, fixbase=False, open_layers=None, print_freq=10):
        losses = AverageMeter()
        accs = AverageMeter()
        batch_time = AverageMeter()
        data_time = AverageMeter()

        self.model.train()

        if fixbase and (open_layers is not None):
            open_specified_layers(self.model, open_layers)
        else:
            open_all_layers(self.model)

        end = time.time()
        for batch_idx, data in enumerate(trainloader):
            data_time.update(time.time() - end)

            imgs, pids = self._parse_data_for_train(data)
            if self.use_gpu:
                imgs = imgs.cuda()
                pids = pids.cuda()

            self.optimizer.zero_grad()
            outputs = self.model(imgs)
            loss = self._compute_loss(self.criterion, outputs, pids)
            loss.backward()
            self.optimizer.step()

            batch_time.update(time.time() - end)

            losses.update(loss.item(), pids.size(0))
            accs.update(metrics.accuracy(outputs, pids)[0].item())

            if (batch_idx+1) % print_freq == 0:
                print('Epoch: [{0}][{1}/{2}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Acc {acc.val:.2f} ({acc.avg:.2f})\t'.format(
                          epoch + 1, batch_idx + 1, len(trainloader),
                          batch_time=batch_time,
                          data_time=data_time,
                          loss=losses,
                          acc=accs))

            end = time.time()
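The label_smooth flag above selects a smoothed target distribution inside CrossEntropyLoss. As a reference, a minimal self-contained sketch of the standard formulation (eps is the smoothing constant; the exact constant used by torchreid's CrossEntropyLoss is not shown in this diff):

    import torch
    import torch.nn.functional as F

    def smoothed_cross_entropy(logits, targets, eps=0.1):
        # logits: (batch, K); targets: (batch,) int64 class indices
        K = logits.size(1)
        log_probs = F.log_softmax(logits, dim=1)
        one_hot = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1)
        # (1 - eps) on the true class, eps spread uniformly over all K classes
        soft_targets = (1 - eps) * one_hot + eps / K
        return (-soft_targets * log_probs).sum(dim=1).mean()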
@@ -0,0 +1,85 @@
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division

import time
import datetime

import torch

import torchreid
from torchreid.engine import engine
from torchreid.losses import CrossEntropyLoss, TripletLoss
from torchreid.utils import AverageMeter, open_specified_layers, open_all_layers
from torchreid import metrics


class ImageTripletEngine(engine.Engine):

    def __init__(self, dataset, model, optimizer, margin=0.3,
                 weight_t=1, weight_x=1, scheduler=None, use_cpu=False,
                 label_smooth=True):
        super(ImageTripletEngine, self).__init__(dataset, model, optimizer, scheduler, use_cpu)

        self.weight_t = weight_t
        self.weight_x = weight_x

        self.criterion_t = TripletLoss(margin=margin)
        self.criterion_x = CrossEntropyLoss(
            num_classes=self.dataset.num_train_pids,
            use_gpu=self.use_gpu,
            label_smooth=label_smooth
        )

    def train(self, epoch, trainloader, fixbase=False, open_layers=None, print_freq=10):
        losses_t = AverageMeter()
        losses_x = AverageMeter()
        accs = AverageMeter()
        batch_time = AverageMeter()
        data_time = AverageMeter()

        self.model.train()

        if fixbase and (open_layers is not None):
            open_specified_layers(self.model, open_layers)
        else:
            open_all_layers(self.model)

        end = time.time()
        for batch_idx, data in enumerate(trainloader):
            data_time.update(time.time() - end)

            imgs, pids = self._parse_data_for_train(data)
            if self.use_gpu:
                imgs = imgs.cuda()
                pids = pids.cuda()

            self.optimizer.zero_grad()
            outputs, features = self.model(imgs)
            loss_t = self._compute_loss(self.criterion_t, features, pids)
            loss_x = self._compute_loss(self.criterion_x, outputs, pids)
            loss = self.weight_t * loss_t + self.weight_x * loss_x
            loss.backward()
            self.optimizer.step()

            batch_time.update(time.time() - end)

            losses_t.update(loss_t.item(), pids.size(0))
            losses_x.update(loss_x.item(), pids.size(0))
            accs.update(metrics.accuracy(outputs, pids)[0].item())

            if (batch_idx+1) % print_freq == 0:
                print('Epoch: [{0}][{1}/{2}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                      'Triplet {loss_t.val:.4f} ({loss_t.avg:.4f})\t'
                      'Softmax {loss_x.val:.4f} ({loss_x.avg:.4f})\t'
                      'Acc {acc.val:.2f} ({acc.avg:.2f})\t'.format(
                          epoch + 1, batch_idx + 1, len(trainloader),
                          batch_time=batch_time,
                          data_time=data_time,
                          loss_t=losses_t,
                          loss_x=losses_x,
                          acc=accs))

            end = time.time()
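The total objective above is loss = weight_t * triplet + weight_x * softmax. For reference, a sketch of the plain margin-based formulation behind TripletLoss(margin=0.3); note that re-id triplet losses commonly add batch-hard example mining, which this sketch omits, so treat it as illustrative only:

    import torch.nn.functional as F

    def triplet_margin(anchor, positive, negative, margin=0.3):
        d_ap = F.pairwise_distance(anchor, positive)  # anchor-positive distances
        d_an = F.pairwise_distance(anchor, negative)  # anchor-negative distances
        return F.relu(d_ap - d_an + margin).mean()    # hinge on the margin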
@@ -0,0 +1,4 @@
from __future__ import absolute_import

from .softmax import VideoSoftmaxEngine
from .triplet import VideoTripletEngine
@@ -0,0 +1,37 @@
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division

import time
import datetime

import torch

import torchreid
from torchreid.engine.image import ImageSoftmaxEngine


class VideoSoftmaxEngine(ImageSoftmaxEngine):

    def __init__(self, dataset, model, optimizer, scheduler=None,
                 use_cpu=False, label_smooth=True, pooling_method='avg'):
        super(VideoSoftmaxEngine, self).__init__(dataset, model, optimizer, scheduler=scheduler,
                                                 use_cpu=use_cpu, label_smooth=label_smooth)
        self.pooling_method = pooling_method

    def _extract_features(self, input):
        self.model.eval()
        # b: batch size
        # s: sequence length
        # c: channel depth
        # h: height
        # w: width
        b, s, c, h, w = input.size()
        input = input.view(b*s, c, h, w)
        features = self.model(input)
        features = features.view(b, s, -1)
        if self.pooling_method == 'avg':
            features = torch.mean(features, 1)
        else:
            features = torch.max(features, 1)[0]
        return features
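A quick, runnable check of the reshape-and-pool logic in _extract_features, with a dummy feature tensor standing in for the backbone output:

    import torch

    b, s, feat_dim = 4, 8, 2048                 # batch, sequence length, feature size
    frame_feats = torch.randn(b * s, feat_dim)  # what the backbone returns for b*s frames
    frame_feats = frame_feats.view(b, s, -1)    # regroup frames by sequence
    clip_avg = frame_feats.mean(1)              # 'avg' pooling -> (b, feat_dim)
    clip_max = frame_feats.max(1)[0]            # 'max' pooling -> (b, feat_dim)
    assert clip_avg.shape == (b, feat_dim) and clip_max.shape == (b, feat_dim)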
@@ -0,0 +1,24 @@
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division

import time
import datetime

import torch

import torchreid
from torchreid.engine.image import ImageTripletEngine
from torchreid.engine.video import VideoSoftmaxEngine


class VideoTripletEngine(ImageTripletEngine, VideoSoftmaxEngine):

    def __init__(self, dataset, model, optimizer, margin=0.3,
                 weight_t=1, weight_x=1, scheduler=None, use_cpu=False,
                 label_smooth=True, pooling_method='avg'):
        super(VideoTripletEngine, self).__init__(dataset, model, optimizer, margin=margin,
                                                 weight_t=weight_t, weight_x=weight_x,
                                                 scheduler=scheduler, use_cpu=use_cpu,
                                                 label_smooth=label_smooth)
        self.pooling_method = pooling_method
@@ -1,7 +0,0 @@
all:
	python setup.py build_ext --inplace
	rm -rf build

clean:
	rm -rf build
	rm -f eval_metrics_cy.c *.so
@@ -1,29 +0,0 @@
from __future__ import absolute_import
from __future__ import print_function

import torch


def init_lr_scheduler(
    optimizer,
    lr_scheduler='multi_step',  # learning rate scheduler
    stepsize=[20, 40],          # step size to decay learning rate
    gamma=0.1,                  # learning rate decay
):
    print('Initializing lr_scheduler: {}'.format(lr_scheduler))

    if lr_scheduler == 'single_step':
        scheduler = torch.optim.lr_scheduler.StepLR(
            optimizer, step_size=stepsize[0], gamma=gamma
        )

    elif lr_scheduler == 'multi_step':
        scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer, milestones=stepsize, gamma=gamma
        )

    else:
        raise ValueError('Unsupported lr_scheduler: {}'.format(lr_scheduler))

    return scheduler
@@ -0,0 +1,5 @@
from __future__ import absolute_import

from .accuracy import accuracy
from .rank import evaluate_rank
from .distance import compute_distance_matrix
@@ -0,0 +1,26 @@
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division


def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for
    the specified values of k.
    """
    maxk = max(topk)
    batch_size = target.size(0)

    if isinstance(output, (tuple, list)):
        output = output[0]

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
        acc = correct_k.mul_(100.0 / batch_size)
        res.append(acc)

    return res
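Example use of accuracy; the shapes are what matter, the numbers are arbitrary:

    import torch
    from torchreid.metrics import accuracy  # as exported by the __init__ above

    logits = torch.randn(32, 751)            # (batch, num_classes)
    targets = torch.randint(0, 751, (32,))   # ground-truth class indices
    top1, top5 = accuracy(logits, targets, topk=(1, 5))
    print(top1.item(), top5.item())          # percentages in [0, 100]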
@@ -0,0 +1,60 @@
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division

import numpy as np

import torch
from torch.nn import functional as F


def compute_distance_matrix(input1, input2, metric='euclidean'):
    # check input
    assert isinstance(input1, torch.Tensor)
    assert isinstance(input2, torch.Tensor)
    assert input1.dim() == 2, 'Expected 2-D tensor, but got {}-D'.format(input1.dim())
    assert input2.dim() == 2, 'Expected 2-D tensor, but got {}-D'.format(input2.dim())
    assert input1.size(1) == input2.size(1)

    if metric == 'euclidean':
        distmat = euclidean_squared_distance(input1, input2)
    elif metric == 'cosine':
        distmat = cosine_distance(input1, input2)
    else:
        raise ValueError(
            'Unknown distance metric: {}. '
            'Please choose either "euclidean" or "cosine"'.format(metric)
        )

    return distmat


def euclidean_squared_distance(input1, input2):
    """
    Args:
        input1 (torch.Tensor): 2-D feature matrix.
        input2 (torch.Tensor): 2-D feature matrix.

    Returns:
        distmat (numpy.ndarray): distance matrix.
    """
    m, n = input1.size(0), input2.size(0)
    distmat = torch.pow(input1, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(input2, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(1, -2, input1, input2.t())
    return distmat.numpy()


def cosine_distance(input1, input2):
    """
    Args:
        input1 (torch.Tensor): 2-D feature matrix.
        input2 (torch.Tensor): 2-D feature matrix.

    Returns:
        distmat (numpy.ndarray): distance matrix.
    """
    input1_normed = F.normalize(input1, p=2, dim=1)
    input2_normed = F.normalize(input2, p=2, dim=1)
    distmat = 1 - torch.mm(input1_normed, input2_normed.t())
    return distmat.numpy()
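Example use of compute_distance_matrix; note both distance functions return a numpy array, so the inputs must live on the CPU:

    import torch
    from torchreid.metrics import compute_distance_matrix

    qf = torch.randn(30, 512)    # query features
    gf = torch.randn(300, 512)   # gallery features
    distmat = compute_distance_matrix(qf, gf, metric='euclidean')
    print(distmat.shape)         # (30, 300)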
@@ -9,12 +9,14 @@ import sys
import warnings

try:
-    from torchreid.eval_cylib.eval_metrics_cy import evaluate_cy
+    from torchreid.metrics.rank_cylib.rank_cy import evaluate_cy
    IS_CYTHON_AVAI = True
    print('Using Cython evaluation code as the backend')
except ImportError:
    IS_CYTHON_AVAI = False
-    warnings.warn('Cython evaluation is UNAVAILABLE, which is highly recommended')
+    warnings.warn(
+        'Cython evaluation (very fast, highly recommended) is unavailable, '
+        'now use python evaluation.'
+    )


def eval_cuhk03(distmat, q_pids, g_pids, q_camids, g_camids, max_rank):

@@ -155,7 +157,7 @@ def evaluate_py(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_metri
    return eval_market1501(distmat, q_pids, g_pids, q_camids, g_camids, max_rank)


-def evaluate(distmat, q_pids, g_pids, q_camids, g_camids, max_rank=50, use_metric_cuhk03=False, use_cython=True):
+def evaluate_rank(distmat, q_pids, g_pids, q_camids, g_camids, max_rank=50, use_metric_cuhk03=False, use_cython=True):
    if use_cython and IS_CYTHON_AVAI:
        return evaluate_cy(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_metric_cuhk03)
    else:
@@ -0,0 +1,6 @@
all:
	$(PYTHON) setup.py build_ext --inplace
	rm -rf build
clean:
	rm -rf build
	rm -f rank_cy.c *.so
(File diff suppressed because it is too large.)
@@ -15,8 +15,11 @@ https://github.com/cython/cython/wiki/enhancements-compilerdirectives

Cython tutorial:
https://cython.readthedocs.io/en/latest/src/userguide/numpy_tutorial.html

+Credit to https://github.com/luzai
"""


# Main interface
cpdef evaluate_cy(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_metric_cuhk03=False):
    distmat = np.asarray(distmat, dtype=np.float32)
@@ -12,9 +12,10 @@ def numpy_include():
    return numpy_include

ext_modules = [
-    Extension('eval_metrics_cy',
-              ['eval_metrics_cy.pyx'],
-              include_dirs=[numpy_include()],
+    Extension(
+        'rank_cy',
+        ['rank_cy.pyx'],
+        include_dirs=[numpy_include()],
    )
]
@@ -5,9 +5,8 @@ import os.path as osp
import timeit
import numpy as np

-sys.path.insert(0, osp.dirname(osp.abspath(__file__)) + '/../..')
-
-from torchreid.eval_metrics import evaluate
+sys.path.insert(0, osp.dirname(osp.abspath(__file__)) + '/../../..')
+from torchreid import metrics

"""
Test the speed of cython-based evaluation code. The speed improvements

@@ -25,8 +24,8 @@ setup = '''
import sys
import os.path as osp
import numpy as np
-sys.path.insert(0, osp.dirname(osp.abspath(__file__)) + '/../..')
-from torchreid.eval_metrics import evaluate
+sys.path.insert(0, osp.dirname(osp.abspath(__file__)) + '/../../..')
+from torchreid import metrics
num_q = 30
num_g = 300
max_rank = 5

@@ -38,15 +37,15 @@ g_camids = np.random.randint(0, 5, size=num_g)
'''

print('=> Using market1501\'s metric')
-pytime = timeit.timeit('evaluate(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_cython=False)', setup=setup, number=20)
-cytime = timeit.timeit('evaluate(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_cython=True)', setup=setup, number=20)
+pytime = timeit.timeit('metrics.evaluate_rank(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_cython=False)', setup=setup, number=20)
+cytime = timeit.timeit('metrics.evaluate_rank(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_cython=True)', setup=setup, number=20)
print('Python time: {} s'.format(pytime))
print('Cython time: {} s'.format(cytime))
print('Cython is {} times faster than python\n'.format(pytime / cytime))

print('=> Using cuhk03\'s metric')
-pytime = timeit.timeit('evaluate(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_metric_cuhk03=True, use_cython=False)', setup=setup, number=20)
-cytime = timeit.timeit('evaluate(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_metric_cuhk03=True, use_cython=True)', setup=setup, number=20)
+pytime = timeit.timeit('metrics.evaluate_rank(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_metric_cuhk03=True, use_cython=False)', setup=setup, number=20)
+cytime = timeit.timeit('metrics.evaluate_rank(distmat, q_pids, g_pids, q_camids, g_camids, max_rank, use_metric_cuhk03=True, use_cython=True)', setup=setup, number=20)
print('Python time: {} s'.format(pytime))
print('Cython time: {} s'.format(cytime))
print('Cython is {} times faster than python\n'.format(pytime / cytime))
@@ -1,5 +1,7 @@
from __future__ import absolute_import

+import torch
+
from .resnet import *
from .resnetmid import *
from .resnext import *

@@ -54,12 +56,14 @@ __model_factory = {
}


-def get_names():
-    return list(__model_factory.keys())
-
-
-def init_model(name, *args, **kwargs):
-    avai_models = get_names()
+def build_model(name, num_classes, loss='softmax', pretrained=True, use_gpu=True):
+    avai_models = list(__model_factory.keys())
    if name not in avai_models:
-        raise KeyError('Unknown model: {}. The available models are: {}'.format(name, avai_models))
-    return __model_factory[name](*args, **kwargs)
+        raise KeyError('Unknown model: {}. Must be one of {}'.format(name, avai_models))
+    print('Initializing model: {}'.format(name))
+    return __model_factory[name](
+        num_classes=num_classes,
+        loss=loss,
+        pretrained=pretrained,
+        use_gpu=use_gpu
+    )
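Example call to the new factory; num_classes should match the number of training identities (751 here is just the Market-1501 training-identity count, used for illustration):

    import torchreid

    model = torchreid.models.build_model(
        'resnet50', num_classes=751, loss='softmax', pretrained=True
    )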
@@ -1,6 +1,8 @@
from __future__ import absolute_import
from __future__ import division

+__all__ = ['densenet121', 'densenet121_fc512']
+
from collections import OrderedDict
import math
import re

@@ -12,9 +14,6 @@ from torch.nn import functional as F
import torchvision

-
-__all__ = ['densenet121', 'densenet121_fc512']
-

model_urls = {
    'densenet121': 'https://download.pytorch.org/models/densenet121-a639ec97.pth',
    'densenet169': 'https://download.pytorch.org/models/densenet169-b2777c0a.pth',

@@ -163,9 +162,9 @@ class DenseNet(nn.Module):

        y = self.classifier(v)

-        if self.loss == {'xent'}:
+        if self.loss == 'softmax':
            return y
-        elif self.loss == {'xent', 'htri'}:
+        elif self.loss == 'triplet':
            return y, v
        else:
            raise KeyError('Unsupported loss: {}'.format(self.loss))

@@ -208,7 +207,7 @@ densenet161: num_init_features=96, growth_rate=48, block_config=(6, 12, 36, 24)
"""


-def densenet121(num_classes, loss={'xent'}, pretrained=True, **kwargs):
+def densenet121(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = DenseNet(
        num_classes=num_classes,
        loss=loss,

@@ -224,7 +223,7 @@ def densenet121(num_classes, loss={'xent'}, pretrained=True, **kwargs):
    return model


-def densenet121_fc512(num_classes, loss={'xent'}, pretrained=True, **kwargs):
+def densenet121_fc512(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = DenseNet(
        num_classes=num_classes,
        loss=loss,
@@ -1,15 +1,14 @@
from __future__ import absolute_import
from __future__ import division

+__all__ = ['HACNN']
+
import torch
from torch import nn
from torch.nn import functional as F
import torchvision

-
-__all__ = ['HACNN']
-

class ConvBlock(nn.Module):
    """Basic convolutional block.

@@ -199,7 +198,7 @@ class HACNN(nn.Module):
        learn_region (bool): whether to learn region features (i.e. local branch)
    """

-    def __init__(self, num_classes, loss={'xent'}, nchannels=[128, 256, 384], feat_dim=512, learn_region=True, use_gpu=True, **kwargs):
+    def __init__(self, num_classes, loss='softmax', nchannels=[128, 256, 384], feat_dim=512, learn_region=True, use_gpu=True, **kwargs):
        super(HACNN, self).__init__()
        self.loss = loss
        self.learn_region = learn_region

@@ -362,13 +361,13 @@ class HACNN(nn.Module):
        if self.learn_region:
            prelogits_local = self.classifier_local(x_local)

-        if self.loss == {'xent'}:
+        if self.loss == 'softmax':
            if self.learn_region:
                return (prelogits_global, prelogits_local)
            else:
                return prelogits_global

-        elif self.loss == {'xent', 'htri'}:
+        elif self.loss == 'triplet':
            if self.learn_region:
                return (prelogits_global, prelogits_local), (x_global, x_local)
            else:
@@ -1,6 +1,8 @@
from __future__ import absolute_import
from __future__ import division

+__all__ = ['inceptionresnetv2']
+
import torch
import torch.nn as nn
from torch.nn import functional as F

@@ -14,9 +16,6 @@ Code imported from https://github.com/Cadene/pretrained-models.pytorch
"""

-
-__all__ = ['inceptionresnetv2']
-

pretrained_settings = {
    'inceptionresnetv2': {
        'imagenet': {

@@ -282,7 +281,7 @@ class InceptionResNetV2(nn.Module):
    Connections on Learning. AAAI 2017.
    """

-    def __init__(self, num_classes, loss={'xent'}, **kwargs):
+    def __init__(self, num_classes, loss='softmax', **kwargs):
        super(InceptionResNetV2, self).__init__()
        self.loss = loss

@@ -385,15 +384,15 @@ class InceptionResNetV2(nn.Module):

        y = self.classifier(v)

-        if self.loss == {'xent'}:
+        if self.loss == 'softmax':
            return y
-        elif self.loss == {'xent', 'htri'}:
+        elif self.loss == 'triplet':
            return y, v
        else:
            raise KeyError('Unsupported loss: {}'.format(self.loss))


-def inceptionresnetv2(num_classes, loss={'xent'}, pretrained=True, **kwargs):
+def inceptionresnetv2(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = InceptionResNetV2(
        num_classes=num_classes,
        loss=loss,
@@ -1,5 +1,6 @@
from __future__ import absolute_import
from __future__ import division
+__all__ = ['inceptionv4']

import torch
import torch.nn as nn

@@ -9,9 +10,6 @@ import os
import sys

-
-__all__ = ['inceptionv4']
-

"""
Code imported from https://github.com/Cadene/pretrained-models.pytorch
"""

@@ -319,9 +317,9 @@ class InceptionV4Base(nn.Module):

        y = self.classifier(v)

-        if self.loss == {'xent'}:
+        if self.loss == 'softmax':
            return y
-        elif self.loss == {'xent', 'htri'}:
+        elif self.loss == 'triplet':
            return y, v
        else:
            raise KeyError('Unsupported loss: {}'.format(self.loss))

@@ -340,7 +338,7 @@ def init_pretrained_weights(model, model_url):
    print('Initialized model with pretrained weights from {}'.format(model_url))


-def inceptionv4(num_classes, loss={'xent'}, pretrained=True, **kwargs):
+def inceptionv4(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = InceptionV4Base(num_classes, loss, **kwargs)
    if pretrained:
        model_url = pretrained_settings['inceptionv4']['imagenet']['url']
@@ -1,6 +1,8 @@
from __future__ import absolute_import
from __future__ import division

+__all__ = ['mlfn']
+
import torch
from torch import nn
from torch.nn import functional as F

@@ -8,9 +10,6 @@ import torchvision
import torch.utils.model_zoo as model_zoo

-
-__all__ = ['mlfn']
-

model_urls = {
    # training epoch = 5, top1 = 51.6
    'imagenet': 'http://www.eecs.qmul.ac.uk/~kz303/deep-person-reid/model-zoo/imagenet-pretrained/mlfn-9cb5a267.pth.tar',

@@ -94,7 +93,7 @@ class MLFN(nn.Module):
    Chang et al. Multi-Level Factorisation Net for Person Re-Identification. CVPR 2018.
    """

-    def __init__(self, num_classes, loss={'xent'}, groups=32, channels=[64, 256, 512, 1024, 2048], embed_dim=1024, **kwargs):
+    def __init__(self, num_classes, loss='softmax', groups=32, channels=[64, 256, 512, 1024, 2048], embed_dim=1024, **kwargs):
        super(MLFN, self).__init__()
        self.loss = loss
        self.groups = groups

@@ -183,9 +182,9 @@ class MLFN(nn.Module):

        y = self.classifier(v)

-        if self.loss == {'xent'}:
+        if self.loss == 'softmax':
            return y
-        elif self.loss == {'xent', 'htri'}:
+        elif self.loss == 'triplet':
            return y, v
        else:
            raise KeyError('Unsupported loss: {}'.format(self.loss))

@@ -204,7 +203,7 @@ def init_pretrained_weights(model, model_url):
    print('Initialized model with pretrained weights from {}'.format(model_url))


-def mlfn(num_classes, loss={'xent'}, pretrained=True, **kwargs):
+def mlfn(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = MLFN(num_classes, loss, **kwargs)
    if pretrained:
        init_pretrained_weights(model, model_urls['imagenet'])
@@ -1,15 +1,14 @@
from __future__ import absolute_import
from __future__ import division

+__all__ = ['mobilenetv2_1dot0', 'mobilenetv2_1dot4']
+
import torch
from torch import nn
from torch.nn import functional as F
import torch.utils.model_zoo as model_zoo

-
-__all__ = ['mobilenetv2_1dot0', 'mobilenetv2_1dot4']
-

model_urls = {
    # 1.0: top-1 71.3
    'mobilenetv2_1dot0': 'http://eecs.qmul.ac.uk/~kz303/deep-person-reid/model-zoo/imagenet-pretrained/mobilenetv2_1.0-0f5d2d8f.pth',

@@ -72,7 +71,7 @@ class MobileNetV2(nn.Module):
    Sandler et al. MobileNetV2: Inverted Residuals and Linear Bottlenecks. CVPR 2018.
    """

-    def __init__(self, num_classes, width_mult=1, loss={'xent'}, fc_dims=None, dropout_p=None, **kwargs):
+    def __init__(self, num_classes, width_mult=1, loss='softmax', fc_dims=None, dropout_p=None, **kwargs):
        super(MobileNetV2, self).__init__()
        self.loss = loss
        self.in_channels = int(32 * width_mult)

@@ -176,9 +175,9 @@ class MobileNetV2(nn.Module):

        y = self.classifier(v)

-        if self.loss == {'xent'}:
+        if self.loss == 'softmax':
            return y
-        elif self.loss == {'xent', 'htri'}:
+        elif self.loss == 'triplet':
            return y, v
        else:
            raise KeyError("Unsupported loss: {}".format(self.loss))
@@ -1,15 +1,14 @@
from __future__ import absolute_import
from __future__ import division

+__all__ = ['MuDeep']
+
import torch
from torch import nn
from torch.nn import functional as F
import torchvision

-
-__all__ = ['MuDeep']
-

class ConvBlock(nn.Module):
    """Basic convolutional block.

@@ -159,7 +158,7 @@ class MuDeep(nn.Module):
    Qian et al. Multi-scale Deep Learning Architectures for Person Re-identification. ICCV 2017.
    """

-    def __init__(self, num_classes, loss={'xent'}, **kwargs):
+    def __init__(self, num_classes, loss='softmax', **kwargs):
        super(MuDeep, self).__init__()
        self.loss = loss

@@ -195,9 +194,9 @@ class MuDeep(nn.Module):
        x = self.fc(x)
        y = self.classifier(x)

-        if self.loss == {'xent'}:
+        if self.loss == 'softmax':
            return y
-        elif self.loss == {'xent', 'htri'}:
+        elif self.loss == 'triplet':
            return y, x
        else:
            raise KeyError('Unsupported loss: {}'.format(self.loss))
@@ -1,6 +1,8 @@
from __future__ import absolute_import
from __future__ import division

+__all__ = ['nasnetamobile']
+
import torch
import torch.nn as nn
import torch.nn.functional as F

@@ -28,8 +30,6 @@ Thanks to Anastasiia (https://github.com/DagnyT) for the great help, support and
Code imported from https://github.com/Cadene/pretrained-models.pytorch
"""

-__all__ = ['nasnetamobile']
-

pretrained_settings = {
    'nasnetamobile': {

@@ -648,9 +648,9 @@ class NASNetAMobile(nn.Module):

        y = self.classifier(v)

-        if self.loss == {'xent'}:
+        if self.loss == 'softmax':
            return y
-        elif self.loss == {'xent', 'htri'}:
+        elif self.loss == 'triplet':
            return y, v
        else:
            raise KeyError('Unsupported loss: {}'.format(self.loss))

@@ -669,7 +669,7 @@ def init_pretrained_weights(model, model_url):
    print('Initialized model with pretrained weights from {}'.format(model_url))


-def nasnetamobile(num_classes, loss={'xent'}, pretrained=True, **kwargs):
+def nasnetamobile(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = NASNetAMobile(num_classes, loss, **kwargs)
    if pretrained:
        model_url = pretrained_settings['nasnetamobile']['imagenet']['url']
@@ -1,6 +1,8 @@
from __future__ import absolute_import
from __future__ import division

+__all__ = ['pcb_p6', 'pcb_p4']
+
import torch
from torch import nn
from torch.nn import functional as F

@@ -8,9 +10,6 @@ import torchvision
import torch.utils.model_zoo as model_zoo

-
-__all__ = ['pcb_p6', 'pcb_p4']
-

model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',

@@ -217,9 +216,9 @@ class PCB(nn.Module):
            y_i = self.classifier[i](v_h_i)
            y.append(y_i)

-        if self.loss == {'xent'}:
+        if self.loss == 'softmax':
            return y
-        elif self.loss == {'xent', 'htri'}:
+        elif self.loss == 'triplet':
            v_g = F.normalize(v_g, p=2, dim=1)
            return y, v_g.view(v_g.size(0), -1)
        else:

@@ -239,7 +238,7 @@ def init_pretrained_weights(model, model_url):
    print('Initialized model with pretrained weights from {}'.format(model_url))


-def pcb_p6(num_classes, loss={'xent'}, pretrained=True, **kwargs):
+def pcb_p6(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = PCB(
        num_classes=num_classes,
        loss=loss,

@@ -256,7 +255,7 @@ def pcb_p6(num_classes, loss={'xent'}, pretrained=True, **kwargs):
    return model


-def pcb_p4(num_classes, loss={'xent'}, pretrained=True, **kwargs):
+def pcb_p4(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = PCB(
        num_classes=num_classes,
        loss=loss,
@@ -1,6 +1,8 @@
from __future__ import absolute_import
from __future__ import division

+__all__ = ['resnet50', 'resnet50_fc512']
+
import torch
from torch import nn
from torch.nn import functional as F

@@ -8,9 +10,6 @@ import torchvision
import torch.utils.model_zoo as model_zoo

-
-__all__ = ['resnet50', 'resnet50_fc512']
-

model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',

@@ -215,9 +214,9 @@ class ResNet(nn.Module):

        y = self.classifier(v)

-        if self.loss == {'xent'}:
+        if self.loss == 'softmax':
            return y
-        elif self.loss == {'xent', 'htri'}:
+        elif self.loss == 'triplet':
            return y, v
        else:
            raise KeyError("Unsupported loss: {}".format(self.loss))

@@ -247,7 +246,7 @@ resnet152: block=Bottleneck, layers=[3, 8, 36, 3]
"""


-def resnet50(num_classes, loss={'xent'}, pretrained=True, **kwargs):
+def resnet50(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = ResNet(
        num_classes=num_classes,
        loss=loss,

@@ -263,7 +262,7 @@ def resnet50(num_classes, loss={'xent'}, pretrained=True, **kwargs):
    return model


-def resnet50_fc512(num_classes, loss={'xent'}, pretrained=True, **kwargs):
+def resnet50_fc512(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = ResNet(
        num_classes=num_classes,
        loss=loss,
@@ -1,6 +1,8 @@
from __future__ import absolute_import
from __future__ import division

+__all__ = ['resnet50mid']
+
import torch
from torch import nn
from torch.nn import functional as F

@@ -8,9 +10,6 @@ import torchvision
import torch.utils.model_zoo as model_zoo

-
-__all__ = ['resnet50mid']
-

model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',

@@ -223,9 +222,9 @@ class ResNet(nn.Module):

        y = self.classifier(v)

-        if self.loss == {'xent'}:
+        if self.loss == 'softmax':
            return y
-        elif self.loss == {'xent', 'htri'}:
+        elif self.loss == 'triplet':
            return y, v
        else:
            raise KeyError('Unsupported loss: {}'.format(self.loss))

@@ -255,7 +254,7 @@ resnet152: block=Bottleneck, layers=[3, 8, 36, 3]
"""


-def resnet50mid(num_classes, loss={'xent'}, pretrained=True, **kwargs):
+def resnet50mid(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = ResNet(
        num_classes=num_classes,
        loss=loss,
@@ -1,6 +1,8 @@
from __future__ import absolute_import
from __future__ import division

+__all__ = ['resnext50_32x4d', 'resnext50_32x4d_fc512']
+
import math

import torch

@@ -10,9 +12,6 @@ import torchvision
import torch.utils.model_zoo as model_zoo

-
-__all__ = ['resnext50_32x4d', 'resnext50_32x4d_fc512']
-

model_urls = {
    # top1 = 76.3
    'resnext50_32x4d': 'http://www.eecs.qmul.ac.uk/~kz303/deep-person-reid/model-zoo/imagenet-pretrained/resnext50_32x4d-453b60f8.pth',

@@ -178,9 +177,9 @@ class ResNeXt(nn.Module):

        y = self.classifier(v)

-        if self.loss == {'xent'}:
+        if self.loss == 'softmax':
            return y
-        elif self.loss == {'xent', 'htri'}:
+        elif self.loss == 'triplet':
            return y, v
        else:
            raise KeyError("Unsupported loss: {}".format(self.loss))

@@ -199,7 +198,7 @@ def init_pretrained_weights(model, model_url):
    print('Initialized model with pretrained weights from {}'.format(model_url))


-def resnext50_32x4d(num_classes, loss={'xent'}, pretrained=True, **kwargs):
+def resnext50_32x4d(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = ResNeXt(
        num_classes=num_classes,
        loss=loss,

@@ -217,7 +216,7 @@ def resnext50_32x4d(num_classes, loss={'xent'}, pretrained=True, **kwargs):
    return model


-def resnext50_32x4d_fc512(num_classes, loss={'xent'}, pretrained=True, **kwargs):
+def resnext50_32x4d_fc512(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = ResNeXt(
        num_classes=num_classes,
        loss=loss,
@@ -1,6 +1,16 @@
from __future__ import absolute_import
from __future__ import division

+__all__ = [
+    'senet154',
+    'se_resnet50',
+    'se_resnet101',
+    'se_resnet152',
+    'se_resnext50_32x4d',
+    'se_resnext101_32x4d',
+    'se_resnet50_fc512'
+]
+
from collections import OrderedDict
import math

@@ -16,10 +26,6 @@ Code imported from https://github.com/Cadene/pretrained-models.pytorch
"""

-
-__all__ = ['senet154', 'se_resnet50', 'se_resnet101', 'se_resnet152', 'se_resnext50_32x4d', 'se_resnext101_32x4d',
-           'se_resnet50_fc512']
-

pretrained_settings = {
    'senet154': {
        'imagenet': {

@@ -405,9 +411,9 @@ class SENet(nn.Module):

        y = self.classifier(v)

-        if self.loss == {'xent'}:
+        if self.loss == 'softmax':
            return y
-        elif self.loss == {'xent', 'htri'}:
+        elif self.loss == 'triplet':
            return y, v
        else:
            raise KeyError("Unsupported loss: {}".format(self.loss))

@@ -426,7 +432,7 @@ def init_pretrained_weights(model, model_url):
    print('Initialized model with pretrained weights from {}'.format(model_url))


-def senet154(num_classes, loss={'xent'}, pretrained=True, **kwargs):
+def senet154(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = SENet(
        num_classes=num_classes,
        loss=loss,

@@ -445,7 +451,7 @@ def senet154(num_classes, loss={'xent'}, pretrained=True, **kwargs):
    return model


-def se_resnet50(num_classes, loss={'xent'}, pretrained=True, **kwargs):
+def se_resnet50(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = SENet(
        num_classes=num_classes,
        loss=loss,

@@ -468,7 +474,7 @@ def se_resnet50(num_classes, loss={'xent'}, pretrained=True, **kwargs):
    return model


-def se_resnet50_fc512(num_classes, loss={'xent'}, pretrained=True, **kwargs):
+def se_resnet50_fc512(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = SENet(
        num_classes=num_classes,
        loss=loss,

@@ -491,7 +497,7 @@ def se_resnet50_fc512(num_classes, loss={'xent'}, pretrained=True, **kwargs):
    return model


-def se_resnet101(num_classes, loss={'xent'}, pretrained=True, **kwargs):
+def se_resnet101(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = SENet(
        num_classes=num_classes,
        loss=loss,

@@ -514,7 +520,7 @@ def se_resnet101(num_classes, loss={'xent'}, pretrained=True, **kwargs):
    return model


-def se_resnet152(num_classes, loss={'xent'}, pretrained=True, **kwargs):
+def se_resnet152(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = SENet(
        num_classes=num_classes,
        loss=loss,

@@ -537,7 +543,7 @@ def se_resnet152(num_classes, loss={'xent'}, pretrained=True, **kwargs):
    return model


-def se_resnext50_32x4d(num_classes, loss={'xent'}, pretrained=True, **kwargs):
+def se_resnext50_32x4d(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = SENet(
        num_classes=num_classes,
        loss=loss,

@@ -560,7 +566,7 @@ def se_resnext50_32x4d(num_classes, loss={'xent'}, pretrained=True, **kwargs):
    return model


-def se_resnext101_32x4d(num_classes, loss={'xent'}, pretrained=True, **kwargs):
+def se_resnext101_32x4d(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = SENet(
        num_classes=num_classes,
        loss=loss,
@@ -1,6 +1,8 @@
from __future__ import absolute_import
from __future__ import division

+__all__ = ['shufflenet']
+
import torch
from torch import nn
from torch.nn import functional as F

@@ -8,9 +10,6 @@ import torchvision
import torch.utils.model_zoo as model_zoo

-
-__all__ = ['shufflenet']
-

model_urls = {
    # training epoch = 90, top1 = 61.8
    'imagenet': 'http://www.eecs.qmul.ac.uk/~kz303/deep-person-reid/model-zoo/imagenet-pretrained/shufflenet-bee1b265.pth.tar',

@@ -85,7 +84,7 @@ class ShuffleNet(nn.Module):
    Network for Mobile Devices. CVPR 2018.
    """

-    def __init__(self, num_classes, loss={'xent'}, num_groups=3, **kwargs):
+    def __init__(self, num_classes, loss='softmax', num_groups=3, **kwargs):
        super(ShuffleNet, self).__init__()
        self.loss = loss

@@ -136,9 +135,9 @@ class ShuffleNet(nn.Module):

        y = self.classifier(x)

-        if self.loss == {'xent'}:
+        if self.loss == 'softmax':
            return y
-        elif self.loss == {'xent', 'htri'}:
+        elif self.loss == 'triplet':
            return y, x
        else:
            raise KeyError('Unsupported loss: {}'.format(self.loss))

@@ -157,7 +156,7 @@ def init_pretrained_weights(model, model_url):
    print('Initialized model with pretrained weights from {}'.format(model_url))


-def shufflenet(num_classes, loss={'xent'}, pretrained=True, **kwargs):
+def shufflenet(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = ShuffleNet(num_classes, loss, **kwargs)
    if pretrained:
        init_pretrained_weights(model, model_urls['imagenet'])
@@ -1,6 +1,12 @@
from __future__ import absolute_import
from __future__ import division

+__all__ = [
+    'squeezenet1_0',
+    'squeezenet1_1',
+    'squeezenet1_0_fc512'
+]
+
from collections import OrderedDict
import math

@@ -13,9 +19,6 @@ import torchvision
import torch.utils.model_zoo as model_zoo

-
-__all__ = ['squeezenet1_0', 'squeezenet1_1', 'squeezenet1_0_fc512']
-

model_urls = {
    'squeezenet1_0': 'https://download.pytorch.org/models/squeezenet1_0-a815701f.pth',
    'squeezenet1_1': 'https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth',

@@ -158,9 +161,9 @@ class SqueezeNet(nn.Module):

        y = self.classifier(v)

-        if self.loss == {'xent'}:
+        if self.loss == 'softmax':
            return y
-        elif self.loss == {'xent', 'htri'}:
+        elif self.loss == 'triplet':
            return y, v
        else:
            raise KeyError('Unsupported loss: {}'.format(self.loss))

@@ -179,9 +182,10 @@ def init_pretrained_weights(model, model_url):
    print('Initialized model with pretrained weights from {}'.format(model_url))


-def squeezenet1_0(num_classes, loss={'xent'}, pretrained=True, **kwargs):
+def squeezenet1_0(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = SqueezeNet(
-        num_classes, loss,
+        num_classes,
+        loss,
        version=1.0,
        fc_dims=None,
        dropout_p=None,

@@ -192,9 +196,10 @@ def squeezenet1_0(num_classes, loss={'xent'}, pretrained=True, **kwargs):
    return model


-def squeezenet1_0_fc512(num_classes, loss={'xent'}, pretrained=True, **kwargs):
+def squeezenet1_0_fc512(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = SqueezeNet(
-        num_classes, loss,
+        num_classes,
+        loss,
        version=1.0,
        fc_dims=[512],
        dropout_p=None,

@@ -205,9 +210,10 @@ def squeezenet1_0_fc512(num_classes, loss={'xent'}, pretrained=True, **kwargs):
    return model


-def squeezenet1_1(num_classes, loss={'xent'}, pretrained=True, **kwargs):
+def squeezenet1_1(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = SqueezeNet(
-        num_classes, loss,
+        num_classes,
+        loss,
        version=1.1,
        fc_dims=None,
        dropout_p=None,
@@ -1,6 +1,8 @@
from __future__ import absolute_import
from __future__ import division

+__all__ = ['xception']
+
import math
import torch
import torch.nn as nn

@@ -9,9 +11,6 @@ import torch.utils.model_zoo as model_zoo
from torch.nn import init

-
-__all__ = ['xception']
-

pretrained_settings = {
    'xception': {
        'imagenet': {

@@ -231,9 +230,9 @@ class Xception(nn.Module):

        y = self.classifier(v)

-        if self.loss == {'xent'}:
+        if self.loss == 'softmax':
            return y
-        elif self.loss == {'xent', 'htri'}:
+        elif self.loss == 'triplet':
            return y, v
        else:
            raise KeyError('Unsupported loss: {}'.format(self.loss))

@@ -252,9 +251,10 @@ def init_pretrained_weights(model, model_url):
    print('Initialized model with pretrained weights from {}'.format(model_url))


-def xception(num_classes, loss={'xent'}, pretrained=True, **kwargs):
+def xception(num_classes, loss='softmax', pretrained=True, **kwargs):
    model = Xception(
-        num_classes, loss,
+        num_classes,
+        loss,
        fc_dims=None,
        dropout_p=None,
        **kwargs
@@ -0,0 +1,4 @@
from __future__ import absolute_import

from .optimizers import build_optimizer
from .lr_schedulers import build_lr_scheduler
@@ -0,0 +1,38 @@
from __future__ import absolute_import
from __future__ import print_function

import torch


AVAI_SCH = ['single_step', 'multi_step']


def build_lr_scheduler(optimizer, lr_scheduler, stepsize, gamma=0.1):
    if lr_scheduler not in AVAI_SCH:
        raise ValueError('Unsupported scheduler: {}. Must be one of {}'.format(lr_scheduler, AVAI_SCH))

    print('Initializing lr_scheduler: {}'.format(lr_scheduler))

    if lr_scheduler == 'single_step':
        if not isinstance(stepsize, int):
            raise TypeError(
                'For single_step lr_scheduler, stepsize must '
                'be an integer, but got {}'.format(type(stepsize))
            )

        scheduler = torch.optim.lr_scheduler.StepLR(
            optimizer, step_size=stepsize, gamma=gamma
        )

    elif lr_scheduler == 'multi_step':
        if not isinstance(stepsize, list):
            raise TypeError(
                'For multi_step lr_scheduler, stepsize must '
                'be a list, but got {}'.format(type(stepsize))
            )

        scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer, milestones=stepsize, gamma=gamma
        )

    return scheduler
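Example use of build_lr_scheduler; single_step expects an int stepsize while multi_step expects a list, matching the type checks above:

    import torch
    from torchreid.optim import build_lr_scheduler  # as exported by the __init__ above

    model = torch.nn.Linear(10, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = build_lr_scheduler(optimizer, 'multi_step', stepsize=[20, 40], gamma=0.1)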
@@ -5,44 +5,56 @@ import torch
import torch.nn as nn


-def init_optimizer(
-    model,
-    optim='adam',          # optimizer choices
-    lr=0.003,              # learning rate
-    weight_decay=5e-4,     # weight decay
-    momentum=0.9,          # momentum factor for sgd and rmsprop
-    sgd_dampening=0,       # sgd's dampening for momentum
-    sgd_nesterov=False,    # whether to enable sgd's Nesterov momentum
-    rmsprop_alpha=0.99,    # rmsprop's smoothing constant
-    adam_beta1=0.9,        # exponential decay rate for adam's first moment
-    adam_beta2=0.999,      # exponential decay rate for adam's second moment
-    staged_lr=False,       # different lr for different layers
-    new_layers=None,       # new layers use the default lr, while other layers' lr is scaled by base_lr_mult
-    base_lr_mult=0.1,      # learning rate multiplier for base layers
+AVAI_OPTIMS = ['adam', 'amsgrad', 'sgd', 'rmsprop']
+
+
+def build_optimizer(
+    model,
+    optim='adam',
+    lr=0.0003,
+    weight_decay=5e-04,
+    momentum=0.9,
+    sgd_dampening=0,
+    sgd_nesterov=False,
+    rmsprop_alpha=0.99,
+    adam_beta1=0.9,
+    adam_beta2=0.99,
+    staged_lr=False,
+    new_layers=None,
+    base_lr_mult=0.1
):
+    if optim not in AVAI_OPTIMS:
+        raise ValueError('Unsupported optim: {}. Must be one of {}'.format(optim, AVAI_OPTIMS))
+
+    if not isinstance(model, nn.Module):
+        raise TypeError('model given to build_optimizer must be an instance of nn.Module')
+
    if staged_lr:
        assert new_layers is not None
        if isinstance(new_layers, str):
            new_layers = [new_layers]
+
+        if isinstance(model, nn.DataParallel):
+            model = model.module
+
        base_params = []
        base_layers = []
        new_params = []
-        if isinstance(model, nn.DataParallel):
-            model = model.module
        for name, module in model.named_children():
            if name in new_layers:
                new_params += [p for p in module.parameters()]
            else:
                base_params += [p for p in module.parameters()]
                base_layers.append(name)
        param_groups = [
            {'params': base_params, 'lr': lr * base_lr_mult},
            {'params': new_params},
        ]
        print('Use staged learning rate')
-        print(
-            '* Base layers (initial lr = {}): {}'.format(lr * base_lr_mult, base_layers)
-        )
-        print('* New layers (initial lr = {}): {}'.format(lr, new_layers))
+        print('Base layers (lr*{}): {}'.format(base_lr_mult, base_layers))
+        print('New layers (lr): {}'.format(new_layers))
    else:
        param_groups = model.parameters()

@@ -85,7 +97,4 @@ def init_optimizer(
            alpha=rmsprop_alpha,
        )

-    else:
-        raise ValueError('Unsupported optimizer: {}'.format(optim))
-
    return optimizer
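Example of the staged_lr path with a toy model; the layer names are hypothetical, and the point is that names listed in new_layers keep the full lr while all other top-level modules are scaled by base_lr_mult:

    import torch.nn as nn
    from torchreid.optim import build_optimizer

    class ToyNet(nn.Module):
        def __init__(self):
            super(ToyNet, self).__init__()
            self.backbone = nn.Linear(10, 10)    # treated as a base layer
            self.classifier = nn.Linear(10, 2)   # treated as a new layer

    optimizer = build_optimizer(
        ToyNet(), optim='sgd', lr=0.01,
        staged_lr=True, new_layers='classifier', base_lr_mult=0.1
    )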
@@ -1,87 +0,0 @@
from __future__ import absolute_import
from __future__ import division

from collections import defaultdict
import numpy as np
import copy
import random

import torch
from torch.utils.data.sampler import Sampler, RandomSampler


class RandomIdentitySampler(Sampler):
    """Randomly samples N identities each with K instances.

    Args:
        data_source (list): contains a list of (img_path, pid, camid).
        batch_size (int): number of examples in a batch.
        num_instances (int): number of instances per identity in a batch.
    """

    def __init__(self, data_source, batch_size, num_instances):
        self.data_source = data_source
        self.batch_size = batch_size
        self.num_instances = num_instances
        self.num_pids_per_batch = self.batch_size // self.num_instances
        self.index_dic = defaultdict(list)
        for index, (_, pid, _) in enumerate(self.data_source):
            self.index_dic[pid].append(index)
        self.pids = list(self.index_dic.keys())

        # estimate number of examples in an epoch
        self.length = 0
        for pid in self.pids:
            idxs = self.index_dic[pid]
            num = len(idxs)
            if num < self.num_instances:
                num = self.num_instances
            self.length += num - num % self.num_instances

    def __iter__(self):
        batch_idxs_dict = defaultdict(list)

        for pid in self.pids:
            idxs = copy.deepcopy(self.index_dic[pid])
            if len(idxs) < self.num_instances:
                idxs = np.random.choice(idxs, size=self.num_instances, replace=True)
            random.shuffle(idxs)
            batch_idxs = []
            for idx in idxs:
                batch_idxs.append(idx)
                if len(batch_idxs) == self.num_instances:
                    batch_idxs_dict[pid].append(batch_idxs)
                    batch_idxs = []

        avai_pids = copy.deepcopy(self.pids)
        final_idxs = []

        while len(avai_pids) >= self.num_pids_per_batch:
            selected_pids = random.sample(avai_pids, self.num_pids_per_batch)
            for pid in selected_pids:
                batch_idxs = batch_idxs_dict[pid].pop(0)
                final_idxs.extend(batch_idxs)
                if len(batch_idxs_dict[pid]) == 0:
                    avai_pids.remove(pid)

        return iter(final_idxs)

    def __len__(self):
        return self.length


def build_train_sampler(data_source, train_sampler, train_batch_size, num_instances, **kwargs):
    """Builds a training sampler.

    Args:
        data_source (list): contains a list of (img_path, pid, camid).
        train_sampler (str): sampler name (default: RandomSampler).
        train_batch_size (int): training batch size.
        num_instances (int): number of instances per identity in a batch (for RandomIdentitySampler).
    """
    if train_sampler == 'RandomIdentitySampler':
        sampler = RandomIdentitySampler(data_source, train_batch_size, num_instances)

    else:
        sampler = RandomSampler(data_source)

    return sampler
@ -1,159 +0,0 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from PIL import Image
import random
import numpy as np
import math

import torch
from torchvision.transforms import *


class Random2DTranslation(object):
    """Randomly translates the input image with a probability.

    Specifically, given a predefined shape (height, width), the input is first
    resized with a factor of 1.125, leading to (height*1.125, width*1.125), then
    a random crop is performed. This operation is applied with a probability.

    Args:
        height (int): target image height.
        width (int): target image width.
        p (float): probability of performing this transformation. Default: 0.5.
    """

    def __init__(self, height, width, p=0.5, interpolation=Image.BILINEAR):
        self.height = height
        self.width = width
        self.p = p
        self.interpolation = interpolation

    def __call__(self, img):
        if random.uniform(0, 1) > self.p:
            return img.resize((self.width, self.height), self.interpolation)

        new_width, new_height = int(round(self.width * 1.125)), int(round(self.height * 1.125))
        resized_img = img.resize((new_width, new_height), self.interpolation)
        x_maxrange = new_width - self.width
        y_maxrange = new_height - self.height
        x1 = int(round(random.uniform(0, x_maxrange)))
        y1 = int(round(random.uniform(0, y_maxrange)))
        cropped_img = resized_img.crop((x1, y1, x1 + self.width, y1 + self.height))
        return cropped_img


class RandomErasing(object):
    '''Randomly erases an image patch.

    Performs Random Erasing as described in "Random Erasing Data Augmentation" by Zhong et al.

    Args:
        probability: probability that the operation is performed.
        sl: min erasing area.
        sh: max erasing area.
        r1: min aspect ratio.
        mean: erasing value.

    Imported from https://github.com/zhunzhong07/Random-Erasing
    '''

    def __init__(self, probability=0.5, sl=0.02, sh=0.4, r1=0.3, mean=[0.4914, 0.4822, 0.4465]):
        self.probability = probability
        self.mean = mean
        self.sl = sl
        self.sh = sh
        self.r1 = r1

    def __call__(self, img):
        if random.uniform(0, 1) > self.probability:
            return img

        for attempt in range(100):
            area = img.size()[1] * img.size()[2]

            target_area = random.uniform(self.sl, self.sh) * area
            aspect_ratio = random.uniform(self.r1, 1/self.r1)

            h = int(round(math.sqrt(target_area * aspect_ratio)))
            w = int(round(math.sqrt(target_area / aspect_ratio)))

            if w < img.size()[2] and h < img.size()[1]:
                x1 = random.randint(0, img.size()[1] - h)
                y1 = random.randint(0, img.size()[2] - w)
                if img.size()[0] == 3:
                    img[0, x1:x1+h, y1:y1+w] = self.mean[0]
                    img[1, x1:x1+h, y1:y1+w] = self.mean[1]
                    img[2, x1:x1+h, y1:y1+w] = self.mean[2]
                else:
                    img[0, x1:x1+h, y1:y1+w] = self.mean[0]
                return img

        return img


class ColorAugmentation(object):
    """Randomly alters the intensities of RGB channels.

    Reference:
        Krizhevsky et al. ImageNet Classification with Deep Convolutional Neural Networks. NIPS 2012.
    """

    def __init__(self, p=0.5):
        self.p = p
        self.eig_vec = torch.Tensor([
            [0.4009, 0.7192, -0.5675],
            [-0.8140, -0.0045, -0.5808],
            [0.4203, -0.6948, -0.5836],
        ])
        self.eig_val = torch.Tensor([[0.2175, 0.0188, 0.0045]])

    def _check_input(self, tensor):
        assert tensor.dim() == 3 and tensor.size(0) == 3

    def __call__(self, tensor):
        if random.uniform(0, 1) > self.p:
            return tensor
        alpha = torch.normal(mean=torch.zeros_like(self.eig_val)) * 0.1
        quantity = torch.mm(self.eig_val * alpha, self.eig_vec)
        tensor = tensor + quantity.view(3, 1, 1)
        return tensor


def build_transforms(
    height,
    width,
    random_erase=False,  # use random erasing for data augmentation
    color_jitter=False,  # randomly change the brightness, contrast and saturation
    color_aug=False,     # randomly alter the intensities of RGB channels
    **kwargs
):
    # use imagenet mean and std as default
    imagenet_mean = [0.485, 0.456, 0.406]
    imagenet_std = [0.229, 0.224, 0.225]
    normalize = Normalize(mean=imagenet_mean, std=imagenet_std)

    # build train transformations
    transform_train = []
    transform_train += [Random2DTranslation(height, width)]
    transform_train += [RandomHorizontalFlip()]
    if color_jitter:
        transform_train += [ColorJitter(brightness=0.2, contrast=0.15, saturation=0, hue=0)]
    transform_train += [ToTensor()]
    if color_aug:
        transform_train += [ColorAugmentation()]
    transform_train += [normalize]
    if random_erase:
        transform_train += [RandomErasing()]
    transform_train = Compose(transform_train)

    # build test transformations
    transform_test = Compose([
        Resize((height, width)),
        ToTensor(),
        normalize,
    ])

    return transform_train, transform_test

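# Usage sketch (illustrative, not part of this commit): applying the train
# transforms above to a dummy PIL image; the sizes follow the CLI defaults.
if __name__ == '__main__':
    from PIL import Image

    transform_train, transform_test = build_transforms(
        height=256, width=128, random_erase=True, color_jitter=True)

    img = Image.new('RGB', (64, 128))   # stand-in for a loaded person crop
    x = transform_train(img)            # FloatTensor of shape (3, 256, 128)
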
@ -0,0 +1,7 @@
from __future__ import absolute_import

from .avgmeter import *
from .loggers import *
from .tools import *
from .reidtools import *
from .torchtools import *

@ -1,6 +1,8 @@
from __future__ import absolute_import
from __future__ import division

__all__ = ['AverageMeter']


class AverageMeter(object):
    """Computes and stores the average and current value"""

@ -1,16 +0,0 @@
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division

import os
import os.path as osp
import random
import numpy as np
import torch


def set_random_seed(seed):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

@ -1,36 +0,0 @@
from __future__ import absolute_import

import os
import os.path as osp
import errno
import json
from collections import OrderedDict
import warnings


def mkdir_if_missing(directory):
    if not osp.exists(directory):
        try:
            os.makedirs(directory)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise


def check_isfile(path):
    isfile = osp.isfile(path)
    if not isfile:
        warnings.warn('No file found at "{}"'.format(path))
    return isfile


def read_json(fpath):
    with open(fpath, 'r') as f:
        obj = json.load(f)
    return obj


def write_json(obj, fpath):
    mkdir_if_missing(osp.dirname(fpath))
    with open(fpath, 'w') as f:
        json.dump(obj, f, indent=4, separators=(',', ': '))

@ -1,10 +1,12 @@
from __future__ import absolute_import

__all__ = ['Logger', 'RankLogger']

import sys
import os
import os.path as osp

from .iotools import mkdir_if_missing
from .tools import mkdir_if_missing


class Logger(object):

@ -1,12 +1,14 @@
from __future__ import absolute_import
from __future__ import print_function

__all__ = ['visualize_ranked_results']

import numpy as np
import os
import os.path as osp
import shutil

from .iotools import mkdir_if_missing
from .tools import mkdir_if_missing


def visualize_ranked_results(distmat, dataset, save_dir='log/ranked_results', topk=20):

@ -0,0 +1,91 @@
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division

__all__ = ['mkdir_if_missing', 'check_isfile', 'read_json', 'write_json',
           'set_random_seed', 'download_url', 'read_image']

import sys
import os
import os.path as osp
import time
import errno
import json
from collections import OrderedDict
import warnings
import random
import numpy as np
from PIL import Image

import torch


def mkdir_if_missing(directory):
    if not osp.exists(directory):
        try:
            os.makedirs(directory)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise


def check_isfile(path):
    isfile = osp.isfile(path)
    if not isfile:
        warnings.warn('No file found at "{}"'.format(path))
    return isfile


def read_json(fpath):
    with open(fpath, 'r') as f:
        obj = json.load(f)
    return obj


def write_json(obj, fpath):
    mkdir_if_missing(osp.dirname(fpath))
    with open(fpath, 'w') as f:
        json.dump(obj, f, indent=4, separators=(',', ': '))


def set_random_seed(seed):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)


def download_url(url, dst):
    from six.moves import urllib
    print('* url="{}"'.format(url))
    print('* destination="{}"'.format(dst))

    def _reporthook(count, block_size, total_size):
        global start_time
        if count == 0:
            start_time = time.time()
            return
        duration = time.time() - start_time
        progress_size = int(count * block_size)
        speed = int(progress_size / (1024 * duration))
        percent = int(count * block_size * 100 / total_size)
        sys.stdout.write('\r...%d%%, %d MB, %d KB/s, %d seconds passed' %
                         (percent, progress_size / (1024 * 1024), speed, duration))
        sys.stdout.flush()

    urllib.request.urlretrieve(url, dst, _reporthook)
    sys.stdout.write('\n')


def read_image(path):
    got_img = False
    if not osp.exists(path):
        raise IOError('"{}" does not exist'.format(path))
    while not got_img:
        try:
            img = Image.open(path).convert('RGB')
            got_img = True
        except IOError:
            print('IOError incurred when reading "{}". Will redo. Don\'t worry. Just chill.'.format(path))
            pass
    return img

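# Usage sketch (illustrative, not part of this commit): composing the helpers
# above; the file paths are made-up examples.
if __name__ == '__main__':
    set_random_seed(1)  # make runs reproducible across random/numpy/torch

    write_json({'rank1': 0.87}, 'log/results/example.json')
    results = read_json('log/results/example.json')

    path = 'data/market1501/query/0001_c1s1_001051_00.jpg'
    if check_isfile(path):
        img = read_image(path)  # PIL image in RGB mode
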
@ -2,6 +2,16 @@ from __future__ import absolute_import
from __future__ import print_function
from __future__ import division

__all__ = [
    'save_checkpoint',
    'load_checkpoint',
    'resume_from_checkpoint',
    'open_all_layers',
    'open_specified_layers',
    'count_num_param',
    'load_pretrained_weights'
]

from collections import OrderedDict
import shutil
import warnings

@ -13,7 +23,7 @@ import pickle
import torch
import torch.nn as nn

from .iotools import mkdir_if_missing
from .tools import mkdir_if_missing


def save_checkpoint(state, save_dir, is_best=False, remove_module_from_keys=False):

@ -104,11 +114,14 @@ def open_specified_layers(model, open_layers):

    Args:
        model (nn.Module): neural net model.
        open_layers (list): list of layer names.
        open_layers (str or list): layers open for training.
    """
    if isinstance(model, nn.DataParallel):
        model = model.module

    if isinstance(open_layers, str):
        open_layers = [open_layers]

    for layer in open_layers:
        assert hasattr(model, layer), '"{}" is not an attribute of the model, please provide the correct name'.format(layer)

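# Usage sketch (illustrative, not part of this commit): the two-stepped
# transfer-learning pattern these helpers support; ToyReIDNet and the layer
# name 'classifier' are assumptions for the example.
if __name__ == '__main__':
    import torch.nn as nn

    class ToyReIDNet(nn.Module):
        def __init__(self):
            super(ToyReIDNet, self).__init__()
            self.backbone = nn.Linear(512, 512)
            self.classifier = nn.Linear(512, 751)

    model = ToyReIDNet()
    # step 1: train only the freshly initialized head for a few warm-up epochs
    open_specified_layers(model, 'classifier')
    # step 2: then open everything for end-to-end fine-tuning
    open_all_layers(model)
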
@ -124,6 +137,11 @@ def open_specified_layers(model, open_layers):


def count_num_param(model):
    """Counts number of parameters in a model.

    Args:
        model (nn.Module): neural network.
    """
    num_param = sum(p.numel() for p in model.parameters()) / 1e+06

    if isinstance(model, nn.DataParallel):

@ -135,27 +153,6 @@ def count_num_param(model):
    return num_param


def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k"""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        if isinstance(output, (tuple, list)):
            output = output[0]

        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
            acc = correct_k.mul_(100.0 / batch_size)
            res.append(acc.item())
        return res


def load_pretrained_weights(model, weight_path):
    """Loads pretrained weights to model.

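# Usage sketch (illustrative, not part of this commit): exercising the
# accuracy and count_num_param helpers above on random logits; the batch
# size and the 751-class head (Market-1501-style) are assumptions.
if __name__ == '__main__':
    import torch

    logits = torch.randn(32, 751)            # batch of 32, 751 identity classes
    labels = torch.randint(0, 751, (32,))
    top1, top5 = accuracy(logits, labels, topk=(1, 5))
    print('top-1: {:.2f}%, top-5: {:.2f}%'.format(top1, top5))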