deep-person-reid/torchreid/datasets/ilidsvid.py

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import glob
import re
import sys
import tarfile
import zipfile
import os.path as osp
from scipy.io import loadmat
import numpy as np
import h5py

# urlretrieve lives in urllib.request under Python 3
try:
    from urllib import urlretrieve  # Python 2
except ImportError:
    from urllib.request import urlretrieve  # Python 3

from torchreid.utils.iotools import mkdir_if_missing, write_json, read_json
from .bases import BaseVideoDataset


class iLIDSVID(BaseVideoDataset):
    """iLIDS-VID

    Reference:
        Wang et al. Person Re-Identification by Video Ranking. ECCV 2014.

    URL: http://www.eecs.qmul.ac.uk/~xiatian/downloads_qmul_iLIDS-VID_ReID_dataset.html

    Dataset statistics:
        # identities: 300
        # tracklets: 600
        # cameras: 2
    """
    dataset_dir = 'ilids-vid'

    def __init__(self, root='data', split_id=0, verbose=True, **kwargs):
        super(iLIDSVID, self).__init__(root)
        self.dataset_dir = osp.join(self.root, self.dataset_dir)
        self.dataset_url = 'http://www.eecs.qmul.ac.uk/~xiatian/iLIDS-VID/iLIDS-VID.tar'
        self.data_dir = osp.join(self.dataset_dir, 'i-LIDS-VID')
        self.split_dir = osp.join(self.dataset_dir, 'train-test people splits')
        self.split_mat_path = osp.join(self.split_dir, 'train_test_splits_ilidsvid.mat')
        self.split_path = osp.join(self.dataset_dir, 'splits.json')
        self.cam_1_path = osp.join(self.dataset_dir, 'i-LIDS-VID/sequences/cam1')
        self.cam_2_path = osp.join(self.dataset_dir, 'i-LIDS-VID/sequences/cam2')

        self.download_data()

        required_files = [
            self.dataset_dir,
            self.data_dir,
            self.split_dir
        ]
        self.check_before_run(required_files)

        self.prepare_split()
        splits = read_json(self.split_path)
        if split_id >= len(splits):
            raise ValueError('split_id exceeds range, received {}, but expected between 0 and {}'.format(split_id, len(splits)-1))
        split = splits[split_id]
        train_dirs, test_dirs = split['train'], split['test']

        train = self.process_data(train_dirs, cam1=True, cam2=True)
        query = self.process_data(test_dirs, cam1=True, cam2=False)
        gallery = self.process_data(test_dirs, cam1=False, cam2=True)

        self.init_attributes(train, query, gallery, **kwargs)

        if verbose:
            self.print_dataset_statistics(self.train, self.query, self.gallery)

    def download_data(self):
        if osp.exists(self.dataset_dir):
            return

        mkdir_if_missing(self.dataset_dir)
        fpath = osp.join(self.dataset_dir, osp.basename(self.dataset_url))

        print('Downloading iLIDS-VID dataset')
        urlretrieve(self.dataset_url, fpath)

        print('Extracting files')
        tar = tarfile.open(fpath)
        tar.extractall(path=self.dataset_dir)
        tar.close()

    def prepare_split(self):
        if not osp.exists(self.split_path):
            print('Creating splits ...')
            mat_split_data = loadmat(self.split_mat_path)['ls_set']

            num_splits = mat_split_data.shape[0]
            num_total_ids = mat_split_data.shape[1]
            assert num_splits == 10
            assert num_total_ids == 300
            num_ids_each = num_total_ids // 2

            # pids in mat_split_data are indices, so we need to transform them
            # to real pids
            person_cam1_dirs = sorted(glob.glob(osp.join(self.cam_1_path, '*')))
            person_cam2_dirs = sorted(glob.glob(osp.join(self.cam_2_path, '*')))
            person_cam1_dirs = [osp.basename(item) for item in person_cam1_dirs]
            person_cam2_dirs = [osp.basename(item) for item in person_cam2_dirs]

            # make sure persons in one camera view can be found in the other camera view
            assert set(person_cam1_dirs) == set(person_cam2_dirs)

            splits = []
            for i_split in range(num_splits):
                # first 50% for testing and the remaining for training, following Wang et al. ECCV'14.
                train_idxs = sorted(list(mat_split_data[i_split, num_ids_each:]))
                test_idxs = sorted(list(mat_split_data[i_split, :num_ids_each]))
                train_idxs = [int(i)-1 for i in train_idxs]
                test_idxs = [int(i)-1 for i in test_idxs]

                # transform pids to person dir names
                train_dirs = [person_cam1_dirs[i] for i in train_idxs]
                test_dirs = [person_cam1_dirs[i] for i in test_idxs]

                split = {'train': train_dirs, 'test': test_dirs}
                splits.append(split)

            print('Totally {} splits are created, following Wang et al. ECCV\'14'.format(len(splits)))
            print('Split file is saved to {}'.format(self.split_path))
            write_json(splits, self.split_path)

    def process_data(self, dirnames, cam1=True, cam2=True):
        tracklets = []
        dirname2pid = {dirname: i for i, dirname in enumerate(dirnames)}

        for dirname in dirnames:
            if cam1:
                person_dir = osp.join(self.cam_1_path, dirname)
                img_names = glob.glob(osp.join(person_dir, '*.png'))
                assert len(img_names) > 0
                img_names = tuple(img_names)
                pid = dirname2pid[dirname]
                tracklets.append((img_names, pid, 0))

            if cam2:
                person_dir = osp.join(self.cam_2_path, dirname)
                img_names = glob.glob(osp.join(person_dir, '*.png'))
                assert len(img_names) > 0
                img_names = tuple(img_names)
                pid = dirname2pid[dirname]
                tracklets.append((img_names, pid, 1))

        return tracklets
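

# Minimal usage sketch, assuming the default torchreid data layout where the
# iLIDS-VID archive is placed (or downloaded) under <root>/ilids-vid. Running
# this directly will trigger the download on first use.
if __name__ == '__main__':
    dataset = iLIDSVID(root='data', split_id=0, verbose=True)
    # each tracklet is an (img_paths, pid, camid) tuple as built in process_data()
    img_paths, pid, camid = dataset.train[0]
    print('first training tracklet: pid={}, camid={}, {} frames'.format(pid, camid, len(img_paths)))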