deep-person-reid/data_manager.py

from __future__ import print_function, absolute_import
import os
import glob
import re
import sys
import urllib
import tarfile
import zipfile
import os.path as osp
from scipy.io import loadmat
import numpy as np
import h5py
from scipy.misc import imsave
from utils import mkdir_if_missing, write_json, read_json
"""Dataset classes"""
"""Image ReID"""
class Market1501(object):
"""
Market1501
Reference:
Zheng et al. Scalable Person Re-identification: A Benchmark. ICCV 2015.
URL: http://www.liangzheng.org/Project/project_reid.html
Dataset statistics:
# identities: 1501 (+1 for background)
# images: 12936 (train) + 3368 (query) + 15913 (gallery)
"""
root = './data/market1501'
train_dir = osp.join(root, 'bounding_box_train')
query_dir = osp.join(root, 'query')
gallery_dir = osp.join(root, 'bounding_box_test')
def __init__(self):
self._check_before_run()
train, num_train_pids, num_train_imgs = self._process_dir(self.train_dir, relabel=True)
query, num_query_pids, num_query_imgs = self._process_dir(self.query_dir, relabel=False)
gallery, num_gallery_pids, num_gallery_imgs = self._process_dir(self.gallery_dir, relabel=False)
num_total_pids = num_train_pids + num_query_pids
num_total_imgs = num_train_imgs + num_query_imgs + num_gallery_imgs
print("=> Market1501 loaded")
print("Dataset statistics:")
print(" ------------------------------")
print(" subset | # ids | # images")
print(" ------------------------------")
print(" train | {:5d} | {:8d}".format(num_train_pids, num_train_imgs))
print(" query | {:5d} | {:8d}".format(num_query_pids, num_query_imgs))
print(" gallery | {:5d} | {:8d}".format(num_gallery_pids, num_gallery_imgs))
print(" ------------------------------")
print(" total | {:5d} | {:8d}".format(num_total_pids, num_total_imgs))
print(" ------------------------------")
self.train = train
self.query = query
self.gallery = gallery
self.num_train_pids = num_train_pids
self.num_query_pids = num_query_pids
self.num_gallery_pids = num_gallery_pids
def _check_before_run(self):
"""Check if all files are available before going deeper"""
if not osp.exists(self.root):
raise RuntimeError("'{}' is not available".format(self.root))
if not osp.exists(self.train_dir):
raise RuntimeError("'{}' is not available".format(self.train_dir))
if not osp.exists(self.query_dir):
raise RuntimeError("'{}' is not available".format(self.query_dir))
if not osp.exists(self.gallery_dir):
raise RuntimeError("'{}' is not available".format(self.gallery_dir))
def _process_dir(self, dir_path, relabel=False):
img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
pattern = re.compile(r'([-\d]+)_c(\d)')
pid_container = set()
for img_path in img_paths:
pid, _ = map(int, pattern.search(img_path).groups())
if pid == -1: continue # junk images are just ignored
pid_container.add(pid)
pid2label = {pid:label for label, pid in enumerate(pid_container)}
dataset = []
for img_path in img_paths:
pid, camid = map(int, pattern.search(img_path).groups())
if pid == -1: continue # junk images are just ignored
assert 0 <= pid <= 1501 # pid == 0 means background
assert 1 <= camid <= 6
camid -= 1 # index starts from 0
if relabel: pid = pid2label[pid]
dataset.append((img_path, pid, camid))
num_pids = len(pid_container)
num_imgs = len(dataset)
return dataset, num_pids, num_imgs
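# Illustrative example (the file name below is assumed, following the documented
# Market-1501 naming convention): how _process_dir parses the person id and camera id.
#
#   >>> pattern = re.compile(r'([-\d]+)_c(\d)')
#   >>> pid, camid = map(int, pattern.search('0001_c1s1_001051_00.jpg').groups())
#   >>> (pid, camid)   # person 1, camera 1 (camid becomes 0 after the -1 shift)
#   (1, 1)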
class CUHK03(object):
"""
CUHK03
Reference:
Li et al. DeepReID: Deep Filter Pairing Neural Network for Person Re-identification. CVPR 2014.
URL: http://www.ee.cuhk.edu.hk/~xgwang/CUHK_identification.html#!
Dataset statistics:
# identities: 1360
# images: 13164
# cameras: 6
# splits: 20
Args:
split_id (int): split index (default: 0)
cuhk03_labeled (bool): whether to load labeled images; if false, detected images are loaded (default: False)
"""
root = './data/cuhk03'
data_dir = osp.join(root, 'cuhk03_release')
raw_mat_path = osp.join(data_dir, 'cuhk-03.mat')
imgs_detected_dir = osp.join(root, 'images_detected')
imgs_labeled_dir = osp.join(root, 'images_labeled')
split_detected_path = osp.join(root, 'splits_detected.json')
split_labeled_path = osp.join(root, 'splits_labeled.json')
def __init__(self, split_id=0, cuhk03_labeled=False):
self._check_before_run()
self._preprocess()
if cuhk03_labeled:
print("Loading CUHK03 Labeled Images")
split_path = self.split_labeled_path
else:
print("Loading CUHK03 Detected Images")
split_path = self.split_detected_path
splits = read_json(split_path)
assert split_id < len(splits), "Condition split_id ({}) < len(splits) ({}) is false".format(split_id, len(splits))
split = splits[split_id]
print("Split index = {}".format(split_id))
self.train = split['train']
self.query = split['query']
self.gallery = split['gallery']
num_train_pids = split['num_train_pids']
num_query_pids = split['num_query_pids']
num_gallery_pids = split['num_gallery_pids']
num_total_pids = num_train_pids + num_query_pids
num_train_imgs = split['num_train_imgs']
num_query_imgs = split['num_query_imgs']
num_gallery_imgs = split['num_gallery_imgs']
num_total_imgs = num_train_imgs + num_query_imgs
print("=> CUHK03 loaded")
print("Dataset statistics:")
print(" ------------------------------")
print(" subset | # ids | # images")
print(" ------------------------------")
print(" train | {:5d} | {:8d}".format(num_train_pids, num_train_imgs))
print(" query | {:5d} | {:8d}".format(num_query_pids, num_query_imgs))
print(" gallery | {:5d} | {:8d}".format(num_gallery_pids, num_gallery_imgs))
print(" ------------------------------")
print(" total | {:5d} | {:8d}".format(num_total_pids, num_total_imgs))
print(" ------------------------------")
def _check_before_run(self):
"""Check if all files are available before going deeper"""
if not osp.exists(self.root):
raise RuntimeError("'{}' is not available".format(self.root))
if not osp.exists(self.data_dir):
raise RuntimeError("'{}' is not available".format(self.data_dir))
if not osp.exists(self.raw_mat_path):
raise RuntimeError("'{}' is not available".format(self.raw_mat_path))
def _preprocess(self):
if osp.exists(self.imgs_labeled_dir) and \
osp.exists(self.imgs_detected_dir) and \
osp.exists(self.split_detected_path) and \
osp.exists(self.split_labeled_path):
return
mkdir_if_missing(self.imgs_detected_dir)
mkdir_if_missing(self.imgs_labeled_dir)
"""
Goal: Extract image data from cuhk-03.mat, which contains three cells, 'detected', 'labeled', and 'testsets'.
'detected' and 'labeled', each containing five cells, meaning five different camera pairs. Each cell
is a (M, 10) matrix where M is the number of identities. The code below aims to loop through each of
M identities and save the data as jpg images. Each image is named with the format 'campid_pid_viewid
_imgid.jpg'. Detailed explanation of the arguments are provided below.
'testsets' contains 20 cells meaning 20 different splits. Each cell is a (100, 2) matrix where the first column
represents indices of camera pairs and the second column corresponds to indices of identities.
"""
print("Extract image data from {} and save as jpg".format(self.raw_mat_path))
mat = h5py.File(self.raw_mat_path, 'r')
def _deref(ref):
return mat[ref][:].T
def _process_images(img_refs, campid, pid, viewid, save_dir):
imgid = 0
img_paths = [] # Note: some persons only have images for one view
for img_ref in img_refs:
img = _deref(img_ref)
# skip empty cell
if img.size == 0 or img.ndim < 3: continue
# images are saved with the following format (ensure uniqueness)
# campid: index of camera pair (0 - 4)
# pid: index of person in 'campid'-th camera pair
# viewid: index of view, {0, 1}
# imgid: index of image, (0 - 4)
img_name = '{:02d}_{:04d}_{:02d}_{:02d}.jpg'.format(campid, pid, viewid, imgid)
img_path = osp.join(save_dir, img_name)
imsave(img_path, img)
img_paths.append(img_path)
imgid += 1
return img_paths
def _extract_img(name):
print("Processing {} images (extract and save) ...".format(name))
meta_data = []
imgs_dir = self.imgs_detected_dir if name == 'detected' else self.imgs_labeled_dir
for campid, camp_ref in enumerate(mat[name][0]):
camp = _deref(camp_ref)
num_pids = camp.shape[0]
for pid in range(num_pids):
img_paths_v0 = _process_images(camp[pid,:5], campid, pid, 0, imgs_dir)
img_paths_v1 = _process_images(camp[pid,5:], campid, pid, 1, imgs_dir)
img_paths_both = img_paths_v0 + img_paths_v1
assert len(img_paths_both) > 0, "campid{}-pid{} has no images".format(campid, pid)
meta_data.append((campid, pid, img_paths_both))
print("done camera pair {}".format(campid+1))
return meta_data
meta_detected = _extract_img('detected')
meta_labeled = _extract_img('labeled')
def _extract_split(meta_data, test_split):
train, test = [], []
num_train_pids, num_test_pids = 0, 0
num_train_imgs, num_test_imgs = 0, 0
for i, (campid, pid, img_paths) in enumerate(meta_data):
if [campid+1, pid+1] in test_split:
for img_path in img_paths:
camid = int(osp.basename(img_path).split('_')[2]) # viewid field; basename avoids '_' in the directory name
test.append((img_path, num_test_pids, camid))
num_test_pids += 1
num_test_imgs += len(img_paths)
else:
for img_path in img_paths:
camid = int(osp.basename(img_path).split('_')[2]) # viewid field; basename avoids '_' in the directory name
train.append((img_path, num_train_pids, camid))
num_train_pids += 1
num_train_imgs += len(img_paths)
return train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs
print("Creating splits ...")
splits_detected, splits_labeled = [], []
for split_ref in mat['testsets'][0]:
test_split = _deref(split_ref).tolist()
# create split for detected images
train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs = \
_extract_split(meta_detected, test_split)
splits_detected.append({
'train': train, 'query': test, 'gallery': test,
'num_train_pids': num_train_pids, 'num_train_imgs': num_train_imgs,
'num_query_pids': num_test_pids, 'num_query_imgs': num_test_imgs,
'num_gallery_pids': num_test_pids, 'num_gallery_imgs': num_test_imgs,
})
# create split for labeled images
train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs = \
_extract_split(meta_labeled, test_split)
splits_labeled.append({
'train': train, 'query': test, 'gallery': test,
'num_train_pids': num_train_pids, 'num_train_imgs': num_train_imgs,
'num_query_pids': num_test_pids, 'num_query_imgs': num_test_imgs,
'num_gallery_pids': num_test_pids, 'num_gallery_imgs': num_test_imgs,
})
print("Total number of splits is {}".format(len(splits_detected)))
write_json(splits_detected, self.split_detected_path)
print("Splits for detected images saved to {}".format(self.split_detected_path))
write_json(splits_labeled, self.split_labeled_path)
print("Splits for labeled images saved to {}".format(self.split_labeled_path))
"""Video ReID"""
class Mars(object):
"""
MARS
Reference:
Zheng et al. MARS: A Video Benchmark for Large-Scale Person Re-identification. ECCV 2016.
URL: http://www.liangzheng.com.cn/Project/project_mars.html
Dataset statistics:
# identities: 1261
# tracklets: 8298 (train) + 1980 (query) + 9330 (gallery)
# cameras: 6
Args:
min_seq_len (int): tracklets with length shorter than this value are discarded (default: 0).
"""
root = './data/mars'
train_name_path = osp.join(root, 'info/train_name.txt')
test_name_path = osp.join(root, 'info/test_name.txt')
track_train_info_path = osp.join(root, 'info/tracks_train_info.mat')
track_test_info_path = osp.join(root, 'info/tracks_test_info.mat')
query_IDX_path = osp.join(root, 'info/query_IDX.mat')
def __init__(self, min_seq_len=0):
self._check_before_run()
# prepare meta data
train_names = self._get_names(self.train_name_path)
test_names = self._get_names(self.test_name_path)
track_train = loadmat(self.track_train_info_path)['track_train_info'] # numpy.ndarray (8298, 4)
track_test = loadmat(self.track_test_info_path)['track_test_info'] # numpy.ndarray (12180, 4)
query_IDX = loadmat(self.query_IDX_path)['query_IDX'].squeeze() # numpy.ndarray (1980,)
query_IDX -= 1 # index from 0
track_query = track_test[query_IDX,:]
gallery_IDX = [i for i in range(track_test.shape[0]) if i not in query_IDX]
track_gallery = track_test[gallery_IDX,:]
train, num_train_tracklets, num_train_pids, num_train_imgs = \
self._process_data(train_names, track_train, home_dir='bbox_train', relabel=True, min_seq_len=min_seq_len)
query, num_query_tracklets, num_query_pids, num_query_imgs = \
self._process_data(test_names, track_query, home_dir='bbox_test', relabel=False, min_seq_len=min_seq_len)
gallery, num_gallery_tracklets, num_gallery_pids, num_gallery_imgs = \
self._process_data(test_names, track_gallery, home_dir='bbox_test', relabel=False, min_seq_len=min_seq_len)
num_imgs_per_tracklet = num_train_imgs + num_query_imgs + num_gallery_imgs
min_num = np.min(num_imgs_per_tracklet)
max_num = np.max(num_imgs_per_tracklet)
avg_num = np.mean(num_imgs_per_tracklet)
num_total_pids = num_train_pids + num_query_pids
num_total_tracklets = num_train_tracklets + num_query_tracklets + num_gallery_tracklets
print("=> MARS loaded")
print("Dataset statistics:")
print(" ------------------------------")
print(" subset | # ids | # tracklets")
print(" ------------------------------")
print(" train | {:5d} | {:8d}".format(num_train_pids, num_train_tracklets))
print(" query | {:5d} | {:8d}".format(num_query_pids, num_query_tracklets))
print(" gallery | {:5d} | {:8d}".format(num_gallery_pids, num_gallery_tracklets))
print(" ------------------------------")
print(" total | {:5d} | {:8d}".format(num_total_pids, num_total_tracklets))
print(" number of images per tracklet: {} ~ {}, average {:.1f}".format(min_num, max_num, avg_num))
print(" ------------------------------")
self.train = train
self.query = query
self.gallery = gallery
self.num_train_pids = num_train_pids
self.num_query_pids = num_query_pids
self.num_gallery_pids = num_gallery_pids
def _check_before_run(self):
"""Check if all files are available before going deeper"""
if not osp.exists(self.root):
raise RuntimeError("'{}' is not available".format(self.root))
if not osp.exists(self.train_name_path):
raise RuntimeError("'{}' is not available".format(self.train_name_path))
if not osp.exists(self.test_name_path):
raise RuntimeError("'{}' is not available".format(self.test_name_path))
if not osp.exists(self.track_train_info_path):
raise RuntimeError("'{}' is not available".format(self.track_train_info_path))
if not osp.exists(self.track_test_info_path):
raise RuntimeError("'{}' is not available".format(self.track_test_info_path))
if not osp.exists(self.query_IDX_path):
raise RuntimeError("'{}' is not available".format(self.query_IDX_path))
def _get_names(self, fpath):
names = []
with open(fpath, 'r') as f:
for line in f:
new_line = line.rstrip()
names.append(new_line)
return names
def _process_data(self, names, meta_data, home_dir=None, relabel=False, min_seq_len=0):
assert home_dir in ['bbox_train', 'bbox_test']
num_tracklets = meta_data.shape[0]
pid_list = list(set(meta_data[:,2].tolist()))
num_pids = len(pid_list)
if relabel: pid2label = {pid:label for label, pid in enumerate(pid_list)}
tracklets = []
num_imgs_per_tracklet = []
for tracklet_idx in range(num_tracklets):
data = meta_data[tracklet_idx,...]
start_index, end_index, pid, camid = data
if pid == -1: continue # junk images are just ignored
assert 1 <= camid <= 6
if relabel: pid = pid2label[pid]
camid -= 1 # index starts from 0
img_names = names[start_index-1:end_index]
# make sure image names correspond to the same person
pnames = [img_name[:4] for img_name in img_names]
assert len(set(pnames)) == 1, "Error: a single tracklet contains different person images"
# make sure all images are captured under the same camera
camnames = [img_name[5] for img_name in img_names]
assert len(set(camnames)) == 1, "Error: images are captured under different cameras!"
# append image names with directory information
img_paths = [osp.join(self.root, home_dir, img_name[:4], img_name) for img_name in img_names]
if len(img_paths) >= min_seq_len:
img_paths = tuple(img_paths)
tracklets.append((img_paths, pid, camid))
num_imgs_per_tracklet.append(len(img_paths))
num_tracklets = len(tracklets)
return tracklets, num_tracklets, num_pids, num_imgs_per_tracklet
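# Illustrative example (assumed MARS-style name): image names encode the person id
# in the first four characters and the camera digit right after 'C', which is what
# _process_data relies on above.
#
#   >>> img_name = '0001C1T0001F001.jpg'
#   >>> (img_name[:4], img_name[5])
#   ('0001', '1')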
class iLIDSVID(object):
"""
iLIDS-VID
Reference:
Wang et al. Person Re-Identification by Video Ranking. ECCV 2014.
URL: http://www.eecs.qmul.ac.uk/~xiatian/downloads_qmul_iLIDS-VID_ReID_dataset.html
Dataset statistics:
# identities: 300
# tracklets: 600
# cameras: 2
Args:
split_id (int): indicates which split to use; there are 10 splits in total.
"""
root = './data/ilids-vid'
dataset_url = 'http://www.eecs.qmul.ac.uk/~xiatian/iLIDS-VID/iLIDS-VID.tar'
data_dir = osp.join(root, 'i-LIDS-VID')
split_dir = osp.join(root, 'train-test people splits')
split_mat_path = osp.join(split_dir, 'train_test_splits_ilidsvid.mat')
split_path = osp.join(root, 'splits.json')
cam_1_path = osp.join(root, 'i-LIDS-VID/sequences/cam1')
cam_2_path = osp.join(root, 'i-LIDS-VID/sequences/cam2')
def __init__(self, split_id=0):
self._download_data()
self._check_before_run()
self._prepare_split()
splits = read_json(self.split_path)
if split_id >= len(splits):
raise ValueError("split_id exceeds range, received {}, but expected between 0 and {}".format(split_id, len(splits)-1))
split = splits[split_id]
train_dirs, test_dirs = split['train'], split['test']
print("# train identites: {}, # test identites {}".format(len(train_dirs), len(test_dirs)))
train, num_train_tracklets, num_train_pids, num_imgs_train = \
self._process_data(train_dirs, cam1=True, cam2=True)
query, num_query_tracklets, num_query_pids, num_imgs_query = \
self._process_data(test_dirs, cam1=True, cam2=False)
gallery, num_gallery_tracklets, num_gallery_pids, num_imgs_gallery = \
self._process_data(test_dirs, cam1=False, cam2=True)
num_imgs_per_tracklet = num_imgs_train + num_imgs_query + num_imgs_gallery
min_num = np.min(num_imgs_per_tracklet)
max_num = np.max(num_imgs_per_tracklet)
avg_num = np.mean(num_imgs_per_tracklet)
num_total_pids = num_train_pids + num_query_pids
num_total_tracklets = num_train_tracklets + num_query_tracklets + num_gallery_tracklets
print("=> iLIDS-VID loaded")
print("Dataset statistics:")
print(" ------------------------------")
print(" subset | # ids | # tracklets")
print(" ------------------------------")
print(" train | {:5d} | {:8d}".format(num_train_pids, num_train_tracklets))
print(" query | {:5d} | {:8d}".format(num_query_pids, num_query_tracklets))
print(" gallery | {:5d} | {:8d}".format(num_gallery_pids, num_gallery_tracklets))
print(" ------------------------------")
print(" total | {:5d} | {:8d}".format(num_total_pids, num_total_tracklets))
print(" number of images per tracklet: {} ~ {}, average {:.1f}".format(min_num, max_num, avg_num))
print(" ------------------------------")
self.train = train
self.query = query
self.gallery = gallery
self.num_train_pids = num_train_pids
self.num_query_pids = num_query_pids
self.num_gallery_pids = num_gallery_pids
def _download_data(self):
if osp.exists(self.root):
print("This dataset has been downloaded.")
return
mkdir_if_missing(self.root)
fpath = osp.join(self.root, osp.basename(self.dataset_url))
print("Downloading iLIDS-VID dataset")
url_opener = urllib.URLopener()
url_opener.retrieve(self.dataset_url, fpath)
print("Extracting files")
tar = tarfile.open(fpath)
tar.extractall(path=self.root)
tar.close()
def _check_before_run(self):
"""Check if all files are available before going deeper"""
if not osp.exists(self.root):
raise RuntimeError("'{}' is not available".format(self.root))
if not osp.exists(self.data_dir):
raise RuntimeError("'{}' is not available".format(self.data_dir))
if not osp.exists(self.split_dir):
raise RuntimeError("'{}' is not available".format(self.split_dir))
def _prepare_split(self):
if not osp.exists(self.split_path):
print("Creating splits")
mat_split_data = loadmat(self.split_mat_path)['ls_set']
num_splits = mat_split_data.shape[0]
num_total_ids = mat_split_data.shape[1]
assert num_splits == 10
assert num_total_ids == 300
num_ids_each = num_total_ids // 2 # integer division so the result can be used as a slice index
# pids in mat_split_data are indices, so we need to transform them
# to real pids
person_cam1_dirs = os.listdir(self.cam_1_path)
person_cam2_dirs = os.listdir(self.cam_2_path)
# make sure persons in one camera view can be found in the other camera view
assert set(person_cam1_dirs) == set(person_cam2_dirs)
splits = []
for i_split in range(num_splits):
# first 50% for testing and the remaining for training, following Wang et al. ECCV'14.
train_idxs = sorted(list(mat_split_data[i_split,num_ids_each:]))
test_idxs = sorted(list(mat_split_data[i_split,:num_ids_each]))
train_idxs = [int(i)-1 for i in train_idxs]
test_idxs = [int(i)-1 for i in test_idxs]
# transform pids to person dir names
train_dirs = [person_cam1_dirs[i] for i in train_idxs]
test_dirs = [person_cam1_dirs[i] for i in test_idxs]
split = {'train': train_dirs, 'test': test_dirs}
splits.append(split)
print("Totally {} splits are created, following Wang et al. ECCV'14".format(len(splits)))
print("Split file is saved to {}".format(self.split_path))
write_json(splits, self.split_path)
print("Splits created")
def _process_data(self, dirnames, cam1=True, cam2=True):
tracklets = []
num_imgs_per_tracklet = []
dirname2pid = {dirname:i for i, dirname in enumerate(dirnames)}
for dirname in dirnames:
if cam1:
person_dir = osp.join(self.cam_1_path, dirname)
img_names = glob.glob(osp.join(person_dir, '*.png'))
assert len(img_names) > 0
img_names = tuple(img_names)
pid = dirname2pid[dirname]
tracklets.append((img_names, pid, 0))
num_imgs_per_tracklet.append(len(img_names))
if cam2:
person_dir = osp.join(self.cam_2_path, dirname)
img_names = glob.glob(osp.join(person_dir, '*.png'))
assert len(img_names) > 0
img_names = tuple(img_names)
pid = dirname2pid[dirname]
tracklets.append((img_names, pid, 1))
num_imgs_per_tracklet.append(len(img_names))
num_tracklets = len(tracklets)
num_pids = len(dirnames)
return tracklets, num_tracklets, num_pids, num_imgs_per_tracklet
class PRID(object):
"""
PRID
Reference:
Hirzer et al. Person Re-Identification by Descriptive and Discriminative Classification. SCIA 2011.
URL: https://www.tugraz.at/institute/icg/research/team-bischof/lrs/downloads/PRID11/
Dataset statistics:
# identities: 200
# tracklets: 400
# cameras: 2
Args:
split_id (int): indicates which split to use; there are 10 splits in total.
min_seq_len (int): tracklets with length shorter than this value are discarded (default: 0).
"""
root = './data/prid2011'
dataset_url = 'https://files.icg.tugraz.at/f/6ab7e8ce8f/?raw=1'
split_path = osp.join(root, 'splits_prid2011.json')
cam_a_path = osp.join(root, 'prid_2011', 'multi_shot', 'cam_a')
cam_b_path = osp.join(root, 'prid_2011', 'multi_shot', 'cam_b')
def __init__(self, split_id=0, min_seq_len=0):
self._check_before_run()
splits = read_json(self.split_path)
if split_id >= len(splits):
raise ValueError("split_id exceeds range, received {}, but expected between 0 and {}".format(split_id, len(splits)-1))
split = splits[split_id]
train_dirs, test_dirs = split['train'], split['test']
print("# train identites: {}, # test identites {}".format(len(train_dirs), len(test_dirs)))
train, num_train_tracklets, num_train_pids, num_imgs_train = \
self._process_data(train_dirs, cam1=True, cam2=True)
query, num_query_tracklets, num_query_pids, num_imgs_query = \
self._process_data(test_dirs, cam1=True, cam2=False)
gallery, num_gallery_tracklets, num_gallery_pids, num_imgs_gallery = \
self._process_data(test_dirs, cam1=False, cam2=True)
num_imgs_per_tracklet = num_imgs_train + num_imgs_query + num_imgs_gallery
min_num = np.min(num_imgs_per_tracklet)
max_num = np.max(num_imgs_per_tracklet)
avg_num = np.mean(num_imgs_per_tracklet)
num_total_pids = num_train_pids + num_query_pids
num_total_tracklets = num_train_tracklets + num_query_tracklets + num_gallery_tracklets
print("=> PRID-2011 loaded")
print("Dataset statistics:")
print(" ------------------------------")
print(" subset | # ids | # tracklets")
print(" ------------------------------")
print(" train | {:5d} | {:8d}".format(num_train_pids, num_train_tracklets))
print(" query | {:5d} | {:8d}".format(num_query_pids, num_query_tracklets))
print(" gallery | {:5d} | {:8d}".format(num_gallery_pids, num_gallery_tracklets))
print(" ------------------------------")
print(" total | {:5d} | {:8d}".format(num_total_pids, num_total_tracklets))
print(" number of images per tracklet: {} ~ {}, average {:.1f}".format(min_num, max_num, avg_num))
print(" ------------------------------")
self.train = train
self.query = query
self.gallery = gallery
self.num_train_pids = num_train_pids
self.num_query_pids = num_query_pids
self.num_gallery_pids = num_gallery_pids
def _check_before_run(self):
"""Check if all files are available before going deeper"""
if not osp.exists(self.root):
raise RuntimeError("'{}' is not available".format(self.root))
def _process_data(self, dirnames, cam1=True, cam2=True):
tracklets = []
num_imgs_per_tracklet = []
dirname2pid = {dirname:i for i, dirname in enumerate(dirnames)}
for dirname in dirnames:
if cam1:
person_dir = osp.join(self.cam_a_path, dirname)
img_names = glob.glob(osp.join(person_dir, '*.png'))
assert len(img_names) > 0
img_names = tuple(img_names)
pid = dirname2pid[dirname]
tracklets.append((img_names, pid, 0))
num_imgs_per_tracklet.append(len(img_names))
if cam2:
person_dir = osp.join(self.cam_b_path, dirname)
img_names = glob.glob(osp.join(person_dir, '*.png'))
assert len(img_names) > 0
img_names = tuple(img_names)
pid = dirname2pid[dirname]
tracklets.append((img_names, pid, 1))
num_imgs_per_tracklet.append(len(img_names))
num_tracklets = len(tracklets)
num_pids = len(dirnames)
return tracklets, num_tracklets, num_pids, num_imgs_per_tracklet
"""Create dataset"""
__factory = {
'market1501': Market1501,
'mars': Mars,
'ilidsvid': iLIDSVID,
'prid': PRID,
}
def get_names():
return __factory.keys()
def init_dataset(name, *args, **kwargs):
if name not in __factory.keys():
raise KeyError("Unknown dataset: {}".format(name))
return __factory[name](*args, **kwargs)
if __name__ == '__main__':
# test
dataset = CUHK03()
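# Minimal usage sketch (illustrative; assumes the corresponding data folders exist):
# any registered dataset can be built through the factory and exposes train/query/
# gallery lists of (img_path(s), pid, camid) tuples.
#
#   dataset = init_dataset('market1501')
#   img_path, pid, camid = dataset.train[0]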