deep-person-reid/torchreid/datasets/grid.py

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import glob
import re
import sys
import urllib
import tarfile
import zipfile
import os.path as osp
from scipy.io import loadmat
import numpy as np
import h5py
from scipy.misc import imsave

from torchreid.utils.iotools import mkdir_if_missing, write_json, read_json
from .bases import BaseImageDataset


class GRID(BaseImageDataset):
    """GRID

    Reference:
        Loy et al. Multi-camera activity correlation analysis. CVPR 2009.

    URL: http://personal.ie.cuhk.edu.hk/~ccloy/downloads_qmul_underground_reid.html

    Dataset statistics:
        # identities: 250
        # images: 1275
        # cameras: 8
    """
    dataset_dir = 'grid'

    def __init__(self, root='data', split_id=0, verbose=True, **kwargs):
        super(GRID, self).__init__(root)
        self.dataset_dir = osp.join(self.root, self.dataset_dir)
        self.dataset_url = 'http://personal.ie.cuhk.edu.hk/~ccloy/files/datasets/underground_reid.zip'
        self.probe_path = osp.join(self.dataset_dir, 'underground_reid', 'probe')
        self.gallery_path = osp.join(self.dataset_dir, 'underground_reid', 'gallery')
        self.split_mat_path = osp.join(self.dataset_dir, 'underground_reid', 'features_and_partitions.mat')
        self.split_path = osp.join(self.dataset_dir, 'splits.json')

        self.download_data()

        required_files = [
            self.dataset_dir,
            self.probe_path,
            self.gallery_path,
            self.split_mat_path
        ]
        self.check_before_run(required_files)

        self.prepare_split()
        splits = read_json(self.split_path)
        if split_id >= len(splits):
            raise ValueError('split_id exceeds range, received {}, but expected between 0 and {}'.format(split_id, len(splits)-1))

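        # Each entry in train/query/gallery is stored in splits.json as a
        # [img_path, pid, camid] list; convert them back to tuples below.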
        split = splits[split_id]
        train = split['train']
        query = split['query']
        gallery = split['gallery']
        train = [tuple(item) for item in train]
        query = [tuple(item) for item in query]
        gallery = [tuple(item) for item in gallery]

        self.init_attributes(train, query, gallery)

        if verbose:
            self.print_dataset_statistics(train, query, gallery)

    def download_data(self):
        if osp.exists(self.dataset_dir):
            return

        print('Creating directory {}'.format(self.dataset_dir))
        mkdir_if_missing(self.dataset_dir)
        fpath = osp.join(self.dataset_dir, osp.basename(self.dataset_url))

        print('Downloading GRID dataset')
        # urllib.urlretrieve is the Python 2 API; under Python 3 the equivalent
        # call is urllib.request.urlretrieve
        urllib.urlretrieve(self.dataset_url, fpath)

        print('Extracting files')
        zip_ref = zipfile.ZipFile(fpath, 'r')
        zip_ref.extractall(self.dataset_dir)
        zip_ref.close()

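    # prepare_split derives 10 fixed train/test partitions from
    # features_and_partitions.mat: the 125 training identities of each split
    # are relabeled to 0..124, while query/gallery items keep the raw image
    # index as their pid.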
    def prepare_split(self):
        if not osp.exists(self.split_path):
            print('Creating 10 random splits')
            split_mat = loadmat(self.split_mat_path)
            trainIdxAll = split_mat['trainIdxAll'][0] # length = 10
            probe_img_paths = sorted(glob.glob(osp.join(self.probe_path, '*.jpeg')))
            gallery_img_paths = sorted(glob.glob(osp.join(self.gallery_path, '*.jpeg')))

            splits = []
            for split_idx in range(10):
                train_idxs = trainIdxAll[split_idx][0][0][2][0].tolist()
                assert len(train_idxs) == 125
                idx2label = {idx: label for label, idx in enumerate(train_idxs)}

                train, query, gallery = [], [], []

                # process probe folder
                for img_path in probe_img_paths:
                    img_name = osp.basename(img_path)
                    img_idx = int(img_name.split('_')[0])
                    camid = int(img_name.split('_')[1]) - 1 # index starts from 0
                    if img_idx in train_idxs:
                        train.append((img_path, idx2label[img_idx], camid))
                    else:
                        query.append((img_path, img_idx, camid))

                # process gallery folder
                for img_path in gallery_img_paths:
                    img_name = osp.basename(img_path)
                    img_idx = int(img_name.split('_')[0])
                    camid = int(img_name.split('_')[1]) - 1 # index starts from 0
                    if img_idx in train_idxs:
                        train.append((img_path, idx2label[img_idx], camid))
                    else:
                        gallery.append((img_path, img_idx, camid))

                split = {
                    'train': train,
                    'query': query,
                    'gallery': gallery,
                    'num_train_pids': 125,
                    'num_query_pids': 125,
                    'num_gallery_pids': 900
                }
                splits.append(split)

            print('Totally {} splits are created'.format(len(splits)))
            write_json(splits, self.split_path)
            print('Split file saved to {}'.format(self.split_path))
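

# Minimal usage sketch (not part of the original module): assuming the GRID
# data is available (or downloadable) under <root>/grid, the loader can be
# exercised directly; the root path and split_id below are illustrative.
if __name__ == '__main__':
    # verbose=True prints the train/query/gallery statistics on construction
    dataset = GRID(root='data', split_id=0, verbose=True)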