# deep-person-reid/torchreid/datasets/dukemtmcvidreid.py

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import zipfile
import os.path as osp
# urllib.urlretrieve only exists on Python 2; use a small compat shim so the
# download also works on Python 3
try:
    from urllib.request import urlretrieve  # Python 3
except ImportError:
    from urllib import urlretrieve  # Python 2

from torchreid.utils.iotools import mkdir_if_missing, write_json, read_json
from .bases import BaseVideoDataset


class DukeMTMCVidReID(BaseVideoDataset):
"""DukeMTMCVidReID
2018-07-02 17:17:14 +08:00
Reference:
Wu et al. Exploit the Unknown Gradually: One-Shot Video-Based Person
Re-Identification by Stepwise Learning. CVPR 2018.
2018-08-15 21:29:44 +08:00
URL: https://github.com/Yu-Wu/DukeMTMC-VideoReID
2018-07-02 17:17:14 +08:00
Dataset statistics:
# identities: 702 (train) + 702 (test)
# tracklets: 2196 (train) + 2636 (test)
"""
dataset_dir = 'dukemtmc-vidreid'
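
    # Expected directory layout after download/extraction (inferred from the
    # path handling below; not stated in the original file):
    #   <root>/dukemtmc-vidreid/DukeMTMC-VideoReID/
    #       train/<pid>/<tracklet_id>/*.jpg
    #       query/<pid>/<tracklet_id>/*.jpg
    #       gallery/<pid>/<tracklet_id>/*.jpg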
    def __init__(self, root='data', min_seq_len=0, verbose=True, **kwargs):
        super(DukeMTMCVidReID, self).__init__(root)
        self.dataset_dir = osp.join(self.root, self.dataset_dir)
        self.dataset_url = 'http://vision.cs.duke.edu/DukeMTMC/data/misc/DukeMTMC-VideoReID.zip'
        self.train_dir = osp.join(self.dataset_dir, 'DukeMTMC-VideoReID/train')
        self.query_dir = osp.join(self.dataset_dir, 'DukeMTMC-VideoReID/query')
        self.gallery_dir = osp.join(self.dataset_dir, 'DukeMTMC-VideoReID/gallery')

        self.split_train_json_path = osp.join(self.dataset_dir, 'split_train.json')
        self.split_query_json_path = osp.join(self.dataset_dir, 'split_query.json')
        self.split_gallery_json_path = osp.join(self.dataset_dir, 'split_gallery.json')
        self.min_seq_len = min_seq_len

        self.download_data()

        required_files = [
            self.dataset_dir,
            self.train_dir,
            self.query_dir,
            self.gallery_dir
        ]
        self.check_before_run(required_files)
        train = self.process_dir(self.train_dir, self.split_train_json_path, relabel=True)
        query = self.process_dir(self.query_dir, self.split_query_json_path, relabel=False)
        gallery = self.process_dir(self.gallery_dir, self.split_gallery_json_path, relabel=False)
        self.init_attributes(train, query, gallery)
        if verbose:
            self.print_dataset_statistics(train, query, gallery)

    def download_data(self):
        if osp.exists(self.dataset_dir):
            return

        print('Creating directory {}'.format(self.dataset_dir))
        mkdir_if_missing(self.dataset_dir)
        fpath = osp.join(self.dataset_dir, osp.basename(self.dataset_url))

        print('Downloading DukeMTMC-VideoReID dataset')
        urlretrieve(self.dataset_url, fpath)

        print('Extracting files')
        with zipfile.ZipFile(fpath, 'r') as zip_ref:
            zip_ref.extractall(self.dataset_dir)

    def process_dir(self, dir_path, json_path, relabel):
        # reuse a previously generated split if the json cache exists
        if osp.exists(json_path):
            split = read_json(json_path)
            return split['tracklets']

        print('=> Generating split json file (** this might take a while **)')
        pdirs = glob.glob(osp.join(dir_path, '*'))  # avoid .DS_Store
        print('Processing "{}" with {} person identities'.format(dir_path, len(pdirs)))

        # collect all person ids so training ids can be relabeled to 0-based
        pid_container = set()
        for pdir in pdirs:
            pid = int(osp.basename(pdir))
            pid_container.add(pid)
        pid2label = {pid: label for label, pid in enumerate(pid_container)}

        tracklets = []
        for pdir in pdirs:
            pid = int(osp.basename(pdir))
            if relabel:
                pid = pid2label[pid]
            tdirs = glob.glob(osp.join(pdir, '*'))
            for tdir in tdirs:
                raw_img_paths = glob.glob(osp.join(tdir, '*.jpg'))
                num_imgs = len(raw_img_paths)
                if num_imgs < self.min_seq_len:
                    continue

                img_paths = []
                for img_idx in range(num_imgs):
                    # some tracklets start from 0002 instead of 0001
                    img_idx_name = 'F' + str(img_idx + 1).zfill(4)
                    res = glob.glob(osp.join(tdir, '*' + img_idx_name + '*.jpg'))
                    if len(res) == 0:
                        print('Warn: index name {} in {} is missing, skipping to next'.format(img_idx_name, tdir))
                        continue
                    img_paths.append(res[0])

                img_name = osp.basename(img_paths[0])
                if img_name.find('_') == -1:
                    # old naming format: 0001C6F0099X30823.jpg
                    camid = int(img_name[5]) - 1
                else:
                    # new naming format: 0001_C6_F0099_X30823.jpg
                    camid = int(img_name[6]) - 1
                img_paths = tuple(img_paths)
                tracklets.append((img_paths, pid, camid))

        print('Saving split to {}'.format(json_path))
        split_dict = {'tracklets': tracklets}
        write_json(split_dict, json_path)
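        # note: json serialization turns the (img_paths, pid, camid) tuples
        # into lists, so a split loaded from the cache above returns
        # list-typed entries rather than tuples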

        return tracklets
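

# A minimal usage sketch (not part of the original module). Instantiating the
# class downloads/extracts the data on first run, builds or loads the split
# json files, and exposes train/query/gallery tracklet lists. The attribute
# names below assume the init_attributes behavior of BaseVideoDataset.
if __name__ == '__main__':
    dataset = DukeMTMCVidReID(root='data', min_seq_len=0, verbose=True)
    # each entry is (img_paths, pid, camid), with img_paths a tuple of frames
    img_paths, pid, camid = dataset.train[0]
    print('first tracklet: {} frames, pid {}, camid {}'.format(len(img_paths), pid, camid))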