Add files via upload

pull/44/head
zjk15068083791 2020-04-27 16:27:44 +08:00 committed by GitHub
parent 5daf322ac6
commit 2f3f6e3267
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 242 additions and 19 deletions

View File

@ -1,19 +1,21 @@
# encoding: utf-8
"""
@author: liaoxingyu
@contact: sherlockliao01@gmail.com
"""
from ...utils.registry import Registry
DATASET_REGISTRY = Registry("DATASET")
DATASET_REGISTRY.__doc__ = """
Registry for datasets
It must returns an instance of :class:`Backbone`.
"""
from .cuhk03 import CUHK03
from .dukemtmcreid import DukeMTMC
from .market1501 import Market1501
from .msmt17 import MSMT17
# encoding: utf-8
"""
@author: liaoxingyu
@contact: sherlockliao01@gmail.com
"""

from ...utils.registry import Registry

DATASET_REGISTRY = Registry("DATASET")
# FIX: the original doc was copy-pasted from the backbone registry
# ("It must returns an instance of :class:`Backbone`"); datasets registered
# here subclass ImageDataset (see cuhk03/veri/vehicleid modules).
DATASET_REGISTRY.__doc__ = """
Registry for datasets.
Registered callables must return an instance of :class:`ImageDataset`.
"""

# NOTE: these imports must stay below the DATASET_REGISTRY definition —
# each dataset module registers itself against the registry at import time.
from .cuhk03 import CUHK03
from .dukemtmcreid import DukeMTMC
from .market1501 import Market1501
from .msmt17 import MSMT17
from .veri import VeRi
from .vehicleid import VehicleID
from .veriwild import VeRiWild

View File

@ -0,0 +1,72 @@
# encoding: utf-8
"""
@author: Jinkai Zheng
@contact: 1315673509@qq.com
"""
import os.path as osp
import random
from .bases import ImageDataset
from ..datasets import DATASET_REGISTRY
@DATASET_REGISTRY.register()
class VehicleID(ImageDataset):
    """VehicleID.

    Reference:
        Liu et al. Deep relative distance learning: Tell the difference between similar vehicles. CVPR 2016.

    URL: `<https://pkuml.org/resources/pku-vehicleid.html>`_

    Dataset statistics:
        - identities: 26267.
        - images: 221763.
    """
    dataset_dir = 'vehicleid'  # sub-directory of ``root`` holding the dataset
    dataset_url = None  # no automatic download available

    def __init__(self, root='/home/liuxinchen3/notespace/data', **kwargs):
        self.dataset_dir = osp.join(root, self.dataset_dir)
        self.image_dir = osp.join(self.dataset_dir, 'image')
        self.train_list = osp.join(self.dataset_dir, 'train_test_split/train_list.txt')
        # NOTE: evaluation uses the 2400-identity test split.
        self.test_list = osp.join(self.dataset_dir, 'train_test_split/test_list_2400.txt')

        required_files = [
            self.dataset_dir,
            self.image_dir,
            self.train_list,
            self.test_list,
        ]
        self.check_before_run(required_files)

        train = self.process_dir(self.train_list, is_train=True)
        query, gallery = self.process_dir(self.test_list, is_train=False)

        super(VehicleID, self).__init__(train, query, gallery, **kwargs)

    def process_dir(self, list_file, is_train=True):
        """Parse ``list_file`` into (img_path, vehicle_id, image_id) triplets.

        Each line of ``list_file`` is '<image id> <vehicle id>'.

        Returns the shuffled sample list when ``is_train`` is True, otherwise
        a ``(query, gallery)`` pair where the query holds one image per
        vehicle id and the gallery is the full shuffled list.
        """
        # FIX: close the list file deterministically (the original
        # ``open(...).readlines()`` leaked the handle).
        with open(list_file, 'r') as f:
            img_list_lines = f.readlines()

        dataset = []
        for line in img_list_lines:
            line = line.strip()
            imgid = line.split(' ')[0]
            vid = line.split(' ')[1]
            img_path = osp.join(self.image_dir, imgid + '.jpg')
            dataset.append((img_path, int(vid), int(imgid)))

        # NOTE(review): shuffle is unseeded, so the query/gallery split below
        # is non-deterministic across runs — kept to match original behavior.
        random.shuffle(dataset)

        if is_train:
            return dataset

        # Evaluation split: the first occurrence of each vehicle id becomes a
        # query image; the entire shuffled list serves as the gallery.
        vid_container = set()
        query = []
        for sample in dataset:
            if sample[1] not in vid_container:
                vid_container.add(sample[1])
                query.append(sample)

        return query, dataset

View File

@ -0,0 +1,65 @@
# encoding: utf-8
"""
@author: Jinkai Zheng
@contact: 1315673509@qq.com
"""
import glob
import os.path as osp
import re
from .bases import ImageDataset
from ..datasets import DATASET_REGISTRY
@DATASET_REGISTRY.register()
class VeRi(ImageDataset):
    """VeRi.

    Reference:
        Liu et al. A Deep Learning based Approach for Progressive Vehicle Re-Identification. ECCV 2016.

    URL: `<https://vehiclereid.github.io/VeRi/>`_

    Dataset statistics:
        - identities: 775.
        - images: 37778 (train) + 1678 (query) + 11579 (gallery).
    """
    dataset_dir = 'veri'  # sub-directory of ``root`` holding the dataset
    dataset_url = None  # no automatic download available

    def __init__(self, root='/home/liuxinchen3/notespace/data', **kwargs):
        self.dataset_dir = osp.join(root, self.dataset_dir)
        self.train_dir = osp.join(self.dataset_dir, 'image_train')
        self.query_dir = osp.join(self.dataset_dir, 'image_query')
        self.gallery_dir = osp.join(self.dataset_dir, 'image_test')

        required_files = [
            self.dataset_dir,
            self.train_dir,
            self.query_dir,
            self.gallery_dir,
        ]
        self.check_before_run(required_files)

        train = self.process_dir(self.train_dir)
        query = self.process_dir(self.query_dir)
        gallery = self.process_dir(self.gallery_dir)

        super(VeRi, self).__init__(train, query, gallery, **kwargs)

    def process_dir(self, dir_path):
        """Collect (img_path, pid, camid) triplets from the .jpg files in ``dir_path``.

        Filenames encode identity and camera as '<pid>_c<3-digit camid>...'.
        The returned camid is shifted to start from 0.
        """
        img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
        pattern = re.compile(r'(\d+)_c(\d\d\d)')

        data = []
        for img_path in img_paths:
            pid, camid = map(int, pattern.search(img_path).groups())
            # FIX: dropped the original ``if pid == -1: continue`` junk-image
            # check — the pattern matches only unsigned digits, so pid can
            # never parse to -1 (dead code inherited from person-reid loaders).
            # Sanity bounds for VeRi-776.
            # NOTE: asserts are stripped under ``python -O``.
            assert 1 <= pid <= 776
            assert 1 <= camid <= 20
            camid -= 1  # downstream code expects 0-based camera indices
            data.append((img_path, pid, camid))
        return data

View File

@ -0,0 +1,84 @@
# encoding: utf-8
"""
@author: Jinkai Zheng
@contact: 1315673509@qq.com
"""
import os.path as osp
from .bases import ImageDataset
from ..datasets import DATASET_REGISTRY
@DATASET_REGISTRY.register()
class VeRiWild(ImageDataset):
    """VeRi-Wild.

    Reference:
        Lou et al. A Large-Scale Dataset for Vehicle Re-Identification in the Wild. CVPR 2019.

    URL: `<https://github.com/PKU-IMRE/VERI-Wild>`_

    Dataset statistics:
        - identities: 40671.
        - images: 416314.
    """
    dataset_dir = 'VERI-Wild'  # sub-directory of ``root`` holding the dataset
    dataset_url = None  # no automatic download available

    def __init__(self, root='/home/liuxinchen3/notespace/data', **kwargs):
        self.dataset_dir = osp.join(root, self.dataset_dir)
        self.image_dir = osp.join(self.dataset_dir, 'images')
        self.train_list = osp.join(self.dataset_dir, 'train_test_split/train_list.txt')
        # NOTE: evaluation uses the 10000-identity test split.
        self.query_list = osp.join(self.dataset_dir, 'train_test_split/test_10000_query.txt')
        self.gallery_list = osp.join(self.dataset_dir, 'train_test_split/test_10000.txt')
        self.vehicle_info = osp.join(self.dataset_dir, 'train_test_split/vehicle_info.txt')

        required_files = [
            self.image_dir,
            self.train_list,
            self.query_list,
            self.gallery_list,
            self.vehicle_info,
        ]
        self.check_before_run(required_files)

        # Build the image-id lookup tables first; process_dir relies on them.
        self.imgid2vid, self.imgid2camid, self.imgid2imgpath = self.process_vehicle(self.vehicle_info)

        train = self.process_dir(self.train_list)
        query = self.process_dir(self.query_list)
        gallery = self.process_dir(self.gallery_list)

        super(VeRiWild, self).__init__(train, query, gallery, **kwargs)

    def process_dir(self, img_list):
        """Resolve each '<vid>/<imgid>' line of ``img_list`` into a
        (img_path, vid, camid) triplet via the lookup tables built by
        :meth:`process_vehicle`.
        """
        # FIX: close the list file deterministically (the original
        # ``open(...).readlines()`` leaked the handle).
        with open(img_list, 'r') as f:
            img_list_lines = f.readlines()

        dataset = []
        for line in img_list_lines:
            parts = line.strip().split('/')
            vid, imgid = parts[0], parts[1]
            dataset.append((self.imgid2imgpath[imgid], int(vid), int(self.imgid2camid[imgid])))

        # Every listed image must be present in vehicle_info.
        # NOTE: asserts are stripped under ``python -O``.
        assert len(dataset) == len(img_list_lines)
        return dataset

    def process_vehicle(self, vehicle_info):
        """Parse vehicle_info.txt into three dicts keyed by image id:
        vehicle id, camera id, and absolute image path.

        Each data line starts '<vid>/<imgid>;<camid>;...'; the first line is
        a header and is skipped.
        """
        imgid2vid = {}
        imgid2camid = {}
        imgid2imgpath = {}

        # FIX: close the info file deterministically (the original
        # ``open(...).readlines()`` leaked the handle).
        with open(vehicle_info, 'r') as f:
            vehicle_info_lines = f.readlines()

        for line in vehicle_info_lines[1:]:  # skip header line
            fields = line.strip().split(';')
            vid = fields[0].split('/')[0]
            imgid = fields[0].split('/')[1]
            camid = fields[1]
            imgid2vid[imgid] = vid
            imgid2camid[imgid] = camid
            imgid2imgpath[imgid] = osp.join(self.image_dir, vid, imgid + '.jpg')

        # Image ids must be unique across the info file.
        assert len(imgid2vid) == len(vehicle_info_lines) - 1
        return imgid2vid, imgid2camid, imgid2imgpath