# encoding: utf-8
# mirror of https://github.com/JDAI-CV/fast-reid.git (synced 2025-06-03 14:50:47 +08:00)
"""
@author: liaoxingyu
@contact: liaoxingyu2@jd.com
"""
import copy
import itertools
from collections import defaultdict
from typing import Optional

import numpy as np
from torch.utils.data.sampler import Sampler

from fastreid.utils import comm
|
|
def no_index(a, b):
    """Return the positions in list ``a`` whose element differs from ``b``."""
    assert isinstance(a, list)
    kept = []
    for pos, val in enumerate(a):
        if val != b:
            kept.append(pos)
    return kept
class BalancedIdentitySampler(Sampler):
    """Camera-balanced PK sampler for re-ID training.

    Yields an infinite stream of dataset indices. Each batch holds
    ``num_pids_per_batch`` identities with ``num_instances`` images each.
    For every sampled identity an anchor image is drawn first, then the
    remaining ``num_instances - 1`` images are preferentially drawn from
    cameras *other* than the anchor's, falling back to other images of the
    same identity when no other camera is available.

    Args:
        data_source (list): items indexable as ``item[1] == pid`` and
            ``item[2] == camid`` (e.g. ``(img_path, pid, camid, ...)``).
        batch_size (int): total number of images per batch (N * K).
        num_instances (int): images per identity in a batch (K).
        seed (int, optional): RNG seed shared by all workers; when ``None``
            a seed agreed on across distributed processes is used.
    """

    def __init__(self, data_source: list, batch_size: int, num_instances: int, seed: Optional[int] = None):
        self.data_source = data_source
        self.batch_size = batch_size
        self.num_instances = num_instances
        self.num_pids_per_batch = batch_size // self.num_instances

        self.index_pid = defaultdict(list)  # dataset index -> pid
        self.pid_cam = defaultdict(list)    # pid -> camids (aligned with pid_index)
        self.pid_index = defaultdict(list)  # pid -> dataset indices

        for index, info in enumerate(data_source):
            pid = info[1]
            camid = info[2]
            self.index_pid[index] = pid
            self.pid_cam[pid].append(camid)
            self.pid_index[pid].append(index)

        self.pids = sorted(self.pid_index.keys())
        self.num_identities = len(self.pids)

        if seed is None:
            seed = comm.shared_random_seed()
        self._seed = int(seed)

        self._rank = comm.get_rank()
        self._world_size = comm.get_world_size()

    def __iter__(self):
        # Each rank takes a strided slice of the shared infinite stream so
        # distributed workers consume disjoint indices of the same sequence.
        start = self._rank
        yield from itertools.islice(self._infinite_indices(), start, None, self._world_size)

    def _infinite_indices(self):
        # Use a private RandomState rather than np.random.seed(): for a given
        # seed it reproduces the exact legacy global-RNG sequence, but no
        # longer clobbers np.random state for the rest of the program.
        rng = np.random.RandomState(self._seed)
        while True:
            # Shuffle identity list
            identities = rng.permutation(self.num_identities)

            # If remaining identities cannot fill a whole batch,
            # just drop the remaining part.
            drop_indices = self.num_identities % self.num_pids_per_batch
            if drop_indices:
                identities = identities[:-drop_indices]

            ret = []
            for kid in identities:
                # Anchor image for this identity.
                i = rng.choice(self.pid_index[self.pids[kid]])
                # Index field 2 directly (as __init__ does) so records with
                # more than three fields are tolerated.
                i_cam = self.data_source[i][2]
                ret.append(i)
                pid_i = self.index_pid[i]
                cams = self.pid_cam[pid_i]
                index = self.pid_index[pid_i]
                # Positions within this pid's image list shot by other cameras.
                select_cams = [p for p, c in enumerate(cams) if c != i_cam]

                if select_cams:
                    # Sample with replacement only when too few cross-camera
                    # images exist to fill the slot.
                    replace = len(select_cams) < self.num_instances
                    cam_indexes = rng.choice(select_cams, size=self.num_instances - 1, replace=replace)
                    for kk in cam_indexes:
                        ret.append(index[kk])
                else:
                    # Every image of this pid shares the anchor's camera:
                    # fall back to other images of the same identity.
                    select_indexes = [p for p, j in enumerate(index) if j != i]
                    if not select_indexes:
                        # Only one image for this identity: repeat it.
                        ind_indexes = [0] * (self.num_instances - 1)
                    else:
                        replace = len(select_indexes) < self.num_instances
                        ind_indexes = rng.choice(select_indexes, size=self.num_instances - 1, replace=replace)

                    for kk in ind_indexes:
                        ret.append(index[kk])

                if len(ret) == self.batch_size:
                    yield from ret
                    ret = []
class NaiveIdentitySampler(Sampler):
    """Standard PK sampler for re-ID training.

    Randomly sample N identities, then for each identity randomly sample
    K instances, therefore batch size is N*K. Yields an infinite stream of
    dataset indices; identities are retired within an epoch once they can no
    longer fill a full K-slot, and identities left over at the end of an
    epoch are dropped.

    Args:
        data_source (list): items indexable as ``item[1] == pid`` and
            ``item[2] == camid`` (e.g. ``(img_path, pid, camid, ...)``).
        batch_size (int): number of examples in a batch (N * K).
        num_instances (int): number of instances per identity in a batch (K).
        seed (int, optional): RNG seed shared by all workers; when ``None``
            a seed agreed on across distributed processes is used.
    """

    def __init__(self, data_source: list, batch_size: int, num_instances: int, seed: Optional[int] = None):
        self.data_source = data_source
        self.batch_size = batch_size
        self.num_instances = num_instances
        self.num_pids_per_batch = batch_size // self.num_instances

        self.index_pid = defaultdict(list)  # dataset index -> pid
        self.pid_cam = defaultdict(list)    # pid -> camids
        self.pid_index = defaultdict(list)  # pid -> dataset indices

        for index, info in enumerate(data_source):
            pid = info[1]
            camid = info[2]
            self.index_pid[index] = pid
            self.pid_cam[pid].append(camid)
            self.pid_index[pid].append(index)

        self.pids = sorted(self.pid_index.keys())
        self.num_identities = len(self.pids)

        if seed is None:
            seed = comm.shared_random_seed()
        self._seed = int(seed)

        self._rank = comm.get_rank()
        self._world_size = comm.get_world_size()

    def __iter__(self):
        # Each rank takes a strided slice of the shared infinite stream so
        # distributed workers consume disjoint indices of the same sequence.
        start = self._rank
        yield from itertools.islice(self._infinite_indices(), start, None, self._world_size)

    def _infinite_indices(self):
        # Use a private RandomState rather than np.random.seed(): for a given
        # seed it reproduces the exact legacy global-RNG sequence, but no
        # longer clobbers np.random state for the rest of the program.
        rng = np.random.RandomState(self._seed)
        while True:
            # Identities still able to fill at least one K-slot this epoch.
            # Shallow copies suffice below: pids/indices are flat scalars,
            # so the original deepcopy was needless work.
            avai_pids = list(self.pids)
            batch_idxs_dict = {}

            batch_indices = []
            while len(avai_pids) >= self.num_pids_per_batch:
                selected_pids = rng.choice(avai_pids, self.num_pids_per_batch, replace=False).tolist()
                for pid in selected_pids:
                    # Register pid in batch_idxs_dict if not
                    if pid not in batch_idxs_dict:
                        idxs = list(self.pid_index[pid])
                        if len(idxs) < self.num_instances:
                            # Oversample with replacement so this pid can
                            # still fill a whole K-slot.
                            idxs = rng.choice(idxs, size=self.num_instances, replace=True).tolist()
                        rng.shuffle(idxs)
                        batch_idxs_dict[pid] = idxs

                    avai_idxs = batch_idxs_dict[pid]
                    for _ in range(self.num_instances):
                        batch_indices.append(avai_idxs.pop(0))

                    # Retire the pid once it cannot fill another full K-slot.
                    if len(avai_idxs) < self.num_instances:
                        avai_pids.remove(pid)

                assert len(batch_indices) == self.batch_size, f"batch indices have wrong " \
                                                              f"length with {len(batch_indices)}!"
                yield from batch_indices
                batch_indices = []