# encoding: utf-8
"""
@author: liaoxingyu
@contact: sherlockliao01@gmail.com
"""

import itertools
import math

import torch
import torch.utils.data as data


class AspectRatioGroupedDataset(data.IterableDataset):
    """
    Batch data that have similar aspect ratio together.
    In this implementation, images whose aspect ratio < (or >) 1 will
    be batched together.
    It assumes the underlying dataset produces dicts with "width" and "height" keys.
    It will then produce a list of original dicts with length = batch_size,
    all with similar aspect ratios.
    """

    def __init__(self, dataset, batch_size):
        """
        Args:
            dataset: an iterable. Each element must be a dict with keys
                "width" and "height", which will be used to batch data.
            batch_size (int):
        """
        self.dataset = dataset
        self.batch_size = batch_size
        self._buckets = [[] for _ in range(2)]
        # Hard-coded two aspect ratio groups: w > h and w < h.
        # Can add support for more aspect ratio groups, but doesn't seem useful.

    def __iter__(self):
        for d in self.dataset:
            w, h = d["width"], d["height"]
            bucket_id = 0 if w > h else 1
            bucket = self._buckets[bucket_id]
            bucket.append(d)
            if len(bucket) == self.batch_size:
                yield bucket[:]
                del bucket[:]
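

def _demo_aspect_ratio_grouping():
    # A minimal usage sketch with hypothetical width/height dicts (values made up
    # for illustration): every batch emitted by AspectRatioGroupedDataset should
    # come from a single aspect-ratio bucket, i.e. all landscape or all portrait.
    dicts = [
        {"width": 1280, "height": 720},   # landscape -> bucket 0
        {"width": 720, "height": 1280},   # portrait  -> bucket 1
        {"width": 1920, "height": 1080},  # landscape -> bucket 0
        {"width": 600, "height": 800},    # portrait  -> bucket 1
    ] * 4
    grouped = AspectRatioGroupedDataset(dicts, batch_size=4)
    for batch in grouped:
        assert all(
            (d["width"] > d["height"]) == (batch[0]["width"] > batch[0]["height"])
            for d in batch
        )
        print([f'{d["width"]}x{d["height"]}' for d in batch])
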
class MyIterableDataset(data.IterableDataset):
    def __init__(self, start, end):
        super().__init__()
        assert end > start, "this example code only works with end > start"
        self.start = start
        self.end = end

    def __iter__(self):
        worker_info = torch.utils.data.get_worker_info()
        if worker_info is None:  # single-process data loading, return the full iterator
            iter_start = self.start
            iter_end = self.end
        else:  # in a worker process
            # split workload: each worker gets a contiguous chunk of [start, end)
            per_worker = int(math.ceil((self.end - self.start) / float(worker_info.num_workers)))
            worker_id = worker_info.id
            iter_start = self.start + worker_id * per_worker
            iter_end = min(iter_start + per_worker, self.end)
        return iter(range(iter_start, iter_end))
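

def _demo_my_iterable_dataset():
    # A minimal usage sketch: MyIterableDataset(3, 7) yields 3, 4, 5, 6. With
    # num_workers=2 each DataLoader worker receives a copy of the dataset and
    # __iter__ above hands it a distinct chunk of the range, so the union of
    # the workers' outputs still covers [3, 4, 5, 6] exactly once.
    # (On spawn-based platforms, call this from under `if __name__ == '__main__'`.)
    ds = MyIterableDataset(start=3, end=7)
    print(list(torch.utils.data.DataLoader(ds, num_workers=0)))  # single process
    print(list(torch.utils.data.DataLoader(ds, num_workers=2)))  # split across two workers
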
class TrainingSampler(data.Sampler):
    """
    In training, we only care about the "infinite stream" of training data.
    So this sampler produces an infinite stream of indices and
    all workers cooperate to correctly shuffle the indices and sample different indices.
    The samplers in each worker effectively produce `indices[worker_id::num_workers]`
    where `indices` is an infinite stream of indices consisting of
    `shuffle(range(size)) + shuffle(range(size)) + ...` (if shuffle is True)
    or `range(size) + range(size) + ...` (if shuffle is False)
    """

    def __init__(self, size: int, shuffle: bool = True, seed: int = 0):
        """
        Args:
            size (int): the total number of data of the underlying dataset to sample from
            shuffle (bool): whether to shuffle the indices or not
            seed (int): the initial seed of the shuffle. Must be the same
                across all workers. If None, will use a random seed shared
                among workers (require synchronization among all workers).
        """
        self._size = size
        assert size > 0
        self._shuffle = shuffle
        self._seed = int(seed)

    def __iter__(self):
        # Simplified single-process version: emit the full infinite stream.
        yield from self._infinite_indices()
        # In a distributed run each worker would take its own slice instead, e.g.
        # yield from itertools.islice(self._infinite_indices(), worker_id, None, num_workers)

    def _infinite_indices(self):
        g = torch.Generator()
        g.manual_seed(self._seed)
        while True:
            if self._shuffle:
                # .tolist() so indices come out as plain Python ints, not 0-dim tensors
                yield from torch.randperm(self._size, generator=g).tolist()
            else:
                yield from torch.arange(self._size).tolist()
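

def _demo_training_sampler_sharding():
    # A minimal sketch of the sharding described in the TrainingSampler docstring.
    # `num_workers` is a hypothetical worker count: each worker takes
    # indices[worker_id::num_workers] from the same seeded infinite stream, so
    # together the workers cover each epoch's permutation exactly once.
    num_workers = 2
    sampler = TrainingSampler(size=10, shuffle=True, seed=0)
    for worker_id in range(num_workers):
        # every call to _infinite_indices() restarts from the shared seed
        shard = itertools.islice(sampler._infinite_indices(), worker_id, None, num_workers)
        print(f"worker {worker_id}:", list(itertools.islice(shard, 5)))
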
if __name__ == '__main__':
    my_loader = TrainingSampler(10)
    my_iter = iter(my_loader)
    # The sampler is infinite; only print the first few indices as a sanity check.
    for idx in itertools.islice(my_iter, 20):
        print(idx)