v1.3.6: added University-1652

parent 6e498f8b17
commit 93b8c9f3db

@@ -33,7 +33,7 @@ You can find some research projects that are built on top of Torchreid `here <ht

 What's new
 ------------
-- [Feb 2021] We support the new multi-view multi-source geo-localization dataset `University-1652 <https://dl.acm.org/doi/abs/10.1145/3394171.3413896>`_.
+- [Feb 2021] ``v1.3.6`` Added `University-1652 <https://dl.acm.org/doi/abs/10.1145/3394171.3413896>`_, a new dataset for multi-view multi-source geo-localization (credit to `Zhedong Zheng <https://github.com/layumi>`_).
 - [Feb 2021] ``v1.3.5``: Now the `cython code <https://github.com/KaiyangZhou/deep-person-reid/pull/412>`_ works on Windows (credit to `lablabla <https://github.com/lablabla>`_).
 - [Jan 2021] Our recent work, `MixStyle <https://openreview.net/forum?id=6xHJ37MVxxp>`_ (mixing instance-level feature statistics of samples of different domains for improving domain generalization), has been accepted to ICLR'21. The code has been released at https://github.com/KaiyangZhou/mixstyle-release where the person re-ID part is based on Torchreid.
 - [Jan 2021] A new evaluation metric called `mean Inverse Negative Penalty (mINP)` for person re-ID has been introduced in `Deep Learning for Person Re-identification: A Survey and Outlook (TPAMI 2021) <https://arxiv.org/abs/2001.04193>`_. Their code can be accessed at `<https://github.com/mangye16/ReID-Survey>`_.

@@ -232,7 +232,7 @@ Image-reid datasets
 - `PRID <https://pdfs.semanticscholar.org/4c1b/f0592be3e535faf256c95e27982db9b3d3d3.pdf>`_

 Geo-localization datasets
-^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
 - `University-1652 <https://dl.acm.org/doi/abs/10.1145/3394171.3413896>`_

 Video-reid datasets

@@ -2,7 +2,7 @@ from __future__ import print_function, absolute_import

 from torchreid import data, optim, utils, engine, losses, models, metrics

-__version__ = '1.3.5'
+__version__ = '1.3.6'
 __author__ = 'Kaiyang Zhou'
 __homepage__ = 'https://kaiyangzhou.github.io/'
 __description__ = 'Deep learning person re-identification in PyTorch'

@@ -2,7 +2,7 @@ from __future__ import print_function, absolute_import

 from .image import (
     GRID, PRID, CUHK01, CUHK02, CUHK03, MSMT17, VIPeR, SenseReID, Market1501,
-    DukeMTMCreID, iLIDS, University1652
+    DukeMTMCreID, University1652, iLIDS
 )
 from .video import PRID2011, Mars, DukeMTMCVidReID, iLIDSVID
 from .dataset import Dataset, ImageDataset, VideoDataset

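With the dataset exported here, it plugs into a data manager like any other image dataset; a minimal sketch following the `datamanager = torchreid.data.ImageDataManager(` snippet visible in the hunk headers below. The `'university1652'` source key and the size/batch settings are assumptions, not taken from this diff:

import torchreid

# Minimal sketch; 'university1652' is assumed to be the registered dataset key
# (it matches the dataset_dir defined in university1652.py below).
datamanager = torchreid.data.ImageDataManager(
    root='reid-data',  # hypothetical data root; the dataset downloads itself here
    sources='university1652',
    targets='university1652',
    height=256,
    width=256,
    batch_size_train=32,
    batch_size_test=100
)
train_loader = datamanager.train_loader
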
@@ -1,8 +1,8 @@
 from __future__ import division, print_function, absolute_import
+import os
 import re
 import glob
 import os.path as osp
-import os

 import gdown

 from ..dataset import ImageDataset

@@ -48,18 +48,23 @@ datamanager = torchreid.data.ImageDataManager(
     """
     dataset_dir = 'university1652'
     dataset_url = 'https://drive.google.com/uc?id=1iVnP4gjw-iHXa0KerZQ1IfIO0i1jADsR'

     def __init__(self, root='', **kwargs):
         self.root = osp.abspath(osp.expanduser(root))
         self.dataset_dir = osp.join(self.root, self.dataset_dir)
         print(self.dataset_dir)
         if not os.path.isdir(self.dataset_dir):
             os.mkdir(self.dataset_dir)
-            gdown.download(self.dataset_url, self.dataset_dir+'data.zip', quiet=False)
+            gdown.download(
+                self.dataset_url, self.dataset_dir + 'data.zip', quiet=False
+            )
             os.system('unzip %s' % (self.dataset_dir + 'data.zip'))
         self.train_dir = osp.join(
             self.dataset_dir, 'University-Release/train/'
         )
-        self.query_dir = osp.join(self.dataset_dir, 'University-Release/test/query_drone')
+        self.query_dir = osp.join(
+            self.dataset_dir, 'University-Release/test/query_drone'
+        )
         self.gallery_dir = osp.join(
             self.dataset_dir, 'University-Release/test/gallery_satellite'
         )

@@ -77,7 +82,10 @@ datamanager = torchreid.data.ImageDataManager(
         super(University1652, self).__init__(train, query, gallery, **kwargs)

     def process_dir(self, dir_path, relabel=False, train=False):
-        IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp')
+        IMG_EXTENSIONS = (
+            '.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff',
+            '.webp'
+        )
         if train:
             img_paths = glob.glob(osp.join(dir_path, '*/*/*'))
         else:

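One caveat worth noting in passing, since the hunk above only rewraps the call: `self.dataset_dir + 'data.zip'` concatenates without a path separator, and extraction shells out to an external `unzip` binary. A portable sketch of the same download-and-extract step using only the standard library plus gdown (the variable values are hypothetical stand-ins for the attributes set in `University1652.__init__`):

import os.path as osp
import zipfile

import gdown

# Hypothetical stand-ins for the attributes set in University1652.__init__.
dataset_dir = '/path/to/reid-data/university1652'
dataset_url = 'https://drive.google.com/uc?id=1iVnP4gjw-iHXa0KerZQ1IfIO0i1jADsR'

zip_path = osp.join(dataset_dir, 'data.zip')  # join instead of string concatenation
gdown.download(dataset_url, zip_path, quiet=False)
with zipfile.ZipFile(zip_path) as zf:  # no dependency on an external unzip binary
    zf.extractall(dataset_dir)
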
@@ -16,22 +16,21 @@
     with limited time cost.
 """

-from setuptools import setup, Extension
+from setuptools import Extension, setup

 import torch
 import torch.nn as nn
 from torch.autograd import Function
-from torch.utils.cpp_extension import BuildExtension, CUDAExtension
+from torch.utils.cpp_extension import CUDAExtension, BuildExtension


 setup(
     name='build_adjacency_matrix',
     ext_modules=[
-        CUDAExtension('build_adjacency_matrix', [
+        CUDAExtension(
+            'build_adjacency_matrix', [
             'build_adjacency_matrix.cpp',
             'build_adjacency_matrix_kernel.cu',
-        ]),
+            ]
+        ),
     ],
-    cmdclass={
-        'build_ext':BuildExtension
-    })
+    cmdclass={'build_ext': BuildExtension}
+)

@@ -16,22 +16,21 @@
     with limited time cost.
 """

-from setuptools import setup, Extension
+from setuptools import Extension, setup

 import torch
 import torch.nn as nn
 from torch.autograd import Function
-from torch.utils.cpp_extension import BuildExtension, CUDAExtension
+from torch.utils.cpp_extension import CUDAExtension, BuildExtension


 setup(
     name='gnn_propagate',
     ext_modules=[
-        CUDAExtension('gnn_propagate', [
+        CUDAExtension(
+            'gnn_propagate', [
             'gnn_propagate.cpp',
             'gnn_propagate_kernel.cu',
-        ]),
+            ]
+        ),
     ],
-    cmdclass={
-        'build_ext':BuildExtension
-    })
+    cmdclass={'build_ext': BuildExtension}
+)

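Both extension modules above follow the stock `torch.utils.cpp_extension` pattern, so they build with the usual setuptools command; a sketch of standard usage, not shown in this commit:

# From the directory containing each setup.py:
#
#     python setup.py build_ext --inplace
#
# After the build, the compiled modules import like ordinary Python modules,
# which is exactly how gnn_reranking.py below consumes them:
import build_adjacency_matrix
import gnn_propagate
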
@@ -16,16 +16,14 @@
     with limited time cost.
 """

-import torch
 import numpy as np
+import torch

-import build_adjacency_matrix
 import gnn_propagate
+import build_adjacency_matrix
 from utils import *


 def gnn_reranking(X_q, X_g, k1, k2):
     query_num, gallery_num = X_q.shape[0], X_g.shape[0]

@@ -34,7 +32,9 @@ def gnn_reranking(X_q, X_g, k1, k2):
     del X_u, X_q, X_g

     # initial ranking list
-    S, initial_rank = original_score.topk(k=k1, dim=-1, largest=True, sorted=True)
+    S, initial_rank = original_score.topk(
+        k=k1, dim=-1, largest=True, sorted=True
+    )

     # stage 1
     A = build_adjacency_matrix.forward(initial_rank.float())

@@ -44,11 +44,13 @@ def gnn_reranking(X_q, X_g, k1, k2):
     if k2 != 1:
         for i in range(2):
             A = A + A.T
-            A = gnn_propagate.forward(A, initial_rank[:, :k2].contiguous().float(), S[:, :k2].contiguous().float())
+            A = gnn_propagate.forward(
+                A, initial_rank[:, :k2].contiguous().float(),
+                S[:, :k2].contiguous().float()
+            )
             A_norm = torch.norm(A, p=2, dim=1, keepdim=True)
             A = A.div(A_norm.expand_as(A))

     cosine_similarity = torch.mm(A[:query_num, ], A[query_num:, ].t())
     del A, S

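Read together, the two hunks above rewrap the two GPU stages of the re-ranking: a top-k initial ranking followed by GNN propagation over the adjacency matrix. A toy invocation, with shapes purely illustrative and the normalization an assumption (k1=26/k2=7 are the Market-1501 defaults from main.py below):

import torch

from gnn_reranking import gnn_reranking

# Illustrative sizes only: 8 query and 100 gallery features of dimension 512,
# L2-normalized so that the matrix products behave as cosine similarities.
X_q = torch.nn.functional.normalize(torch.randn(8, 512), dim=1).cuda()
X_g = torch.nn.functional.normalize(torch.randn(100, 512), dim=1).cuda()

indices = gnn_reranking(X_q, X_g, k1=26, k2=7)  # re-ranked gallery indices per query
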
@@ -17,31 +17,38 @@
 """

 import os
-import torch
-import argparse
 import numpy as np
+import argparse
+import torch

 from utils import *
 from gnn_reranking import *

 parser = argparse.ArgumentParser(description='Reranking_is_GNN')
-parser.add_argument('--data_path',
+parser.add_argument(
+    '--data_path',
     type=str,
     default='../xm_rerank_gpu_2/features/market_88_test.pkl',
-    help='path to dataset')
-parser.add_argument('--k1',
+    help='path to dataset'
+)
+parser.add_argument(
+    '--k1',
     type=int,
     default=26, # Market-1501
     # default=60, # Veri-776
-    help='parameter k1')
-parser.add_argument('--k2',
+    help='parameter k1'
+)
+parser.add_argument(
+    '--k2',
     type=int,
     default=7, # Market-1501
     # default=10, # Veri-776
-    help='parameter k2')
+    help='parameter k2'
+)

 args = parser.parse_args()


 def main():
     data = load_pickle(args.data_path)

@@ -56,7 +63,10 @@ def main():
     gallery_feature = gallery_feature.cuda()

     indices = gnn_reranking(query_feature, gallery_feature, args.k1, args.k2)
-    evaluate_ranking_list(indices, query_label, query_cam, gallery_label, gallery_cam)
+    evaluate_ranking_list(
+        indices, query_label, query_cam, gallery_label, gallery_cam
+    )


 if __name__ == '__main__':
     main()

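The reformatted argparse block is driven exactly as before; a sketch of a typical run, using the defaults from the diff (the feature pickle itself is produced elsewhere):

# Typical invocation with the Market-1501 defaults defined above:
#
#     python main.py --data_path ../xm_rerank_gpu_2/features/market_88_test.pkl --k1 26 --k2 7
#
# The commented-out defaults suggest --k1 60 --k2 10 for Veri-776.
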
@@ -16,9 +16,9 @@
     with limited time cost.
 """

-import pickle
-import numpy as np
 import os
+import numpy as np
+import pickle
 import torch

@@ -27,10 +27,12 @@ def load_pickle(pickle_path):
         data = pickle.load(f)
     return data

+
 def save_pickle(pickle_path, data):
     with open(pickle_path, 'wb') as f:
         pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)

+
 def pairwise_squared_distance(x):
     '''
     x : (n_samples, n_points, dims)

@@ -39,17 +41,23 @@ def pairwise_squared_distance(x):
     x2s = (x * x).sum(-1, keepdim=True)
     return x2s + x2s.transpose(-1, -2) - 2 * x @ x.transpose(-1, -2)

+
 def pairwise_distance(x, y):
     m, n = x.size(0), y.size(0)

     x = x.view(m, -1)
     y = y.view(n, -1)

-    dist = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(m, n) + torch.pow(y, 2).sum(dim=1, keepdim=True).expand(n,m).t()
+    dist = torch.pow(x, 2).sum(
+        dim=1, keepdim=True
+    ).expand(m, n) + torch.pow(y, 2).sum(
+        dim=1, keepdim=True
+    ).expand(n, m).t()
     dist.addmm_(1, -2, x, y.t())

     return dist

+
 def cosine_similarity(x, y):
     m, n = x.size(0), y.size(0)

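A note on the untouched context line `dist.addmm_(1, -2, x, y.t())`: this positional beta/alpha overload has been deprecated since around PyTorch 1.5 and warns on newer releases. A behavior-preserving sketch of the keyword spelling (same math; sizes here are illustrative):

import torch

m, n, d = 4, 6, 8  # illustrative sizes
x, y = torch.randn(m, d), torch.randn(n, d)
dist = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(m, n) + \
    torch.pow(y, 2).sum(dim=1, keepdim=True).expand(n, m).t()
# Modern keyword form; equivalent to the deprecated dist.addmm_(1, -2, x, y.t()),
# i.e. dist = 1 * dist + (-2) * (x @ y.t()), giving squared Euclidean distances.
dist.addmm_(x, y.t(), beta=1, alpha=-2)
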
@@ -61,12 +69,18 @@ def cosine_similarity(x, y):

     return score

-def evaluate_ranking_list(indices, query_label, query_cam, gallery_label, gallery_cam):
+
+def evaluate_ranking_list(
+    indices, query_label, query_cam, gallery_label, gallery_cam
+):
     CMC = np.zeros((len(gallery_label)), dtype=np.int)
     ap = 0.0

     for i in range(len(query_label)):
-        ap_tmp, CMC_tmp = evaluate(indices[i],query_label[i], query_cam[i], gallery_label, gallery_cam)
+        ap_tmp, CMC_tmp = evaluate(
+            indices[i], query_label[i], query_cam[i], gallery_label,
+            gallery_cam
+        )
         if CMC_tmp[0] == -1:
             continue
         CMC = CMC + CMC_tmp

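Similarly untouched context: `np.zeros(..., dtype=np.int)` relies on the `np.int` alias, which NumPy deprecated in 1.20 and removed in 1.24. A drop-in sketch for newer NumPy (the labels are hypothetical):

import numpy as np

gallery_label = np.array([0, 0, 1, 2, 2])  # hypothetical gallery labels
# np.int was only an alias for the builtin int; use int or an explicit width.
CMC = np.zeros(len(gallery_label), dtype=np.int64)
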
@@ -74,7 +88,11 @@ def evaluate_ranking_list(indices, query_label, query_cam, gallery_label, galler

     CMC = CMC.astype(np.float32)
     CMC = CMC / len(query_label) #average CMC
-    print('Rank@1:%f Rank@5:%f Rank@10:%f mAP:%f'%(CMC[0],CMC[4],CMC[9],ap/len(query_label)))
+    print(
+        'Rank@1:%f Rank@5:%f Rank@10:%f mAP:%f' %
+        (CMC[0], CMC[4], CMC[9], ap / len(query_label))
+    )


 def evaluate(index, ql, qc, gl, gc):
     query_index = np.argwhere(gl == ql)