finish code

pull/25/head
liaoxingyu 2018-06-13 16:53:32 +08:00
parent 9f1fdbdda8
commit 9f0c2bcd91
4 changed files with 59 additions and 20 deletions

@@ -1,16 +1,40 @@
 # reid_baseline
 reid baseline model for exploring the influence of softmax and triplet hard losses.
 ## Configuration
 ## Architecture
-### softmax
-### triplet
-### softmax + triplet
+### Classification
+resnet lr: 0.1
+classifier lr: 0.01
+<img src='https://ws3.sinaimg.cn/large/006tKfTcly1fs76ysvu3zj30kg0d60t5.jpg' height='200'>
+<img src='https://ws2.sinaimg.cn/large/006tKfTcly1fs76zbtfxcj30js0d674m.jpg' height='200'>
+### Triplet Hard
+lr: 2e-4
+<img src='https://ws2.sinaimg.cn/large/006tNc79ly1fs3sxc54xjj30ka0d6dgd.jpg' height='200'>
+<img src='https://ws2.sinaimg.cn/large/006tNc79ly1fs3tpat6emj30k00d2t93.jpg' height='200'>
+### Classification + Triplet Hard
+lr: 2e-4
+exponential decay at epoch 150
+<img src='https://ws2.sinaimg.cn/large/006tKfTcly1fs79doog34j30ja0cudg5.jpg' height='200'>
+<img src='https://ws2.sinaimg.cn/large/006tNc79ly1fs3tpat6emj30k00d2t93.jpg' height='200'>
+## Results
+| loss | rank-1 | mAP |
+| --- | --- | --- |
+| triplet hard | 89.9% | 76.8% |
+| softmax | 87% | 65% |
+| triplet + softmax | 89.7% | 76.2% |
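The loss implementations themselves are not part of this diff, so below is a minimal, self-contained sketch of the batch-hard triplet loss the README refers to (in the style of Hermans et al., "In Defense of the Triplet Loss"); the function name and the margin value are illustrative assumptions, not the repository's code.

```python
import torch
import torch.nn.functional as F

def batch_hard_triplet_loss(feats, labels, margin=0.3):
    # pairwise Euclidean distances between all embeddings in the batch, (B, B)
    dist = torch.cdist(feats, feats, p=2)
    # mask of pairs that share an identity label, (B, B)
    same_id = labels.unsqueeze(0) == labels.unsqueeze(1)
    # hardest positive: the farthest sample with the same identity
    hardest_pos, _ = (dist * same_id.float()).max(dim=1)
    # hardest negative: the closest sample with a different identity
    hardest_neg, _ = dist.masked_fill(same_id, float('inf')).min(dim=1)
    # hinge on the margin between hardest positive and hardest negative
    return F.relu(hardest_pos - hardest_neg + margin).mean()
```

In practice this loss is paired with a PK sampler that builds each batch from P identities with K images each, so every anchor has both positives and negatives available.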

@@ -21,12 +21,12 @@ class DefaultConfig(object):
     width = 192
     # optimization options
-    optim = 'SGD'
-    max_epoch = 100
+    optim = 'Adam'
+    max_epoch = 60
     train_batch = 128
     test_batch = 128
     lr = 0.1
-    step_size = 60
+    step_size = 40
     gamma = 0.1
     weight_decay = 5e-4
     momentum = 0.9

@@ -92,7 +92,8 @@ def train_classification(**kwargs):
     # get optimizer
     optimizer = torch.optim.SGD(
-        model.optim_policy(), lr=args.lr, weight_decay=args.weight_decay, momentum=args.momentum
+        model.optim_policy(), lr=args.lr, weight_decay=args.weight_decay,
+        momentum=args.momentum, nesterov=True
     )
 
     def adjust_lr(optimizer, ep, decay_ep, gamma):
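The body of adjust_lr lies outside this hunk; a plausible sketch, assuming a step decay driven by the step_size/gamma config fields above and the 'lr_multi' factor that optim_policy() attaches to the backbone's parameter group (the extra base_lr default exists only to keep the sketch self-contained; in the script it would be args.lr):

```python
def adjust_lr(optimizer, ep, decay_ep, gamma, base_lr=0.1):
    # decay the base learning rate by gamma once every decay_ep epochs
    lr = base_lr * (gamma ** (ep // decay_ep))
    for group in optimizer.param_groups:
        # param groups may carry an 'lr_multi' key (0.1 for the pretrained
        # ResNet base in optim_policy below); PyTorch preserves unknown keys
        group['lr'] = lr * group.get('lr_multi', 1.0)
```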

@@ -15,10 +15,10 @@ from torch import nn
 from .resnet import ResNet
 
-def weights_init(m):
+def weights_init_kaiming(m):
     classname = m.__class__.__name__
     if classname.find('Linear') != -1:
-        nn.init.normal_(m.weight, std=0.001)
+        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
         nn.init.constant_(m.bias, 0.0)
     elif classname.find('Conv') != -1:
         nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
@@ -30,6 +30,13 @@ def weights_init(m):
         nn.init.constant_(m.bias, 0.0)
 
+def weights_init_classifier(m):
+    classname = m.__class__.__name__
+    if classname.find('Linear') != -1:
+        nn.init.normal_(m.weight, std=0.001)
+        nn.init.constant_(m.bias, 0.0)
+
 class ResNetBuilder(nn.Module):
     in_planes = 2048
@@ -37,20 +44,25 @@ class ResNetBuilder(nn.Module):
         super().__init__()
         self.base = ResNet(last_stride)
         self.base.load_param(model_path)
-        self.bottleneck = nn.Sequential(
-            nn.BatchNorm2d(self.in_planes),
-            nn.ReLU(True)
-        )
         self.num_classes = num_classes
         if num_classes is not None:
-            self.classifier = nn.Linear(self.in_planes, num_classes)
+            self.bottleneck = nn.Sequential(
+                nn.Linear(self.in_planes, 512),
+                nn.BatchNorm1d(512),
+                nn.LeakyReLU(0.1),
+                nn.Dropout(p=0.5)
+            )
+            self.bottleneck.apply(weights_init_kaiming)
+            self.classifier = nn.Linear(512, self.num_classes)
+            self.classifier.apply(weights_init_classifier)
 
     def forward(self, x):
         feat = self.base(x)
-        feat = self.bottleneck(feat)
         global_feat = F.avg_pool2d(feat, feat.shape[2:])  # (b, 2048, 1, 1)
         global_feat = global_feat.view(global_feat.shape[0], -1)
         if self.training and self.num_classes is not None:
+            global_feat = self.bottleneck(global_feat)
             cls_score = self.classifier(global_feat)
             return cls_score, global_feat
         else:
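Taken together, the forward changes give the model a two-headed training contract. A hypothetical usage sketch follows, assuming ResNetBuilder is importable from this module; the constructor keywords, the 751-identity Market-1501 setting, and the checkpoint path are illustrative assumptions:

```python
import torch

# constructor argument names are assumed from the hunk above
model = ResNetBuilder(num_classes=751, last_stride=2, model_path='resnet50.pth')
images = torch.randn(32, 3, 256, 192)  # width = 192 matches the config above

model.train()
cls_score, embed = model(images)  # (32, 751) logits and the (32, 512) bottleneck embedding
model.eval()
feat = model(images)              # the raw pooled (32, 2048) backbone feature for retrieval
```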
@@ -58,10 +70,12 @@ class ResNetBuilder(nn.Module):
     def optim_policy(self):
         base_param_group = self.base.parameters()
-        clf_param_group = self.classifier.parameters()
+        other_param_group = list()
+        other_param_group.extend(list(self.bottleneck.parameters()))
+        other_param_group.extend(list(self.classifier.parameters()))
         return [
             {'params': base_param_group, 'lr_multi': 0.1},
-            {'params': clf_param_group}
+            {'params': other_param_group}
         ]
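Continuing the hypothetical sketch above, the two groups returned by optim_policy could be consumed like this; PyTorch keeps the non-standard 'lr_multi' key on the param group, so the training script has to apply it itself, as in the adjust_lr sketch earlier:

```python
optimizer = torch.optim.SGD(
    model.optim_policy(), lr=0.1, weight_decay=5e-4,
    momentum=0.9, nesterov=True
)
for group in optimizer.param_groups:
    # the pretrained backbone trains at a tenth of the head's learning rate
    group['lr'] *= group.get('lr_multi', 1.0)
```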