update print

parent dac3ca1486
commit 1702e018e4

@@ -162,11 +162,10 @@ def main():
     start_time = time.time()
     best_rank1 = -np.inf
     best_epoch = 0
     print("==> Start training")

     for epoch in range(start_epoch, args.max_epoch):
-        print("==> Epoch {}/{}".format(epoch+1, args.max_epoch))
-        train(model, criterion_xent, criterion_cent, optimizer_model, optimizer_cent, trainloader, use_gpu)
+        train(epoch, model, criterion_xent, criterion_cent, optimizer_model, optimizer_cent, trainloader, use_gpu)

         if args.stepsize > 0: scheduler.step()

@@ -194,7 +193,7 @@ def main():
     elapsed = str(datetime.timedelta(seconds=elapsed))
     print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))

-def train(model, criterion_xent, criterion_cent, optimizer_model, optimizer_cent, trainloader, use_gpu):
+def train(epoch, model, criterion_xent, criterion_cent, optimizer_model, optimizer_cent, trainloader, use_gpu):
     model.train()
     losses = AverageMeter()

@@ -216,7 +215,9 @@ def train(model, criterion_xent, criterion_cent, optimizer_model, optimizer_cent
         losses.update(loss.item(), pids.size(0))

         if (batch_idx+1) % args.print_freq == 0:
-            print("Batch {}/{}\t Loss {:.6f} ({:.6f})".format(batch_idx+1, len(trainloader), losses.val, losses.avg))
+            print("Epoch {}/{}\t Batch {}/{}\t Loss {:.6f} ({:.6f})".format(
+                epoch+1, args.max_epoch, batch_idx+1, len(trainloader), losses.val, losses.avg
+            ))

 def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20]):
     model.eval()

@@ -157,11 +157,10 @@ def main():
     start_time = time.time()
     best_rank1 = -np.inf
     best_epoch = 0
     print("==> Start training")

     for epoch in range(start_epoch, args.max_epoch):
-        print("==> Epoch {}/{}".format(epoch+1, args.max_epoch))
-        train(model, criterion, optimizer, trainloader, use_gpu)
+        train(epoch, model, criterion, optimizer, trainloader, use_gpu)

         if args.stepsize > 0: scheduler.step()

@@ -189,7 +188,7 @@ def main():
     elapsed = str(datetime.timedelta(seconds=elapsed))
     print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))

-def train(model, criterion, optimizer, trainloader, use_gpu):
+def train(epoch, model, criterion, optimizer, trainloader, use_gpu):
     model.train()
     losses = AverageMeter()

@@ -207,7 +206,9 @@ def train(model, criterion, optimizer, trainloader, use_gpu):
         losses.update(loss.item(), pids.size(0))

         if (batch_idx+1) % args.print_freq == 0:
-            print("Batch {}/{}\t Loss {:.6f} ({:.6f})".format(batch_idx+1, len(trainloader), losses.val, losses.avg))
+            print("Epoch {}/{}\t Batch {}/{}\t Loss {:.6f} ({:.6f})".format(
+                epoch+1, args.max_epoch, batch_idx+1, len(trainloader), losses.val, losses.avg
+            ))

 def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20]):
     model.eval()

@@ -166,11 +166,10 @@ def main():
     start_time = time.time()
     best_rank1 = -np.inf
     best_epoch = 0
     print("==> Start training")

     for epoch in range(start_epoch, args.max_epoch):
-        print("==> Epoch {}/{}".format(epoch+1, args.max_epoch))
-        train(model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu)
+        train(epoch, model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu)

         if args.stepsize > 0: scheduler.step()

@@ -198,7 +197,7 @@ def main():
     elapsed = str(datetime.timedelta(seconds=elapsed))
     print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))

-def train(model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu):
+def train(epoch, model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu):
     model.train()
     losses = AverageMeter()

@@ -229,7 +228,9 @@ def train(model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu
         losses.update(loss.item(), pids.size(0))

         if (batch_idx+1) % args.print_freq == 0:
-            print("Batch {}/{}\t Loss {:.6f} ({:.6f})".format(batch_idx+1, len(trainloader), losses.val, losses.avg))
+            print("Epoch {}/{}\t Batch {}/{}\t Loss {:.6f} ({:.6f})".format(
+                epoch+1, args.max_epoch, batch_idx+1, len(trainloader), losses.val, losses.avg
+            ))

 def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20]):
     model.eval()

@@ -154,11 +154,10 @@ def main():
     start_time = time.time()
     best_rank1 = -np.inf
     best_epoch = 0
     print("==> Start training")

     for epoch in range(start_epoch, args.max_epoch):
-        print("==> Epoch {}/{}".format(epoch+1, args.max_epoch))
-        train(model, criterion, optimizer, trainloader, use_gpu)
+        train(epoch, model, criterion, optimizer, trainloader, use_gpu)

         if args.stepsize > 0: scheduler.step()

@@ -186,7 +185,7 @@ def main():
     elapsed = str(datetime.timedelta(seconds=elapsed))
     print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))

-def train(model, criterion, optimizer, trainloader, use_gpu):
+def train(epoch, model, criterion, optimizer, trainloader, use_gpu):
     model.train()
     losses = AverageMeter()

@@ -201,7 +200,9 @@ def train(model, criterion, optimizer, trainloader, use_gpu):
         losses.update(loss.item(), pids.size(0))

         if (batch_idx+1) % args.print_freq == 0:
-            print("Batch {}/{}\t Loss {:.6f} ({:.6f})".format(batch_idx+1, len(trainloader), losses.val, losses.avg))
+            print("Epoch {}/{}\t Batch {}/{}\t Loss {:.6f} ({:.6f})".format(
+                epoch+1, args.max_epoch, batch_idx+1, len(trainloader), losses.val, losses.avg
+            ))

 def test(model, queryloader, galleryloader, pool, use_gpu, ranks=[1, 5, 10, 20]):
     model.eval()

@@ -140,7 +140,7 @@ def main():

     criterion_xent = CrossEntropyLabelSmooth(num_classes=dataset.num_train_pids, use_gpu=use_gpu)
     criterion_htri = TripletLoss(margin=args.margin)

     optimizer = init_optim(args.optim, model.parameters(), args.lr, args.weight_decay)
     if args.stepsize > 0:
         scheduler = lr_scheduler.StepLR(optimizer, step_size=args.stepsize, gamma=args.gamma)
@@ -163,11 +163,10 @@ def main():
     start_time = time.time()
     best_rank1 = -np.inf
     best_epoch = 0
     print("==> Start training")

     for epoch in range(start_epoch, args.max_epoch):
-        print("==> Epoch {}/{}".format(epoch+1, args.max_epoch))
-        train(model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu)
+        train(epoch, model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu)

         if args.stepsize > 0: scheduler.step()

@@ -195,7 +194,7 @@ def main():
     elapsed = str(datetime.timedelta(seconds=elapsed))
     print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))

-def train(model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu):
+def train(epoch, model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu):
     model.train()
     losses = AverageMeter()

@@ -217,7 +216,9 @@ def train(model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu
         losses.update(loss.item(), pids.size(0))

         if (batch_idx+1) % args.print_freq == 0:
-            print("Batch {}/{}\t Loss {:.6f} ({:.6f})".format(batch_idx+1, len(trainloader), losses.val, losses.avg))
+            print("Epoch {}/{}\t Batch {}/{}\t Loss {:.6f} ({:.6f})".format(
+                epoch+1, args.max_epoch, batch_idx+1, len(trainloader), losses.val, losses.avg
+            ))

 def test(model, queryloader, galleryloader, pool, use_gpu, ranks=[1, 5, 10, 20]):
     model.eval()
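
The change is the same in every hunk: the per-epoch banner printed in main() goes away, train() receives the epoch index, and the per-batch log line now carries both epoch and batch progress. Below is a minimal, self-contained sketch of that logging pattern. The AverageMeter here is an assumption (the usual running-average helper these scripts import), and the batch size, loss values, and argument values are made up for illustration; it is not the repository's actual train() function.

# Sketch of the logging pattern adopted by this commit.
# Assumption: AverageMeter is the common running-average helper.
class AverageMeter(object):
    """Tracks the latest value (val) and the running average (avg)."""
    def __init__(self):
        self.val = 0.0
        self.avg = 0.0
        self.sum = 0.0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

def train(epoch, max_epoch, batch_losses, print_freq=10):
    # Stand-in for the real train(): only the bookkeeping and the print
    # statement touched by this commit are reproduced.
    losses = AverageMeter()
    num_batches = len(batch_losses)
    for batch_idx, loss in enumerate(batch_losses):
        losses.update(loss, n=32)  # hypothetical batch size
        if (batch_idx+1) % print_freq == 0:
            print("Epoch {}/{}\t Batch {}/{}\t Loss {:.6f} ({:.6f})".format(
                epoch+1, max_epoch, batch_idx+1, num_batches, losses.val, losses.avg
            ))

if __name__ == '__main__':
    # One fake epoch of 50 batches with a slowly decreasing loss.
    train(epoch=0, max_epoch=60, batch_losses=[1.0 - 0.01*i for i in range(50)])

Folding the epoch into the per-batch line, instead of printing a separate "==> Epoch x/y" banner in main(), makes each progress line self-describing, which helps when logs from many epochs are grepped or interleaved.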