log_train.txt => train.log

pull/133/head
KaiyangZhou 2019-03-12 16:56:36 +00:00
parent 850da2bbed
commit ee31aa73c8
4 changed files with 8 additions and 8 deletions

@@ -41,7 +41,7 @@ def main():
 if not args.use_avai_gpus: os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
 use_gpu = torch.cuda.is_available()
 if args.use_cpu: use_gpu = False
-log_name = 'log_test.txt' if args.evaluate else 'log_train.txt'
+log_name = 'test.log' if args.evaluate else 'train.log'
 sys.stdout = Logger(osp.join(args.save_dir, log_name))
 print('==========\nArgs:{}\n=========='.format(args))
@@ -168,7 +168,7 @@ def train(epoch, model, criterion, optimizer, trainloader, use_gpu, fixbase=Fals
 if (batch_idx + 1) % args.print_freq == 0:
 print('Epoch: [{0}][{1}/{2}]\t'
 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
-'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
+'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
 'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
 'Acc {acc.val:.2f} ({acc.avg:.2f})\t'.format(
 epoch + 1, batch_idx + 1, len(trainloader),

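The rename only swaps the file names handed to Logger; the redirection itself is unchanged. As a reminder of what that line does, here is a minimal sketch of a tee-style logger, written under the assumption that the repository's own Logger class (imported in these scripts) behaves the same way: once an instance is assigned to sys.stdout, every print() goes both to the console and to save_dir/train.log (or test.log when --evaluate is set).

# Minimal sketch of a tee-style logger. Assumption: the Logger class used in
# these scripts behaves roughly like this; the repository's actual
# implementation may differ in detail.
import os
import os.path as osp
import sys

class Logger(object):
    """Duplicate everything written to stdout into a log file."""

    def __init__(self, fpath):
        self.console = sys.stdout
        log_dir = osp.dirname(fpath)
        if log_dir:
            os.makedirs(log_dir, exist_ok=True)
        self.file = open(fpath, 'w')

    def write(self, msg):
        self.console.write(msg)
        self.file.write(msg)

    def flush(self):
        self.console.flush()
        self.file.flush()

# Usage mirroring the diff above ('log/resnet50' is an illustrative save_dir):
sys.stdout = Logger(osp.join('log/resnet50', 'train.log'))
print('==========\nArgs: ...\n==========')  # shows on screen and lands in train.log
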
@@ -42,7 +42,7 @@ def main():
 if not args.use_avai_gpus: os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
 use_gpu = torch.cuda.is_available()
 if args.use_cpu: use_gpu = False
-log_name = 'log_test.txt' if args.evaluate else 'log_train.txt'
+log_name = 'test.log' if args.evaluate else 'train.log'
 sys.stdout = Logger(osp.join(args.save_dir, log_name))
 print('==========\nArgs:{}\n=========='.format(args))
@@ -179,7 +179,7 @@ def train(epoch, model, criterion_xent, criterion_htri, optimizer, trainloader,
 if (batch_idx + 1) % args.print_freq == 0:
 print('Epoch: [{0}][{1}/{2}]\t'
 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
-'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
+'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
 'Xent {xent.val:.4f} ({xent.avg:.4f})\t'
 'Htri {htri.val:.4f} ({htri.avg:.4f})\t'
 'Acc {acc.val:.2f} ({acc.avg:.2f})\t'.format(

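The second hunk in each file is purely cosmetic: the data-loading time is now printed with three decimal places, matching the Time field on the same line. The snippet below illustrates the effect of the format-spec change; AverageMeter here is a hypothetical stand-in for the meter objects the training loop uses, which expose .val and .avg attributes.

# Effect of changing the format spec from :.4f to :.3f. AverageMeter is a
# stand-in for the meters used by the training scripts (they expose .val/.avg).
class AverageMeter(object):
    def __init__(self, val, avg):
        self.val = val
        self.avg = avg

data_time = AverageMeter(val=0.012345, avg=0.023456)

# Old format: four decimals
print('Data {data_time.val:.4f} ({data_time.avg:.4f})'.format(data_time=data_time))
# prints: Data 0.0123 (0.0235)

# New format: three decimals, consistent with 'Time {batch_time.val:.3f}'
print('Data {data_time.val:.3f} ({data_time.avg:.3f})'.format(data_time=data_time))
# prints: Data 0.012 (0.023)
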
@@ -42,7 +42,7 @@ def main():
 if not args.use_avai_gpus: os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
 use_gpu = torch.cuda.is_available()
 if args.use_cpu: use_gpu = False
-log_name = 'log_test.txt' if args.evaluate else 'log_train.txt'
+log_name = 'test.log' if args.evaluate else 'train.log'
 sys.stdout = Logger(osp.join(args.save_dir, log_name))
 print('==========\nArgs:{}\n=========='.format(args))
@@ -169,7 +169,7 @@ def train(epoch, model, criterion, optimizer, trainloader, use_gpu, fixbase=Fals
 if (batch_idx + 1) % args.print_freq == 0:
 print('Epoch: [{0}][{1}/{2}]\t'
 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
-'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
+'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
 'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
 'Acc {acc.val:.2f} ({acc.avg:.2f})\t'.format(
 epoch + 1, batch_idx + 1, len(trainloader),

@@ -43,7 +43,7 @@ def main():
 if not args.use_avai_gpus: os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
 use_gpu = torch.cuda.is_available()
 if args.use_cpu: use_gpu = False
-log_name = 'log_test.txt' if args.evaluate else 'log_train.txt'
+log_name = 'test.log' if args.evaluate else 'train.log'
 sys.stdout = Logger(osp.join(args.save_dir, log_name))
 print('==========\nArgs:{}\n=========='.format(args))
@@ -180,7 +180,7 @@ def train(epoch, model, criterion_xent, criterion_htri, optimizer, trainloader,
 if (batch_idx + 1) % args.print_freq == 0:
 print('Epoch: [{0}][{1}/{2}]\t'
 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
-'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
+'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
 'Xent {xent.val:.4f} ({xent.avg:.4f})\t'
 'Htri {htri.val:.4f} ({htri.avg:.4f})\t'
 'Acc {acc.val:.2f} ({acc.avg:.2f})\t'.format(