# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import sys
import time
import platform

import paddle

__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.abspath(os.path.join(__dir__, '../../../')))

from ppcls.utils.misc import AverageMeter
from ppcls.utils import logger


def classification_eval(evaler, epoch_id=0):
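    """Run one evaluation pass for an image classification model.

    Args:
        evaler: engine-like object exposing ``config``, ``use_dali``,
            ``eval_dataloader``, ``model``, ``eval_loss_func`` and
            ``eval_metric_func``.
        epoch_id (int): epoch index, used only in the log messages.

    Returns:
        float: running average of the first metric produced by
        ``eval_metric_func``, or -1 when no metric function is configured.
    """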
    output_info = dict()
    time_info = {
        "batch_cost": AverageMeter(
            "batch_cost", ".5f", postfix=" s,"),
        "reader_cost": AverageMeter(
            "reader_cost", ".5f", postfix=" s,"),
    }
    print_batch_step = evaler.config["Global"]["print_batch_step"]

    metric_key = None
    tic = time.time()
    # a DALI pipeline is already an iterator; otherwise call the dataloader
    # to get a fresh iterator over the eval set
    eval_dataloader = (evaler.eval_dataloader
                       if evaler.use_dali else evaler.eval_dataloader())
    # iterate one batch fewer on Windows
    max_iter = (len(evaler.eval_dataloader) - 1
                if platform.system() == "Windows" else
                len(evaler.eval_dataloader))
    for iter_id, batch in enumerate(eval_dataloader):
        if iter_id >= max_iter:
            break
        # reset the timing meters after the first few warm-up iterations
        if iter_id == 5:
            for key in time_info:
                time_info[key].reset()
        # DALI yields a dict per batch; convert it to [data, label] tensors
        if evaler.use_dali:
            batch = [
                paddle.to_tensor(batch[0]['data']),
                paddle.to_tensor(batch[0]['label'])
            ]
time_info["reader_cost"].update(time.time() - tic)
|
||
|
batch_size = batch[0].shape[0]
|
||
|
batch[0] = paddle.to_tensor(batch[0]).astype("float32")
|
||
|
batch[1] = batch[1].reshape([-1, 1]).astype("int64")
|
||
|
# image input
|
||
|
out = evaler.model(batch[0])
|
||
|
        # calc loss
        if evaler.eval_loss_func is not None:
            loss_dict = evaler.eval_loss_func(out, batch[-1])
            for key in loss_dict:
                if key not in output_info:
                    output_info[key] = AverageMeter(key, '7.5f')
                output_info[key].update(loss_dict[key].numpy()[0], batch_size)
        # calc metric
        if evaler.eval_metric_func is not None:
            metric_dict = evaler.eval_metric_func(out, batch[-1])
            # average each metric over all ranks when running distributed eval
            if paddle.distributed.get_world_size() > 1:
                for key in metric_dict:
                    paddle.distributed.all_reduce(
                        metric_dict[key], op=paddle.distributed.ReduceOp.SUM)
                    metric_dict[key] = (metric_dict[key] /
                                        paddle.distributed.get_world_size())
            for key in metric_dict:
                # the first metric key is the one whose average is returned
                if metric_key is None:
                    metric_key = key
                if key not in output_info:
                    output_info[key] = AverageMeter(key, '7.5f')
                output_info[key].update(metric_dict[key].numpy()[0],
                                        batch_size)

        time_info["batch_cost"].update(time.time() - tic)

        if iter_id % print_batch_step == 0:
            time_msg = "s, ".join([
                "{}: {:.5f}".format(key, time_info[key].avg)
                for key in time_info
            ])
            ips_msg = "ips: {:.5f} images/sec".format(
                batch_size / time_info["batch_cost"].avg)
            metric_msg = ", ".join([
                "{}: {:.5f}".format(key, output_info[key].val)
                for key in output_info
            ])
            logger.info("[Eval][Epoch {}][Iter: {}/{}]{}, {}, {}".format(
                epoch_id, iter_id,
                len(evaler.eval_dataloader), metric_msg, time_msg, ips_msg))

        tic = time.time()
    if evaler.use_dali:
        evaler.eval_dataloader.reset()
    metric_msg = ", ".join([
        "{}: {:.5f}".format(key, output_info[key].avg) for key in output_info
    ])
    logger.info("[Eval][Epoch {}][Avg]{}".format(epoch_id, metric_msg))

    # no metric function configured: there is nothing to rank a "best"
    # model by, so return -1 instead of a metric value
    if evaler.eval_metric_func is None:
        return -1
    # return the running average of the first metric in the dict
    return output_info[metric_key].avg
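

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module).
# `_ToyLoader` and the SimpleNamespace-based `toy_evaler` below are
# hypothetical stand-ins that only mimic the interface classification_eval
# relies on; in PaddleClas the engine object is built from a YAML config and
# also takes care of logging setup.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import types

    import paddle.nn as nn

    # the PaddleClas engine normally initializes logging; do it defensively
    # here if the helper is available in this version of ppcls
    if hasattr(logger, "init_logger"):
        logger.init_logger()

    class _ToyLoader(object):
        """Stand-in dataloader: sized, iterable and callable, matching the
        way the eval dataloader is used above."""

        def __init__(self, batches):
            self._batches = batches

        def __len__(self):
            return len(self._batches)

        def __iter__(self):
            return iter(self._batches)

        def __call__(self):
            return iter(self._batches)

    batches = [[paddle.randn([4, 3, 32, 32]),
                paddle.randint(0, 10, [4, 1])] for _ in range(2)]

    toy_evaler = types.SimpleNamespace(
        config={"Global": {"print_batch_step": 1}},
        use_dali=False,
        eval_dataloader=_ToyLoader(batches),
        model=nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10)),
        eval_loss_func=None,
        eval_metric_func=None,
    )
    # with eval_metric_func=None only timing info is logged and -1 is
    # returned, since there is no metric to pick a best checkpoint by
    print(classification_eval(toy_evaler, epoch_id=0))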