fix: only FP16 evaluation is supported when AMP O2 is enabled

pull/1824/head
gaotingquan 2022-04-10 13:38:16 +00:00
parent b761325faa
commit efde56ffc6
3 changed files with 5 additions and 1 deletion


@@ -24,6 +24,8 @@ AMP:
   use_dynamic_loss_scaling: True
   # O2: pure fp16
   level: O2
+  # only FP16 evaluation is supported when AMP O2 is enabled
+  use_fp16_test: True
 # model architecture
 Arch:
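For context: at O2 level, Paddle casts the model parameters themselves to pure FP16, so FP32 inputs at eval time produce a dtype mismatch, and use_fp16_test: True opts evaluation into FP16 inputs. A minimal sketch of what this config level maps to in the Paddle API (the model and input are illustrative and assume a GPU build; they are not part of this commit):

import paddle

# Any paddle.nn.Layer behaves the same way; resnet50 is just an example.
model = paddle.vision.models.resnet50()

# decorate() with level='O2' rewrites the parameters to pure FP16,
# matching "level: O2" in the configs above.
model = paddle.amp.decorate(models=model, level='O2')
model.eval()

# The weights are now FP16, so evaluation inputs must be FP16 as well;
# this is the constraint that use_fp16_test: True encodes.
x = paddle.randn([1, 3, 224, 224]).astype('float16')
with paddle.no_grad():
    with paddle.amp.auto_cast(level='O2'):
        logits = model(x)
print(logits.dtype)  # paddle.float16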


@@ -38,6 +38,8 @@ AMP:
   use_dynamic_loss_scaling: True
   # O2: pure fp16
   level: O2
+  # only FP16 evaluation is supported when AMP O2 is enabled
+  use_fp16_test: True
 Optimizer:
   name: Momentum


@@ -53,7 +53,7 @@ def classification_eval(engine, epoch_id=0):
             ]
         time_info["reader_cost"].update(time.time() - tic)
         batch_size = batch[0].shape[0]
-        batch[0] = paddle.to_tensor(batch[0]).astype("float32")
+        batch[0] = paddle.to_tensor(batch[0])
         if not engine.config["Global"].get("use_multilabel", False):
             batch[1] = batch[1].reshape([-1, 1]).astype("int64")
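The removed astype("float32") forced evaluation inputs back to FP32, which fails once the model has been decorated to O2/FP16. A plausible sketch of the engine-side dtype choice driven by the new flag (cast_eval_batch and the config access pattern are assumptions for illustration, not code from this commit):

import paddle

def cast_eval_batch(batch, config):
    # Hypothetical helper illustrating the intent of this commit;
    # PaddleClas does not ship this exact function.
    amp_cfg = config.get("AMP", {}) or {}
    use_fp16 = amp_cfg.get("level") == "O2" and amp_cfg.get(
        "use_fp16_test", False)
    # Under O2 the decorated model holds FP16 weights, so the eval input
    # must be FP16 too; otherwise keep the usual FP32 path.
    dtype = "float16" if use_fp16 else "float32"
    batch[0] = paddle.to_tensor(batch[0]).astype(dtype)
    return batch

# Example with the config shape used in the YAML diffs above:
cfg = {"AMP": {"level": "O2", "use_fp16_test": True}}
batch = [paddle.randn([4, 3, 224, 224]), paddle.zeros([4], dtype="int64")]
batch = cast_eval_batch(batch, cfg)
print(batch[0].dtype)  # paddle.float16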