fix: only FP16 evaluation is supported when AMP O2 is enabled
parent b761325faa
commit efde56ffc6

@@ -24,6 +24,8 @@ AMP:
   use_dynamic_loss_scaling: True
   # O2: pure fp16
   level: O2
+  # only FP16 evaluation is supported when AMP O2 is enabled
+  use_fp16_test: True

 # model architecture
 Arch:

@@ -38,6 +38,8 @@ AMP:
   use_dynamic_loss_scaling: True
   # O2: pure fp16
   level: O2
+  # only FP16 evaluation is supported when AMP O2 is enabled
+  use_fp16_test: True

 Optimizer:
   name: Momentum
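
Both config hunks add the same two lines under the AMP O2 section: a comment and the `use_fp16_test: True` flag. Below is a minimal sketch of how an evaluation loop might consume this flag; `prepare_eval_batch` and the plain-dict `config` layout are hypothetical illustrations, not the PaddleClas engine API, though the YAML keys mirror the hunks above.

import paddle

def prepare_eval_batch(batch, config):
    # Sketch only: under AMP O2 the model parameters are kept in pure
    # fp16, so evaluation inputs must be cast to match.
    x = paddle.to_tensor(batch[0])
    amp_cfg = config.get("AMP", {})
    if amp_cfg.get("level") == "O2" and amp_cfg.get("use_fp16_test", False):
        x = x.astype("float16")
    return x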

@@ -53,7 +53,7 @@ def classification_eval(engine, epoch_id=0):
         ]
         time_info["reader_cost"].update(time.time() - tic)
         batch_size = batch[0].shape[0]
-        batch[0] = paddle.to_tensor(batch[0]).astype("float32")
+        batch[0] = paddle.to_tensor(batch[0])
         if not engine.config["Global"].get("use_multilabel", False):
             batch[1] = batch[1].reshape([-1, 1]).astype("int64")
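
The code change drops the unconditional up-cast to float32 when building the eval batch, so `paddle.to_tensor` now preserves whatever dtype the data pipeline produced. This is what lets fp16 inputs reach a pure-fp16 (O2) model, matching the commit title. A small stand-alone illustration of the dtype behavior (array shape is arbitrary):

import numpy as np
import paddle

x = np.random.rand(1, 3, 224, 224).astype("float16")

old = paddle.to_tensor(x).astype("float32")  # before: input forced to fp32
new = paddle.to_tensor(x)                    # after: fp16 dtype preserved

print(old.dtype)  # paddle.float32
print(new.dtype)  # paddle.float16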