diff --git a/deploy/utils/predictor.py b/deploy/utils/predictor.py
index b05e28c2f..04f9eab7a 100644
--- a/deploy/utils/predictor.py
+++ b/deploy/utils/predictor.py
@@ -74,6 +74,22 @@ class Predictor(object):
             config.enable_xpu()
         elif args.get("use_mlu", False):
             config.enable_custom_device('mlu')
+        elif args.get("use_gcu", False):
+            assert paddle.device.is_compiled_with_custom_device("gcu"), (
+                "Config use_gcu cannot be set as True while your paddle "
+                "is not compiled with gcu! \nPlease try: \n"
+                "\t1. Install paddle-custom-gcu to run model on GCU. \n"
+                "\t2. Set use_gcu as False in config file to run model on CPU."
+            )
+            import paddle_custom_device.gcu.passes as gcu_passes
+            gcu_passes.setUp()
+            config.enable_custom_device("gcu")
+            config.enable_new_ir(True)
+            config.enable_new_executor(True)
+            kPirGcuPasses = gcu_passes.inference_passes(
+                use_pir=True, name="PaddleClas"
+            )
+            config.enable_custom_passes(kPirGcuPasses, True)
         else:
             config.disable_gpu()
             if args.enable_mkldnn:
diff --git a/ppcls/engine/engine.py b/ppcls/engine/engine.py
index 7a5adc2cd..1bc54249b 100644
--- a/ppcls/engine/engine.py
+++ b/ppcls/engine/engine.py
@@ -105,7 +105,7 @@ class Engine(object):
         # set device
         assert self.config["Global"]["device"] in [
-            "cpu", "gpu", "xpu", "npu", "mlu", "ascend", "intel_gpu", "mps"
+            "cpu", "gpu", "xpu", "npu", "mlu", "ascend", "intel_gpu", "mps", "gcu"
         ]
         self.device = paddle.set_device(self.config["Global"]["device"])
         logger.info('train with paddle {} and device {}'.format(
diff --git a/ppcls/static/train.py b/ppcls/static/train.py
index 4f85fe16f..a7a5c4af5 100755
--- a/ppcls/static/train.py
+++ b/ppcls/static/train.py
@@ -92,7 +92,7 @@ def main(args):
     # assign the device
     assert global_config["device"] in [
-        "cpu", "gpu", "xpu", "npu", "mlu", "ascend", "intel_gpu", "mps"
+        "cpu", "gpu", "xpu", "npu", "mlu", "ascend", "intel_gpu", "mps", "gcu"
     ]
     device = paddle.set_device(global_config["device"])
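
A minimal sketch of how the new GCU branch could be exercised directly with the Paddle Inference API, assuming Paddle was built with the paddle-custom-gcu plugin; it reuses only the calls introduced in the diff above, and the model/params file names are placeholders, not paths from this repository:

```python
import paddle
from paddle import inference

# Mirrors the assert added in predictor.py: the GCU path is only valid
# when Paddle is compiled with the "gcu" custom device.
assert paddle.device.is_compiled_with_custom_device("gcu")

import paddle_custom_device.gcu.passes as gcu_passes
gcu_passes.setUp()

# Build an inference Config the same way the patched Predictor does
# when args.use_gcu is True (placeholder model/params file names).
config = inference.Config("inference.pdmodel", "inference.pdiparams")
config.enable_custom_device("gcu")
config.enable_new_ir(True)
config.enable_new_executor(True)
config.enable_custom_passes(
    gcu_passes.inference_passes(use_pir=True, name="PaddleClas"), True)

predictor = inference.create_predictor(config)
```

For training, the new "gcu" entry in the device whitelist means the device can be selected through the existing `Global.device` config key (for example `Global.device: gcu` in the YAML, or `-o Global.device=gcu` on the command line), which then flows into `paddle.set_device(...)` in both `ppcls/engine/engine.py` and `ppcls/static/train.py`.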