From 12b3d18c7aa41d5573930d6fe7b4a935d2bd2f53 Mon Sep 17 00:00:00 2001 From: AllentDan <41138331+AllentDan@users.noreply.github.com> Date: Mon, 6 Feb 2023 11:35:44 +0800 Subject: [PATCH] [Fix] fix torch allocator resource releasing (#1708) * delete root logger and add condition before calling caching_allocator_delete * fix lint error * use torch._C._cuda_cudaCachingAllocator_raw_delete --- mmdeploy/backend/tensorrt/torch_allocator.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/mmdeploy/backend/tensorrt/torch_allocator.py b/mmdeploy/backend/tensorrt/torch_allocator.py index 7b8b0ee58..47e7d5a59 100644 --- a/mmdeploy/backend/tensorrt/torch_allocator.py +++ b/mmdeploy/backend/tensorrt/torch_allocator.py @@ -13,6 +13,7 @@ class TorchAllocator(trt.IGpuAllocator): self.device_id = device_id self.mems = set() + self.caching_delete = torch._C._cuda_cudaCachingAllocator_raw_delete def __del__(self): """destructor.""" @@ -53,11 +54,9 @@ class TorchAllocator(trt.IGpuAllocator): Returns: bool: deallocate success. """ - logger = get_root_logger() - logger.debug(f'deallocate {memory} with TorchAllocator.') if memory not in self.mems: return False - torch.cuda.caching_allocator_delete(memory) + self.caching_delete(memory) self.mems.discard(memory) return True