[Fix] fix torch allocator resource releasing (#1708)

* delete root logger and add condition before calling caching_allocator_delete

* fix lint error

* use torch._C._cuda_cudaCachingAllocator_raw_delete
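
For context, torch.cuda.caching_allocator_delete is a thin Python wrapper around torch._C._cuda_cudaCachingAllocator_raw_delete. Binding the raw function once in __init__ means deallocate no longer resolves attributes on the torch.cuda module at call time, which can fail when the allocator is destroyed during interpreter shutdown. A minimal sketch of the resulting pattern follows; the allocate body is an assumption based on PyTorch's public caching-allocator API and is not part of this diff:

import tensorrt as trt
import torch


class TorchAllocator(trt.IGpuAllocator):
    """TensorRT GPU allocator backed by PyTorch's CUDA caching allocator."""

    def __init__(self, device_id: int = 0) -> None:
        super().__init__()
        self.device_id = device_id
        self.mems = set()
        # Bind the raw C deleter up front. Resolving
        # `torch.cuda.caching_allocator_delete` lazily inside `deallocate`
        # can raise during interpreter shutdown, when the `torch.cuda`
        # module may already be partially torn down.
        self.caching_delete = torch._C._cuda_cudaCachingAllocator_raw_delete

    def allocate(self, size: int, alignment: int, flags: int) -> int:
        """Hand out `size` bytes from the caching allocator (assumed body)."""
        ptr = torch.cuda.caching_allocator_alloc(size, self.device_id)
        self.mems.add(ptr)
        return ptr

    def deallocate(self, memory: int) -> bool:
        """Return a pointer to the caching allocator; False if unknown."""
        if memory not in self.mems:
            return False
        self.caching_delete(memory)
        self.mems.discard(memory)
        return True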
AllentDan 2023-02-06 11:35:44 +08:00 committed by GitHub
parent b85f34141b
commit 12b3d18c7a


@@ -13,6 +13,7 @@ class TorchAllocator(trt.IGpuAllocator):
         self.device_id = device_id
         self.mems = set()
+        self.caching_delete = torch._C._cuda_cudaCachingAllocator_raw_delete
 
     def __del__(self):
         """destructor."""
@@ -53,11 +54,9 @@ class TorchAllocator(trt.IGpuAllocator):
         Returns:
             bool: deallocate success.
         """
-        logger = get_root_logger()
-        logger.debug(f'deallocate {memory} with TorchAllocator.')
         if memory not in self.mems:
             return False
-        torch.cuda.caching_allocator_delete(memory)
+        self.caching_delete(memory)
         self.mems.discard(memory)
         return True
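
A hypothetical usage sketch, assuming the TensorRT 8.x Python bindings where runtime.gpu_allocator wraps IRuntime::setGpuAllocator:

import tensorrt as trt

# Route engine device memory through PyTorch's caching allocator so that
# TensorRT allocations and PyTorch tensors draw from one memory pool.
trt_logger = trt.Logger(trt.Logger.WARNING)
runtime = trt.Runtime(trt_logger)
runtime.gpu_allocator = TorchAllocator(device_id=0)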