Mirror of https://github.com/open-mmlab/mmdeploy.git, synced 2025-01-14 08:09:43 +08:00
[Fix] fix torch allocator resource releasing (#1708)
* delete root logger and add condition before calling caching_allocator_delete
* fix lint error
* use torch._C._cuda_cudaCachingAllocator_raw_delete
This commit is contained in:
parent b85f34141b
commit 12b3d18c7a
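The key change is binding the raw CUDA caching-allocator deleter to an instance attribute at construction time, presumably because attribute lookups through module globals (and logging calls such as get_root_logger) can fail in __del__ when the object is finalized during interpreter shutdown, after modules have been partially torn down. A minimal sketch of that pattern outside of TensorRT; the Holder class and _ptrs set here are hypothetical, for illustration only:

import torch

class Holder:  # hypothetical example class, not part of mmdeploy
    def __init__(self):
        self._ptrs = set()
        # Bind the deleter once, up front: by the time __del__ runs at
        # interpreter shutdown, a lookup like
        # `torch.cuda.caching_allocator_delete` may no longer resolve,
        # while this bound reference stays usable.
        self._delete = torch._C._cuda_cudaCachingAllocator_raw_delete

    def __del__(self):
        # Release any pointers still tracked, using only bound state.
        for ptr in list(self._ptrs):
            self._delete(ptr)
        self._ptrs.clear()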
@@ -13,6 +13,7 @@ class TorchAllocator(trt.IGpuAllocator):
         self.device_id = device_id
         self.mems = set()
+        self.caching_delete = torch._C._cuda_cudaCachingAllocator_raw_delete
 
     def __del__(self):
         """destructor."""
@@ -53,11 +54,9 @@ class TorchAllocator(trt.IGpuAllocator):
         Returns:
             bool: deallocate success.
         """
-        logger = get_root_logger()
-        logger.debug(f'deallocate {memory} with TorchAllocator.')
         if memory not in self.mems:
             return False
 
-        torch.cuda.caching_allocator_delete(memory)
+        self.caching_delete(memory)
         self.mems.discard(memory)
         return True
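For context, a sketch of how an IGpuAllocator like this one is typically attached, assuming the tensorrt Python API's gpu_allocator attribute on Runtime; the import path and engine filename are hypothetical, and whether mmdeploy wires it to a Runtime or a Builder is not shown in this commit:

import tensorrt as trt

# hypothetical import path, for illustration only
from mmdeploy.backend.tensorrt.torch_allocator import TorchAllocator

TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
runtime = trt.Runtime(TRT_LOGGER)
# Route TensorRT's device allocations through PyTorch's caching
# allocator, so both frameworks draw from a single memory pool.
runtime.gpu_allocator = TorchAllocator(device_id=0)

with open('end2end.engine', 'rb') as f:  # hypothetical engine file
    engine = runtime.deserialize_cuda_engine(f.read())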