Fix torch multi-GPU --device error (#1701)
* Fix torch GPU error
* Update torch_utils.py: single-line device assignment

Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>

pull/1705/head
parent
69ea70cd3b
commit
035ac82ed0
|
@ -75,13 +75,14 @@ def time_synchronized():
|
|||
return time.time()
|
||||
|
||||
|
||||
def profile(x, ops, n=100, device=torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')):
|
||||
def profile(x, ops, n=100, device=None):
|
||||
# profile a pytorch module or list of modules. Example usage:
|
||||
# x = torch.randn(16, 3, 640, 640) # input
|
||||
# m1 = lambda x: x * torch.sigmoid(x)
|
||||
# m2 = nn.SiLU()
|
||||
# profile(x, [m1, m2], n=100) # profile speed over 100 iterations
|
||||
|
||||
|
||||
device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
|
||||
x = x.to(device)
|
||||
x.requires_grad = True
|
||||
print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '')
|
||||
|
|
Loading…
Reference in New Issue