from collections import OrderedDict

import torch.distributed as dist
from torch._utils import (_flatten_dense_tensors, _take_tensors,
                          _unflatten_dense_tensors)


def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
    """Allreduce tensors in flattened buckets and average them in place."""
    if bucket_size_mb > 0:
        # Split the tensors into buckets no larger than bucket_size_mb each.
        bucket_size_bytes = bucket_size_mb * 1024 * 1024
        buckets = _take_tensors(tensors, bucket_size_bytes)
    else:
        # No size limit: put all tensors of the same type into one bucket.
        buckets = OrderedDict()
        for tensor in tensors:
            tp = tensor.type()
            if tp not in buckets:
                buckets[tp] = []
            buckets[tp].append(tensor)
        buckets = buckets.values()

    for bucket in buckets:
        # Flatten the bucket into one contiguous tensor so a single
        # all_reduce call covers all of its tensors.
        flat_tensors = _flatten_dense_tensors(bucket)
        dist.all_reduce(flat_tensors)
        flat_tensors.div_(world_size)
        # Copy the averaged values back into the original tensors.
        for tensor, synced in zip(
                bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
            tensor.copy_(synced)


def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
    """Allreduce gradients.

    Args:
        params (list[torch.nn.Parameter]): List of parameters of the model.
        coalesce (bool, optional): Whether to allreduce the gradients as a
            whole (flattened into buckets) instead of one tensor at a time.
            Defaults to True.
        bucket_size_mb (int, optional): Size of each bucket in MB when
            coalescing; a non-positive value groups gradients by type
            instead of by size. Defaults to -1.
    """
    grads = [
        param.grad.data for param in params
        if param.requires_grad and param.grad is not None
    ]
    world_size = dist.get_world_size()
    if coalesce:
        _allreduce_coalesced(grads, world_size, bucket_size_mb)
    else:
        # Average each gradient tensor across processes individually.
        for tensor in grads:
            dist.all_reduce(tensor.div_(world_size))
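

# ----------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original utility). It shows how
# allreduce_grads might be wired into a manual data-parallel training step,
# assuming torch.distributed has already been initialized (e.g. via
# dist.init_process_group) and every process holds an identical replica of the
# model. The names model, optimizer, loss_fn, inputs, and targets are
# hypothetical placeholders, and bucket_size_mb=16 is an arbitrary choice.
def _example_train_step(model, optimizer, loss_fn, inputs, targets):
    optimizer.zero_grad()
    loss = loss_fn(model(inputs), targets)
    loss.backward()
    # Average gradients across all processes before the optimizer step.
    allreduce_grads(list(model.parameters()), coalesce=True, bucket_size_mb=16)
    optimizer.step()
    return loss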