mindspore.nn.DistributedGradReducer

class mindspore.nn.DistributedGradReducer(parameters, mean=True, degree=None, fusion_type=1)

A distributed gradient reducer.

Constructs a gradient reducer Cell, which applies communication (AllReduce) and averaging operations on the gradient values of a single process.

Parameters
  • parameters (list) – the parameters to be updated.

  • mean (bool) – When mean is True, the reduced gradients are averaged, i.e. divided by the mean coefficient (degree). Default: True.

  • degree (int) – The mean coefficient. Usually, it is equal to the number of devices. Default: None.

  • fusion_type (int) – The type of AllReduce fusion. Default: 1.
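
For illustration, a minimal construction sketch is shown below. It is only a sketch: it assumes the script is launched as one process of a multi-device data-parallel job (so that init() can set up communication), and the network and optimizer are placeholders:

>>> import mindspore.nn as nn
>>> from mindspore.communication import init, get_group_size
>>> init()                                # requires a launched distributed job
>>> degree = get_group_size()             # number of devices joining the AllReduce
>>> net = nn.Dense(16, 10)
>>> optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
>>> # mean=True averages the AllReduce result by dividing it by `degree`
>>> grad_reducer = nn.DistributedGradReducer(optimizer.parameters, mean=True, degree=degree)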

Raises

ValueError – If degree is not an int or is less than 0.

Supported Platforms:

Ascend, GPU

Examples

>>> # This example should be run with multiple processes.
>>> # Please refer to the tutorial > Distributed Training on mindspore.cn.
>>> import numpy as np
>>> from mindspore.communication import init
>>> from mindspore.ops import composite as C
>>> from mindspore.ops import operations as P
>>> from mindspore.ops import functional as F
>>> from mindspore import context
>>> from mindspore.context import ParallelMode
>>> from mindspore import Parameter, Tensor
>>> from mindspore import nn
>>> from mindspore.nn.wrap.cell_wrapper import WithLossCell
>>> from mindspore.parallel._utils import (_get_device_num, _get_gradients_mean)
>>>
>>> context.set_context(mode=context.GRAPH_MODE)
>>> init()
>>> context.reset_auto_parallel_context()
>>> context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL)
>>>
>>> class TrainingWrapper(nn.Cell):
...     def __init__(self, network, optimizer, sens=1.0):
...         super(TrainingWrapper, self).__init__(auto_prefix=False)
...         self.network = network
...         self.network.add_flags(defer_inline=True)
...         self.weights = optimizer.parameters
...         self.optimizer = optimizer
...         self.grad = C.GradOperation(get_by_list=True, sens_param=True)
...         self.sens = sens
...         self.reducer_flag = False
...         self.grad_reducer = None
...         self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
...         if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]:
...             self.reducer_flag = True
...         if self.reducer_flag:
...             mean = _get_gradients_mean()
...             degree = _get_device_num()
...             self.grad_reducer = nn.DistributedGradReducer(optimizer.parameters, mean, degree)
...
...     def construct(self, *args):
...         weights = self.weights
...         loss = self.network(*args)
...         sens = P.Fill()(P.DType()(loss), P.Shape()(loss), self.sens)
...         grads = self.grad(self.network, weights)(*args, sens)
...         if self.reducer_flag:
...             # apply grad reducer on grads
...             grads = self.grad_reducer(grads)
...         return F.depend(loss, self.optimizer(grads))
>>>
>>> class Net(nn.Cell):
...     def __init__(self, in_features, out_features):
...         super(Net, self).__init__()
...         self.weight = Parameter(Tensor(np.ones([in_features, out_features]).astype(np.float32)),
...                                 name='weight')
...         self.matmul = P.MatMul()
...
...     def construct(self, x):
...         output = self.matmul(x, self.weight)
...         return output
>>>
>>> size, in_features, out_features = 16, 16, 10
>>> network = Net(in_features, out_features)
>>> loss = nn.MSELoss()
>>> net_with_loss = WithLossCell(network, loss)
>>> optimizer = nn.Momentum(net_with_loss.trainable_params(), learning_rate=0.1, momentum=0.9)
>>> train_cell = TrainingWrapper(net_with_loss, optimizer)
>>> inputs = Tensor(np.ones([size, in_features]).astype(np.float32))
>>> label = Tensor(np.zeros([size, out_features]).astype(np.float32))
>>> grads = train_cell(inputs, label)
>>> print(grads)
256.0

construct(grads)

In some circumstances, the gradients may contain a mixture of float16 and float32 data, and the result of AllReduce on such mixed-precision data is unreliable. To solve this problem, the gradients must be cast to float32 before AllReduce and cast back to their original precision afterwards.
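
Conceptually, this corresponds to the following per-gradient pattern (a simplified sketch with an illustrative helper name, not the actual implementation, which additionally fuses AllReduce calls according to fusion_type; it assumes init() has already been called):

>>> from mindspore import ops, dtype as mstype
>>> from mindspore.ops import functional as F
>>> all_reduce = ops.AllReduce()          # requires communication to be initialized
>>> def _reduce_one(mean, degree, grad):
...     origin_dtype = F.dtype(grad)                        # remember the original precision
...     reduced = all_reduce(F.cast(grad, mstype.float32))  # AllReduce in float32
...     if mean:
...         reduced = reduced / degree                       # average across devices
...     return F.cast(reduced, origin_dtype)                 # cast back to the original precision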

Parameters

grads (Union[Tensor, tuple[Tensor]]) – The gradient tensor or tuple of gradient tensors before the operation.

Returns

new_grads (Union[Tensor, tuple[Tensor]]), the gradient tensor or tuple of gradient tensors after the operation.
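
A minimal usage sketch of construct is shown below; it reuses the grad_reducer and distributed setup from the example above, and the shapes and dtypes are only illustrative:

>>> import numpy as np
>>> import mindspore as ms
>>> from mindspore import Tensor
>>> # Gradients with mixed precision: the reducer casts each one to float32 for
>>> # AllReduce and casts the result back to its original dtype.
>>> grads = (Tensor(np.ones([16, 10]), ms.float16), Tensor(np.ones([10]), ms.float32))
>>> new_grads = grad_reducer(grads)       # calling the Cell invokes construct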