# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Operators definition generated by gen_ops.py, includes functions."""
from .gen_ops_prim import *
from .pyboost_inner_prim import *
from mindspore.ops.operations.manually_defined.ops_def import *
from mindspore.ops._primitive_cache import _get_cache_prim
def abs(input):
r"""
Returns absolute value of a tensor element-wise.
.. math::
out_i = |input_i|
Args:
input (Tensor): The input tensor. The shape of tensor is
:math:`(N,*)` where :math:`*` means any number of additional dimensions.
Returns:
Tensor, has the same shape as the `input`.
Raises:
TypeError: If `input` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([-1.0, 1.0, 0.0]), mindspore.float32)
>>> output = ops.abs(input)
>>> print(output)
[1. 1. 0.]
"""
return abs_op(input)
def acos_ext(input):
r"""
Computes arccosine of input tensors element-wise.
.. math::
out_i = \cos^{-1}(input_i)
Args:
input (Tensor): The shape of tensor is
:math:`(N,*)`, where :math:`*` means any number of additional dimensions.
Returns:
Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
Raises:
TypeError: If `input` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
>>> output = ops.acos_ext(input)
>>> print(output)
[0.7377037 1.5307857 1.2661037 0.9764114]
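>>> # Illustrative check of the dtype note above: integer input is promoted to float32.
>>> print(ops.acos_ext(Tensor(np.array([1, 0]), mindspore.int64)).dtype)
Float32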
"""
return acos_ext_op(input)
def acos(input):
r"""
Computes arccosine of input tensors element-wise.
.. math::
out_i = \cos^{-1}(input_i)
Args:
input (Tensor): The shape of tensor is
:math:`(N,*)` where :math:`*` means any number of additional dimensions.
Returns:
Tensor, has the same shape and dtype as `input`.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If dtype of `input` is not float16, float32 or float64, complex64, complex128.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
>>> output = ops.acos(input)
>>> print(output)
[0.737726 1.5307857 1.2661036 0.9764105]
"""
return acos_op(input)
def acosh_ext(input):
r"""
Computes inverse hyperbolic cosine of the inputs element-wise.
.. math::
out_i = \cosh^{-1}(input_i)
.. note::
Given an input tensor input, the function computes inverse hyperbolic cosine of every element.
Input range is [1, inf].
Args:
input (Tensor): The input tensor of inverse hyperbolic cosine function.
Returns:
Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
Raises:
TypeError: If `input` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([1.0, 1.5, 3.0, 100.0]), mindspore.float32)
>>> output = ops.acosh_ext(input)
>>> print(output)
[0. 0.9624236 1.7627472 5.298292 ]
"""
return acosh_ext_op(input)
def acosh(input):
r"""
Computes inverse hyperbolic cosine of the inputs element-wise.
.. math::
out_i = \cosh^{-1}(input_i)
.. note::
Given an input tensor input, the function computes inverse hyperbolic cosine of every element.
Input range is [1, inf].
Args:
input (Tensor): The input tensor of inverse hyperbolic cosine function.
Returns:
Tensor, has the same shape and type as `input`.
Raises:
TypeError: If `input` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([1.0, 1.5, 3.0, 100.0]), mindspore.float32)
>>> output = ops.acosh(input)
>>> print(output)
[0. 0.9624237 1.7627472 5.298292 ]
"""
return acosh_op(input)
def adaptive_avg_pool1d(input, output_size):
r"""
Performs 1D adaptive average pooling on a multi-plane input signal.
That is, for any input size, the size of the specified output is L.
The number of output features is equal to the number of input features.
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
input (Tensor): The input of adaptive_avg_pool1d, which is a 2D or 3D tensor,
with float16 or float32 data type.
output_size (int): The target output feature size. `output_size` is an integer.
Returns:
Tensor, with the same type as the `input`.
Shape of the output is `input_shape[:len(input_shape) - 1] + [output_size]`.
Raises:
ValueError: If `output_size` is not an integer.
TypeError: If `input` is not a Tensor.
TypeError: If dtype of `input` is not float16, float32.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, mint
>>> input = Tensor([[2,3],[3,4]],dtype=mindspore.float16)
>>> output = mint.nn.functional.adaptive_avg_pool1d(input, 3)
>>> print(output)
[[2. 2.5 3. ]
[3. 3.5 4. ]]
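>>> # Illustrative: the shape rule above with a 3D input, output shape = input_shape[:-1] + [output_size].
>>> import numpy as np
>>> x3 = Tensor(np.ones((2, 3, 8)), mindspore.float32)
>>> print(mint.nn.functional.adaptive_avg_pool1d(x3, 4).shape)
(2, 3, 4)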
"""
return adaptive_avg_pool1d_op(input, output_size)
def adaptive_avg_pool2d_grad_ext(grad_output, x):
r"""
"""
return adaptive_avg_pool2d_grad_ext_op(grad_output, x)
def add_ext(input, other, alpha=1):
r"""
Adds scaled other value to input Tensor.
.. math::
out_{i} = input_{i} + alpha \times other_{i}
Note:
- When the two inputs have different shapes,
they must be able to broadcast to a common shape.
- The two inputs and alpha comply with the implicit type conversion rules to make the data types
consistent.
Args:
input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
a bool or a tensor whose data type is
`number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
`bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
other (Union[Tensor, number.Number, bool]): The second input, is a number.Number or
a bool or a tensor whose data type is
`number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
`bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
alpha (number.Number): A scaling factor applied to `other`. Default: ``1`` .
Returns:
Tensor with a shape that is the same as the broadcasted shape of the input `input` and `other`,
and the data type is the one with higher precision or higher digits among the two inputs and alpha.
Raises:
TypeError: If the type of `input`, `other`, or `alpha` is not one of the following: Tensor, number.Number, bool.
TypeError: If `alpha` is of type float but `input` and `other` are not of type float.
TypeError: If `alpha` is of type bool but `input` and `other` are not of type bool.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor
>>> from mindspore import ops
>>> x = Tensor(1, mindspore.int32)
>>> y = Tensor(np.array([4, 5, 6]).astype(np.float32))
>>> alpha = 0.5
>>> output = ops.auto_generate.add_ext(x, y, alpha)
>>> print(output)
[3. 3.5 4.]
>>> # the data type of x is int32, the data type of y is float32,
>>> # alpha is a float, and the output is the data format of higher precision float32.
>>> print(output.dtype)
Float32
"""
return add_ext_op(input, other, alpha)
def add_layer_norm_grad(dy, x1, x2, rstd, mean, gamma, dsumOptional):
r"""
"""
return add_layer_norm_grad_op(dy, x1, x2, rstd, mean, gamma, dsumOptional)
def add_layernorm_v2(x1, x2, gamma, beta, epsilon=1e-5, additionalOut=False):
r"""
"""
return add_layernorm_v2_op(x1, x2, gamma, beta, epsilon, additionalOut)
def add(input, other):
r"""
Adds other value to input Tensor.
.. math::
out_{i} = input_{i} + other_{i}
Note:
- When the two inputs have different shapes,
they must be able to broadcast to a common shape.
- The two inputs can not be bool type at the same time,
[True, Tensor(True, bool\_), Tensor(np.array([True]), bool\_)] are all considered bool type.
- The two inputs comply with the implicit type conversion rules to make the data types
consistent.
- When the input is a Tensor, the dimension should be greater than or equal to 1.
Args:
input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
a bool or a tensor whose data type is
`number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
`bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
other (Union[Tensor, number.Number, bool]): The second input, is a number.Number or
a bool or a tensor whose data type is
`number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
`bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
Returns:
Tensor with a shape that is the same as the broadcasted shape of the input `input` and `other`,
and the data type is the one with higher precision or higher digits among the two inputs.
Raises:
TypeError: If `input` and `other` are not one of the following: Tensor, number.Number, bool.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> # case 1: x and y are both Tensor.
>>> x = Tensor(np.array([1, 2, 3]).astype(np.float32))
>>> y = Tensor(np.array([4, 5, 6]).astype(np.float32))
>>> output = ops.add(x, y)
>>> print(output)
[5. 7. 9.]
>>> # case 2: x is a scalar and y is a Tensor
>>> x = Tensor(1, mindspore.int32)
>>> y = Tensor(np.array([4, 5, 6]).astype(np.float32))
>>> output = ops.add(x, y)
>>> print(output)
[5. 6. 7.]
>>> # the data type of x is int32, the data type of y is float32,
>>> # and the output is the data format of higher precision float32.
>>> print(output.dtype)
Float32
"""
return add_op(input, other)
def add_rms_norm(x1, x2, gamma, epsilon=1e-6):
r"""
The AddRmsNorm is a fusion operator that fusing RmsNorm and its preceding Add operator, reducing the time for
moving data in and out.
It computes the following expression:
.. math::
\begin{array}{ll} \\
x_i = x1_i + x2_i \\
y_i=RmsNorm(x_i)=\frac{x_i}{\sqrt{\frac{1}{n}\sum_{i=1}^{n}{ x_i^2}+\varepsilon}}\gamma_i
\end{array}
.. warning::
This is an experimental API that is subject to change or deletion. This API is only supported in Atlas A2
training series for now.
Args:
x1 (Tensor): Input data of AddRmsNorm. Support data type: float16, float32, bfloat16.
x2 (Tensor): Input data of AddRmsNorm. Support data type: float16, float32, bfloat16.
gamma (Tensor): Learnable parameter :math:`\gamma` . Support data type: float16, float32, bfloat16.
epsilon (float, optional): A float number ranged in (0, 1] to prevent division by 0. Default value is `1e-6`.
Returns:
- Tensor, denotes the normalized result, has the same type and shape as `x1`.
- Tensor, with the float data type, denotes the reciprocal of the input standard deviation, used by gradient
calculation.
- Tensor, the sum of `x1` and `x2`.
Raises:
TypeError: If data type of `x1` or `x2` is not one of the following: float16, float32, bfloat16.
TypeError: If data type of `gamma` is not one of the following: float16, float32, bfloat16.
ValueError: If `epsilon` is not a float between 0 and 1.
ValueError: If the rank of `gamma` is greater than the rank of `x1` or `x2`.
RuntimeError: If the shapes of `x1` and `x2` are not same.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x1 = Tensor(np.array([[0.5, 1.0, 1.5], [0.5, 1.0, 1.5]]), mindspore.float32)
>>> x2 = Tensor(np.array([[0.5, 1.0, 1.5], [0.5, 1.0, 1.5]]), mindspore.float32)
>>> gamma = Tensor(np.ones([3]), mindspore.float32)
>>> y, rstd, _ = ops.add_rms_norm(x1, x2, gamma)
>>> print(y)
[[0.46290997 0.92581993 1.3887299]
[0.46290997 0.92581993 1.3887299]]
>>> print(rstd)
[[0.46290997]
[0.46290997]]
"""
return add_rms_norm_op(x1, x2, gamma, epsilon)
def addn(x):
r"""
Computes addition of all input tensors element-wise.
All input tensors must have the same shape.
Args:
x (Union(tuple[Tensor], list[Tensor])):
A tuple or list composed of Tensor.
Returns:
Tensor, has the same shape and dtype as each Tensor of x.
Raises:
TypeError: If x is neither tuple nor list.
ValueError: If there are Tensors with different shapes in x.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([1, 2, 3]), mindspore.float32)
>>> y = Tensor(np.array([4, 5, 6]), mindspore.float32)
>>> output = ops.addn([x, y, x, y])
>>> print(output)
[10. 14. 18.]
"""
return addn_op(x)
def all_gather_matmul(input, x2, group, world_size, bias=None, gather_index=0, gather_output=True, comm_turn=0, trans_input=False, trans_x2=False):
r"""
In the TP segmentation scenario, allgather and matmul are fused, and communication and computational pipelines
are parallelized within the fusion operator.
.. math::
output = allgather(input)@x2
gather\_out = allgather(input)
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
input (Tensor): The left matrix of matmul, the dtype supports float16 and bfloat16, the shape supports 2
dimensions, and the data format supports ND.
x2 (Tensor): The right matrix of matmul, the dtype needs to be consistent with ``input`` , the shape
supports 2 dimensions, and the data format supports ND.
group (str): Communication group name, can be created by ``create_group`` method, or use the default group
``mindspore.communication.GlobalComm.WORLD_COMM_GROUP``.
world_size (int): The total number of ranks in the communication group, should be consistent with the number
of devices actually running, supporting ``2`` , ``4`` , and ``8`` .
Keyword Args:
bias (Tensor, optional): Currently only ``None`` is supported. Default: ``None`` .
gather_index (int, optional): Indicates the allgather operation object, ``0`` means gather ``input`` ,
``1`` means gather ``x2`` . Currently only ``0`` is supported. Default: ``0`` .
gather_output (bool, optional): Indicates whether gather output is required. Default: ``True`` .
comm_turn (int, optional): Indicates the granularity of communication between ranks. Currently only ``0``
is supported. Default: ``0`` .
trans_input (bool, optional): Indicates whether ``input`` is transposed. Currently only ``False`` is
supported. Default: ``False`` .
trans_x2 (bool, optional): Indicates whether ``x2`` is transposed. Default: ``False`` .
Returns:
- output (Tensor) - The result of allgather and matmul fusion calculations.
- gather_out (Tensor) - The result of allgather. If gather_output is ``False`` , ``gather_out`` returns a
tensor with shape 0.
Note:
- When using this interface, please ensure that the driver firmware package and CANN package are both the
matching 8.0.RC2 version or a higher version, otherwise an error will be reported, such as BUS ERROR.
- The shape of ``input`` is (m, k), the shape of ``x2`` is (k, n), k is required to be equal, and the value
range of k is [256, 65535). The shape of ``output`` is (m * world_size, n), and the shape of
``gather_out`` is (m * world_size, k).
- The common fusion operators in a model only support the same communication group.
Raises:
TypeError: Any arg is of wrong type.
RuntimeError: The dtype of ``input`` or ``x2`` is neither float16 nor bfloat16.
RuntimeError: The dtypes of ``input`` and ``x2`` are different.
RuntimeError: The shape of ``input`` or ``x2`` is not two-dimensional.
RuntimeError: The k axis of ``input`` shape and ``x2`` shape are not equal.
RuntimeError: k is less than ``256`` or greater than or equal to ``65535`` .
RuntimeError: ``bias`` is not None.
RuntimeError: ``group`` does not exist.
RuntimeError: ``world_size`` is inconsistent with the actual number of running cards.
RuntimeError: ``world_size`` is not equal to ``2`` , ``4`` , ``8`` .
RuntimeError: ``gather_index`` is not ``0`` .
RuntimeError: ``trans_input`` is ``True`` .
Supported Platforms:
``Ascend``
Examples:
.. note::
Before running the following examples, you need to configure the communication environment variables.
For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method without any third-party or
configuration file dependencies. Please see the `msrun start up <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
for more details.
This example should be run with 2 devices.
>>> import mindspore as ms
>>> import numpy as np
>>> ms.communication.init()
>>> ms.set_context(mode=ms.PYNATIVE_MODE, device_target='Ascend')
>>> rank = ms.communication.get_rank()
>>> np.random.seed(rank)
>>> input = ms.Tensor(np.random.randn(128, 256).astype(np.float32), dtype=ms.float16)
>>> x2 = ms.Tensor(np.random.randn(256, 512).astype(np.float32), dtype=ms.float16)
>>> group = ms.communication.GlobalComm.WORLD_COMM_GROUP
>>> world_size = ms.communication.get_group_size()
>>> output, gather_out = ms.ops.all_gather_matmul(
... input, x2, group, world_size, None, 0, True, 0, False, False
... )
>>> print(output.shape)
(256, 512)
>>> print(gather_out.shape)
(256, 256)
"""
return all_gather_matmul_op(input, x2, group, world_size, bias, gather_index, gather_output, comm_turn, trans_input, trans_x2)
def angle(input):
r"""
Returns the element-wise argument of a complex tensor.
The elements in input are considered to be complex numbers of the form a+bj, where a is the real part and b
is the imaginary part. The argument returned by this function is of the form :math:`atan2(b, a)`.
Args:
input (Tensor):
The input tensor. types: complex64, complex128.
Returns:
Tensor, has the float32 or float64 type and the same shape as input.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If the dtype of `input` is not one of: complex64, complex128.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input = Tensor([-1.5 + 7.8j, 3 + 5.75j], mindspore.complex64)
>>> output = ops.angle(input)
>>> print(output)
[1.7607845 1.0899091]
"""
return angle_op(input)
def apply_rotary_pos_emb_(query, key, cos, sin, position_ids, cos_format=0):
r"""
"""
apply_rotary_pos_emb_op = _get_cache_prim(ApplyRotaryPosEmb)(cos_format)
return apply_rotary_pos_emb_op(query, key, cos, sin, position_ids)
def argmax_ext(input, dim=None, keepdim=False):
r"""
Return the indices of the maximum values of a tensor across a dimension.
Args:
input (Tensor): Input tensor.
dim (Union[int, None], optional): The dimension to reduce. If `dim` is ``None`` , the indices of the maximum
value within the flattened input will be returned. Default: ``None`` .
keepdim (bool, optional): Whether the output tensor retains the specified
dimension. Ignored if `dim` is None. Default: ``False`` .
Returns:
Tensor, indices of the maximum values across a dimension.
Raises:
TypeError: If `keepdim` is not bool.
ValueError: If `dim` is out of range.
Supported Platforms:
``Ascend``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore import ops
>>> x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
>>> output = ops.auto_generate.argmax_ext(x, dim=-1)
>>> print(output)
[1 0 0]
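>>> # Illustrative: with dim=None the returned index is into the flattened input (see Args above).
>>> print(ops.auto_generate.argmax_ext(x))
6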
"""
return argmax_ext_op(input, dim, keepdim)
def argmin_ext(input, dim=None, keepdim=False):
r"""
Return the indices of the minimum values of a tensor across a dimension.
Args:
input (Tensor): Input tensor.
dim (Union[int, None], optional): Specify the axis for calculation. If `dim` is ``None`` , the indices of the minimum
value within the flattened input will be returned. Default: ``None`` .
keepdim (bool, optional): Whether the output tensor retains the specified
dimension. Ignored if `dim` is None. Default: ``False`` .
Returns:
Tensor, indices of the minimum values of the input tensor across a dimension.
Raises:
TypeError: If `keepdim` is not bool.
ValueError: If `dim` is out of range.
Supported Platforms:
``Ascend``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore import mint
>>> x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
>>> output = mint.argmin(x, dim=-1)
>>> print(output)
[0 1 2]
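>>> # Illustrative: keepdim=True retains the reduced dimension (see Args above).
>>> print(mint.argmin(x, dim=-1, keepdim=True))
[[0]
[1]
[2]]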
"""
return argmin_ext_op(input, dim, keepdim)
def argsort_ext(input, dim=-1, descending=False):
r"""
Sorts the input tensor along the given dimension in specified order and return the sorted indices.
.. warning::
This is an experimental optimizer API that is subject to change.
Args:
input(Tensor): The input tensor to sort.
dim (int): The dim to sort along. Default: ``-1`` , means the last dimension.
The Ascend backend only supports sorting the last dimension.
descending (bool): The sort order. If `descending` is ``True`` then the elements
are sorted in descending order by value. Otherwise sort in ascending order. Default: ``False`` .
Returns:
Tensor, the indices of sorted input tensor. Data type is int64.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor
>>> import mindspore.mint as mint
>>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
>>> sort = mint.argsort(x)
>>> print(sort)
[[2 1 0]
[2 0 1]
[0 1 2]]
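>>> # Illustrative: descending=True sorts each row from largest to smallest.
>>> print(mint.argsort(x, descending=True))
[[0 1 2]
[1 0 2]
[2 1 0]]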
"""
return argsort_op(input, dim, descending)
def as_strided(input, size, stride, storage_offset=0):
r"""
"""
return as_strided_op(input, size, stride, storage_offset)
def asin_ext(input):
r"""
Computes arcsine of input tensors element-wise.
.. math::
out_i = \sin^{-1}(input_i)
Args:
input (Tensor): The shape of tensor is
:math:`(N,*)`, where :math:`*` means any number of additional dimensions.
Returns:
Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
Raises:
TypeError: If `input` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
>>> output = ops.asin_ext(input)
>>> print(output)
[0.8330927 0.04001068 0.30469266 0.59438497 ]
"""
return asin_ext_op(input)
def asin(input):
r"""
Computes arcsine of input tensors element-wise.
.. math::
out_i = \sin^{-1}(input_i)
Args:
input (Tensor): The shape of tensor is
:math:`(N,*)` where :math:`*` means any number of additional dimensions.
Returns:
Tensor, has the same shape and dtype as `input`.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If dtype of `input` is not float16, float32, float64, complex64, complex128.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
>>> output = ops.asin(input)
>>> print(output)
[0.8330704 0.04001067 0.30469266 0.5943858 ]
"""
return asin_op(input)
def asinh_ext(input):
r"""
Computes inverse hyperbolic sine of the input element-wise.
.. math::
out_i = \sinh^{-1}(input_i)
Args:
input (Tensor): The input tensor of inverse hyperbolic sine function.
Returns:
Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
Raises:
TypeError: If `input` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([-5.0, 1.5, 3.0, 100.0]), mindspore.float32)
>>> output = ops.asinh_ext(input)
>>> print(output)
[-2.3124385 1.1947632 1.8184465 5.298342 ]
"""
return asinh_ext_op(input)
def asinh(input):
r"""
Computes inverse hyperbolic sine of the input element-wise.
.. math::
out_i = \sinh^{-1}(input_i)
Args:
input (Tensor): The input tensor of inverse hyperbolic sine function.
Returns:
Tensor, has the same shape and type as `input`.
Raises:
TypeError: If `input` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([-5.0, 1.5, 3.0, 100.0]), mindspore.float32)
>>> output = ops.asinh(input)
>>> print(output)
[-2.3124382 1.1947632 1.8184465 5.298342 ]
"""
return asinh_op(input)
def assign_add(variable, value):
r"""
Updates a `Parameter` or `Tensor` by adding a value to it.
Args of `variable` and `value` comply with the implicit type conversion rules to make the data types consistent.
If they have different data types, the lower priority data type will be converted to
the relatively highest priority data type.
If `value` is a number, the number is automatically converted to Tensor,
and the data type is consistent with the Tensor data type involved in the operation.
Note:
Since `variable` is a data type Parameter or Tensor, the data type cannot be changed,
so only the type of `value` is allowed to be promoted to the type of `variable`.
And the conversion type supported by different devices will be different,
it is recommended to use the same data type when using this operator.
Args:
variable (Parameter, Tensor): The `Parameter` or `Tensor`.
:math:`(N,*)` where :math:`*` means any number of additional dimensions.
value (Union[Tensor, Number]): The value to be added to the `variable`.
If `value` is a number, the number is automatically converted to Tensor.
It must have the same shape as `variable`.
Using the same data type for both is recommended.
Returns:
Tensor, has the same data type and shape as original `variable`.
Raises:
TypeError: If `value` is neither Number nor Tensor.
RuntimeError: If a data type conversion between `variable` and `value` is required
but is not supported.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> from mindspore.common.initializer import initializer
>>> variable = mindspore.Parameter(initializer(1, [1], mindspore.int32), name="global_step")
>>> value = Tensor(np.ones([1]).astype(np.int32) * 100)
>>> ops.assign_add(variable, value)
>>> print(variable.asnumpy())
[101]
"""
return assign_add_op(variable, value)
def assign(variable, value):
r"""
Assigns `Parameter` or `Tensor` with a value.
Args of `variable` and `value` comply with the implicit type conversion rules to make the data types consistent.
If they have different data types, the lower priority data type will be converted to
the relatively highest priority data type.
Args:
variable (Parameter, Tensor): The `Parameter` or `Tensor`. :math:`(N,*)` where :math:`*` means
any number of additional dimensions.
value (Tensor): The value to be assigned, has the same shape with `variable`.
Returns:
Tensor, has the same data type and shape as original `variable`.
Raises:
TypeError: If `variable` is neither a Parameter nor a Tensor.
TypeError: If `value` is not a Tensor.
RuntimeError: If a data type conversion between `variable` and `value` is required
but is not supported.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> value = Tensor([2.0], mindspore.float32)
>>> variable = mindspore.Parameter(Tensor([1.0], mindspore.float32), name="variable")
>>> ops.assign(variable, value)
>>> print(variable.asnumpy())
[2.]
"""
return assign_op(variable, value)
def assign_sub(variable, value):
r"""
Updates a `Parameter` or `Tensor` by subtracting a value from it.
Args of `variable` and `value` comply with the implicit type conversion rules to make the data types consistent.
If they have different data types, the lower priority data type will be converted to
the relatively highest priority data type.
If `value` is a number, the number is automatically converted to Tensor,
and the data type is consistent with the Tensor data type involved in the operation.
Note:
Since `variable` is a data type Parameter or Tensor, the data type cannot be changed,
so only the type of `value` is allowed to be promoted to the type of `variable`.
And the conversion type supported by different devices will be different,
it is recommended to use the same data type when using this operator.
Args:
variable (Parameter, Tensor): The `Parameter` or `Tensor`.
:math:`(N,*)` where :math:`*` means any number of additional dimensions.
value (Tensor): The value to be subtracted from the `variable`.
It must have the same shape as `variable`.
Using the same data type for both is recommended.
Returns:
Tensor, has the same data type and shape as `variable`.
Raises:
TypeError: If `value` is neither Number nor Tensor.
RuntimeError: If the type conversion between `variable` and `value` is not supported.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> from mindspore.common.initializer import initializer
>>> variable = mindspore.Parameter(initializer(1, [1], mindspore.int32), name="global_step")
>>> value = Tensor(np.ones([1]).astype(np.int32) * 100)
>>> ops.assign_sub(variable, value)
>>> print(variable.asnumpy())
[-99]
"""
return assign_sub_op(variable, value)
def atan2_ext(input, other):
r"""
Returns arctangent of input/other element-wise.
It returns :math:`\theta\ \in\ [-\pi, \pi]`
such that :math:`input = r*\sin(\theta), other = r*\cos(\theta)`, where :math:`r = \sqrt{input^2 + other^2}`.
Note:
- Arg `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
If they have different data types, the lower precision data type will be converted to relatively the
highest precision data type.
Args:
input (Tensor, Number.number): The input tensor or scalar.
other (Tensor, Number.number): The input tensor or scalar. It has the same shape with `input` or
its shape is able to broadcast with `input`.
Returns:
Tensor, the shape is the same as the one after broadcasting.
The dtype of output is float32 when dtype of `input` is in
[bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
Raises:
TypeError: If `input` or `other` is not a Tensor or scalar.
RuntimeError: If a data type conversion between `input` and `other` is required
but is not supported.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, mint
>>> input = Tensor(np.array([0, 1]), mindspore.float32)
>>> other = Tensor(np.array([1, 1]), mindspore.float32)
>>> output = mint.atan2(input, other)
>>> print(output)
[0. 0.7853982]
"""
return atan2_ext_op(input, other)
def atan2(input, other):
r"""
Returns arctangent of input/other element-wise.
It returns :math:`\theta\ \in\ [-\pi, \pi]`
such that :math:`input = r*\sin(\theta), other = r*\cos(\theta)`, where :math:`r = \sqrt{input^2 + other^2}`.
Note:
- Arg `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
If they have different data types, the lower precision data type will be converted to relatively the
highest precision data type.
Args:
input (Tensor, Number.number): The input tensor or scalar.
other (Tensor, Number.number): The input tensor or scalar. It has the same shape with `input` or
its shape is able to broadcast with `input`.
Returns:
Tensor, the shape is the same as the one after broadcasting, and the data type is same as `input`.
Raises:
TypeError: If `input` or `other` is not a Tensor or scalar.
RuntimeError: If a data type conversion between `input` and `other` is required
but is not supported.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([0, 1]), mindspore.float32)
>>> other = Tensor(np.array([1, 1]), mindspore.float32)
>>> output = ops.atan2(input, other)
>>> print(output)
[0. 0.7853982]
"""
return atan2_op(input, other)
def atan_ext(input):
r"""
Computes the trigonometric inverse tangent of the input element-wise.
.. math::
out_i = \tan^{-1}(input_i)
Args:
input (Tensor): The shape of tensor is
:math:`(N,*)` where :math:`*` means any number of additional dimensions.
Returns:
Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
Raises:
TypeError: If `input` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([1.0, 0.0]), mindspore.float32)
>>> output = ops.atan_ext(input)
>>> print(output)
[0.7853982 0. ]
"""
return atan_ext_op(input)
def atan(input):
r"""
Computes the trigonometric inverse tangent of the input element-wise.
.. math::
out_i = \tan^{-1}(input_i)
Args:
input (Tensor): The shape of tensor is
:math:`(N,*)` where :math:`*` means any number of additional dimensions.
Supported dtypes:
- Ascend: float16, float32.
- GPU/CPU: float16, float32, float64, complex64 or complex128.
Returns:
A Tensor, has the same type as the input.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If dtype of `input` is not float16, float32, float64, complex64 or complex128.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([1.0, 0.0]), mindspore.float32)
>>> output = ops.atan(input)
>>> print(output)
[0.7853982 0. ]
"""
return atan_op(input)
def atanh(input):
r"""
Computes inverse hyperbolic tangent of the input element-wise.
.. math::
out_i = \tanh^{-1}(input_{i})
Args:
input (Tensor): The shape of tensor is
:math:`(N,*)` where :math:`*` means any number of additional dimensions.
Returns:
Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
Raises:
TypeError: If `input` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([0, -0.5]), mindspore.float32)
>>> output = ops.atanh(input)
>>> print(output)
[ 0. -0.54930615]
"""
return atanh_op(input)
def avg_pool1d_ext(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True):
r"""
Applies a 1D average pooling over an input Tensor which can be regarded as a composition of 1D input planes.
Typically the input is of shape :math:`(N_{in}, C_{in}, L_{in})`, avg_pool1d outputs regional average in the
:math:`(L_{in})`-dimension. Given kernel size as :math:`ks = l_{ker}` and `stride` as :math:`s = s_0`, the
operation is as follows.
.. math::
\text{output}(N_i, C_j, l) = \frac{1}{l_{ker}} \sum_{n=0}^{l_{ker}-1}
\text{input}(N_i, C_j, s_0 \times l + n)
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
input (Tensor): Tensor of shape :math:`(N, C_{in}, L_{in})`.
kernel_size (Union(int, tuple[int])): The size of kernel window used to take the average value.
stride (Union(int, tuple[int]), optional): The distance of kernel moving. `stride` can either be an int
number or a tuple of one int number. Default: ``None``, the same value as `kernel_size`.
padding (Union(int, tuple[int]), optional): The pad length to be filled. `padding` can either be an integer
or a tuple of one integer. Default: ``0`` .
ceil_mode (bool, optional): If True, apply ceil instead of floor to compute the output shape. Default: ``False``.
count_include_pad (bool, optional): If True, include the zero-padding in the averaging calculation. Default: ``True`` .
Returns:
Tensor of shape :math:`(N, C_{in}, L_{out})`.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If `kernel_size` or `stride` is not an int.
TypeError: If `ceil_mode` or `count_include_pad` is not a bool.
ValueError: If `kernel_size` or `stride` is less than `1`.
ValueError: If `kernel_size` or `stride` or `padding` is not int nor a tuple whose length is greater than `1`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, mint
>>> input_x = Tensor(np.random.randint(0, 10, [1, 3, 6]), mindspore.float32)
>>> output = mint.avg_pool1d(input_x, kernel_size=6, stride=1)
>>> print(output.shape)
(1, 3, 1)
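>>> # Illustrative: ceil_mode=True rounds the output length up, ceil((6 - 4)/3 + 1) = 2.
>>> output = mint.avg_pool1d(input_x, kernel_size=4, stride=3, ceil_mode=True)
>>> print(output.shape)
(1, 3, 2)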
"""
return avg_pool1d_op(input, kernel_size, stride, padding, ceil_mode, count_include_pad)
def baddbmm(input, batch1, batch2, beta=1, alpha=1):
r"""
The result is the sum of the input and a batch matrix-matrix product of matrices in batch1 and batch2.
The formula is defined as follows:
.. math::
\text{out}_{i} = \beta \text{input}_{i} + \alpha (\text{batch1}_{i} \mathbin{@} \text{batch2}_{i})
Args:
input (Tensor): The input Tensor. When batch1 is a :math:`(C, W, T)` Tensor and batch2 is a
:math:`(C, T, H)` Tensor, input must be broadcastable with :math:`(C, W, H)` Tensor.
batch1 (Tensor): :math:`batch1` in the above formula. Must be 3-D Tensor, dtype is same as input.
batch2 (Tensor): :math:`batch2` in the above formula. Must be 3-D Tensor, dtype is same as input.
Keyword Args:
beta (Union[float, int], optional): multiplier for input. Default: ``1`` .
alpha (Union[float, int], optional): multiplier for :math:`batch1 @ batch2`. Default: ``1`` .
Returns:
Tensor, has the same dtype as input, shape will be :math:`(C, W, H)`.
Raises:
TypeError: If the type of `input`, `batch1`, `batch2` is not Tensor.
TypeError: If the types of `input`, `batch1`, `batch2` are different.
ValueError: If `batch1` and `batch2` are not 3-D tensors.
Supported Platforms:
``Ascend``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.ones([1, 3, 3]).astype(np.float32))
>>> batch1 = Tensor(np.ones([1, 3, 4]).astype(np.float32))
>>> batch2 = Tensor(np.ones([1, 4, 3]).astype(np.float32))
>>> output = ops.baddbmm(input, batch1, batch2)
>>> print(output)
[[[5. 5. 5.]
[5. 5. 5.]
[5. 5. 5.]]]
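>>> # Illustrative (assuming the public ops.baddbmm alias): explicit scaling per the formula above,
>>> # out = 2 * input + 0.5 * (batch1 @ batch2) = 2 + 0.5 * 4 = 4.
>>> output = ops.baddbmm(input, batch1, batch2, beta=2, alpha=0.5)
>>> print(output)
[[[4. 4. 4.]
[4. 4. 4.]
[4. 4. 4.]]]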
"""
return baddbmm_op(input, batch1, batch2, beta, alpha)
def batch_norm_elemt(input, weight=None, bias=None, mean=None, invstd=None, eps=1e-5):
r"""
"""
return batch_norm_elemt_op(input, weight, bias, mean, invstd, eps)
def batch_norm_gather_stats_with_counts(input, mean, invstd, running_mean=None, running_var=None, momentum=1e-1, eps=1e-5, counts=None):
r"""
"""
return batch_norm_gather_stats_with_counts_op(input, mean, invstd, running_mean, running_var, momentum, eps, counts)
def batch_norm_stats(input, eps):
r"""
"""
return batch_norm_stats_op(input, eps)
def bincount_ext(input, weights=None, minlength=0):
r"""
Count the occurrences of each value in the input.
If `minlength` is not specified, the length of the output Tensor is the maximum value in the input plus one.
If `minlength` is specified, the length of the output Tensor is the maximum of `minlength` and
the maximum value in the input plus one.
Each value in the output Tensor represents the number of occurrences of that index value in the input.
If `weights` is specified, the output results are weighted,
i.e., :math:`out[n] += weight[i]` instead of :math:`out[n] += 1`.
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
input (Tensor): A one-dimensional Tensor.
weights (Tensor, optional): Weights with the same shape as the input. Default: ``None`` .
minlength (int, optional): The minimum length of output Tensor. Should be non-negative. Default: ``0`` .
Returns:
Tensor, If input is non-empty, the output shape is :math:`(max(max(input)+1, minlength), )`,
otherwise the shape is :math:`(0, )`.
Raises:
TypeError: If `input` or `weights` is not a Tensor.
ValueError: If `input` contains negative values.
ValueError: If `input` is not one-dimensional or `input` and `weights` do not have the same shape.
Supported Platforms:
``Ascend``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, mint
>>> print(mint.bincount(Tensor(np.arange(5))))
[1. 1. 1. 1. 1.]
>>> print(mint.bincount(Tensor(np.array([0, 1, 1, 3, 2, 1, 7]))))
[1. 3. 1. 1. 0. 0. 0. 1.]
>>> w = Tensor(np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6])) # weights
>>> x = Tensor(np.array([0, 1, 1, 2, 2, 2]))
>>> print(mint.bincount(x, weights=w, minlength=5))
[0.3 0.7 1.1 0.  0. ]
"""
return bincount_ext_op(input, weights, minlength)
def bmm_ext(input, mat2):
r"""
Performs batch matrix-matrix multiplication of two three-dimensional tensors.
.. math::
\text{output}= \text{input} @ \text{mat2}
Args:
input (Tensor): The first batch of matrices to be multiplied. Must be a three-dimensional tensor of shape `(b, n, m)`.
mat2 (Tensor): The second batch of matrices to be multiplied. Must be a three-dimensional tensor of shape `(b, m, p)`.
Returns:
Tensor, the output tensor of shape `(b, n, p)`, where each matrix is the product of the corresponding matrices in the input batches.
Raises:
ValueError: If `input` or `mat2` is not three-dimensional tensors.
ValueError: If the length of the third dimension of `input` is not equal to the length of the second dimension of `mat2`.
ValueError: If the batch size of the inputs is not equal to the batch size of the mat2.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore import ops
>>> a = Tensor(np.ones(shape=[2, 3, 4]), mindspore.float32)
>>> b = Tensor(np.ones(shape=[2, 4, 5]), mindspore.float32)
>>> output = ops.auto_generate.bmm_ext(a, b)
>>> print(output)
[[[4. 4. 4. 4. 4.]
[4. 4. 4. 4. 4.]
[4. 4. 4. 4. 4.]]
[[4. 4. 4. 4. 4.]
[4. 4. 4. 4. 4.]
[4. 4. 4. 4. 4.]]]
"""
return bmm_ext_op(input, mat2)
def broadcast_to(input, shape):
r"""
Broadcasts input tensor to a given shape. The dim of input shape must be smaller
than or equal to that of target shape. Suppose input shape is :math:`(x_1, x_2, ..., x_m)`,
target shape is :math:`(*, y_1, y_2, ..., y_m)`, where :math:`*` means any additional dimension.
The broadcast rules are as follows:
Compare the value of :math:`x_m` and :math:`y_m`, :math:`x_{m-1}` and :math:`y_{m-1}`, ...,
:math:`x_1` and :math:`y_1` consecutively and
decide whether these shapes are broadcastable and what the broadcast result is.
If the value pairs at a specific dim are equal, then that value goes right into that dim of output shape.
With an input shape :math:`(2, 3)`, target shape :math:`(2, 3)` , the inferred output shape is :math:`(2, 3)`.
If the value pairs are unequal, there are three cases:
Case 1: If the value of the target shape in the dimension is -1, the value of the
output shape in the dimension is the value of the corresponding input shape in the dimension.
With an input shape :math:`(3, 3)`, target
shape :math:`(-1, 3)`, the output shape is :math:`(3, 3)`.
Case 2: If the value of target shape in the dimension is not -1, but the corresponding
value in the input shape is 1, then the corresponding value of the output shape
is that of the target shape. With an input shape :math:`(1, 3)`, target
shape :math:`(8, 3)`, the output shape is :math:`(8, 3)`.
Case 3: If the corresponding values of the two shapes do not satisfy the above cases,
it means that broadcasting from the input shape to the target shape is not supported.
So far we got the last m dims of the outshape, now focus on the first :math:`*` dims, there are
two cases:
If the first :math:`*` dims of output shape does not have -1 in it, then fill the input
shape with ones until their length are the same, and then refer to
Case 2 mentioned above to calculate the output shape. With target shape :math:`(3, 1, 4, 1, 5, 9)`,
input shape :math:`(1, 5, 9)`, the filled input shape will be :math:`(1, 1, 1, 1, 5, 9)` and thus the
output shape is :math:`(3, 1, 4, 1, 5, 9)`.
If the first :math:`*` dims of output shape have -1 in it, it implies this -1 is corresponding to
a non-existing dim so they're not broadcastable. With target shape :math:`(3, -1, 4, 1, 5, 9)`,
input shape :math:`(1, 5, 9)`, instead of operating the dim-filling process first, it raises errors directly.
Args:
input (Tensor): The input Tensor.
shape (tuple): The target shape to broadcast. Can be fully specified, or have -1 in one position
where it will be substituted by the input tensor's shape in that position, see example.
Returns:
Tensor, with the given `shape` and the same data type as `input`.
Raises:
TypeError: If `shape` is not a tuple.
ValueError: If the target and input shapes are incompatible, or if a -1 in the target shape is in an invalid
location.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> shape = (2, 3)
>>> x = Tensor(np.array([1, 2, 3]).astype(np.float32))
>>> output = ops.broadcast_to(x, shape)
>>> print(output)
[[1. 2. 3.]
[1. 2. 3.]]
>>> shape = (-1, 2)
>>> x = Tensor(np.array([[1], [2]]).astype(np.float32))
>>> output = ops.broadcast_to(x, shape)
>>> print(output)
[[1. 1.]
[2. 2.]]
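>>> # Illustrative: the dim-filling case described above; the input shape is left-padded with ones.
>>> x = Tensor(np.ones((1, 5, 9)).astype(np.float32))
>>> output = ops.broadcast_to(x, (3, 1, 4, 1, 5, 9))
>>> print(output.shape)
(3, 1, 4, 1, 5, 9)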
"""
return broadcast_to_impl(input, shape)
cast_op = Cast()
def cast(input, dtype):
r"""
Returns a tensor with the new specified data type.
Note:
When converting complex numbers to boolean type, the imaginary part of the complex number is not
taken into account. As long as the real part is non-zero, it returns True; otherwise, it returns False.
Args:
input (Union[Tensor, Number]): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The tensor to be cast.
dtype (dtype.Number): The valid data type of the output tensor. Only constant value is allowed.
Returns:
Tensor, the shape of tensor is the same as `input`, :math:`(x_1, x_2, ..., x_R)`.
Raises:
TypeError: If `input` is neither Tensor nor Number.
TypeError: If `dtype` is not a Number.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)
>>> input = Tensor(input_np)
>>> dtype = mindspore.int32
>>> output = ops.cast(input, dtype)
>>> print(output.dtype)
Int32
>>> print(output.shape)
(2, 3, 4, 5)
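>>> # Illustrative check of the Note above: complex-to-bool conversion looks only at the real part.
>>> c = Tensor(np.array([1 + 2j, 0 + 2j]), mindspore.complex64)
>>> print(ops.cast(c, mindspore.bool_))
[ True False]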
"""
return cast_op(input, dtype)
def cdist(input_x, input_y, p=2.0):
r"""
Computes p-norm distance between each pair of row vectors of two input Tensors.
Note:
On Ascend, the supported dtypes are float16 and float32.
On CPU, the supported dtypes are float16 and float32.
On GPU, the supported dtypes are float32 and float64.
Args:
input_x (Tensor): Input tensor of shape :math:`(B, P, M)`.
Letter :math:`B` represents 0 or positive int number.
When :math:`B` is equal to 0, it means this dimension can be ignored,
i.e. shape of the tensor is :math:`(P, M)`.
input_y (Tensor): Input tensor of shape :math:`(B, R, M)`, has the same dtype as `input_x`.
p (float, optional): P value for the p-norm distance to calculate between each
vector pair, P >= 0. Default: ``2.0`` .
Returns:
Tensor, p-norm distance, has the same dtype as `input_x`, its shape is :math:`(B, P, R)`.
Raises:
TypeError: If `input_x` or `input_y` is not a Tensor.
TypeError: If dtype of `input_x` or `input_y` is not listed in the "Note" above.
TypeError: If `p` is not a float.
ValueError: If `p` is negative.
ValueError: If dimension of `input_x` is not the same as `input_y`.
ValueError: If dimension of `input_x` or `input_y` is neither 2 nor 3.
ValueError: If the batch dim of `input_x` and `input_y` can not broadcast.
ValueError: If the number of columns of `input_x` is not the same as that of `input_y`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([[[1.0, 1.0], [2.0, 2.0]]]).astype(np.float32))
>>> y = Tensor(np.array([[[3.0, 3.0], [3.0, 3.0]]]).astype(np.float32))
>>> output = ops.cdist(x, y, 2.0)
>>> print(output)
[[[2.8284273 2.8284273]
[1.4142137 1.4142137]]]
"""
cdist_op = _get_cache_prim(Cdist)(p)
return cdist_op(input_x, input_y)
def ceil(input):
r"""
Rounds a tensor up to the closest integer element-wise.
.. math::
out_i = \lceil x_i \rceil = \lfloor x_i \rfloor + 1
Args:
input (Tensor): the input of Ceil.
Supported dtypes:
- Ascend: float16, float32, float64 or bfloat16.
- GPU/CPU: float16, float32, float64.
Returns:
Tensor, has the same shape as `input`.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If dtype of `input` is not float16, float32, float64 or bfloat16.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([1.1, 2.5, -1.5]), mindspore.float32)
>>> output = ops.ceil(input)
>>> print(output)
[ 2. 3. -1.]
>>> input = Tensor(2.1, mindspore.float32)
>>> output = ops.ceil(input)
>>> print(output)
3.0
"""
return ceil_op(input)
def celu(x, alpha=1.0):
r"""
celu activation function, computes celu (Continuously differentiable exponential
linear units) of input tensors element-wise. The formula is defined as follows:
.. math::
\text{CeLU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x/\alpha) - 1))
For more details, please refer to `celu <https://arxiv.org/abs/1704.07483>`_.
.. warning::
This is an experimental API that is subject to change or deletion.
CELU Activation Function Graph:
.. image:: ../images/CELU.png
:align: center
Args:
x (Tensor): The input of celu with data type of float16 or float32.
alpha (float, optional): The :math:`\alpha` value for the Celu formulation. Default: 1.0
Returns:
Tensor, has the same data type and shape as the input.
Raises:
TypeError: If `alpha` is not a float.
TypeError: If `x` is not a Tensor.
TypeError: If dtype of `x` is neither float16 nor float32.
ValueError: If `alpha` has the value of 0.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([-2.0, -1.0, 1.0, 2.0]), mindspore.float32)
>>> output = ops.celu(x, alpha=1.0)
>>> print(output)
[-0.86466473 -0.63212055 1. 2. ]
"""
celu_op = _get_cache_prim(CeLU)(alpha)
return celu_op(x)
def clamp_scalar(input, min=None, max=None):
r"""
Clamps tensor values between the specified minimum value and maximum value.
Limits the value of :math:`input` to a range, whose lower limit is `min` and upper limit is `max` .
.. math::
out_i= \left\{
\begin{array}{align}
max & \text{ if } input_i\ge max \\
input_i & \text{ if } min \lt input_i \lt max \\
min & \text{ if } input_i \le min \\
\end{array}\right.
Note:
- `min` and `max` cannot be None at the same time;
- When `min` is None and `max` is not None, the elements in Tensor larger than `max` will become `max`;
- When `min` is not None and `max` is None, the elements in Tensor smaller than `min` will become `min`;
- If `min` is greater than `max`, the value of all elements in Tensor will be set to `max`;
- The data type of `input`, `min` and `max` should support implicit type conversion and cannot be bool type.
Args:
input (Tensor): Input data, which type is Tensor. Tensors of arbitrary dimensions are supported.
min (Union(float, int), optional): The minimum value. Default: ``None`` .
max (Union(float, int), optional): The maximum value. Default: ``None`` .
Returns:
Tensor, a clipped Tensor.
The data type and shape are the same as input.
Raises:
ValueError: If both `min` and `max` are None.
TypeError: If the type of `input` is not Tensor.
TypeError: If the type of `min` is not in None, float or int.
TypeError: If the type of `max` is not in None, float or int.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> # case 1: the data type of input is number
>>> import mindspore
>>> from mindspore import Tensor
>>> from mindspore.ops.auto_generate import clamp_scalar
>>> import numpy as np
>>> min_value = 5
>>> max_value = 20
>>> input = Tensor(np.array([[1., 25., 5., 7.], [4., 11., 6., 21.]]), mindspore.float32)
>>> output = clamp_scalar(input, min_value, max_value)
>>> print(output)
[[ 5. 20. 5. 7.]
[ 5. 11. 6. 20.]]
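>>> # Illustrative: with min=None, values are only clipped from above (see Note).
>>> output = clamp_scalar(input, None, max_value)
>>> print(output)
[[ 1. 20. 5. 7.]
[ 4. 11. 6. 20.]]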
"""
return clamp_scalar_op(input, min, max)
def clamp_tensor(input, min=None, max=None):
r"""
Clamps tensor values between the specified minimum value and maximum value.
Limits the value of :math:`input` to a range, whose lower limit is `min` and upper limit is `max` .
.. math::
out_i= \left\{
\begin{array}{align}
max & \text{ if } input_i\ge max \\
input_i & \text{ if } min \lt input_i \lt max \\
min & \text{ if } input_i \le min \\
\end{array}\right.
Note:
- `min` and `max` cannot be None at the same time;
- When `min` is None and `max` is not None, the elements in Tensor larger than `max` will become `max`;
- When `min` is not None and `max` is None, the elements in Tensor smaller than `min` will become `min`;
- If `min` is greater than `max`, the value of all elements in Tensor will be set to `max`;
- The data type of `input`, `min` and `max` should support implicit type conversion and cannot be bool type.
Args:
input (Tensor): Input data, which type is Tensor. Tensors of arbitrary dimensions are supported.
min (Tensor, optional): The minimum value. Default: ``None`` .
max (Tensor, optional): The maximum value. Default: ``None`` .
Returns:
Tensor, a clipped Tensor.
The data type and shape are the same as input.
Raises:
ValueError: If both `min` and `max` are None.
TypeError: If the type of `input` is not Tensor.
TypeError: If the type of `min` is not in None, Tensor.
TypeError: If the type of `max` is not in None, Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> # case 1: the data type of input is Tensor
>>> import mindspore
>>> from mindspore import Tensor
>>> from mindspore.ops.auto_generate import clamp_tensor
>>> import numpy as np
>>> min_value = Tensor(5, mindspore.float32)
>>> max_value = Tensor(20, mindspore.float32)
>>> input = Tensor(np.array([[1., 25., 5., 7.], [4., 11., 6., 21.]]), mindspore.float32)
>>> output = clamp_tensor(input, min_value, max_value)
>>> print(output)
[[ 5. 20. 5. 7.]
[ 5. 11. 6. 20.]]
"""
return clamp_tensor_op(input, min, max)
def clone(input):
r"""
Returns a copy of the input tensor.
.. warning::
This is an experimental API that is subject to change or deletion.
Note:
This function is differentiable, and gradients will flow back directly from the calculation
result of the function to the `input`.
Args:
input (Tensor): A tensor to be copied.
Returns:
Tensor, with the same data, shape and type as `input`.
Raises:
TypeError: If `input` is not a Tensor.
Supported Platforms:
``Ascend``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.ones((3,3)).astype("float32"))
>>> output = ops.clone(input)
>>> print(output)
[[1. 1. 1.]
[1. 1. 1.]
[1. 1. 1.]]
"""
return clone_op(input)
def fold_ext(input, output_size, kernel_size, dilation=1, padding=0, stride=1):
r"""
Combines an array of sliding local blocks into a large containing tensor.
Consider a batched input tensor of shape :math:`(N, C \times \prod(\text{kernel_size}), L)` ,
where :math:`N` is the batch dimension, :math:`C \times \prod(\text{kernel_size})` is the
total number of values within each block (a block has :math:`\prod(\text{kernel_size})` spatial
locations each containing a `C`-channeled vector), and :math:`L` is the total number of such blocks:
.. math::
L = \prod_d \left\lfloor\frac{\text{output_size}[d] + 2 \times \text{padding}[d] %
- \text{dilation}[d] \times (\text{kernel_size}[d] - 1) - 1}{\text{stride}[d]} + 1\right\rfloor,
where :math:`d` is over all spatial dimensions.
Therefore, `output_size` is the spatial shape of the large containing tensor of the sliding local blocks.
The `dilation`, `padding` and `stride` arguments specify how the sliding blocks are retrieved.
.. warning::
Currently, only unbatched(3D) or batched(4D) image-like output tensors are supported.
Args:
input (Tensor): 2-D or 3-D Tensor.
output_size (Union[int, tuple[int], list[int]]): The shape of the spatial dimensions of
the output(i.e., output.shape[2:]).
kernel_size (Union[int, tuple[int], list[int]]): The size of the kernel, should be two int
for height and width. If the type is int, height equals width. Must be specified.
dilation (Union[int, tuple[int], list[int]], optional): The size of the dilation, should be two int
for height and width. If the type is int, height equals width. Default: ``1`` .
padding (Union[int, tuple[int], list[int]], optional): The size of the padding, should be two int
for height and width. If the type is int, height equals width. Default: ``0`` .
stride (Union[int, tuple[int], list[int]], optional): The size of the stride, should be two int
for height and width. If the type is int, height equals width. Default: ``1`` .
Returns:
A Tensor, with same type as `input` .
Shape:
- Input: :math:`(N, C \times \prod(\text{kernel_size}), L)` or
:math:`(C \times \prod(\text{kernel_size}), L)`
- Output: :math:`(N, C, output\_size[0], output\_size[1], ...)` or
:math:`(C, output\_size[0], output\_size[1], ...)`
Raises:
TypeError: If the data type of `output_size`, `kernel_size`, `stride`, `dilation` or `padding` is not int, tuple or list.
ValueError: If the value of `output_size`, `kernel_size`, `dilation` or `stride` is not
greater than zero, or the number of elements is invalid.
ValueError: If the value of `padding` is less than zero, or the number of elements is invalid.
ValueError: If `input.shape[-2]` is not divisible by the product of `kernel_size`.
ValueError: If `input.shape[-1]` is not equal to the calculated number of sliding blocks `L`.
Supported Platforms:
``Ascend``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.random.rand(16, 64, 25).astype(np.float32))
>>> output = ops.auto_generate.fold_ext(x, (8, 8), [2, 2], [2, 2], [2, 2], [2, 2])
>>> print(output.shape)
(16, 16, 8, 8)
"""
return col2im_ext_op(input, output_size, kernel_size, dilation, padding, stride)
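# A hedged sanity check (plain Python, illustrative only) of the block-count
# formula for L above; it reproduces the 25 sliding blocks used in the example
# (output_size=8, padding=2, dilation=2, kernel_size=2, stride=2 per spatial dim).
# >>> from math import floor
# >>> def num_blocks(out_size, pad, dil, k, stride):
# ...     return floor((out_size + 2 * pad - dil * (k - 1) - 1) / stride) + 1
# >>> num_blocks(8, 2, 2, 2, 2) ** 2  # two spatial dimensions of size 8
# 25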
def cat(tensors, axis=0):
r"""
Connects input tensors along the given axis.
The input data is a tuple or a list of tensors. These tensors have the same rank :math:`R`.
Set the given axis as :math:`m`, and :math:`0 \le m < R`. Set the number of input tensors as :math:`N`.
For the :math:`i`-th tensor :math:`t_i`, it has the shape of :math:`(x_1, x_2, ..., x_{mi}, ..., x_R)`.
:math:`x_{mi}` is the :math:`m`-th dimension of the :math:`t_i`. Then, the shape of the output tensor is
.. math::
(x_1, x_2, ..., \sum_{i=1}^Nx_{mi}, ..., x_R)
Args:
tensors (Union[tuple, list]): A tuple or a list of input tensors.
Suppose there are two tensors in this tuple or list, namely t1 and t2.
To perform `concat` in the axis 0 direction, except for the :math:`0`-th axis,
all other dimensions should be equal, that is,
:math:`t1.shape[1] = t2.shape[1], t1.shape[2] = t2.shape[2], ..., t1.shape[R-1] = t2.shape[R-1]`,
where :math:`R` represents the rank of tensor.
axis (int): The specified axis, whose value is in range :math:`[-R, R)`. Default: ``0`` .
Returns:
Tensor, the shape is :math:`(x_1, x_2, ..., \sum_{i=1}^Nx_{mi}, ..., x_R)`.
The data type is the same with `tensors`.
Raises:
TypeError: If `axis` is not an int.
ValueError: If the tensors in `tensors` have different ranks.
ValueError: If `axis` not in range :math:`[-R, R)`.
ValueError: If the shapes of the tensors in `tensors` differ in any dimension other than `axis`.
ValueError: If `tensors` is an empty tuple or list.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x1 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
>>> input_x2 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
>>> output = ops.cat((input_x1, input_x2))
>>> print(output)
[[0. 1.]
[2. 1.]
[0. 1.]
[2. 1.]]
>>> output = ops.cat((input_x1, input_x2), 1)
>>> print(output)
[[0. 1. 0. 1.]
[2. 1. 2. 1.]]
"""
return concat_impl(tensors, axis)
def conj(input):
r"""
Returns a tensor of complex numbers that are the complex conjugate of each element in input.
The complex numbers in input must be of the form a + bj, where a is the real part and b is the imaginary part.
The complex conjugate returned by this operation is of the form a - bj.
If `input` is real, it is returned unchanged.
Args:
input (Tensor): The input tensor to compute to. Must have numeric type.
Returns:
Tensor, has the same dtype as the `input`.
Raises:
TypeError: If the dtype of `input` is not a numeric type.
TypeError: If the `input` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.asarray(1.3+0.4j), mindspore.complex64)
>>> output = ops.conj(x)
>>> print(output)
(1.3-0.4j)
"""
return conj_op(input)
def contiguous(input):
r"""
Converts a Tensor into a contiguous-memory Tensor that contains the same data as the original Tensor.
Returns:
A contiguous in memory tensor containing the same data as self tensor.
Examples:
>>> import mindspore as ms
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor([[1, 2, 3], [4, 5, 6]], dtype=ms.float32)
>>> y = ops.transpose(x, (1, 0))
>>> y = y.contiguous()
>>> y[:, 1] = 1
>>> print(x)
[[1. 2. 3.]
[4. 5. 6.]]
"""
return contiguous_op(input)
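# An illustrative sketch (assuming Tensor.is_contiguous() is available, as in
# recent MindSpore releases): after a transpose produces a non-contiguous view,
# `contiguous` returns a compact copy that owns its own memory.
# >>> import mindspore as ms
# >>> from mindspore import Tensor, ops
# >>> x = Tensor([[1, 2, 3], [4, 5, 6]], dtype=ms.float32)
# >>> y = ops.transpose(x, (1, 0))
# >>> z = y.contiguous()
# >>> print(z.is_contiguous())  # expected: True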
def conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1):
r"""
Applies a 2D transposed convolution operator over an input image composed of several input planes,
sometimes also called deconvolution (although it is not an actual deconvolution).
Refer to :func:`mindspore.mint.nn.ConvTranspose2D` for more details.
.. warning::
- This is an experimental API that is subject to change or deletion.
- In the scenario where inputs are non-contiguous, `output_padding` must be less than `stride` .
- For Atlas training products, when the dtype of input is float32, the `groups` only supports 1.
Args:
input (Tensor): input tensor of shape :math:`(minibatch, in\_channels, iH, iW)` or :math:`(in\_channels, iH, iW)` .
weight (Tensor): filters of shape :math:`(in\_channels, \frac{out\_channels}{\text{groups}}, kH, kW)` .
bias (Tensor, optional): bias of shape :math:`(out\_channels)` . Default: ``None`` .
stride (Union[int, tuple(int), list[int]], optional): the stride of the convolving kernel. Can be a single number or a
tuple :math:`(sH, sW)` . Default: ``1`` .
padding (Union[int, tuple(int), list[int]], optional): :math:`dilation * (kernel\_size - 1) - padding` zero-padding will
be added to both sides of each dimension in the input. Can be a single number or a tuple :math:`(padH, padW)` .
Default: ``0`` .
output_padding (Union[int, tuple(int), list[int]], optional): additional size added to one side of each dimension in the
output shape. Can be a single number or a tuple :math:`(out\_padH, out\_padW)` . The value of `output_padding` must
be less than `stride` or `dilation` . Default: ``0`` .
groups (int, optional): split input into groups, :math:`in\_channels` should be divisible by the
number of groups. Default: ``1`` .
dilation (Union[int, tuple(int), list[int]], optional): the spacing between kernel elements. Can be a single number or
a tuple :math:`(dH, dW)` . Default: ``1`` .
Returns:
Tensor of shape :math:`(minibatch, out\_channels, oH, oW)` or :math:`(out\_channels, oH, oW)` , where
.. math::
oH = (iH - 1) \times sH - 2 \times padH + dH \times (kH - 1) + out\_padH + 1
.. math::
oW = (iW - 1) \times sW - 2 \times padW + dW \times (kW - 1) + out\_padW + 1
Raises:
TypeError: If `stride`, `padding`, `output_padding` or `dilation` is neither an int nor a tuple or a list.
TypeError: If `groups` is not an int.
ValueError: If the shape of `bias` is not :math:`(out\_channels)` .
ValueError: If `stride` or `dilation` is less than 1.
ValueError: If `padding` or `output_padding` is less than 0.
ValueError: If `stride`, `padding`, `output_padding` or `dilation` is a tuple whose length is not equal to 2.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.ones([1, 4, 5, 5]), mindspore.float32)
>>> weight = Tensor(np.ones([4, 8, 3, 3]), mindspore.float32)
>>> output = ops.conv_transpose2d(x, weight)
>>> print(output.shape)
(1, 8, 7, 7)
"""
return conv_transpose2d_op(input, weight, bias, stride, padding, output_padding, groups, dilation)
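# A hedged sanity check (plain Python, illustrative only) of the output-size
# formulas above; it reproduces the 7x7 spatial shape from the example, which
# uses the defaults stride=1, padding=0, dilation=1, output_padding=0.
# >>> def out_dim(i, s=1, pad=0, d=1, k=3, out_pad=0):
# ...     return (i - 1) * s - 2 * pad + d * (k - 1) + out_pad + 1
# >>> out_dim(5)  # iH = iW = 5 with a 3x3 kernel
# 7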
def convolution(input, weight, bias=None, stride=1, padding=0, dilation=1, transposed=False, output_padding=0, groups=1):
r"""
"""
return convolution_op(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups)
def copy(input):
r"""
"""
return copy_op(input)
def correlate(a, v, mode='valid'):
r"""
Cross-correlation of two 1-dimensional sequences.
This function computes the correlation as generally defined in signal processing texts:
:math:`c_{av}[k] = \sum_{n}{a[n+k] * conj(v[n])}`
with `a` and `v` sequences being zero-padded where necessary and conj being the conjugate.
Note:
- `correlate` is currently only used in `mindscience` scientific computing scenarios and
does not support other usage scenarios.
- `correlate` is not supported on Windows platform yet.
Args:
a (Union[list, tuple, Tensor]): First input sequence.
v (Union[list, tuple, Tensor]): Second input sequence.
mode (str, optional): Specifies padding mode. The optional values are
``"same"`` , ``"valid"`` and ``"full"`` . Default: ``"valid"`` .
- ``"same"``: it returns output of length :math:`max(M, N)`. Boundary
effects are still visible.
- ``"valid"``: it returns output of length :math:`max(M, N) - min(M, N) + 1`.
The convolution product is only given for points where the signals overlap
completely. Values outside the signal boundary have no effect.
- ``"full"``: it returns the convolution at each point of overlap, with
an output shape of :math:`(N + M - 1,)`. At the end-points of the convolution,
the signals do not overlap completely, and boundary effects may be seen.
Returns:
Tensor, Discrete cross-correlation of `a` and `v`.
Raises:
TypeError: If `a` or `v` is not a tensor.
TypeError: If `a` and `v` are of different dtypes.
ValueError: If `a` and `v` are empty or have wrong dimensions.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> from mindspore.ops.auto_generate import correlate
>>> from mindspore import Tensor
>>> output = correlate(Tensor([1., 2., 3.]), Tensor([0., 1., 0.5]))
>>> print(output)
[3.5]
>>> output = correlate(Tensor([1., 2., 3.]), Tensor([0., 1., 0.5]), mode="same")
>>> print(output)
[2. 3.5 3. ]
>>> output = correlate(Tensor([1., 2., 3., 4., 5.]), Tensor([1., 2.]), mode="full")
>>> print(output)
[ 2. 5. 8. 11. 14. 5.]
"""
correlate_op = _get_cache_prim(Correlate)(mode)
return correlate_op(a, v)
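# The definition above matches NumPy's numpy.correlate, so a hedged CPU-side
# cross-check of the first example is possible (illustrative only):
# >>> import numpy as np
# >>> np.correlate([1., 2., 3.], [0., 1., 0.5], mode="valid")
# array([3.5])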
def cos(input):
r"""
Computes cosine of input element-wise.
.. math::
out_i = \cos(x_i)
.. warning::
Using float64 may cause a problem of missing precision.
Args:
input (Tensor): The shape of tensor is
:math:`(N,*)` where :math:`*` means any number of additional dimensions.
Returns:
Tensor, has the same shape as the `input`.
The dtype of output is float32 when dtype of `input` is in
[bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as the `input`.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If dtype of `input` is not supported:
- CPU/GPU: float16, float32, float64, complex64 or complex128.
- Ascend: bool, int8, uint8, int16, int32, int64, float16, float32, float64, complex64 or complex128.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
>>> output = ops.cos(input)
>>> print(output)
[0.971338 0.6748758 0.95233357 0.9959527]
"""
return cos_op(input)
def cosh(input):
r"""
Computes hyperbolic cosine of input element-wise.
.. math::
out_i = \cosh(input_i)
Args:
input (Tensor): The input tensor of hyperbolic cosine function.
Supported dtypes:
- GPU/CPU: float16, float32, float64, complex64 or complex128.
- Ascend: bool, int8, uint8, int16, int32, int64, float16, float32, float64, complex64, complex128 or bfloat16.
Returns:
Tensor, has the same shape as `input`.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If dtype of `input` is not supported:
- CPU/GPU: float16, float32, float64, complex64 or complex128.
- Ascend: bool, int8, uint8, int16, int32, int64, float16, float32, float64, complex64, complex128 or bfloat16.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
>>> output = ops.cosh(x)
>>> print(output)
[1.0289385 1.364684 1.048436 1.0040528]
>>> x = Tensor(2.1, mindspore.float32)
>>> output = ops.cosh(x)
>>> print(output)
4.144313
"""
return cosh_op(input)
def count_nonzero(input, dim=None):
r"""
Counts the number of non-zero values in the tensor `input` along the given `dim`. If no `dim` is specified, all non-zero values in the tensor are counted.
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
input (Tensor): Input data is used to count non-zero numbers. With shape
:math:`(*)` where :math:`*` means any number of additional dimensions.
dim (Union[int, tuple(int), list(int)], optional): The dimension to reduce. Default: ``None``, which means all non-zero elements in the tensor are counted.
Returns:
Tensor, number of nonzero element across dim specified by `dim`.
Raises:
TypeError: If `input` is not tensor.
TypeError: If `dim` is not int, tuple(int), list(int) or None.
ValueError: If any value in `dim` is not in range [-x.ndim, x.ndim).
Supported Platforms:
``Ascend``
Examples:
>>> from mindspore import Tensor, ops
>>> import numpy as np
>>> import mindspore
>>> # case 1: each value specified.
>>> x = Tensor(np.array([[0, 1, 0], [1, 1, 0]]).astype(np.float32))
>>> nonzero_num = ops.count_nonzero(input=x, dim=[0, 1])
>>> print(nonzero_num)
[[3]]
>>> # case 2: all value is default.
>>> nonzero_num = ops.count_nonzero(input=x)
>>> print(nonzero_num)
3
>>> # case 3: dim value was specified 0.
>>> nonzero_num = ops.count_nonzero(input=x, dim=[0,])
>>> print(nonzero_num)
[1 2 0]
>>> # case 4: dim value was specified 1.
>>> nonzero_num = ops.count_nonzero(input=x, dim=[1,])
>>> print(nonzero_num)
[1 2]
"""
return count_nonzero_op(input, dim)
def cummax(input, axis):
r"""
Returns a tuple (values, indices) where `values` is the cumulative maximum value of input Tensor `input`
along the dimension `axis`, and `indices` is the index location of each maximum value.
.. math::
\begin{array}{ll} \\
y_{i} = \max(x_{1}, x_{2}, ... , x_{i})
\end{array}
Args:
input (Tensor): The input Tensor, rank of `input` > 0.
axis (int): The dimension to do the operation over. The value of `axis` must be in the range
`[-input.ndim, input.ndim - 1]`.
Returns:
tuple [Tensor], tuple of 2 Tensors, containing the cumulative maximum of elements and the index.
The shape of each output tensor is the same as input `input`.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If `axis` is not an int.
ValueError: If `axis` is out the range of `[-input.ndim, input.ndim - 1]`.
.. note::
O2 mode is not supported in Ascend.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore import ops
>>> x = Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float32))
>>> output = ops.cummax(x, axis=0)
>>> print(output[0])
[[ 3. 4. 6. 10.]
[ 3. 6. 7. 10.]
[ 4. 6. 8. 10.]
[ 4. 6. 8. 10.]]
>>> print(output[1])
[[0 0 0 0]
[0 1 1 0]
[2 1 2 0]
[2 1 2 0]]
"""
return cummax_impl(input, axis)
def cummin_ext(input, dim):
r"""
Returns a tuple (values, indices) where `values` is the cumulative minimum value of input Tensor `input`
along the dimension `dim`, and `indices` is the index location of each minimum value.
.. math::
\begin{array}{ll} \\
y_{i} = \min(x_{1}, x_{2}, ... , x_{i})
\end{array}
Args:
input (Tensor): The input Tensor. Its rank must be greater than 0.
dim (int): Operation dimension. The value of `dim` must be in the range `[-input.ndim, input.ndim - 1]`.
Returns:
tuple [Tensor], tuple of 2 Tensors, containing the cumulative minimum of elements and the index.
The shape of each output tensor is the same as that of input `input`.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If `input` is a Tensor, but the type is complex or bool.
TypeError: If `dim` is not an int.
ValueError: If `dim` is out the range of `[-input.ndim, input.ndim - 1]`.
.. note::
O2 mode is not supported in Ascend.
Supported Platforms:
``Ascend``
Examples:
>>> from mindspore import Tensor, ops
>>> import mindspore
>>> a = Tensor([-0.2284, -0.6628, 0.0975, 0.2680, -1.3298, -0.4220], mindspore.float32)
>>> output = ops.cummin_ext(a, dim=0)
>>> print(output[0])
[-0.2284 -0.6628 -0.6628 -0.6628 -1.3298 -1.3298]
>>> print(output[1])
[0 1 1 1 4 4]
"""
return cummin_ext_op(input, dim)
def cumsum_ext(input, dim, dtype=None):
r"""
Computes the cumulative sum of input Tensor along `dim`.
.. math::
y_i = x_1 + x_2 + x_3 + ... + x_i
Args:
input (Tensor): The input Tensor.
dim (int): Dim along which the cumulative sum is computed.
dtype (:class:`mindspore.dtype`, optional): The desired dtype of returned Tensor. If specified,
the input Tensor will be cast to `dtype` before the computation. This is useful for preventing overflows.
If not specified, the dtype stays the same as that of the original Tensor. Default: ``None`` .
Returns:
Tensor, the shape of the output Tensor is consistent with the input Tensor's.
Raises:
TypeError: If `input` is not a Tensor.
ValueError: If the `dim` is out of range.
Supported Platforms:
``Ascend``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor
>>> import mindspore.ops as ops
>>> x = Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float32))
>>> # case 1: along the dim 0
>>> y = ops.auto_generate.cumsum_ext(x, 0)
>>> print(y)
[[ 3. 4. 6. 10.]
[ 4. 10. 13. 19.]
[ 8. 13. 21. 26.]
[ 9. 16. 28. 35.]]
>>> # case 2: along the dim 1
>>> y = ops.auto_generate.cumsum_ext(x, 1)
>>> print(y)
[[ 3. 7. 13. 23.]
[ 1. 7. 14. 23.]
[ 4. 7. 15. 22.]
[ 1. 4. 11. 20.]]
"""
return cumsum_ext_op(input, dim, dtype)
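# A minimal sketch (values illustrative) of the overflow note on `dtype` above:
# accumulating int8 values can wrap around, so cast before summation.
# >>> import mindspore
# >>> from mindspore import Tensor, ops
# >>> x = Tensor([100, 100], mindspore.int8)
# >>> y = ops.auto_generate.cumsum_ext(x, 0, dtype=mindspore.int32)  # expected: [100, 200], no int8 wraparound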
def decoder_k_v_cache(cache, update, valid_seq_len, batch_index, seq_len_axis, new_max_seq_len, cur_max_seq_len):
r"""
The DecoderKVCache is used for decoding the KVCache of transformer network.
Args:
cache (Tensor): The cache tensor with data type of int8, uint8, int16, uint16, float16, float32 or int32.
When format is BHSD, cache tensor of shape
:math:`(batch\_size, num\_head, max\_seq\_length, size\_pre\_head)`.
When format is BSD, cache tensor of shape
:math:`(batch\_size, max\_seq\_length, hidden\_size)`.
update (Tensor): The tensor which is used to update the cache tensor. Same data type as the cache tensor.
When format is BHSD, update tensor of shape
:math:`(batch\_size, num\_head, update\_seq\_length, size\_pre\_head)`.
When format is BSD, update tensor of shape
:math:`(batch\_size, update\_seq\_length, hidden\_size)`.
valid_seq_len (Tensor): The valid_seq_len tensor with data type of int64.
Valid_seq_len tensor of shape :math:`(batch\_size)`.
batch_index (Tensor): The batch_index tensor with data type of int64.
Batch_index tensor of shape :math:`(batch\_size)`. Indicates which batch of the cache tensor is going to be updated. Not available for now.
seq_len_axis (Tensor): Indicates which axis is the sequence length, set to ``1`` or ``2``. Not available for now.
new_max_seq_len (Tensor): The new_max_seq_len tensor with data type of int64.
New_max_seq_len tensor of shape :math:`(1)`.
Indicates that the user wants to reinterpret the shape of the cache tensor from
:math:`(batch\_size, num\_head, max\_seq\_length, hidden\_size)` to
:math:`(batch\_size * max\_seq\_length / new\_max\_seq\_length, num\_head, new\_max\_seq\_length, hidden\_size)`
when updating the cache tensor. This does not really change the shape of the `cache` tensor.
cur_max_seq_len (Tensor): The cur_max_seq_len tensor with data type of int64.
Cur_max_seq_len tensor of shape :math:`(1)`. Keeps the current seq_len of the cache tensor. Not available for now.
Outputs:
With same data type and same shape as `cache` tensor.
Supported Platforms:
``Ascend``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore.ops.operations import _inner_ops
>>> b = 4
>>> h = 40
>>> max_s = 1024
>>> s = 1
>>> d = 128
>>> cache = Tensor(np.random.randn(b, h, max_s, d).astype(np.float16))
>>> update = Tensor(np.random.randn(b, h, s, d).astype(np.float16))
>>> valid_seq_len = Tensor(np.random.randint(-1, s, size=b).astype(np.int64))
>>> batch_index = Tensor(np.random.choice(np.arange(-1, b), size=b, replace=False).astype(np.int64))
>>> new_max_seq_len = Tensor(np.random.randn(1).astype(np.int64))
>>> cur_max_seq_len = Tensor(np.random.randn(1).astype(np.int64))
>>> decoder_kv_cache = _inner_ops.DecoderKVCache()
>>> output = decoder_kv_cache(cache, update, valid_seq_len, batch_index, Tensor(2), new_max_seq_len, cur_max_seq_len)
>>> print(cache)
"""
return decoder_k_v_cache_op(cache, update, valid_seq_len, batch_index, seq_len_axis, new_max_seq_len, cur_max_seq_len)
def dense(input, weight, bias=None):
r"""
Applies the dense connected operation to the `input`. The dense function is defined as:
.. math::
output = input * weight^{T} + bias
.. warning::
- This is an experimental API that is subject to change or deletion.
- In PYNATIVE mode, if `bias` is not 1D, the `input` cannot be greater than 6D.
Args:
input (Tensor): Input Tensor of shape :math:`(*, in\_channels)`,
where :math:`*` means any number of additional dimensions.
weight (Tensor): The weight applied to the input.
The shape is :math:`(out\_channels, in\_channels)` or :math:`(in\_channels)`.
bias (Tensor, optional): Additive biases to the output.
The shape is :math:`(out\_channels)` or :math:`()`. Default: ``None``, in which case `bias` is 0.
Returns:
Output whose shape is determined by the shape of the input and the weight.
Raises:
TypeError: If `input` is not Tensor.
TypeError: If `weight` is not Tensor.
TypeError: If `bias` is not Tensor.
RuntimeError: If `bias` is not 1D and `input` is greater than 6D in PYNATIVE mode.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input = Tensor([[-1., 1., 2.], [-3., -3., 1.]], mindspore.float32)
>>> weight = Tensor([[-2., -2., -2.], [0., -1., 0.]], mindspore.float32)
>>> bias = Tensor([0., 1.], mindspore.float32)
>>> output = ops.dense(input, weight, bias)
>>> print(output)
[[-4. 0.]
[10. 4.]]
"""
return dense_op(input, weight, bias)
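# A hedged NumPy cross-check (illustrative only) of the formula
# output = input @ weight.T + bias, reproducing the example above:
# >>> import numpy as np
# >>> inp = np.array([[-1., 1., 2.], [-3., -3., 1.]])
# >>> w = np.array([[-2., -2., -2.], [0., -1., 0.]])
# >>> b = np.array([0., 1.])
# >>> inp @ w.T + b  # expected: [[-4., 0.], [10., 4.]]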
def diag(input):
r"""
Constructs a diagonal tensor with the given diagonal values.
Assume `input` has dimensions :math:`(D_1,... D_k)` , the output is a tensor of
rank 2k with dimensions :math:`(D_1,..., D_k, D_1,..., D_k)` where:
:math:`output[i_1,..., i_k, i_1,..., i_k] = input[i_1,..., i_k]` and 0 everywhere else.
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
input (Tensor): The input tensor.
Returns:
Tensor, has the same dtype as the `input`.
Raises:
TypeError: If `input` is not a Tensor.
ValueError: If rank of `input` is less than 1.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> from mindspore import Tensor, ops
>>> input = Tensor([1, 2, 3, 4]).astype('int32')
>>> output = ops.diag(input)
>>> print(output)
[[1 0 0 0]
[0 2 0 0]
[0 0 3 0]
[0 0 0 4]]
"""
return diag_op(input)
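# A shape-only sketch (illustrative) of the rank-2k behavior described above:
# per the description, a (D1, D2) input yields a (D1, D2, D1, D2) output.
# >>> from mindspore import Tensor, ops
# >>> x = Tensor([[1, 2], [3, 4]]).astype('int32')
# >>> print(ops.diag(x).shape)  # expected: (2, 2, 2, 2)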
def diagonal(input, offset=0, dim1=0, dim2=1):
r"""
Returns specified diagonals of `input`.
If `input` is 2-D, returns the diagonal of `input` with the given offset.
If `input` has more than two
dimensions, then the axes specified by `dim1` and `dim2` are used to determine
the 2-D sub-array whose diagonal is returned. In this case, remove the `dim1` and `dim2` dimensions of `input`
and insert the last dimension of `input` by the diagonal elements determined by `dim1` and `dim2`.
Args:
input (Tensor): Array from which the diagonals are taken.
offset (int, optional): Offset of the diagonal from the main diagonal.
Can be positive or negative. Default: ``0`` .
dim1 (int, optional): Axis to be used as the first axis of the 2-D
sub-arrays from which the diagonals should be taken. Default: ``0`` (first axis).
dim2 (int, optional): Axis to be used as the second axis of the 2-D
sub-arrays from which the diagonals should be taken. Default: ``1`` (second axis).
Returns:
Tensor, if `input` is 2-D, a 1-D array containing the diagonal. If
``input.ndim > 2``, then the dimensions specified by `dim1` and `dim2` are removed,
and a new axis inserted at the end corresponding to the diagonal.
Raises:
TypeError: If `dim1` or `dim2` is not an int.
ValueError: If the input tensor has less than two dimensions.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> from mindspore import Tensor, ops
>>> from mindspore import dtype as mstype
>>> x = Tensor([[0, 1], [2, 3]], mstype.float32)
>>> output = ops.diagonal(x)
>>> print(output)
[0 3]
"""
diagonal_op = _get_cache_prim(Diagonal)(offset, dim1, dim2)
return diagonal_op(input)
def dot(input, other):
r"""
Computes the dot product of two 1D tensors.
Inputs:
- **input** (Tensor) - The first input in the dot product, must be 1D.
- **other** (Tensor) - The second input in the dot product, must be 1D.
Outputs:
Tensor, the shape is [] and the data type is same as `input`.
Raises:
TypeError: If `input` or `other` is not a Tensor.
TypeError: If dtype of `input` or `other` is not float16, float32 or bfloat16.
RuntimeError: If dtypes of `input` and `other` are not the same.
RuntimeError: If shapes of `input` and `other` are not the same.
RuntimeError: If `input` or `other` is not 1D.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, mint
>>> x = Tensor([2.0, 3.0], mindspore.float32)
>>> y = Tensor([2.0, 1.0], mindspore.float32)
>>> output = mint.dot(x, y)
>>> print(output)
7.0
>>> print(output.dtype)
Float32
"""
return dot_op(input, other)
def elu_ext(input, alpha=1.0):
r"""
Exponential Linear Unit activation function.
Applies the exponential linear unit function element-wise.
The activation function is defined as:
.. math::
\text{ELU}(x)= \left\{
\begin{array}{align}
\alpha(e^{x} - 1) & \text{if } x \le 0\\
x & \text{if } x \gt 0\\
\end{array}\right.
Where :math:`x` is the element of input Tensor `input`, :math:`\alpha` is param `alpha`,
it determines the smoothness of ELU.
ELU function graph:
.. image:: ../images/ELU.png
:align: center
Args:
input (Tensor): The input of ELU is a Tensor of any dimension.
alpha (float, optional): The alpha value of ELU, the data type is float.
Default: ``1.0`` .
Returns:
Tensor, has the same shape and data type as `input`.
Raises:
TypeError: If `alpha` is not a float.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
>>> output = ops.auto_generate.elu_ext(x)
>>> print(output)
[[-0.63212055 4. -0.99966455]
[ 2. -0.99326205 9. ]]
"""
return elu_ext_impl(input, alpha)
def elu(input_x, alpha=1.0):
r"""
Exponential Linear Unit activation function.
Applies the exponential linear unit function element-wise.
The activation function is defined as:
.. math::
\text{ELU}(x)= \left\{
\begin{array}{align}
\alpha(e^{x} - 1) & \text{if } x \le 0\\
x & \text{if } x \gt 0\\
\end{array}\right.
Where :math:`x` is the element of input Tensor `input_x`, :math:`\alpha` is param `alpha`,
it determines the smoothness of ELU.
ELU function graph:
.. image:: ../images/ELU.png
:align: center
Args:
input_x (Tensor): The input of ELU is a Tensor of any dimension with data type of float16 or float32.
alpha (float, optional): The alpha value of ELU, the data type is float. Only support ``1.0`` currently.
Default: ``1.0`` .
Returns:
Tensor, has the same shape and data type as `input_x`.
Raises:
TypeError: If `alpha` is not a float.
TypeError: If dtype of `input_x` is neither float16 nor float32.
ValueError: If `alpha` is not equal to 1.0.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
>>> output = ops.elu(x)
>>> print(output)
[[-0.63212055 4. -0.99966455]
[ 2. -0.99326205 9. ]]
"""
elu_op = _get_cache_prim(Elu)(alpha)
return elu_op(input_x)
def embedding_apply_ada_grad(var_handle, lr, grad, keys, global_step, embedding_dim, mask_zero=(0,), padding_key=(0,), padding_key_mask=(1,), completion_key=(0,), completion_key_mask=(1,), _embedding_dim=1, _max_key_num=1):
r"""
"""
return embedding_apply_ada_grad_op(var_handle, lr, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num)
def embedding_apply_adam(var_handle, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, keys, global_step, embedding_dim, mask_zero=(0,), padding_key=(0,), padding_key_mask=(1,), completion_key=(0,), completion_key_mask=(1,), _embedding_dim=1, _max_key_num=1):
r"""
"""
return embedding_apply_adam_op(var_handle, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num)
def embedding_apply_adam_w(var_handle, beta1_power, beta2_power, lr, weight_decay, beta1, beta2, epsilon, grad, keys, max_grad_norm, global_step, embedding_dim, ams_grad=(0,), mask_zero=(0,), padding_key=(0,), padding_key_mask=(1,), completion_key=(0,), completion_key_mask=(1,), _embedding_dim=1, _max_key_num=1):
r"""
"""
return embedding_apply_adam_w_op(var_handle, beta1_power, beta2_power, lr, weight_decay, beta1, beta2, epsilon, grad, keys, max_grad_norm, global_step, embedding_dim, ams_grad, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num)
def embedding_apply_ftrl(var_handle, lr, lr_power, lambda1, lambda2, grad, keys, global_step, embedding_dim, mask_zero=(0,), padding_key=(0,), padding_key_mask=(1,), completion_key=(0,), completion_key_mask=(1,), _embedding_dim=1, _max_key_num=1):
r"""
"""
return embedding_apply_ftrl_op(var_handle, lr, lr_power, lambda1, lambda2, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num)
def embedding_apply_rmsprop(var_handle, lr, rho, momentum, epsilon, grad, keys, global_step, embedding_dim, mask_zero=(0,), padding_key=(0,), padding_key_mask=(1,), completion_key=(0,), completion_key_mask=(1,), _embedding_dim=1, _max_key_num=1):
r"""
"""
return embedding_apply_rmsprop_op(var_handle, lr, rho, momentum, epsilon, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num)
def embedding_apply_sgd(var_handle, lr, grad, keys, global_step, embedding_dim, mask_zero=(0,), padding_key=(0,), padding_key_mask=(1,), completion_key=(0,), completion_key_mask=(1,), _embedding_dim=1, _max_key_num=1):
r"""
"""
return embedding_apply_sgd_op(var_handle, lr, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num)
def embedding_feature_mapping_export(file_path, table_name, global_step, values, embedding_dim, feature_id, offset_id):
r"""
"""
return embedding_feature_mapping_export_op(file_path, table_name, global_step, values, embedding_dim, feature_id, offset_id)
def embedding_feature_mapping_file_size(file_path, table_name, global_step, embedding_dim, only_offset_flag=True):
r"""
"""
return embedding_feature_mapping_file_size_op(file_path, table_name, global_step, embedding_dim, only_offset_flag)
def embedding_feature_mapping_find(table_name, feature_size, num=1):
r"""
"""
return embedding_feature_mapping_find_op(table_name, feature_size, num)
def embedding_feature_mapping_import(file_path, teble_name, feature_size, global_step, embedding_dim, only_offset_flag=True, num=1):
r"""
"""
return embedding_feature_mapping_import_op(file_path, teble_name, feature_size, global_step, embedding_dim, only_offset_flag, num)
def embedding_feature_mapping_insert(table_name, num, feature_id, offset_id):
r"""
"""
return embedding_feature_mapping_insert_op(table_name, num, feature_id, offset_id)
def embedding_feature_mapping_table_size(table_name):
r"""
"""
return embedding_feature_mapping_table_size_op(table_name)
def embedding_feature_mapping_v2(table_name, feature_id, table_total_size, table_actual_size):
r"""
"""
return embedding_feature_mapping_v2_op(table_name, feature_id, table_total_size, table_actual_size)
def embedding_table_evict(var_handle, global_step, steps_to_live=0):
r"""
"""
return embedding_table_evict_op(var_handle, global_step, steps_to_live)
def equal(input, other):
r"""
Computes the equivalence between two tensors element-wise.
The second argument can be a number or a tensor whose shape is broadcastable with the first argument and vice versa.
.. math::
out_{i} =\begin{cases}
& \text{True, if } input_{i} = other_{i} \\
& \text{False, if } input_{i} \ne other_{i}
\end{cases}
Note:
- `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
- The input must be two Tensors, or a Tensor and a Scalar.
- The shapes of the inputs can be broadcasted to each other.
Args:
input (Union[Tensor, Number]): The first input is a number or
a tensor whose data type is number.
other (Union[Tensor, Number]): The second input is a number or
a tensor whose data type is number.
Returns:
Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
Raises:
TypeError: If neither `input` nor `other` is a Tensor or number.Number.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> # case 1: The shape of two inputs are different
>>> input = Tensor([1, 2, 3], mindspore.float32)
>>> output = ops.equal(input, 2.0)
>>> print(output)
[False True False]
>>> # case 2: The shape of two inputs are the same
>>> input = Tensor([1, 2, 3], mindspore.int32)
>>> other = Tensor([1, 2, 4], mindspore.int32)
>>> output = ops.equal(input, other)
>>> print(output)
[ True True False]
"""
return equal_op(input, other)
def erf(input):
r"""
Computes the Gauss error function of `input` element-wise.
.. math::
erf(x)=\frac{2} {\sqrt{\pi}} \int\limits_0^{x} e^{-t^{2}} dt
Args:
input (Tensor): The input tensor of Gaussian error function. :math:`x` in the following formula.
Supported dtypes:
- GPU/CPU: float16, float32, float64.
- Ascend: float16, float32, float64, int64, bool.
Returns:
Tensor, has the same shape as the `input`.
The dtype of output is float32 when dtype of `input` is in
[bool, int64]. Otherwise output has the same dtype as the `input`.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If dtype of `input` is not supported:
- GPU/CPU: float16, float32, float64.
- Ascend: float16, float32, float64, int64, bool.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([-1, 0, 1, 2, 3]), mindspore.float32)
>>> output = ops.erf(input)
>>> print(output)
[-0.8427168 0. 0.8427168 0.99530876 0.99997765]
"""
return erf_op(input)
def erfc(input):
r"""
Computes the complementary error function of `input` element-wise.
.. math::
erfc(x) = 1 - \frac{2} {\sqrt{\pi}} \int\limits_0^{x} e^{-t^{2}} dt
Args:
input (Tensor): The input tensor of the complementary error function, :math:`x` in the above formula.
Supported dtypes:
- Ascend: float16, float32, float64, int64, bool, bfloat16.
- GPU/CPU: float16, float32, float64.
Returns:
Tensor.
The dtype of output is float32 when dtype of `input` is in
[bool, int64]. Otherwise output has the same dtype as the `input`.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If dtype of `input` is not one of the following:
- Ascend: float16, float32, float64, int64, bool, bfloat16.
- GPU/CPU: float16, float32, float64.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([-1, 0, 1, 2, 3]), mindspore.float32)
>>> output = ops.erfc(input)
>>> print(output)
[1.8427008e+00 1.0000000e+00 1.5729921e-01 4.6777348e-03 2.2090497e-05]
"""
return erfc_op(input)
def erfinv(input):
r"""
Returns the result of the inverse error function with `input`. It is defined in the range `(-1, 1)` as:
.. math::
erfinv(erf(x)) = x
where :math:`x` is the `input`.
Args:
input (Tensor): The input tensor to compute with.
Supported dtypes:
- Ascend: float16, float32, int8, int16, int32, int64, uint8, bool.
- GPU/CPU: float16, float32 or float64.
Returns:
Tensor. When the `input` is int8, int16, int32, int64, uint8, bool, the return value type is float32.
Otherwise, the return value type is the same as the input type.
Raises:
TypeError: If dtype of `input` is not as follows:
- Ascend: float16, float32, int8, int16, int32, int64, uint8, bool.
- GPU/CPU: float16, float32 or float64.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([0, 0.5, -0.9]), mindspore.float32)
>>> output = ops.erfinv(input)
>>> print(output)
[ 0. 0.47693613 -1.1630869 ]
"""
return erfinv_op(input)
def exp2(input):
r"""
Calculates the base-2 exponent of the Tensor `input` element by element.
.. math::
out_i = 2^{input_i}
Args:
input (Tensor): The input Tensor.
Returns:
Tensor, which has the same shape as the `input`.
Raises:
TypeError: If `input` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([0.0, 1.0, 2.0, 4.0]), mindspore.float32)
>>> output = ops.exp2(x)
>>> print(output)
[ 1. 2. 4. 16.]
"""
return exp2_op(input)
def exp(input):
r"""
Returns exponential of a tensor element-wise.
.. math::
out_i = e^{x_i}
Args:
input (Tensor): The input tensor. :math:`x` in the following formula.
Returns:
Tensor, has the same shape as the `input`.
Raises:
TypeError: If `input` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([0.0, 1.0, 3.0]), mindspore.float32)
>>> output = ops.exp(input)
>>> print(output)
[ 1. 2.7182817 20.085537]
"""
return exp_op(input)
def expand_as(input, other):
r"""
Broadcasts the shape of the input tensor to match that of another tensor. The rank of the
input must be smaller than or equal to that of the other tensor, and the broadcast rules must be met.
Args:
input (Tensor): The input Tensor.
other (Tensor): The target Tensor. Its shape is the target shape that the input tensor needs to be broadcast to.
Returns:
Tensor, with the given shape of `other` and the same data type as `input`.
Raises:
TypeError: If `other` is not a tensor.
ValueError: If the shape of `other` and `input` are incompatible.
Supported Platforms:
``Ascend``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore.ops.function.array_func import expand_as
>>> x = Tensor(np.array([[1, 2, 3], [1, 2, 3]]).astype(np.float32))
>>> other = Tensor(np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]]).astype(np.float32))
>>> output = expand_as(x, other)
>>> print(output)
[[1. 2. 3.]
[1. 2. 3.]
[1. 2. 3.]]
"""
return expand_as_op(input, other)
def expand_dims(input_x, axis):
r"""
Adds an additional dimension to `input_x` at the given axis, the dimension
of `input_x` should be greater than or equal to 1.
Note:
If the specified axis is a negative number, the index is counted
backward from the end and starts at 1.
Args:
input_x (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
axis (int): Specifies the dimension index at which to expand
the shape of `input_x`. The value of axis must be in the range
`[-input_x.ndim-1, input_x.ndim]`. Only constant value is allowed.
Returns:
Tensor, the shape of tensor is :math:`(1, x_1, x_2, ..., x_R)` if the
value of `axis` is 0. It has the same data type as `input_x`.
Raises:
TypeError: If `axis` is not an int.
ValueError: If `axis` is not in the valid range :math:`[-a.ndim-1, a.ndim]`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
>>> output = ops.expand_dims(input_tensor, 0)
>>> print(output)
[[[2. 2.]
[2. 2.]]]
"""
return expand_dims_op(input_x, axis)
def expm1(input):
r"""
Returns exponential then minus 1 of a tensor element-wise.
.. math::
out_i = e^{x_i} - 1
Args:
input (Tensor): The input Tensor. :math:`x` in the above formula.
Returns:
Tensor, has the same shape as the `input`.
Raises:
TypeError: If `input` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([0.0, 1.0, 2.0, 4.0]), mindspore.float32)
>>> output = ops.expm1(x)
>>> print(output)
[ 0. 1.718282 6.389056 53.598152]
"""
return expm1_op(input)
def extract_image_patches(input_x, ksizes, strides, rates, padding='VALID'):
r"""
Extracts patches from images.
The input tensor must be a 4-D tensor and the data format is NCHW.
Args:
input_x (Tensor): A 4-D tensor whose shape is :math:`(in\_batch, in\_depth, in\_row, in\_col)`.
ksizes (Union[tuple[int], list[int]]): The size of sliding window, must be a tuple or a list of integers,
the size must be 4, and the format is [1, 1, ksize_row, ksize_col].
strides (Union[tuple[int], list[int]]): Distance between the centers of the two consecutive patches,
must be a tuple or list of int, the size must be 4, and the format is [1, 1, stride_row, stride_col].
rates (Union[tuple[int], list[int]]): In each extracted patch, the gap between the corresponding dimension
pixel positions, must be a tuple or a list of integers, the size must be 4, and the format is [1, 1, rate_row, rate_col].
padding (str): The type of padding algorithm, is a string whose value is "same" or "valid",
not case sensitive. Default: "valid".
- same: Means that the patch can take the part beyond the original image, and this part is filled with 0.
- valid: Means that the taken patch area must be completely covered in the original image.
Outputs:
Tensor, a 4-D tensor whose data type is the same as `input_x`, and whose shape
is :math:`(out\_batch, out\_depth, out\_row, out\_col)`, where :math:`out\_batch` is the same
as :math:`in\_batch`, and
.. math::
out\_depth = ksize\_row * ksize\_col * in\_depth
If `padding` is "valid":
.. math::
out\_row = floor((in\_row - (ksize\_row + (ksize\_row - 1) * (rate\_row - 1))) / stride\_row) + 1
out\_col = floor((in\_col - (ksize\_col + (ksize\_col - 1) * (rate\_col - 1))) / stride\_col) + 1
If `padding` is "same":
.. math::
out\_row = floor((in\_row - 1) / stride\_row) + 1
out\_col = floor((in\_col - 1) / stride\_col) + 1
Supported Platforms:
``Ascend`` ``GPU``
"""
extract_image_patches_op = _get_cache_prim(ExtractImagePatches)(ksizes, strides, rates, padding)
return extract_image_patches_op(input_x)
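# A hedged shape check (illustrative; NCHW layout as documented above) of the
# "valid" formulas: out_depth = 3*3*3 = 27 and out_row = floor((32 - 3) / 2) + 1 = 15.
# >>> import numpy as np
# >>> import mindspore
# >>> from mindspore import Tensor
# >>> x = Tensor(np.ones((1, 3, 32, 32)), mindspore.float16)
# >>> out = extract_image_patches(x, [1, 1, 3, 3], [1, 1, 2, 2], [1, 1, 1, 1], "VALID")
# >>> print(out.shape)  # expected: (1, 27, 15, 15)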
def fast_gelu(x):
r"""
Fast Gaussian Error Linear Units activation function.
FastGeLU is defined as follows:
.. math::
\text{output} = \frac {x} {1 + \exp(-1.702 * \left| x \right|)} * \exp(0.851 * (x - \left| x \right|)),
where :math:`x` is the element of the input.
FastGelu function graph:
.. image:: ../images/FastGelu.png
:align: center
Args:
x (Tensor): Input to compute the FastGeLU with data type of float16 or float32.
Returns:
Tensor, with the same type and shape as `x`.
Raises:
TypeError: If dtype of `x` is neither float16 nor float32.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
>>> output = ops.fast_gelu(x)
>>> print(output)
[[-1.5418735e-01 3.9921875e+00 -9.7473649e-06]
[ 1.9375000e+00 -1.0052517e-03 8.9824219e+00]]
"""
return fast_gelu_op(x)
def ffn_ext(x, weight1, weight2, expertTokens=None, bias1=None, bias2=None, scale=None, offset=None, deqScale1=None, deqScale2=None, antiquant_scale1=None, antiquant_scale2=None, antiquant_offset1=None, antiquant_offset2=None, activation='fastgelu', inner_precise=0):
r"""
"""
return ffn_ext_impl(x, weight1, weight2, expertTokens, bias1, bias2, scale, offset, deqScale1, deqScale2, antiquant_scale1, antiquant_scale2, antiquant_offset1, antiquant_offset2, activation, inner_precise)
def fft2(input, s=None, dim=(-2, -1), norm=None):
r"""
Calculates the two dimensional discrete Fourier transform of `input`.
Note:
- `fft2` is currently only used in `mindscience` scientific computing scenarios and
does not support other usage scenarios.
- `fft2` is not supported on Windows platform yet.
Args:
input (Tensor): The input tensor.
Supported dtypes:
- Ascend/CPU: int16, int32, int64, float16, float32, float64, complex64, complex128.
s (tuple[int], optional): Length of the transformed `dim` of the result.
If given, the size of the `dim[i]` axis will be zero-padded or truncated to `s[i]` before calculating `fft2`.
Default: ``None`` , which does not need to process `input`.
dim (tuple[int], optional): The dimension along which to take the one dimensional `fft2`.
Default: ``(-2, -1)`` , which means transform the last two dimension of `input`.
norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
Three modes are defined as follows, where :math:`n = prod(s)`:
- ``"backward"`` (no normalization).
- ``"forward"`` (normalize by :math:`1/n`).
- ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
Returns:
Tensor, The result of `fft2()` function. The default is the same shape as `input`.
If `s` is given, the size of the `dim[i]` axis is changed to `s[i]`.
When the input is int16, int32, int64, float16, float32, complex64, the return value type is complex64.
When the input is float64 or complex128, the return value type is complex128.
Raises:
TypeError: If the `input` type is not Tensor.
TypeError: If the `input` data type is not one of: int32, int64, float32, float64, complex64, complex128.
TypeError: If the type/dtype of `s` and `dim` is not int.
ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
ValueError: If `dim` has duplicate values.
ValueError: If `s` is less than 1.
ValueError: If `s` and `dim` are given but have different shapes.
ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input = ops.ones((4, 4))
>>> out = ops.fft2(input, s=(4, 4), dim=(0, 1), norm="backward")
>>> print(out)
[[16.+0.j 0.+0.j 0.+0.j 0.+0.j]
[ 0.+0.j 0.+0.j 0.+0.j 0.+0.j]
[ 0.+0.j 0.+0.j 0.+0.j 0.+0.j]
[ 0.+0.j 0.+0.j 0.+0.j 0.+0.j]]
"""
return fft2_op(input, s, dim, norm)
def fft(input, n=None, dim=-1, norm=None):
r"""
Calculates the one dimensional discrete Fourier transform of `input`.
Note:
- `fft` is currently only used in `mindscience` scientific computing scenarios and
does not support other usage scenarios.
- `fft` is not supported on Windows platform yet.
Args:
input (Tensor): The input tensor.
Supported dtypes:
- Ascend/CPU: int16, int32, int64, float16, float32, float64, complex64, complex128.
n (int, optional): Length of the transformed `dim` of the result.
If given, the size of the `dim` axis will be zero-padded or truncated to `n` before calculating `fft`.
Default: ``None`` , which does not need to process `input`.
dim (int, optional): The dimension along which to take the one dimensional `fft`.
Default: ``-1`` , which means transform the last dimension of `input`.
norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
Three modes are defined as,
- ``"backward"`` (no normalization).
- ``"forward"`` (normalize by :math:`1/n`).
- ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
Returns:
Tensor, The result of `fft()` function. The default is the same shape as `input`.
If `n` is given, the size of the `dim` axis is changed to `n`.
When the input is int16, int32, int64, float16, float32, complex64, the return value type is complex64.
When the input is float64 or complex128, the return value type is complex128.
Raises:
TypeError: If the `input` type is not Tensor.
TypeError: If the `input` data type is not one of: int32, int64, float32, float64, complex64, complex128.
TypeError: If `n` or `dim` type is not int.
ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
ValueError: If `n` is less than 1.
ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input = Tensor([ 1.6243454, -0.6117564, -0.5281718, -1.0729686])
>>> out = ops.fft(input, n=4, dim=-1, norm="backward")
>>> print(out)
[-0.5885514+0.j 2.1525173-0.46121222j 2.7808986+0.j
2.1525173+0.46121222j]
"""
return fft_op(input, n, dim, norm)
def fftfreq(n, d=1.0, dtype=None):
r"""
Computes the discrete Fourier Transform sample frequencies for a signal of size `n`.
For instance, given a length `n` and a sample spacing `d`, the returned result `f` is:
.. math::
f = [0, 1, ..., (n - 1) // 2, -(n // 2), ..., -1] / (d * n)
Note:
- `fftfreq` is currently only used in `mindscience` scientific computing scenarios and
dose not support other usage scenarios.
- `fftfreq` is not supported on Windows platform yet.
Args:
n (int): Window length.
d (float, optional): Sample spacing (inverse of the sampling rate). Default: ``1.0`` .
dtype (mindspore.dtype, optional): The dtype of the returned frequencies. Default: ``None`` represents float32.
Returns:
Tensor, Array of length ``n`` containing the sample frequencies.
Raises:
ValueError: If `n` is less than 1.
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import ops
>>> out = ops.fftfreq(n=4, d=1.0)
>>> print(out)
[ 0. 0.25 -0.5 -0.25]
"""
return fftfreq_op(n, d, dtype)
def fftn(input, s=None, dim=None, norm=None):
r"""
Computes the N dimensional discrete Fourier transform of `input`.
Note:
- `fftn` is currently only used in `mindscience` scientific computing scenarios and
does not support other usage scenarios.
- `fftn` is not supported on Windows platform yet.
Args:
input (Tensor): The input tensor.
Supported dtypes:
- Ascend/CPU: int16, int32, int64, float16, float32, float64, complex64, complex128.
s (tuple[int], optional): Length of the transformed `dim` of the result.
If given, the size of the `dim[i]` axis will be zero-padded or truncated to `s[i]` before calculating `fftn`.
Default: ``None`` , which does not need to process `input`.
dim (tuple[int], optional): The dimension along which to take the one dimensional `fftn`.
Default: ``None`` , which means transform all dimensions of `input`, or the last `len(s)` dimensions if `s` is given.
norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
Three modes are defined as follows, where :math:`n = prod(s)`:
- ``"backward"`` (no normalization).
- ``"forward"`` (normalize by :math:`1/n`).
- ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
Returns:
Tensor, The result of `fftn()` function. The default is the same shape as `input`.
If `s` is given, the size of the `dim[i]` axis is changed to `s[i]`.
When the input is int16, int32, int64, float16, float32, complex64, the return value type is complex64.
When the input is float64 or complex128, the return value type is complex128.
Raises:
TypeError: If the `input` type is not Tensor.
TypeError: If the `input` data type is not one of: int32, int64, float32, float64, complex64, complex128.
TypeError: If the type/dtype of `s` and `dim` is not int.
ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
ValueError: If `dim` has duplicate values.
ValueError: If `s` is less than 1.
ValueError: If `s` and `dim` are given but have different shapes.
ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input = ops.ones((2, 2, 2))
>>> out = ops.fftn(input, s=(2, 2, 2), dim=(0, 1, 2), norm="backward")
>>> print(out)
[[[8.+0.j 0.+0.j]
[0.+0.j 0.+0.j]]
[[0.+0.j 0.+0.j]
[0.+0.j 0.+0.j]]]
"""
return fftn_op(input, s, dim, norm)
def fftshift(input, dim=None):
r"""
Shift the zero-frequency component to the center of the spectrum.
Note:
- `fftshift` is currently only used in `mindscience` scientific computing scenarios and
does not support other usage scenarios.
- `fftshift` is not supported on Windows platform yet.
Args:
input (Tensor): Input tensor.
dim (Union[int, list(int), tuple(int)], optional): The dimensions which to shift.
Default is ``None``, which shifts all dimensions.
Returns:
output (Tensor), the shifted tensor with the same shape and dtype as `input`.
Raises:
TypeError: If `input` is not a tensor.
TypeError: If the type/dtype of `dim` is not int.
ValueError: If `dim` is out of the range of :math:`[-input.ndim, input.ndim)`.
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> from mindspore.ops import fftshift
>>> from mindspore import Tensor
>>> from mindspore import dtype as mstype
>>> input = Tensor([0, 1, 2, 3, 4, -5, -4, -3, -2, -1], dtype=mstype.int32)
>>> fftshift(input)
Tensor(shape=[10], dtype=Int32, value= [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4])
"""
return fftshift_op(input, dim)
def flatten_ext(input, start_dim=0, end_dim=-1):
r"""
Flatten a tensor along dimensions from `start_dim` to `end_dim`.
Args:
input (Tensor): The input Tensor.
start_dim (int, optional): The first dimension to flatten. Default: ``0`` .
end_dim (int, optional): The last dimension to flatten. Default: ``-1`` .
Returns:
Tensor. If no dimensions are flattened, returns the original `input`, otherwise return the flattened Tensor.
If `input` is a 0-dimensional Tensor, a 1-dimensional Tensor will be returned.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If `start_dim` or `end_dim` is not int.
ValueError: If `start_dim` is greater than `end_dim` after canonicalized.
ValueError: If `start_dim` or `end_dim` is not in range of [-input.dim, input.dim-1].
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.ones(shape=[1, 2, 3, 4]), mindspore.float32)
>>> output = ops.auto_generate.flatten_ext(input_x)
>>> print(output.shape)
(24,)
"""
return flatten_ext_op(input, start_dim, end_dim)
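# A short sketch (shapes illustrative) of partial flattening with
# `start_dim`/`end_dim`: only dimensions 1..2 are merged, the rest are kept.
# >>> import mindspore
# >>> import numpy as np
# >>> from mindspore import Tensor, ops
# >>> x = Tensor(np.ones((1, 2, 3, 4)), mindspore.float32)
# >>> print(ops.auto_generate.flatten_ext(x, start_dim=1, end_dim=2).shape)  # expected: (1, 6, 4)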
def floor_divide(input, other):
r"""
Divides the first input tensor by the second input tensor element-wise and rounds down to the closest integer.
Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
When the inputs are two tensors,
dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
When the inputs are one tensor and one scalar,
the scalar could only be a constant.
.. math::
out_{i} = \text{floor}( \frac{input_i}{other_i})
where the :math:`floor` indicates the Floor operator, for more details,
please refer to the :class:`mindspore.ops.Floor` operator.
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
input (Union[Tensor, Number, bool]): The first input is a number or
a bool or a tensor whose data type is number or bool.
other (Union[Tensor, Number, bool]): The second input is a number or
a bool or a tensor whose data type is number or bool.
Returns:
Tensor, the shape is the same as the one after broadcasting,
and the data type is the one with higher precision or higher digits among the two inputs.
Raises:
TypeError: If `input` and `other` are not the following: Tensor, number.Number or bool.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> import numpy as np
>>> input = Tensor(np.array([2, 4, -1]), mindspore.int32)
>>> other = Tensor(np.array([3, 3, 3]), mindspore.int32)
>>> output = ops.floor_divide(input, other)
>>> print(output)
[ 0 1 -1]
>>> input = Tensor(2.0, mindspore.float32)
>>> other = Tensor(2.0, mindspore.float32)
>>> output = ops.floor_divide(input, other)
>>> print(output)
1.0
"""
return floor_div_op(input, other)
def floor_mod(x, y):
r"""
Computes the remainder of division element-wise. It's a flooring divide.
E.g. :math:`floor(x / y) * y + mod(x, y) = x`.
Inputs of `x` and `y` comply with the implicit type conversion rules to make the data types consistent.
When the inputs are two tensors,
dtypes of them cannot be both bool, and the shapes of them could be broadcast.
When the inputs are one tensor and one scalar,
the scalar could only be a constant.
.. math::
        out_{i} = x_{i} - \text{floor}(x_{i} / y_{i}) \cdot y_{i}
where the :math:`floor` indicates the Floor operator, for more details,
please refer to the :class:`mindspore.ops.Floor` operator.
.. warning::
- Data of input `y` should not be 0, or the maximum value of its dtype will be returned.
        - When the number of elements in the input exceeds 2048, the accuracy of the operator cannot guarantee a
          relative error of less than two thousandths.
        - Due to different architectures, the calculation results of this operator on NPU and CPU may be inconsistent.
        - If the shape is expressed as :math:`(D1, D2, ..., Dn)`, then :math:`D1*D2...*Dn <= 1000000` and :math:`n <= 8`.
Args:
x (Union[Tensor, Number, bool]): The first input is a number or
a bool or a tensor whose data type is number or bool.
y (Union[Tensor, Number, bool]): The second input is a number or
a bool or a tensor whose data type is number or bool.
Returns:
Tensor, the shape is the same as the one after broadcasting,
and the data type is the one with higher precision of the two inputs.
Raises:
TypeError: If neither `x` nor `y` is a Tensor, number.Number, or bool.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([2, 4, -1]), mindspore.int32)
>>> y = Tensor(np.array([3, 3, 3]), mindspore.int32)
>>> output = ops.floor_mod(x, y)
>>> print(output)
[2 1 2]
"""
return floor_mod_op(x, y)
def floor(input):
r"""
Rounds a tensor down to the closest integer element-wise.
.. math::
out_i = \lfloor input_i \rfloor
Args:
input (Tensor): The input tensor. Its supported data types are:
- Ascend: float16, float32, float64, bfloat16, int8, int16, int32, int64, uint8, uint16, uint32, uint64.
- GPU/CPU: float16, float32, float64.
Returns:
Tensor, has the same shape as `input`.
Raises:
TypeError: If `input` is not a Tensor.
        TypeError: If dtype of `input` is not supported.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([1.1, 2.5, -1.5]), mindspore.float32)
>>> output = ops.floor(input)
>>> print(output)
[ 1. 2. -2.]
"""
return floor_op(input)
def frac_ext(input):
r"""
Calculates the fractional part of each element in the input.
.. math::
out_i = input_i - \lfloor |input_i| \rfloor * sgn(input_i)
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
input (Tensor): The input Tensor.
Returns:
Tensor, has the same shape and type as input.
Raises:
TypeError: If `input` is not a Tensor.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor([2, 4.2, -2.5], mindspore.float16)
>>> output = ops.frac_ext(x)
>>> print(output)
[ 0. 0.1992 -0.5 ]
"""
return frac_op(input)
def gather_d(x, dim, index):
r"""
Gathers elements along an axis specified by dim.
Refer to :func:`mindspore.ops.gather_elements` for more detail.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.int32)
>>> index = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int32)
>>> dim = 1
>>> output = ops.gather_d(x, dim, index)
>>> print(output)
[[1 1]
[4 3]]
"""
return gather_d_op(x, dim, index)
def gather_nd(input_x, indices):
r"""
Gathers slices from a tensor by indices.
Using given indices to gather slices from a tensor with a specified shape.
    `indices` is a K-dimensional integer tensor.
    Treat it as a (K-1)-dimensional tensor, each element of which defines a slice of `input_x`:
.. math::
output[(i_0, ..., i_{K-2})] = input\_x[indices[(i_0, ..., i_{K-2})]]
    The last dimension of `indices` cannot exceed the rank of `input_x`:
:math:`indices.shape[-1] <= input\_x.rank`.
Args:
input_x (Tensor): The target tensor to gather values.
indices (Tensor): The index tensor, with int32 or int64 data type.
Returns:
Tensor, has the same type as `input_x` and the shape is
:math:`indices\_shape[:-1] + input\_x\_shape[indices\_shape[-1]:]`.
Raises:
ValueError: If length of shape of `input_x` is less than the last dimension of `indices`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
>>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
>>> output = ops.gather_nd(input_x, indices)
>>> print(output)
[-0.1 0.5]
"""
return gather_nd_op(input_x, indices)
def gather(input_params, input_indices, axis, batch_dims=0):
r"""
Returns the slice of the input tensor corresponding to the elements of `input_indices` on the specified `axis`.
The following figure shows the calculation process of Gather commonly:
.. image:: ../images/Gather.png
where params represents the input `input_params`, and indices represents the index to be sliced `input_indices`.
.. note::
        1. The value of input_indices must be in the range of `[0, input_params.shape[axis])`.
        On CPU and GPU, an error is raised if an out-of-bound index is found. On Ascend, the results may be
        undefined.
2. The data type of input_params cannot be
`bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ on Ascend
platform currently.
Args:
input_params (Tensor): The original Tensor. The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
input_indices (Tensor): Index tensor to be sliced, the shape of tensor is :math:`(y_1, y_2, ..., y_S)`.
Specifies the indices of elements of the original Tensor. The data type can be int32 or int64.
axis (Union(int, Tensor[int])): Specifies the dimension index to gather indices.
It must be greater than or equal to `batch_dims`.
When `axis` is a Tensor, the size must be 1.
        batch_dims (int): Specifies the number of batch dimensions. It must be less than or equal to the rank
of `input_indices`. Default: ``0`` .
Returns:
Tensor, the shape of tensor is
:math:`input\_params.shape[:axis] + input\_indices.shape[batch\_dims:] + input\_params.shape[axis + 1:]`.
Raises:
TypeError: If `axis` is not an int or Tensor.
ValueError: If `axis` is a Tensor and its size is not 1.
TypeError: If `input_params` is not a tensor.
TypeError: If `input_indices` is not a tensor of type int.
        RuntimeError: If `input_indices` is out of range `[0, input_params.shape[axis])` on CPU or GPU.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> # case1: input_indices is a Tensor with shape (5, ).
>>> input_params = Tensor(np.array([1, 2, 3, 4, 5, 6, 7]), mindspore.float32)
>>> input_indices = Tensor(np.array([0, 2, 4, 2, 6]), mindspore.int32)
>>> axis = 0
>>> output = ops.gather(input_params, input_indices, axis)
>>> print(output)
[1. 3. 5. 3. 7.]
>>> # case2: input_indices is a Tensor with shape (2, 2). When the input_params has one dimension,
>>> # the output shape is equal to the input_indices shape.
>>> input_indices = Tensor(np.array([[0, 2], [2, 6]]), mindspore.int32)
>>> axis = 0
>>> output = ops.gather(input_params, input_indices, axis)
>>> print(output)
[[1. 3.]
[3. 7.]]
>>> # case3: input_indices is a Tensor with shape (2, ) and
>>> # input_params is a Tensor with shape (3, 4) and axis is 0.
>>> input_params = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]), mindspore.float32)
>>> input_indices = Tensor(np.array([0, 2]), mindspore.int32)
>>> axis = 0
>>> output = ops.gather(input_params, input_indices, axis)
>>> print(output)
[[ 1. 2. 3. 4.]
[ 9. 10. 11. 12.]]
        >>> # case4: input_indices is a Tensor with shape (3, ) and
>>> # input_params is a Tensor with shape (3, 4) and axis is 1, batch_dims is 1.
>>> input_params = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]), mindspore.float32)
>>> input_indices = Tensor(np.array([0, 2, 1]), mindspore.int32)
>>> axis = 1
>>> batch_dims = 1
>>> output = ops.gather(input_params, input_indices, axis, batch_dims)
>>> print(output)
[ 1. 7. 10.]
"""
gather_op = _get_cache_prim(Gather)(batch_dims)
return gather_op(input_params, input_indices, axis)
def gcd(input, other):
r"""
Computes greatest common divisor of input tensors element-wise.
    The shape of the two inputs should be broadcastable, and their data types should be one of: int16 (supported only on the Ascend backend; in GRAPH mode, only when the graph compilation level is O0), int32, int64.
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
input (Tensor): The first input tensor.
other (Tensor): The second input tensor.
Returns:
Tensor, the shape is the same as the one after broadcasting, and the data type is one
with higher precision in the two inputs.
Raises:
        TypeError: If the data type of `input` or `other` is not int32 or int64.
ValueError: If shapes of two inputs are not broadcastable.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([7, 8, 9]))
>>> other = Tensor(np.array([14, 6, 12]))
>>> y = ops.gcd(input, other)
>>> print(y)
[7 2 3]
"""
return gcd_op(input, other)
def geqrf(input):
r"""
Decomposes a matrix into the product of an orthogonal matrix `Q` and an upper triangular matrix `R`.
The process is called QR decomposition: :math:`A = QR`.
Both `Q` and `R` matrices are stored in the same output tensor `y`.
The elements of `R` are stored on and above the diagonal, whereas elementary reflectors
(or Householder vectors) implicitly defining matrix `Q` are stored below the diagonal.
This function returns two tensors (`y`, `tau`).
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
input (Tensor): Tensor of shape :math:`(*, m, n)`, input must be a matrix greater than or equal to 2D,
with dtype of float32, float64, complex64, complex128.
Returns:
- **y** (Tensor) - Tensor of shape :math:`(*, m, n)`, has the same dtype as the `input`.
- **tau** (Tensor) - Tensor of shape :math:`(*, p)` and :math:`p = min(m, n)`, has the same dtype as the `input`.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If the dtype of `input` is not float32, float64, complex64 or complex128.
ValueError: If dimension of `input` is less than 2.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([[-2.0, -1.0], [1.0, 2.0]]).astype(np.float32))
>>> y, tau = ops.geqrf(input)
>>> print(y)
[[ 2.236068 1.7888544]
[-0.236068 1.3416407]]
>>> print(tau)
[1.8944271 0. ]
"""
return geqrf_op(input)
def greater_equal(input, other):
r"""
Given two Tensors, compares them element-wise to check if each element in the first
Tensor is greater than or equal to the corresponding element in the second Tensor.
Refer to :func:`mindspore.ops.ge` for more details.
Args:
input (Union[Tensor, Number]): The first input is a number or
a bool or a tensor whose data type is number or bool.
other (Union[Tensor, Number]): When the first input is a Tensor, the second input should be a Number or Tensor with data type number or bool.
When the first input is a Scalar, the second input must be a Tensor with data type number or bool.
Returns:
Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> other = Tensor(np.array([1, 1, 4]), mindspore.int32)
>>> output = ops.greater_equal(input, other)
>>> print(output)
[True True False]
"""
return greater_equal_op(input, other)
def greater(input, other):
r"""
Compare the value of the input parameters :math:`input > other` element-wise, and the output result is a bool value.
Refer to :func:`mindspore.ops.gt` for more details.
Args:
input (Union[Tensor, Number]): The first input is a Number or a tensor whose data type is
`number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
`bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ .
other (Union[Tensor, Number]): The second input, which is a Number or a tensor whose data type is
`number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
`bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
Returns:
Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> other = Tensor(np.array([1, 1, 4]), mindspore.int32)
>>> output = ops.greater(input, other)
>>> print(output)
[False True False]
"""
return greater_op(input, other)
def hardtanh(input, min_val=-1, max_val=1):
r"""
"""
return hardtanh_op(input, min_val, max_val)
def hfft2(input, s=None, dim=(-2, -1), norm=None):
r"""
    Calculates the two dimensional discrete Fourier transform of a Hermitian symmetric `input`.
Note:
- `hfft2` is currently only used in `mindscience` scientific computing scenarios and
          does not support other usage scenarios.
- `hfft2` is not supported on Windows platform yet.
Args:
input (Tensor): The input tensor.
Supported dtypes:
- Ascend/CPU: int16, int32, int64, float16, float32, float64, complex64, complex128.
s (tuple[int], optional): Length of the transformed `dim` of the result.
If given, the size of the `dim[i]` axis will be zero-padded or truncated to `s[i]` before calculating `hfft2`.
Default: ``None`` , which does not need to process `input`.
dim (tuple[int], optional): The dimension along which to take the one dimensional `hfft2`.
            Default: ``(-2, -1)`` , which means transform the last two dimensions of `input`.
norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
        Three modes are defined as, where :math:`n = prod(s)`
- ``"backward"`` (no normalization).
- ``"forward"`` (normalize by :math:`1/n`).
- ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
Returns:
Tensor, The result of `hfft2()` function.
If `s` is given, result.shape[dim[i]] is s[i], and for the last transformed dim,
result.shape[dim[-1]] is :math:`(s[-1] - 1) * 2`, otherwise :math:`(input.shape[dim[-1]] - 1) * 2`.
When the input is int16, int32, int64, float16, float32, complex64, the return value type is complex64.
When the input is float64 or complex128, the return value type is complex128.
Raises:
TypeError: If the `input` type is not Tensor.
TypeError: If the `input` data type is not one of: int32, int64, float32, float64, complex64, complex128.
TypeError: If the type/dtype of `s` and `dim` is not int.
ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
ValueError: If `dim` has duplicate values.
ValueError: If `s` is less than 1.
ValueError: If `s` and `dim` are given but have different shapes.
ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input = ops.ones((4, 4))
>>> out = ops.hfft2(input, s=(4, 4), dim=(0, 1), norm="backward")
>>> print(out)
[[16. 0. 0. 0.]
[ 0. 0. 0. 0.]
[ 0. 0. 0. 0.]
[ 0. 0. 0. 0.]]
"""
return hfft2_op(input, s, dim, norm)
def hfft(input, n=None, dim=-1, norm=None):
r"""
    Calculates the one dimensional discrete Fourier transform of a Hermitian symmetric `input` signal.
Note:
- `hfft` is currently only used in `mindscience` scientific computing scenarios and
          does not support other usage scenarios.
- `hfft` is not supported on Windows platform yet.
Args:
input (Tensor): The input tensor.
Supported dtypes:
- Ascend/CPU: int16, int32, int64, float16, float32, float64, complex64, complex128.
n (int, optional): Length of the transformed `dim` of the result.
If given, the size of the `dim` axis will be zero-padded or truncated to `n` before calculating `hfft`.
Default: ``None`` , which does not need to process `input`.
dim (int, optional): The dimension along which to take the one dimensional `hfft`.
Default: ``-1`` , which means transform the last dimension of `input`.
norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
Three modes are defined as,
- ``"backward"`` (no normalization).
- ``"forward"`` (normalize by :math:`1/n`).
- ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
Returns:
Tensor, The result of `hfft()` function.
        If `n` is given, result.shape[dim] is :math:`(n - 1) * 2`, otherwise :math:`(input.shape[dim] - 1) * 2`.
When the `input` is int16, int32, int64, float16, float32, complex64, the return value type is float32.
When the `input` is float64 or complex128, the return value type is float64.
Raises:
TypeError: If the `input` type is not Tensor.
TypeError: If the `input` data type is not one of: int32, int64, float32, float64, complex64, complex128.
TypeError: If `n` or `dim` type is not int.
ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
ValueError: If `n` is less than 1.
ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input = Tensor([ 1.6243454, -0.6117564, -0.5281718, -1.0729686])
>>> out = ops.hfft(input, n=4, dim=-1, norm="backward")
>>> print(out)
[-0.12733912 2.1525173 2.3196864 2.1525173 ]
"""
return hfft_op(input, n, dim, norm)
def hfftn(input, s=None, dim=None, norm=None):
r"""
    Calculates the N dimensional discrete Fourier transform of a Hermitian symmetric `input`.
Note:
- `hfftn` is currently only used in `mindscience` scientific computing scenarios and
          does not support other usage scenarios.
- `hfftn` is not supported on Windows platform yet.
Args:
input (Tensor): The input tensor.
Supported dtypes:
- Ascend/CPU: int16, int32, int64, float16, float32, float64, complex64, complex128.
s (tuple[int], optional): Length of the transformed `dim` of the result.
If given, the size of the `dim[i]` axis will be zero-padded or truncated to `s[i]` before calculating `hfftn`.
Default: ``None`` , which does not need to process `input`.
        dim (tuple[int], optional): The dimension along which to take the one dimensional `hfftn`.
            Default: ``None`` , which means transform all dimensions of `input`, or the last `len(s)` dimensions if `s` is given.
norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
        Three modes are defined as, where :math:`n = prod(s)`
- ``"backward"`` (no normalization).
- ``"forward"`` (normalize by :math:`1/n`).
- ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
Returns:
Tensor, The result of `hfftn()` function.
If `s` is given, result.shape[dim[i]] is s[i], and for the last transformed dim,
result.shape[dim[-1]] is :math:`(s[-1] - 1) * 2`, otherwise :math:`(input.shape[dim[-1]] - 1) * 2`.
When the input is int16, int32, int64, float16, float32, complex64, the return value type is complex64.
When the input is float64 or complex128, the return value type is complex128.
Raises:
TypeError: If the `input` type is not Tensor.
TypeError: If the `input` data type is not one of: int32, int64, float32, float64, complex64, complex128.
TypeError: If the type/dtype of `s` and `dim` is not int.
ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
ValueError: If `dim` has duplicate values.
ValueError: If `s` is less than 1.
ValueError: If `s` and `dim` are given but have different shapes.
ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input = ops.ones((4, 4))
>>> out = ops.hfftn(input, s=(4, 4), dim=(0, 1), norm="backward")
>>> print(out)
[[16. 0. 0. 0.]
[ 0. 0. 0. 0.]
[ 0. 0. 0. 0.]
[ 0. 0. 0. 0.]]
"""
return hfftn_op(input, s, dim, norm)
def histc_ext(input, bins=100, min=0, max=0):
r"""
Computes the histogram of a tensor.
The elements are sorted into equal width bins between `min` and `max`.
If `min` and `max` are both zero, the minimum and maximum values of the data are used.
Elements lower than min or higher than max are ignored.
.. warning::
This is an experimental API that is subject to change or deletion.
        If `input` is of type int64, its valid values must fit within int32; values exceeding this range may cause precision errors.
Args:
input (Tensor): the input tensor.
        bins (int, optional): Number of histogram bins. If specified, must be positive. Default: ``100`` .
        min (Union[int, float], optional): the lower end of the range (inclusive). Default: ``0`` .
        max (Union[int, float], optional): the upper end of the range (inclusive). Default: ``0`` .
Returns:
A 1-D Tensor, has the same type as `input` with the shape :math:`(bins, )`.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If `input` datatype is not in support list.
TypeError: If attr `min` or `max` is not float or int.
TypeError: If attr `bins` is not int.
ValueError: If attr value `min` > `max`.
ValueError: If attr `bins` <= 0.
Supported Platforms:
``Ascend``
Examples:
>>> from mindspore import Tensor, ops
>>> x = Tensor([1., 2, 1])
>>> y = ops.histc_ext(x, bins=4, min=0, max=3)
>>> print(y)
[0 2 1 0]
"""
return histc_ext_op(input, bins, min, max)
def hardshrink(input, lambd=0.5):
r"""
Hard Shrink activation function. Calculates the output according to the input elements.
The formula is defined as follows:
.. math::
\text{HardShrink}(x) =
\begin{cases}
x, & \text{ if } x > \lambda \\
x, & \text{ if } x < -\lambda \\
0, & \text{ otherwise }
\end{cases}
HShrink Activation Function Graph:
.. image:: ../images/HShrink.png
:align: center
Args:
input (Tensor): The input of Hard Shrink. Supported dtypes:
- Ascend: float16, float32, bfloat16.
- CPU/GPU: float16, float32.
lambd (number, optional): The threshold :math:`\lambda` defined by the Hard Shrink formula.
Default: ``0.5`` .
Returns:
Tensor, has the same data type and shape as the input `input`.
Raises:
TypeError: If `lambd` is not a float, int or bool.
TypeError: If `input` is not a tensor.
TypeError: If dtype of `input` is not float16, float32 or bfloat16.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([[0.5, 1, 2.0], [0.0533, 0.0776, -2.1233]]), mindspore.float32)
>>> output = ops.hardshrink(input)
>>> print(output)
[[ 0. 1. 2. ]
[ 0. 0. -2.1233]]
"""
return hshrink_impl(input, lambd)
def hardsigmoid(input):
r"""
Hard Sigmoid activation function. Calculates the output according to the input elements.
Hard Sigmoid is defined as:
.. math::
        \text{HardSigmoid}(input) =
\begin{cases}
0, & \text{ if } input \leq -3, \\
1, & \text{ if } input \geq +3, \\
input/6 + 1/2, & \text{ otherwise }
\end{cases}
HSigmoid Activation Function Graph:
.. image:: ../images/HSigmoid.png
:align: center
Args:
input (Tensor): The input Tensor.
Returns:
Tensor, with the same type and shape as the `input`.
Raises:
TypeError: If `input` is not a Tensor.
        TypeError: If dtype of `input` is neither int nor float.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
>>> output = ops.hardsigmoid(input)
>>> print(output)
[0.3333 0.1666 0.5 0.8335 0.6665]
"""
return hsigmoid_op(input)
def hardswish(input):
r"""
Hard Swish activation function. The input is a Tensor with any valid shape.
Hard swish is defined as:
.. math::
\text{Hardswish}(input) =
\begin{cases}
0, & \text{ if } input \leq -3, \\
input, & \text{ if } input \geq +3, \\
input*(input + 3)/6, & \text{ otherwise }
\end{cases}
HSwish Activation Function Graph:
.. image:: ../images/HSwish.png
:align: center
Args:
input (Tensor): The input Tensor.
Returns:
Tensor, with the same type and shape as the `input`.
Raises:
TypeError: If `input` is not a Tensor.
        TypeError: If dtype of `input` is neither int nor float.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
>>> output = ops.hardswish(input)
>>> print(output)
[-0.3333 -0.3333 0 1.667 0.6665]
"""
return hswish_op(input)
def deepcopy(input_x):
r"""
Returns a deepcopy of input tensor.
Args:
input_x (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
Returns:
Tensor, a deepcopy of `input_x`.
Raises:
TypeError: If `input_x` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input = Tensor([[0, 1], [2, 1]], dtype=mindspore.int32)
>>> output = ops.deepcopy(input)
>>> print(output)
[[0 1]
[2 1]]
"""
return identity_op(input_x)
def ifft2(input, s=None, dim=(-2, -1), norm=None):
r"""
Computes the two dimensional inverse discrete Fourier transform of `input`.
Note:
- `ifft2` is currently only used in `mindscience` scientific computing scenarios and
          does not support other usage scenarios.
- `ifft2` is not supported on Windows platform yet.
Args:
input (Tensor): The input tensor.
Supported dtypes:
- Ascend/CPU: int16, int32, int64, float16, float32, float64, complex64, complex128.
s (tuple[int], optional): Length of the transformed `dim` of the result.
If given, the size of the `dim[i]` axis will be zero-padded or truncated to `s[i]` before calculating `ifft2`.
Default: ``None`` , which does not need to process `input`.
dim (tuple[int], optional): The dimension along which to take the one dimensional `ifft2`.
            Default: ``(-2, -1)`` , which means transform the last two dimensions of `input`.
norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
        Three modes are defined as, where :math:`n = prod(s)`
- ``"backward"`` (normalize by :math:`1/n`).
- ``"forward"`` (no normalization).
- ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
Returns:
Tensor, The result of `ifft2()` function. The default is the same shape as `input`.
If `s` is given, the size of the `dim[i]` axis is changed to `s[i]`.
When the input is int16, int32, int64, float16, float32, complex64, the return value type is complex64.
When the input is float64 or complex128, the return value type is complex128.
Raises:
TypeError: If the `input` type is not Tensor.
TypeError: If the `input` data type is not one of: int32, int64, float32, float64, complex64, complex128.
TypeError: If the type/dtype of `s` and `dim` is not int.
ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
ValueError: If `dim` has duplicate values.
ValueError: If `s` is less than 1.
ValueError: If `s` and `dim` are given but have different shapes.
ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input = ops.ones((4, 4))
>>> out = ops.ifft2(input, s=(4, 4), dim=(0, 1), norm="backward")
>>> print(out)
[[1.+0.j 0.+0.j 0.+0.j 0.+0.j]
[0.+0.j 0.+0.j 0.+0.j 0.+0.j]
[0.+0.j 0.+0.j 0.+0.j 0.+0.j]
[0.+0.j 0.+0.j 0.+0.j 0.+0.j]]
"""
return ifft2_op(input, s, dim, norm)
def ifft(input, n=None, dim=-1, norm=None):
r"""
Calculates the inverse of `fft()`.
Note:
- `ifft` is currently only used in `mindscience` scientific computing scenarios and
          does not support other usage scenarios.
- `ifft` is not supported on Windows platform yet.
Args:
input (Tensor): The input tensor.
Supported dtypes:
- Ascend/CPU: int16, int32, int64, float16, float32, float64, complex64, complex128.
n (int, optional): Length of the transformed `dim` of the result.
If given, the size of the `dim` axis will be zero-padded or truncated to `n` before calculating `ifft`.
Default: ``None`` , which does not need to process `input`.
dim (int, optional): The dimension along which to take the one dimensional `ifft`.
Default: ``-1`` , which means transform the last dimension of `input`.
norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
Three modes are defined as,
- ``"backward"`` (normalize by :math:`1/n`).
- ``"forward"`` (no normalization).
- ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
Returns:
Tensor, The result of `ifft()` function. The default is the same shape as `input`.
If `n` is given, the size of the `dim` axis is changed to `n`.
When the input is int16, int32, int64, float16, float32, complex64, the return value type is complex64.
When the input is float64 or complex128, the return value type is complex128.
Raises:
TypeError: If the `input` type is not Tensor.
TypeError: If the `input` data type is not one of: int32, int64, float32, float64, complex64, complex128.
TypeError: If `n` or `dim` type is not int.
ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
ValueError: If `n` is less than 1.
ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input = Tensor([ 1.6243454, -0.6117564, -0.5281718, -1.0729686])
>>> out = ops.ifft(input, n=4, dim=-1, norm="backward")
>>> print(out)
[-0.14713785+0.j 0.5381293 +0.11530305j 0.69522464+0.j
0.5381293 -0.11530305j]
"""
return ifft_op(input, n, dim, norm)
def ifftn(input, s=None, dim=None, norm=None):
r"""
Computes the N dimensional inverse discrete Fourier transform of `input`.
Note:
- `ifftn` is currently only used in `mindscience` scientific computing scenarios and
          does not support other usage scenarios.
- `ifftn` is not supported on Windows platform yet.
Args:
input (Tensor): The input tensor.
Supported dtypes:
- Ascend/CPU: int16, int32, int64, float16, float32, float64, complex64, complex128.
s (tuple[int], optional): Length of the transformed `dim` of the result.
If given, the size of the `dim[i]` axis will be zero-padded or truncated to `s[i]` before calculating `ifftn`.
Default: ``None`` , which does not need to process `input`.
dim (tuple[int], optional): The dimension along which to take the one dimensional `ifftn`.
            Default: ``None`` , which means transform all dimensions of `input`, or the last `len(s)` dimensions if `s` is given.
norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
        Three modes are defined as, where :math:`n = prod(s)`
- ``"backward"`` (normalize by :math:`1/n`).
- ``"forward"`` (no normalization).
- ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
Returns:
Tensor, The result of `ifftn()` function. The default is the same shape as `input`.
If `s` is given, the size of the `dim[i]` axis is changed to `s[i]`.
When the input is int16, int32, int64, float16, float32, complex64, the return value type is complex64.
When the input is float64 or complex128, the return value type is complex128.
Raises:
TypeError: If the `input` type is not Tensor.
TypeError: If the `input` data type is not one of: int32, int64, float32, float64, complex64, complex128.
TypeError: If the type/dtype of `s` and `dim` is not int.
ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
ValueError: If `dim` has duplicate values.
ValueError: If `s` is less than 1.
ValueError: If `s` and `dim` are given but have different shapes.
ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input = ops.ones((2, 2, 2))
>>> out = ops.ifftn(input, s=(2, 2, 2), dim=(0, 1, 2), norm="backward")
>>> print(out)
[[[1.+0.j 0.+0.j]
[0.+0.j 0.+0.j]]
[[0.+0.j 0.+0.j]
[0.+0.j 0.+0.j]]]
"""
return ifftn_op(input, s, dim, norm)
def ifftshift(input, dim=None):
r"""
The inverse of :func:`mindspore.ops.fftshift` .
Note:
- `ifftshift` is currently only used in `mindscience` scientific computing scenarios and
          does not support other usage scenarios.
- `ifftshift` is not supported on Windows platform yet.
Args:
input (Tensor): Input tensor.
dim (Union[int, list(int), tuple(int)], optional): The dimensions which to shift.
Default is ``None``, which shifts all dimensions.
Returns:
output (Tensor), the shifted tensor with the same shape and dtype as `input`.
Raises:
TypeError: If `input` is not a tensor.
TypeError: If the type/dtype of `dim` is not int.
ValueError: If `dim` is out of the range of :math:`[-input.ndim, input.ndim)`.
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> from mindspore.ops import fftshift, ifftshift
>>> from mindspore import Tensor
>>> from mindspore import dtype as mstype
>>> input = Tensor([0, 1, 2, 3, 4, -5, -4, -3, -2, -1], dtype=mstype.int32)
>>> ifftshift(fftshift(input))
Tensor(shape=[10], dtype=Int32, value= [ 0, 1, 2, 3, 4, -5, -4, -3, -2, -1])
"""
return ifftshift_op(input, dim)
def ihfft2(input, s=None, dim=(-2, -1), norm=None):
r"""
Computes the two dimensional inverse discrete Fourier transform of real `input`.
Note:
- `ihfft2` is currently only used in `mindscience` scientific computing scenarios and
          does not support other usage scenarios.
- `ihfft2` is not supported on Windows platform yet.
Args:
input (Tensor): The input tensor.
Supported dtypes:
- Ascend/CPU: int16, int32, int64, float16, float32, float64.
s (tuple[int], optional): Length of the transformed `dim` of the result.
If given, the size of the `dim[i]` axis will be zero-padded or truncated to `s[i]` before calculating `ihfft2`.
Default: ``None`` , which does not need to process `input`.
dim (tuple[int], optional): The dimension along which to take the one dimensional `ihfft2`.
            Default: ``(-2, -1)`` , which means transform the last two dimensions of `input`.
norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
        Three modes are defined as, where :math:`n = prod(s)`
- ``"backward"`` (normalize by :math:`1/n`).
- ``"forward"`` (no normalization).
- ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
Returns:
Tensor, The result of `ihfft2()` function.
If `s` is given, result.shape[dim[i]] is s[i], and for the last transformed dim,
result.shape[dim[-1]] is :math:`s[-1] // 2 + 1`, otherwise :math:`input.shape[dim[-1]] // 2 + 1`.
When the input is int16, int32, int64, float16, float32, the return value type is complex64.
When the input is float64, the return value type is complex128.
Raises:
TypeError: If the `input` type is not Tensor.
TypeError: If the `input` data type is not one of: int32, int64, float32, float64.
TypeError: If the type/dtype of `s` and `dim` is not int.
ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
ValueError: If `dim` has duplicate values.
ValueError: If `s` is less than 1.
ValueError: If `s` and `dim` are given but have different shapes.
ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input = ops.ones((4, 4))
>>> out = ops.ihfft2(input, s=(4, 4), dim=(0, 1), norm="backward")
>>> print(out)
[[1.-0.j 0.-0.j 0.-0.j]
[0.-0.j 0.-0.j 0.-0.j]
[0.-0.j 0.-0.j 0.-0.j]
[0.-0.j 0.-0.j 0.-0.j]]
"""
return ihfft2_op(input, s, dim, norm)
def ihfft(input, n=None, dim=-1, norm=None):
r"""
Calculates the inverse of `hfft()`.
Note:
- `ihfft` is currently only used in `mindscience` scientific computing scenarios and
          does not support other usage scenarios.
- `ihfft` is not supported on Windows platform yet.
Args:
input (Tensor): The input tensor.
Supported dtypes:
- Ascend/CPU: int16, int32, int64, float16, float32, float64.
n (int, optional): Length of the transformed `dim` of the result.
If given, the size of the `dim` axis will be zero-padded or truncated to `n` before calculating `ihfft`.
Default: ``None`` , which does not need to process `input`.
dim (int, optional): The dimension along which to take the one dimensional `ihfft`.
Default: ``-1`` , which means transform the last dimension of `input`.
norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
Three modes are defined as,
- ``"backward"`` (no normalization).
- ``"forward"`` (normalize by :math:`1*n`).
- ``"ortho"`` (normalize by :math:`1*\sqrt{n}`).
Returns:
Tensor, The result of `ihfft()` function.
        If `n` is given, result.shape[dim] is :math:`n // 2 + 1`, otherwise :math:`input.shape[dim] // 2 + 1`.
When the input is int16, int32, int64, float16, float32, the return value type is complex64.
When the input is float64, the return value type is complex128.
Raises:
TypeError: If the `input` type is not Tensor.
TypeError: If the `input` data type is not one of: int32, int64, float32, float64.
TypeError: If `n` or `dim` type is not int.
ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
ValueError: If `n` is less than 1.
ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input = Tensor([ 1.6243454, -0.6117564, -0.5281718, -1.0729686])
>>> out = ops.ihfft(input, n=4, dim=-1, norm="backward")
>>> print(out)
[-0.14713785-0.j 0.5381293 +0.11530305j 0.69522464-0.j ]
"""
return ihfft_op(input, n, dim, norm)
def ihfftn(input, s=None, dim=None, norm=None):
r"""
Computes the N dimensional inverse discrete Fourier transform of real `input`.
Note:
- `ihfftn` is currently only used in `mindscience` scientific computing scenarios and
          does not support other usage scenarios.
- `ihfftn` is not supported on Windows platform yet.
Args:
input (Tensor): The input tensor.
Supported dtypes:
- Ascend/CPU: int16, int32, int64, float16, float32, float64.
s (tuple[int], optional): Length of the transformed `dim` of the result.
If given, the size of the `dim[i]` axis will be zero-padded or truncated to `s[i]` before calculating `ihfftn`.
Default: ``None`` , which does not need to process `input`.
        dim (tuple[int], optional): The dimension along which to take the one dimensional `ihfftn`.
            Default: ``None`` , which means transform all dimensions of `input`, or the last `len(s)` dimensions if `s` is given.
norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
        Three modes are defined as, where :math:`n = prod(s)`
- ``"backward"`` (normalize by :math:`1/n`).
- ``"forward"`` (no normalization).
- ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
Returns:
Tensor, The result of `ihfftn()` function.
If `s` is given, result.shape[dim[i]] is s[i], and for the last transformed dim,
result.shape[dim[-1]] is :math:`s[-1] // 2 + 1`, otherwise :math:`input.shape[dim[-1]] // 2 + 1`.
When the input is int16, int32, int64, float16, float32, the return value type is complex64.
When the input is float64, the return value type is complex128.
Raises:
TypeError: If the `input` type is not Tensor.
TypeError: If the `input` data type is not one of: int32, int64, float32, float64.
TypeError: If the type/dtype of `s` and `dim` is not int.
ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
ValueError: If `dim` has duplicate values.
ValueError: If `s` is less than 1.
ValueError: If `s` and `dim` are given but have different shapes.
ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input = ops.ones((4, 4))
>>> out = ops.ihfftn(input, s=(4, 4), dim=(0, 1), norm="backward")
>>> print(out)
        [[1.-0.j 0.-0.j 0.-0.j]
         [0.-0.j 0.-0.j 0.-0.j]
         [0.-0.j 0.-0.j 0.-0.j]
         [0.-0.j 0.-0.j 0.-0.j]]
"""
return ihfftn_op(input, s, dim, norm)
def unfold_ext(input, kernel_size, dilation=1, padding=0, stride=1):
r"""
Extracts sliding local blocks from a batched input tensor.
Consider a batched input tensor of shape :math:`(N, C, *)`,
where :math:`N` is the batch dimension, :math:`C` is the channel dimension,
and :math:`*` represent arbitrary spatial dimensions. This operation flattens
    each sliding `kernel_size`-sized block within the spatial dimensions
of `input` into a column (i.e., last dimension) of a 3-D output
tensor of shape :math:`(N, C \times \prod(\text{kernel_size}), L)`, where
:math:`C \times \prod(\text{kernel_size})` is the total number of values
within each block (a block has :math:`\prod(\text{kernel_size})` spatial
locations each containing a `C`-channeled vector), and :math:`L` is
the total number of such blocks:
.. math::
L = \prod_d \left\lfloor\frac{\text{spatial_size}[d] + 2 \times \text{padding}[d] %
- \text{dilation}[d] \times (\text{kernel_size}[d] - 1) - 1}{\text{stride}[d]} + 1\right\rfloor,
where :math:`\text{spatial_size}` is formed by the spatial dimensions
of `input` (:math:`*` above), and :math:`d` is over all spatial
dimensions.
Therefore, indexing `output` at the last dimension (column dimension)
gives all values within a certain block.
The `dilation`, `padding` and `stride` arguments specify
how the sliding blocks are retrieved.
.. warning::
- Currently, batched(4D) image-like tensors are supported.
- For Ascend, it is only supported on platforms above Atlas A2.
Args:
input (Tensor): 4-D Tensor.
        kernel_size (Union[int, tuple[int], list[int]]): The size of the kernel, should be two ints
            for height and width. If type is int, the height equals the width. Must be specified.
        dilation (Union[int, tuple[int], list[int]], optional): The dilation of the window, should be two ints
            for height and width. If type is int, the height equals the width. Default: ``1`` .
        padding (Union[int, tuple[int], list[int]], optional): The pad of the window, should be two ints
            for height and width. If type is int, the height equals the width. Default: ``0`` .
        stride (Union[int, tuple[int], list[int]], optional): The stride of the window, should be two ints
            for height and width. If type is int, the height equals the width. Default: ``1`` .
Returns:
A Tensor, with same type as `input` .
Shape:
- Input: :math:`(N, C, *)`
- Output: :math:`(N, C \times \prod(\text{kernel_size}), L)`
Raises:
TypeError: If any data type of `kernel_size`, `stride`, `dilation`, `padding` is not int, tuple or list.
ValueError: If `kernel_size`, `dilation`, `stride` value is not
greater than zero or elements number more than `2`.
ValueError: If `padding` value is less than zero.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.random.rand(4, 4, 32, 32), mindspore.float32)
>>> output = ops.auto_generate.unfold_ext(x, kernel_size=3, dilation=1, stride=1)
>>> print(output.shape)
(4, 36, 900)
"""
return im2col_ext_op(input, kernel_size, dilation, padding, stride)
def index_fill_scalar(input, dim, index, value):
r"""
"""
return index_fill_scalar_op(input, dim, index, value)
def index_fill_tensor(input, dim, index, value):
r"""
"""
return index_fill_tensor_op(input, dim, index, value)
def index(input, indices):
r"""
Index the Tensor using an `indices`.
.. warning::
        This is an experimental API that is subject to change.
Args:
input (Tensor): The input Tensor.
        indices (tuple[Tensor], list[Tensor]): tensors of type bool, uint8, int32 or int64, used to index into `input`.
            The number of tensors in `indices` should not exceed the rank of `input`, and the tensors should be broadcastable.
            When a tensor is of type bool or uint8, its shape must match the input dimensions in turn. For example, if the first tensor
            of `indices` is of type bool with shape :math:`(x, y)` and `input` has shape :math:`(a, b, c)`, then :math:`(x, y)` needs to match :math:`(a, b)`.
Returns:
Tensor, has the same dtype as input Tensor.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If the dtype of `indices` is not tuple[Tensor], list[Tensor].
TypeError: If the dtype of tensors in `indices` is not bool, uint8, int32 or int64.
        ValueError: If the tensors in `indices` are not broadcastable.
        ValueError: If size(`indices`) > rank(`input`).
        ValueError: If the rank of `input` is 0.
Supported Platforms:
``Ascend``
Examples:
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input1 = Tensor(np.array([[1, 2, 3], [4, 5, 6]]), mindspore.int32)
>>> indices1 = Tensor(np.array([0, 1, 1]), mindspore.int32)
>>> indices2 = Tensor(np.array([1, 2, 1]), mindspore.int32)
>>> output = ops.auto_generate.index(input1, [indices1, indices2])
>>> print(output)
[2 6 5]
>>> input2 = Tensor(np.arange(4 * 3 * 3).reshape(4, 3, 3), mindspore.int32)
>>> indices3 = Tensor(np.array([1, 0]), mindspore.int32)
>>> indices4 = Tensor(np.array([1, 1, 0]), mindspore.bool_)
>>> output2 = ops.auto_generate.index(input2, [indices3, indices4])
>>> print(output2)
[[ 9 10 11]
[ 3 4 5]]
"""
return index_op(input, indices)
def index_select_ext(input, dim, index):
r"""
Generates a new Tensor that accesses the values of `input` along the specified `dim` dimension
using the indices specified in `index`. The new Tensor has the same number of dimensions as `input`,
with the size of the `dim` dimension being equal to the length of `index`, and the size of all other
dimensions will be unchanged from the original `input` Tensor.
.. note::
The value of index must be in the range of `[0, input.shape[dim])`, the result is undefined out of range.
Args:
input (Tensor): The input Tensor.
dim (int): The dimension to be indexed.
index (Tensor): A 1-D Tensor with the indices.
Returns:
Tensor, has the same dtype as input Tensor.
Raises:
TypeError: If `input` or `index` is not a Tensor.
TypeError: If `dim` is not int number.
ValueError: If the value of `dim` is out the range of `[-input.ndim, input.ndim - 1]`.
ValueError: If the dimension of `index` is not equal to 1.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> import numpy as np
>>> input = Tensor(np.arange(16).astype(np.float32).reshape(2, 2, 4))
>>> print(input)
[[[ 0. 1. 2. 3.]
[ 4. 5. 6. 7.]]
[[ 8. 9. 10. 11.]
[12. 13. 14. 15.]]]
>>> index = Tensor([0,], mindspore.int32)
>>> y = ops.auto_generate.index_select_ext(input, 1, index)
>>> print(y)
[[[ 0. 1. 2. 3.]]
[[ 8. 9. 10. 11.]]]
"""
return index_select_op(input, dim, index)
def inplace_add_ext(input, other, alpha=1):
r"""
"""
return inplace_add_ext_op(input, other, alpha)
def inplace_adds_ext(input, other, alpha=1):
r"""
"""
return inplace_adds_ext_op(input, other, alpha)
def inplace_clamp_scalar(input, min=None, max=None):
r"""
"""
return inplace_clamp_scalar_op(input, min, max)
def inplace_clamp_tensor(input, min=None, max=None):
r"""
"""
return inplace_clamp_tensor_op(input, min, max)
def inplace_copy(variable, value):
r"""
"""
return inplace_copy_op(variable, value)
def div_tensor_(input, other):
r"""
"""
return inplace_div_op(input, other)
def divmod_tensor_(input, other, rounding_mode=None):
r"""
"""
return inplace_divmod_op(input, other, rounding_mode)
def divmod_scalar_(input, other, rounding_mode=None):
r"""
"""
return inplace_divmods_op(input, other, rounding_mode)
def div_scalar_(input, other):
r"""
"""
return inplace_divs_op(input, other)
def inplace_fill_scalar(input, value):
r"""
"""
return inplace_fill_scalar_op(input, value)
def inplace_fill_tensor(input, value):
r"""
"""
return inplace_fill_tensor_op(input, value)
def floor_(input):
r"""
"""
return inplace_floor_op(input)
def inplace_hardtanh(input, min_val=-1, max_val=1):
r"""
    Update the `input` tensor in place by applying the hardtanh activation function to it. The activation
    function is defined as:
.. math::
\text{hardtanh}(input) = \begin{cases}
max\_val, & \text{ if } input > max\_val \\
min\_val, & \text{ if } input < min\_val \\
input, & \text{ otherwise. }
\end{cases}
Linear region range :math:`[min\_val, max\_val]` can be adjusted using `min_val` and `max_val`.
Hardtanh Activation Function Graph:
.. image:: ../images/Hardtanh.png
:align: center
.. warning::
        This is an experimental API that is subject to change.
Args:
input (Tensor): Input Tensor.
min_val (Union[bool, int, float], optional): Minimum value of the linear region range. Default: ``-1.0`` .
max_val (Union[bool, int, float], optional): Maximum value of the linear region range. Default: ``1.0`` .
Returns:
Tensor.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If dtype of `input` is not one of: int8, int16, int32, int64, uint8, float16, float32, bfloat16.
TypeError: If dtype of `min_val` is neither float nor int.
TypeError: If dtype of `max_val` is neither float nor int.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, mint
>>> x = Tensor([-1, -2, 0, 2, 1], mindspore.float16)
>>> mint.hardtanh_(x, min_val=-1.0, max_val=1.0)
>>> print(x)
[-1. -1. 0. 1. 1.]
"""
return inplace_hardtanh_op(input, min_val, max_val)
def index_put_(input, indices, values, accumulate=False):
r"""
    Replaces the values of the "self Tensor" with the values in `values`, at the
    positions given by `indices`, and returns the resulting Tensor.
Args:
        indices (tuple[Tensor], list[Tensor]): tensors of type bool, uint8, int32 or int64,
            used to index into the "self Tensor". The tensors in `indices` should be 1-D,
            their number should not exceed the rank of the "self Tensor", and they should be broadcastable.
            When a tensor is of type bool or uint8, its shape must match the input dimensions in turn.
            For example, if the first tensor of `indices` is of type bool with shape :math:`(x, y)` and `input` has shape :math:`(a, b, c)`, then :math:`(x, y)` needs to match :math:`(a, b)`.
        values (Tensor): 1-D Tensor of the same type as the "self Tensor". If its size is 1, it will be broadcast.
accumulate (bool, optional): If `accumulate` is True, the elements in values are added to "self Tensor",
else the elements in `values` replace the corresponding element in the "self Tensor".
Default: ``False``.
Returns:
Tensor, with the same type and shape as the "self Tensor".
Raises:
TypeError: If the dtype of the "self Tensor" is not equal to the dtype of `values`.
TypeError: If the dtype of `indices` is not tuple[Tensor], list[Tensor].
TypeError: If the dtype of tensors in `indices` are not bool, uint8, int32 or int64.
TypeError: If the dtypes of tensors in `indices` are inconsistent.
TypeError: If the dtype of `accumulate` is not bool.
        ValueError: If `values` is not a 1-D Tensor.
ValueError: If size(`values`) is not 1 or max size of the tensors in `indices` when
rank("self Tensor") == size(`indices`).
ValueError: If size(`values`) is not 1 or "self Tensor".shape[-1] when
rank("self Tensor") > size(`indices`).
ValueError: If the rank of tensors in `indices` is not 1-D.
        ValueError: If the tensors in `indices` are not broadcastable.
ValueError: If size(`indices`) > rank("self Tensor").
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor
>>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6]]).astype(np.int32))
>>> values = Tensor(np.array([3]).astype(np.int32))
>>> indices = [Tensor(np.array([0, 1, 1]).astype(np.int32)), Tensor(np.array([1, 2, 1]).astype(np.int32))]
>>> accumulate = True
>>> output = x.index_put_(indices, values, accumulate)
>>> print(output)
[[1 5 3]
[4 8 9]]
"""
return inplace_index_put_op(input, indices, values, accumulate)
def masked_fill_scalar_(input, mask, value):
r"""
"""
return inplace_masked_fill_scalar_op(input, mask, value)
def masked_fill_tensor_(input, mask, value):
r"""
"""
return inplace_masked_fill_tensor_op(input, mask, value)
def inplace_scatter_add(input, dim, index, src):
r"""
"""
return inplace_scatter_add_op(input, dim, index, src)
def sub_tensor_(input, other, alpha=1):
r"""
"""
return inplace_sub_ext_op(input, other, alpha)
def sub_scalar_(input, other, alpha=1):
r"""
"""
return inplace_sub_scalar_op(input, other, alpha)
def zero_(input):
r"""
"""
return inplace_zero_op(input)
def irfft2(input, s=None, dim=(-2, -1), norm=None):
r"""
Calculates the inverse of `rfft2()`.
Note:
- `irfft2` is currently only used in `mindscience` scientific computing scenarios and
          does not support other usage scenarios.
- `irfft2` is not supported on Windows platform yet.
Args:
input (Tensor): The input tensor.
Supported dtypes:
- Ascend/CPU: int16, int32, int64, float16, float32, float64, complex64, complex128.
s (tuple[int], optional): Length of the transformed `dim` of the result.
If given, the size of the `dim[i]` axis will be zero-padded or truncated to `s[i]` before calculating `irfft2`.
Default: ``None`` , the dim[-1] of the `input` will be zero-padded to :math:`2*(input.shape[dim[-1]]-1)`.
dim (tuple[int], optional): The dimension along which to take the one dimensional `irfft2`.
            Default: ``(-2, -1)`` , which means transform the last two dimensions of `input`.
norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
        Three modes are defined as, where :math:`n = prod(s)`
- ``"backward"`` (normalize by :math:`1/n`).
- ``"forward"`` (no normalization).
- ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
Returns:
Tensor, The result of `irfft2()` function, result.shape[dim[i]] is s[i].
When the input is int16, int32, int64, float16, float32, complex64, the return value type is float32.
When the input is float64 or complex128, the return value type is float64.
Raises:
TypeError: If the `input` type is not Tensor.
TypeError: If the `input` data type is not one of: int32, int64, float32, float64, complex64, complex128.
TypeError: If the type/dtype of `s` and `dim` is not int.
ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
ValueError: If `dim` has duplicate values.
ValueError: If `s` is less than 1.
ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input = ops.ones((4, 4))
>>> ops.irfft2(input, s=(4, 4), dim=(0, 1), norm="backward")
Tensor(shape=[4, 4], dtype=Float32, value=
[[ 1.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00]])
"""
return irfft2_op(input, s, dim, norm)
def irfft(input, n=None, dim=-1, norm=None):
r"""
Calculates the inverse of `rfft()`.
Note:
- `irfft` is currently only used in `mindscience` scientific computing scenarios and
          does not support other usage scenarios.
- `irfft` is not supported on Windows platform yet.
Args:
input (Tensor): The input tensor.
n (int, optional): Length of the transformed `dim` of the result.
If given, the input will either be zero-padded or trimmed to this length before computing `irfft`.
If n is not given, it is taken to be :math:`2*(input.shape[dim]-1)`.
Default: ``None``.
dim (int, optional): The dimension along which to take the one dimensional `irfft`.
Default: ``-1``, which means transform the last dimension of `input`.
norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"``.
Three modes are defined as,
- ``"backward"`` (normalize by :math:`1/n`).
- ``"forward"`` (no normalization).
- ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
Returns:
Tensor, the result of `irfft()` function, dtype of the result is float32/64, result.shape[dim] is :math:`n`.
Raises:
TypeError: If the `input` type is not Tensor.
TypeError: If the `input` data type is not one of: int16, int32, int64, float32, float64, complex64, complex128.
TypeError: If `n` or `dim` type is not int.
ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
ValueError: If `n` is less than 1.
ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"``.
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input = Tensor([1, 2, 3, 4])
>>> y = ops.irfft(input, n=6, dim=-1, norm='backward')
>>> print(y)
[ 2.5 -0.6666667 0. -0.16666667 0. -0.6666667 ]
"""
return irfft_op(input, n, dim, norm)
def irfftn(input, s=None, dim=None, norm=None):
r"""
Calculates the inverse of `rfftn()`.
Note:
- `irfftn` is currently only used in `mindscience` scientific computing scenarios and
does not support other usage scenarios.
- `irfftn` is not supported on Windows platform yet.
Args:
input (Tensor): The input tensor.
Supported dtypes:
- Ascend/CPU: int16, int32, int64, float16, float32, float64, complex64, complex128.
s (tuple[int], optional): Length of the transformed `dim` of the result.
If given, the size of the `dim[i]` axis will be zero-padded or truncated to `s[i]` before calculating `irfftn`.
Default: ``None`` , the dim[-1] of the `input` will be zero-padded to :math:`2*(input.shape[dim[-1]]-1)`.
dim (tuple[int], optional): The dimension along which to take the one dimensional `irfftn`.
Default: ``None`` , which means transform all dimensions of `input`, or the last `len(s)` dimensions if `s` is given.
norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
Three modes are defined as, where :math:`n = prod(s)`
- ``"backward"`` (normalize by :math:`1/n`).
- ``"forward"`` (no normalization).
- ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
Returns:
Tensor, the result of the `irfftn()` function, where result.shape[dim[i]] is s[i].
When the input is int16, int32, int64, float16 or float32, the return value type is float32.
When the input is float64, the return value type is float64.
Raises:
TypeError: If the `input` type is not Tensor.
TypeError: If the `input` data type is not one of: int32, int64, float32, float64, complex64, complex128.
TypeError: If the type/dtype of `s` and `dim` is not int.
ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
ValueError: If `dim` has duplicate values.
ValueError: If `s` is less than 1.
ValueError: If `s` and `dim` are given but have different shapes.
ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input = ops.ones((2, 2, 2))
>>> ops.irfftn(input, s=(2, 2, 2), dim=(0, 1, 2), norm="backward")
Tensor(shape=[2, 2, 2], dtype=Float32, value=
[[[ 1.00000000e+00, 0.00000000e+00],
[ 0.00000000e+00, 0.00000000e+00]],
[[ 0.00000000e+00, 0.00000000e+00],
[ 0.00000000e+00, 0.00000000e+00]]])
"""
return irfftn_op(input, s, dim, norm)
def isfinite(x):
r"""
Determine which elements are finite for each position. If elements are not ``NaN`` , ``-INF`` , ``INF``,
they are finite.
.. math::
out_i = \begin{cases}
& \text{ if } x_{i} = \text{Finite},\ \ True \\
& \text{ if } x_{i} \ne \text{Finite},\ \ False
\end{cases}
Args:
x (Tensor): The input tensor.
Returns:
Tensor, has the same shape as `x`, and the dtype is bool.
Raises:
TypeError: If x is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
>>> output = ops.isfinite(x)
>>> print(output)
[False True False]
>>> x = Tensor(2.1, mindspore.float64)
>>> output = ops.isfinite(x)
>>> print(output)
True
"""
return isfinite_op(x)
def isinf(input):
r"""
Determines which elements are inf or -inf for each position.
.. math::
out_i = \begin{cases}
& \ True,\ \text{ if } x_{i} = \text{Inf} \\
& \ False,\ \text{ if } x_{i} \ne \text{Inf}
\end{cases}
where :math:`Inf` means value is infinite.
.. warning::
- This is an experimental API that is subject to change.
- For Ascend, it is only supported on platforms above Atlas A2.
Args:
input (Tensor): The input tensor.
Returns:
Tensor, has the same shape as `input`, and the dtype is bool.
Raises:
TypeError: If `input` is not a Tensor.
Supported Platforms:
``Ascend`` ``CPU`` ``GPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
>>> output = ops.isinf(x)
>>> print(output)
[False False True]
>>> x = Tensor(2.1, mindspore.float64)
>>> output = ops.isinf(x)
>>> print(output)
False
"""
return isinf_op(input)
def isneginf_ext(input):
r"""
Determines which elements are -inf for each position.
.. warning::
- This is an experimental API that is subject to change.
- This API can be used only on the Atlas A2 training series.
Args:
input (Tensor): Input Tensor.
Returns:
Tensor with the same shape as the input, where elements are `True` if the corresponding element in the `input` is negative infinity, and `False` otherwise.
Raises:
TypeError: If the input is not a tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> from mindspore import ops, Tensor
>>> from mindspore import dtype as mstype
>>> output = ops.isneginf(Tensor([[-float("inf"), float("inf")], [1, -float("inf")]], mstype.float32))
>>> print(output)
[[ True False]
[False True]]
"""
return isneginf_op(input)
def l1_loss_ext(input, target, reduction='mean'):
r"""
Calculate the mean absolute error between the `input` value and the `target` value.
Assuming that the :math:`x` and :math:`y` are the predicted value and target value,
both are one-dimensional tensors of length :math:`N`, and `reduction` is set to ``'none'`` ,
then the loss of :math:`x` and :math:`y` is calculated without dimensionality reduction.
The formula is as follows:
.. math::
\ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad \text{with } l_n = \left| x_n - y_n \right|,
where :math:`N` is the batch size.
If `reduction` is ``'mean'`` or ``'sum'`` , then:
.. math::
\ell(x, y) =
\begin{cases}
\operatorname{mean}(L), & \text{if reduction} = \text{'mean';}\\
\operatorname{sum}(L), & \text{if reduction} = \text{'sum'.}
\end{cases}
Args:
input (Tensor): Predicted value, Tensor of any dimension.
target (Tensor): Target value, usually has the same shape as the `input`.
If `input` and `target` have different shapes, make sure they can broadcast to each other.
reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
``'sum'`` . Default: ``'mean'`` .
- ``'none'``: no reduction will be applied.
- ``'mean'``: compute and return the mean of elements in the output. Note: at least one of `input` and `target` must be of float type when `reduction` is ``'mean'`` .
- ``'sum'``: the output elements will be summed.
Returns:
Tensor or Scalar, if `reduction` is ``'none'`` , return a Tensor with same shape and dtype as `input`.
Otherwise, a scalar value will be returned.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If `target` is not a Tensor.
ValueError: If `reduction` is not one of ``'none'`` , ``'mean'`` or ``'sum'`` .
Supported Platforms:
``Ascend``
Examples:
>>> from mindspore import Tensor, ops
>>> from mindspore import dtype as mstype
>>> x = Tensor([[1, 2, 3], [4, 5, 6]], mstype.float32)
>>> target = Tensor([[6, 5, 4], [3, 2, 1]], mstype.float32)
>>> output = ops.l1_loss_ext(x, target, reduction="mean")
>>> print(output)
3.0
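>>> # With reduction="none", the element-wise losses are returned
>>> # (a sketch; the values follow the formula above):
>>> output = ops.l1_loss_ext(x, target, reduction="none")
>>> print(output)
[[5. 3. 1.]
 [1. 3. 5.]]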
"""
return l1_loss_ext_op(input, target, reduction)
def leaky_relu_ext(input, negative_slope=0.01):
r"""
leaky_relu activation function. Elements of `input` that are less than 0 are multiplied by `negative_slope` .
The activation function is defined as:
.. math::
\text{leaky_relu}(input) = \begin{cases}input, &\text{if } input \geq 0; \cr
\text{negative_slope} * input, &\text{otherwise.}\end{cases}
where :math:`negative\_slope` represents the `negative_slope` parameter.
For more details, see `Rectifier Nonlinearities Improve Neural Network Acoustic Models
<https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`_.
LeakyReLU Activation Function Graph:
.. image:: ../images/LeakyReLU.png
:align: center
Args:
input (Tensor): The input of leaky_relu is a Tensor of any dimension.
negative_slope (Union[int, float]): Slope of the activation function when the element of `input` is less than 0.
Default: ``0.01`` .
Returns:
Tensor, has the same type and shape as the `input`.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If `negative_slope` is not a float or an int.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
>>> print(ops.extend.leaky_relu_ext(input, negative_slope=0.2))
[[-0.2 4. -1.6]
[ 2. -1. 9. ]]
"""
return leaky_relu_ext_op(input, negative_slope)
def lerp(input, end, weight):
r"""
Does a linear interpolation of two tensors `input` and `end` based on a float or tensor `weight`.
If `weight` is a tensor, the shapes of three inputs need to be broadcast;
If `weight` is a float, the shapes of `input` and `end` need to be broadcast.
.. warning::
This is an experimental API that is subject to change or deletion.
.. math::
output_{i} = input_{i} + weight_{i} * (end_{i} - input_{i})
Args:
input (Tensor): The tensor with the starting points. Data type must be float16 or float32.
end (Tensor): The tensor with the ending points. Data type must be the same as `input`.
weight (Union[float, Tensor]): The weight for the interpolation formula. Must be a float scalar
or a tensor with float16 or float32 data type.
Returns:
Tensor, has the same type and shape as input `input`.
Raises:
TypeError: If `input` or `end` is not a tensor.
TypeError: If `weight` is neither scalar(float) nor tensor.
TypeError: If dtype of `input` or `end` is neither float16 nor float32.
TypeError: If dtype of `weight` is neither float16 nor float32 when it is a tensor.
TypeError: If `input` and `end` have different data types.
TypeError: If `input`, `end` and `weight` have different data types when `weight` is a tensor.
ValueError: If `end` could not be broadcast to a tensor with shape of `input`.
ValueError: If `weight` could not be broadcast to tensors with shapes of `input` and `end` when it is a tensor.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> start = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
>>> end = Tensor(np.array([10., 10., 10., 10.]), mindspore.float32)
>>> output = ops.lerp(start, end, 0.5)
>>> print(output)
[5.5 6. 6.5 7. ]
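>>> # `weight` may also be a tensor broadcastable with `start` and `end`
>>> # (a sketch; the values follow the interpolation formula above):
>>> weight = Tensor(np.array([0.0, 0.25, 0.5, 1.0]), mindspore.float32)
>>> output = ops.lerp(start, end, weight)
>>> print(output)
[ 1.   4.   6.5 10. ]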
"""
return lerp_op(input, end, weight)
def lerp_scalar(input, end, weight):
r"""
"""
return lerp_scalar_op(input, end, weight)
def less_equal(input, other):
r"""
Computes the boolean value of :math:`input <= other` element-wise.
.. math::
out_{i} =\begin{cases}
& \text{True, if } input_{i}<=other_{i} \\
& \text{False, if } input_{i}>other_{i}
\end{cases}
.. note::
- Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types
consistent.
- When the inputs are one tensor and one scalar, the scalar could only be a constant.
Args:
input (Union[Tensor, Number, bool]): The first input is a Number or a bool or a tensor whose data type is
number or bool\_.
other (Union[Tensor, Number, bool]): The second input is a Number or a bool or a tensor whose data type is
number or bool\_.
Returns:
Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
Raises:
TypeError: If neither `input` nor `other` is a Tensor, number.Number or bool.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> other = Tensor(np.array([1, 1, 4]), mindspore.int32)
>>> output = ops.less_equal(x, other)
>>> print(output)
[ True False True]
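>>> # A constant scalar `other` is also supported (a sketch based on the definition above):
>>> output = ops.less_equal(x, 2)
>>> print(output)
[ True  True False]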
"""
return less_equal_op(input, other)
def less(input, other):
r"""
Computes the boolean value of :math:`input < other` element-wise.
The inputs of `input` and `other` follow implicit type conversion rules to ensure consistent data types.
When the inputs are a Tensor and a Scalar, the Scalar can only be a constant.
.. math::
out_{i} =\begin{cases}
& \text{True, if } input_{i}<other_{i} \\
& \text{False, if } input_{i}>=other_{i}
\end{cases}
Args:
input (Union[Tensor, Number, bool]): The first input is a number or
a bool or a tensor whose data type is number or bool.
other (Union[Tensor, Number, bool]): The second input is a number or
a bool or a tensor whose data type is number or bool.
Returns:
Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
Raises:
TypeError: If `input` and `other` is not one of the following: Tensor, Number, bool.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> other = Tensor(np.array([1, 1, 4]), mindspore.int32)
>>> output = ops.less(input, other)
>>> print(output)
[False False True]
"""
return less_op(input, other)
def log10_ext(input):
r"""
Returns the logarithm to the base 10 of a tensor element-wise.
.. math::
y_i = \log_{10}(x_i)
.. warning::
- This is an experimental API that is subject to change or deletion.
- If the input value of operator Log10 is within the range (0, 0.01] or [0.95, 1.05], the output accuracy
may be affected.
Args:
input (Tensor): Input Tensor of any dimension. The value must be greater than 0.
Returns:
Tensor, has the same shape as the `input`, and the dtype changes according to the `input.dtype`.
- if `input.dtype` is in [float16, float32, float64, bfloat16], the output dtype is the same as the `input.dtype`.
- if `input.dtype` is integer or boolean type, the output dtype is float32.
Raises:
TypeError: If `input` is not a Tensor.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, mint
>>> x = Tensor(np.array([3.0, 5.0, 7.0]), mindspore.float32)
>>> output = mint.log10(x)
>>> print(output)
[0.47712136 0.69897 0.845098 ]
"""
return log10_op(input)
def log1p(input):
r"""
Returns the natural logarithm of one plus the input tensor element-wise.
.. math::
out_i = \log_e(input_i + 1)
Args:
input (Tensor): The input tensor. The value must be greater than -1.
Returns:
Tensor, has the same shape as the `input`.
Raises:
TypeError: If `input` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
>>> output = ops.log1p(x)
>>> print(output)
[0.6931472 1.0986123 1.609438 ]
"""
return log1p_op(input)
def log2_ext(input):
r"""
Returns the logarithm to the base 2 of a tensor element-wise.
.. math::
y_i = \log_2(x_i)
.. warning::
- This is an experimental API that is subject to change or deletion.
- If the input value of operator Log2 is within the range (0, 0.01] or [0.95, 1.05], the output accuracy
may be affected.
Args:
input (Tensor): Input Tensor of any dimension. The value must be greater than 0.
Returns:
Tensor, has the same shape as the `input`, and the dtype changes according to the `input.dtype`.
- if `input.dtype` is in [float16, float32, float64, bfloat16], the output dtype is the same as the `input.dtype`.
- if `input.dtype` is integer or boolean type, the output dtype is float32.
Raises:
TypeError: If `input` is not a Tensor.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, mint
>>> x = Tensor(np.array([3.0, 5.0, 7.0]), mindspore.float32)
>>> output = mint.log2(x)
>>> print(output)
[1.5849625 2.321928 2.807355 ]
"""
return log2_op(input)
def log(input):
r"""
Returns the natural logarithm of a tensor element-wise.
.. math::
y_i = \log_e(x_i)
.. warning::
If the input value of operator Log is within the range (0, 0.01] or [0.95, 1.05], the output accuracy may
be affected.
Args:
input (Tensor): Input Tensor of any dimension. The value must be greater than 0.
Returns:
Tensor, has the same shape and dtype as the `input`.
Raises:
TypeError: If `input` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
>>> output = ops.log(x)
>>> print(output)
[0. 0.6931472 1.3862944]
"""
return log_op(input)
def log_softmax_ext(input, dim=None, dtype=None):
r"""
Applies the Log Softmax function to the input tensor on the specified axis.
Supposes a slice in the given axis, :math:`x` for each element :math:`x_i`,
the Log Softmax function is shown as follows:
.. math::
\text{output}(x_i) = \log \left(\frac{\exp(x_i)} {\sum_{j = 0}^{N-1}\exp(x_j)}\right),
where :math:`N` is the length of the Tensor.
Args:
input (Tensor): The input Tensor.
dim (int, optional): The axis to perform the Log softmax operation. Default: ``None`` .
Keyword Args:
dtype (:class:`mindspore.dtype`, optional): The desired dtype of the returned Tensor. If not ``None``, the input
Tensor is cast to `dtype` before the operation is performed, which is useful for preventing overflows.
If ``None``, the output keeps the same dtype as the input Tensor. Default: ``None`` . Supported data types: {float16, float32, double, bfloat16}.
Returns:
Tensor, with the same shape as the input.
Raises:
TypeError: If `dim` is not an int.
ValueError: If `dim` is not in range [-len(input.shape), len(input.shape)).
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> logits = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
>>> output = ops.auto_generate.log_softmax(logits, dim=-1)
>>> print(output)
[-4.4519143 -3.4519143 -2.4519143 -1.4519144 -0.4519144]
"""
return log_softmax_ext_op(input, dim, dtype)
def log_softmax(logits, axis=-1):
r"""
Applies the Log Softmax function to the input tensor on the specified axis.
Supposes a slice in the given axis, :math:`x` for each element :math:`x_i`,
the Log Softmax function is shown as follows:
.. math::
\text{output}(x_i) = \log \left(\frac{\exp(x_i)} {\sum_{j = 0}^{N-1}\exp(x_j)}\right),
where :math:`N` is the length of the Tensor.
Args:
logits (Tensor): The input Tensor, which is the :math:`x` in the formula above. Its shape is :math:`(N, *)`,
where :math:`*` means, any number of additional dimensions, with float16 or float32 data type.
axis (int): The axis to perform the Log softmax operation. Default: ``-1`` .
Returns:
Tensor, with the same type and shape as the logits.
Raises:
TypeError: If `axis` is not an int.
TypeError: If dtype of `logits` is neither float16 nor float32.
ValueError: If `axis` is not in range [-len(logits.shape), len(logits.shape)).
ValueError: If dimension of `logits` is less than 1.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> logits = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
>>> output = ops.log_softmax(logits)
>>> print(output)
[-4.4519143 -3.4519143 -2.4519143 -1.4519144 -0.4519144]
"""
return log_softmax_impl(logits, axis)
def logaddexp_ext(input, other):
r"""
Computes the logarithm of the sum of exponentiations of the inputs.
This function is useful in statistics where the calculated probabilities of events may be
so small as to exceed the range of normal floating point numbers.
.. math::
out_i = \log(\exp(input_i) + \exp(other_i))
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
input (Tensor): Input Tensor. The dtype of `input` must be float.
other (Tensor): Input Tensor. The dtype of `other` must be float.
If the shape of `input` is not equal to the shape of `other`,
they must be broadcastable to a common shape (which becomes the shape of the output).
Returns:
Tensor, with the same dtype as `input` and `other`.
Raises:
TypeError: If `input` or `other` is not a Tensor.
TypeError: If the dtype of `input` or `other` is not float.
Supported Platforms:
``Ascend``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x1 = Tensor(np.array([1, 2, 3]).astype(np.float16))
>>> x2 = Tensor(np.array(2).astype(np.float16))
>>> output = ops.logaddexp_ext(x1, x2)
>>> print(output)
[2.312 2.693 3.312]
"""
return logaddexp_op(input, other)
def logsigmoid_grad(dy, input, buffer):
r"""
"""
return logsigmoid_grad_op(dy, input, buffer)
def logsumexp_ext(input, dim, keepdim=False):
r"""
Computes the logarithm of the sum of exponentiations of all elements along the specified `dim` dimension of the `input` (with numerical stabilization), and retains the dimension based on the `keepdim` parameter.
.. math::
logsumexp(input) = \log(\sum(e^{input-input_{max}})) + input_{max}
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
input (Tensor): Input Tensor.
dim (Union[int, tuple(int), list(int)]): The dimension to be reduced (the value should be within `[0, len(input.shape) - 1]`). When `dim` is `()`, all dimensions are reduced.
keepdim (bool): Whether the output tensor retains the dimension `dim`. Default: ``False``.
Returns:
Tensor, the dtype changes according to the `input.dtype`, and the shape changes according to the values of `dim` and `keepdim`.
- If `input.dtype` is in [float16, float32, bfloat16], the output dtype is the same as the `input.dtype`.
- If `input.dtype` is an integer or boolean type, the output dtype is float32.
- If `dim` is (), and `keepdim` is False, the output is a 0-D tensor representing the logarithm of the sum of exponentiations of all elements in the `input` tensor.
- If `dim` is `1`, and `keepdim` is False, the shape of output is :math:`(input.shape[0], input.shape[2], ..., input.shape[n])`.
- If `dim` is `(1, 2)`, and `keepdim` is False, the shape of output is :math:`(input.shape[0], input.shape[3], ..., input.shape[n])`.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If dtype of `input` is not one of: bool, int8, int16, int32, int64, uint8, float16, float32, bfloat16.
TypeError: If `dim` is not an int, tuple(int) or list(int).
TypeError: If `keepdim` is not a bool.
ValueError: If the value of any elements of `dim` is not in the range `[0, len(input.shape) - 1]`.
RuntimeError: If any element of `dim` is repeated.
Supported Platforms:
``Ascend``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> output = ops.logsumexp_ext(x, 1, keepdim=True)
>>> print(output.shape)
(3, 1, 5, 6)
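>>> # Passing dim=() reduces all dimensions to a 0-D tensor (a sketch of the case described above):
>>> output = ops.logsumexp_ext(x, ())
>>> print(output.shape)
()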
"""
return logsumexp_op(input, dim, keepdim)
def masked_fill(input_x, mask, value):
r"""
Fills elements of Tensor with value where mask is True.
The shapes of `input_x` and `mask` need to be the same or broadcastable.
Args:
input_x (Tensor): The source Tensor whose data type is one of bool, uint8, int8, int16, int32,
int64, float16, float32, float64, complex64, complex128.
mask (Tensor[bool]): The boolean mask.
value (Union[Number, Tensor]): The value to fill in with, which dtype is the same as `input_x`.
Returns:
Tensor, has the same type and shape as `input_x`.
Raises:
TypeError: If dtype of `mask` is not bool.
TypeError: If `input_x` or `mask` is not a Tensor.
ValueError: If the shapes of `input_x` and `mask` could not be broadcast.
TypeError: If dtype of `input_x` or `value` is not one of bool, uint8, int8, int16, int32,
int64, float16, float32, float64, complex64, complex128.
TypeError: If dtype of `value` is different from that of `input_x` on CPU and GPU.
TypeError: If `value` is neither float number nor Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
>>> mask = Tensor(np.array([True, True, False, True]), mindspore.bool_)
>>> output = ops.masked_fill(input_x, mask, 0.5)
>>> print(output)
[0.5 0.5 3. 0.5]
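>>> # `value` may also be a 0-D tensor with the same dtype as `input_x` (a sketch):
>>> output = ops.masked_fill(input_x, mask, Tensor(0.5, mindspore.float32))
>>> print(output)
[0.5 0.5 3.  0.5]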
"""
return masked_fill_op(input_x, mask, value)
def masked_select(input, mask):
r"""
Returns a new 1-D Tensor which indexes the `input` tensor according to the boolean `mask`.
The shapes of the `mask` tensor and the `input` tensor don't need to match, but they must be broadcastable.
Args:
input (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
mask (Tensor[bool]): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
Returns:
A 1-D Tensor, with the same type as `input`.
Raises:
TypeError: If `input` or `mask` is not a Tensor.
TypeError: If dtype of `mask` is not bool.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([1, 2, 3, 4]), mindspore.int64)
>>> mask = Tensor(np.array([1, 0, 1, 0]), mindspore.bool_)
>>> output = ops.masked_select(x, mask)
>>> print(output)
[1 3]
"""
return masked_select_op(input, mask)
def matmul_ext(input, other):
r"""
"""
return matmul_ext_op(input, other)
def matmul_reduce_scatter(input, x2, group, world_size, reduce_op='sum', bias=None, comm_turn=0, trans_input=False, trans_x2=False):
r"""
In the TP segmentation scenario, matmul and reducescatter are fused, and communication and computational
pipelines are parallelized within the fusion operator.
.. math::
output = reducescatter(input@x2)
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
input (Tensor): The left matrix of matmul, the dtype supports float16 and bfloat16, the shape supports 2
dimensions, and the data format supports ND.
x2 (Tensor): The right matrix of matmul, the dtype needs to be consistent with ``input`` , the shape
supports 2 dimensions, and the data format supports ND.
group (str): Communication group name, can be created by ``create_group`` method, or use the default group
``mindspore.communication.GlobalComm.WORLD_COMM_GROUP``.
world_size (int): The total number of ranks in the communication group, should be consistent with the number
of devices actually running, supporting ``2`` , ``4`` , and ``8`` .
Keyword Args:
reduce_op (str, optional): The reduce operation type. Currently only ``'sum'`` is supported. Default:
``'sum'`` .
bias (Tensor, optional): Currently only ``None`` is supported. Default: ``None`` .
comm_turn (int, optional): Indicates the granularity of communication between ranks. Currently only ``0``
is supported. Default: ``0`` .
trans_input (bool, optional): Indicates whether ``input`` is transposed. Currently only ``False`` is
supported. Default: ``False`` .
trans_x2 (bool, optional): Indicates whether ``x2`` is transposed. Default: ``False`` .
Returns:
- output (Tensor) - The result of matmul and reducescatter fusion calculations.
Note:
- When using this interface, please ensure that the driver firmware package and CANN package are both the
matching 8.0.RC2 version or a higher version, otherwise an error will be reported, such as BUS ERROR.
- The shape of ``input`` is (m, k) and the shape of ``x2`` is (k, n); the two k axes must be equal, the value
range of k is [256, 65535), and m is required to be an integer multiple of ``world_size`` . The shape of
``output`` is (m / world_size, n), since the matmul result is scattered across the ranks.
- The common fusion operators in a model only support the same communication group.
Raises:
TypeError: Any arg is of wrong type.
RuntimeError: The dtype of ``input`` or ``x2`` is neither float16 nor bfloat16.
RuntimeError: The dtypes of ``input`` and ``x2`` are different.
RuntimeError: The shape of ``input`` or ``x2`` is not two-dimensional.
RuntimeError: The k axis of ``input`` shape and ``x2`` shape are not equal.
RuntimeError: k is less than ``256`` or greater than or equal to ``65535`` .
RuntimeError: ``bias`` is not None.
RuntimeError: ``group`` does not exist.
RuntimeError: ``world_size`` is inconsistent with the actual number of running cards.
RuntimeError: ``world_size`` is not equal to ``2`` , ``4`` , ``8`` .
RuntimeError: ``reduce_op`` is not ``'sum'`` .
RuntimeError: ``trans_input`` is ``True`` .
Supported Platforms:
``Ascend``
Examples:
.. note::
Before running the following examples, you need to configure the communication environment variables.
For Ascend/GPU/CPU devices, it is recommended to use the msrun startup method without any third-party or
configuration file dependencies. Please see the `msrun start up <https://www.mindspore.cn/docs/en/master/model_train/parallel/msrun_launcher.html>`_
for more details.
This example should be run with 2 devices.
>>> import mindspore as ms
>>> import numpy as np
>>> ms.communication.init()
>>> ms.set_context(mode=ms.PYNATIVE_MODE, device_target='Ascend')
>>> rank = ms.communication.get_rank()
>>> np.random.seed(rank)
>>> input = ms.Tensor(np.random.randn(1024, 256).astype(np.float32), dtype=ms.float16)
>>> x2 = ms.Tensor(np.random.randn(256, 512).astype(np.float32), dtype=ms.float16)
>>> group = ms.communication.GlobalComm.WORLD_COMM_GROUP
>>> world_size = ms.communication.get_group_size()
>>> reduce_op = ms.ops.ReduceOp.SUM
>>> output = ms.ops.matmul_reduce_scatter(input, x2, group, world_size, reduce_op, None, 0, False, False)
>>> print(output.shape)
(512, 512)
"""
return matmul_reduce_scatter_op(input, x2, group, world_size, reduce_op, bias, comm_turn, trans_input, trans_x2)
def matrix_exp(input):
r"""
Computes the exponential of a single or a batch of square matrices.
.. math::
matrix\_exp(x) = \sum_{k=0}^{\infty} \frac{1}{k !} x^{k} \in \mathbb{K}^{n \times n}
where :math:`x` corresponds to `input` .
Args:
input (Tensor): The shape of tensor is :math:`(*, n, n)` where * is zero or more batch dimensions.
Must be one of the following types: float16, float32, float64, complex64, complex128.
Returns:
Tensor, has the same shape and dtype as the `input`.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If the dtype of `input` is not one of the following dtype:
float16, float32, float64, complex64, complex128.
ValueError: If the rank of `input` is less than 2.
ValueError: If the size of last two dimensions of `input` are not equal.
Supported Platforms:
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([[1, 2], [0, 1]]), mindspore.float32)
>>> output = ops.matrix_exp(input)
>>> print(output)
[[2.7182817 5.436563 ]
[0. 2.7182817]]
"""
return matrix_exp_op(input)
def matrix_inverse_ext(input):
r"""
Compute the inverse of the input matrix.
Args:
input (Tensor): A matrix to be calculated. Input `input` must be at least two dimensions, and the size of
the last two dimensions must be the same size.
Returns:
Tensor, has the same type and shape as `input`.
Raises:
TypeError: If `input` is not a Tensor.
ValueError: If the size of the last two dimensions of `input` is not the same.
ValueError: If the dimension of `input` is 1.
Supported Platforms:
``Ascend``
Examples:
>>> from mindspore import Tensor, ops
>>> from mindspore import dtype as mstype
>>> x = Tensor([[1., 2.], [3., 4.]], mstype.float32)
>>> print(ops.matrix_inverse_ext(x))
[[-2. 1. ]
[ 1.5 -0.5]]
"""
return matrix_inverse_ext_op(input)
def max_(input):
r"""
Calculates the maximum value of the input tensor.
Also see :func:`mindspore.ops.extend.max`.
"""
return max_op(input)
def max_unpool2d_ext(input, indices, kernel_size, stride=None, padding=0, output_size=None):
r"""
Computes the inverse of `max_pool2d`.
`max_unpool2d` keeps the maximal values and sets all non-maximal positions to zero. Typically the input is of shape :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`, and the output is of shape :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`. The operation is as follows.
.. math::
\begin{array}{ll} \\
H_{out} = (H_{in} - 1) \times stride[0] - 2 \times padding[0] + kernel\_size[0] \\
W_{out} = (W_{in} - 1) \times stride[1] - 2 \times padding[1] + kernel\_size[1] \\
\end{array}
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
input (Tensor): The input Tensor to invert. Tensor of shape :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
indices (Tensor): Indices of the max values. Its shape must be the same as that of `input`. Values of `indices` must belong to :math:`[0, H_{in} \times W_{in} - 1]`. Data type must be int32 or int64.
kernel_size (Union[int, tuple[int]]): The size of kernel used to take the maximum value, an int number that represents height and width of the kernel, or a tuple of two int numbers that represent height and width respectively.
stride (Union[int, tuple[int]], optional): The distance of kernel moving, an int number that represents the height and width of movement are both stride, or a tuple of two int numbers that represent height and width of movement respectively. Default: ``None`` , which indicates the moving step is `kernel_size` .
padding (Union[int, tuple[int]], optional): The pad value to be filled. Default: ``0`` . If `padding` is an integer, the paddings of height and width are the same, equal to padding. If `padding` is a tuple of two integers, the padding of height and width equal to padding[0] and padding[1] correspondingly.
output_size (tuple[int], optional): The target output size. Default: ``None`` . If output_size == (), then the shape of output computed by `kernel_size`, `stride` and `padding`. If output_size != (), then output_size must be :math:`(N, C, H, W)` , :math:`(C, H, W)` or :math:`(H, W)` and output_size must belong to :math:`[(N, C, H_{out} - stride[0], W_{out} - stride[1]), (N, C, H_{out} + stride[0], W_{out} + stride[1])]`.
Returns:
Tensor, with shape :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, with the same data type with `input`.
Raises:
TypeError: If data type of `input` or `indices` is not supported.
TypeError: If `kernel_size`, `stride` or `padding` is neither an int nor a tuple.
ValueError: If numbers in `stride`, `padding` or `kernel_size` are not positive.
ValueError: If the shape of `input` and `indices` are different.
ValueError: If the length of `input` is not 3 or 4.
ValueError: If the type of `output_size` is not tuple.
ValueError: If `output_size` is not close to output size computed by attr `kernel_size`, `stride`, `padding`.
Supported Platforms:
``Ascend``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([[[[0, 1], [8, 9]]]]).astype(np.float32))
>>> indices = Tensor(np.array([[[[0, 1], [2, 3]]]]).astype(np.int64))
>>> output = ops.max_unpool2d_ext(input, indices, 1, stride=1, padding=0)
>>> print(output.asnumpy())
[[[[0. 1.]
[8. 9.]]]]
"""
return max_unpool2d_ext_op(input, indices, kernel_size, stride, padding, output_size)
def maximum(input, other):
r"""
Computes the maximum of input tensors element-wise.
.. math::
output_i = \max(input_i, other_i)
Note:
- Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types
consistent.
- When the inputs are two tensors,
dtypes of them cannot be bool at the same time, and the shapes of them could be broadcast.
- When the inputs are one tensor and one scalar,
the scalar could only be a constant.
- Broadcasting is supported.
- If one of the elements being compared is a NaN, then that element is returned.
.. warning::
If all inputs are integer scalars, in GRAPH mode the output will be a Tensor of int32, while in
PYNATIVE mode, the output will be a Tensor of int64.
Args:
input (Union[Tensor, Number, bool]): The first input is a number or
a bool or a tensor whose data type is number or bool.
other (Union[Tensor, Number, bool]): The second input is a number or
a bool when the first input is a tensor or a tensor whose data type is number or bool.
Returns:
Tensor, the shape is the same as the one after broadcasting,
and the data type is the one with higher precision or higher digits among the two inputs.
Raises:
TypeError: If `input` and `other` is not one of the following: Tensor, Number, bool.
ValueError: If `input` and `other` are not the same shape after broadcast.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> # case 1 : same data type
>>> input = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
>>> other = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
>>> output = ops.maximum(input, other)
>>> print(output)
[4. 5. 6.]
>>> # case 2 : different data type
>>> input = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.int32)
>>> other = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
>>> output = ops.maximum(input, other)
>>> print(output.dtype)
Float32
"""
return maximum_op(input, other)
def mean_ext(input, axis=None, keep_dims=False, dtype=None):
r"""
Reduces all dimension of a tensor by averaging all elements in the dimension, by default.
And reduce a dimension of `input` along the specified `axis`. `keep_dims`
determines whether the dimensions of the output and input are the same.
Note:
The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
Args:
input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
:math:`(N, *)` where :math:`*` means, any number of additional dimensions.
axis (Union[int, tuple(int), list(int), Tensor]): The dimensions to reduce. Default: ``None`` ,
reduce all dimensions. Only constant value is allowed. Assume the rank of `input` is r,
and the value range is [-r,r).
keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
If ``False`` , don't keep these dimensions. Default: ``False`` .
dtype (:class:`mindspore.dtype`): The desired data type of returned Tensor. Default: ``None`` .
Returns:
Tensor, has the same data type as input tensor.
- If `axis` is ``None`` , and `keep_dims` is ``False`` ,
the output is a 0-D tensor representing the mean of all elements in the input tensor.
- If `axis` is int, set as 1, and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_0, x_2, ..., x_R)`.
- If `axis` is tuple(int), set as (1, 2), and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_0, x_3, ..., x_R)`.
- If `axis` is 1-D Tensor, set as [1, 2], and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_0, x_3, ..., x_R)`.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
TypeError: If `keep_dims` is not a bool.
ValueError: If `axis` is out of range.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> output = ops.mean(x, 1, keep_dims=True)
>>> result = output.shape
>>> print(result)
(3, 1, 5, 6)
>>> # case 1: Reduces a dimension by averaging all elements in the dimension.
>>> x = Tensor(np.array([[[2, 2, 2, 2, 2, 2], [2, 2, 2, 2, 2, 2], [2, 2, 2, 2, 2, 2]],
... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
... [[6, 6, 6, 6, 6, 6], [8, 8, 8, 8, 8, 8], [10, 10, 10, 10, 10, 10]]]),
... mindspore.float32)
>>> output = ops.mean(x)
>>> print(output)
5.0
>>> print(output.shape)
()
>>> # case 2: Reduces a dimension along the axis 0
>>> output = ops.mean(x, 0, True)
>>> print(output)
[[[4. 4. 4. 4. 4. 4.]
[5. 5. 5. 5. 5. 5.]
[6. 6. 6. 6. 6. 6.]]]
>>> # case 3: Reduces a dimension along the axis 1
>>> output = ops.mean(x, 1, True)
>>> print(output)
[[[2. 2. 2. 2. 2. 2.]]
[[5. 5. 5. 5. 5. 5.]]
[[8. 8. 8. 8. 8. 8.]]]
>>> # case 4: Reduces a dimension along the axis 2
>>> output = ops.mean(x, 2, True)
>>> print(output)
[[[ 2.]
[ 2.]
[ 2.]]
[[ 4.]
[ 5.]
[ 6.]]
[[ 6.]
[ 8.]
[10.]]]
"""
return mean_ext_op(input, axis, keep_dims, dtype)
def min_(input):
r"""
Calculates the minimum value of the input tensor.
Also see :func:`mindspore.ops.extend.min`.
"""
return min_op(input)
def minimum(input, other):
r"""
Computes the minimum of input tensors element-wise.
Note:
- Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types
consistent.
- When the inputs are two tensors, dtypes of them cannot be bool at the same time.
- When the inputs are one tensor and one scalar, the scalar could only be a constant.
- Shapes of them are supposed to be broadcast.
- If one of the elements being compared is a NaN, then that element is returned.
.. math::
output_i = \min(input_i, other_i)
Args:
input (Union[Tensor, Number, bool]): The first input is a number or
a bool or a tensor whose data type is number or bool.
other (Union[Tensor, Number, bool]): The second input is a number or
a bool when the first input is a tensor or a tensor whose data type is number or bool.
Returns:
Tensor, the shape is the same as the one after broadcasting,
and the data type is the one with higher precision or higher digits among the two inputs.
Raises:
TypeError: If `input` and `other` is not one of the following: Tensor, Number, bool.
ValueError: If `input` and `other` are not the same shape after broadcast.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> # case 1 : same data type
>>> input = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
>>> other = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
>>> output = ops.minimum(input, other)
>>> print(output)
[1. 2. 3.]
>>> # case 2 : different data type
>>> input = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.int32)
>>> other = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
>>> output = ops.minimum(input, other)
>>> print(output.dtype)
Float32
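>>> # case 3 : one tensor and one constant scalar (a sketch based on the notes above)
>>> input = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
>>> output = ops.minimum(input, 2.0)
>>> print(output)
[1. 2. 2.]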
"""
return minimum_op(input, other)
def mish_ext(input):
r"""
Computes MISH (A Self Regularized Non-Monotonic Neural Activation Function)
of input tensors element-wise.
The formula is defined as follows:
.. math::
\text{mish}(input) = input * \tanh(\text{softplus}(input))
See more details in `A Self Regularized Non-Monotonic Neural Activation Function
<https://arxiv.org/abs/1908.08681>`_.
Mish Activation Function Graph:
.. image:: ../images/Mish.png
:align: center
Args:
input (Tensor): The input of MISH. Supported dtypes:
- Ascend: float16, float32.
Returns:
Tensor, has the same type and shape as the `input`.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If dtype of `input` is not float16 or float32.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> import numpy as np
>>> x = Tensor(np.array([[-1.1, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
>>> output = ops.mish(x)
>>> print(output)
[[-3.0764845e-01 3.9974124e+00 -2.6832507e-03]
[ 1.9439589e+00 -3.3576239e-02 8.9999990e+00]]
"""
return mish_ext_op(input)
def mm_ext(input, mat2):
r"""
Returns the matrix product of two arrays.
If `input` is a :math:`(n \times m)` Tensor, `mat2` is a
:math:`(m \times p)` Tensor, `out` will be a :math:`(n \times p)` Tensor.
Note:
This function cannot support broadcasting.
Refer to :func:`mindspore.ops.matmul` instead if you need a broadcastable function.
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
input (Tensor): The first matrix of matrix multiplication.
The last dimension of `input` must be the same size as the first dimension of `mat2`.
mat2 (Tensor): The second matrix of matrix multiplication.
The last dimension of `input` must be the same size as the first dimension of `mat2`.
Returns:
Tensor, the matrix product of the inputs.
Raises:
ValueError: If the last dimension of `input` is not the same size as the
second-to-last dimension of `mat2`.
TypeError: If `input` or `mat2` is not a Tensor.
TypeError: If dtype of `input` or `mat2` is not float16, float32 or bfloat16.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore as ms
>>> from mindspore import ops
>>> import numpy as np
>>> x1 = ms.Tensor(np.random.rand(2, 3), ms.float32)
>>> x2 = ms.Tensor(np.random.rand(3, 4), ms.float32)
>>> out = ops.mm_ext(x1, x2)
>>> print(out.shape)
(2, 4)
"""
return mm_ext_op(input, mat2)
def mse_loss_ext(input, target, reduction='mean'):
r"""
Calculates the mean squared error between the predicted value and the label value.
For detailed information, please refer to :class:`mindspore.nn.MSELoss`.
Args:
input (Tensor): Tensor of any dimension. The data type needs to be consistent with the `target`.
It should also be broadcastable with the `target`.
target (Tensor): The input label. Tensor of any dimension. The data type needs to be consistent with the `input`.
It should also be broadcastable with the `input`.
reduction (str, optional): Apply specific reduction method to the output: ``'mean'`` , ``'none'`` ,
``'sum'`` . Default: ``'mean'`` .
- ``'none'``: no reduction will be applied.
- ``'mean'``: compute and return the mean of elements in the output.
- ``'sum'``: the output elements will be summed.
Returns:
- Tensor. If `reduction` is ``'mean'`` or ``'sum'``, the output is a scalar Tensor.
- If `reduction` is ``'none'``, the shape of output is the broadcasted shape of `input` and `target` .
Raises:
ValueError: If `reduction` is not one of ``'mean'`` , ``'sum'`` or ``'none'``.
ValueError: If `input` and `target` are not broadcastable.
TypeError: If `input` and `target` are in different data type.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> logits = Tensor(np.array([1, 2, 3]), mindspore.float32)
>>> labels = Tensor(np.array([[1, 1, 1], [1, 2, 2]]), mindspore.float32)
>>> output = ops.mse_loss_ext(logits, labels, reduction='none')
>>> print(output)
[[0. 1. 4.]
[0. 0. 1.]]
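>>> # The default reduction='mean' averages the element-wise losses shown above
>>> # (a sketch: the mean of [[0, 1, 4], [0, 0, 1]] is 1.0):
>>> output = ops.mse_loss_ext(logits, labels)
>>> print(output)
1.0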
"""
return mse_loss_ext_op(input, target, reduction)
def mul(input, other):
r"""
Multiplies two tensors element-wise.
.. math::
out_{i} = input_{i} * other_{i}
Note:
- When the two inputs have different shapes,
they must be able to broadcast to a common shape.
- The two inputs can not be bool type at the same time,
[True, Tensor(True, bool\_), Tensor(np.array([True]), bool\_)] are all considered bool type.
- The two inputs comply with the implicit type conversion rules to make the data types
consistent.
Args:
input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
a bool or a tensor whose data type is
`number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
`bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
other (Union[Tensor, number.Number, bool]): The second input, which is a number.Number or
a bool or a tensor whose data type is
`number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
`bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
Returns:
Tensor, the shape is the same as the one after broadcasting,
and the data type is the one with higher precision or higher digits among the two inputs.
Raises:
TypeError: If `input` and `other` is not one of the following: Tensor, number.Number, bool.
ValueError: If `input` and `other` are not the same shape.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
>>> y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
>>> output = ops.mul(x, y)
>>> print(output)
[ 4. 10. 18.]
"""
return mul_op(input, other)
def muls(input, other):
r"""
"""
return muls_op(input, other)
def mv(input, vec):
r"""
Multiply matrix `input` and vector `vec`.
If `input` is a tensor with shape :math:`(N, M)` and `vec` is a tensor with shape :math:`(M,)`,
the output is a 1-D tensor whose shape is :math:`(N,)`.
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
input (Tensor): The input matrix, whose shape is :math:`(N, M)`; its rank must be 2.
vec (Tensor): The input vector, whose shape is :math:`(M,)`; its rank must be 1.
Returns:
Tensor, the shape is :math:`(N,)`.
Raises:
TypeError: If `input` or `vec` is not a tensor.
TypeError: If the dtype of `input` or `vec` is not float16 or float32.
TypeError: If the dtypes of `input` and `vec` are different.
ValueError: If the `input` is not a 2-D tensor or the `vec` is not a 1-D tensor.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, mint
>>> input = Tensor(np.array([[3., 4.], [1., 6.], [1., 3.]]).astype(np.float32))
>>> vec = Tensor(np.array([1., 2.]).astype(np.float32))
>>> output = mint.mv(input, vec)
>>> print(output)
[11. 13. 7.]
"""
return mv_op(input, vec)
def nan_to_num(input, nan=None, posinf=None, neginf=None):
r"""
Replace the `NaN`, positive infinity and negative infinity values in `input` with the
specified values in `nan`, `posinf` and `neginf` respectively.
.. warning::
For Ascend, it is only supported on Atlas A2 Training Series Products.
This is an experimental API that is subject to change or deletion.
Args:
input (Tensor): The shape of tensor is :math:`(input_1, input_2, ..., input_R)`.
nan (number, optional): The value to replace `NaN` values with. Default: ``None`` .
posinf (number, optional): the value to replace positive infinity values with. Default: ``None``,
replacing positive infinity with the maximum value supported by the data type of `input`.
neginf (number, optional): the value to replace negative infinity values with. Default: ``None``,
replacing negative infinity with the minimum value supported by the data type of `input`.
Returns:
Tensor, has the same shape and dtype as the `input`.
Raises:
TypeError: If `input` is not a Tensor.
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([float('nan'), float('inf'), -float('inf'), 5.0]), mindspore.float32)
>>> output = ops.nan_to_num(input, 1.0, 2.0, 3.0)
>>> print(output)
[1. 2. 3. 5.0]
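>>> # With the defaults (a sketch, assuming ``None`` maps `NaN` to 0.0 and the infinities
>>> # to the float32 extremes as described above):
>>> output = ops.nan_to_num(input)
>>> print(output)
[ 0.0000000e+00  3.4028235e+38 -3.4028235e+38  5.0000000e+00]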
"""
return nan_to_num_impl(input, nan, posinf, neginf)
def nansum(input, dim=None, keepdim=False, dtype=None):
r"""
Computes sum of `input` over a given dimension, treating NaNs as zero.
.. warning::
It is only supported on Atlas A2 Training Series Products.
This is an experimental API that is subject to change or deletion.
Args:
input (Tensor): The input Tensor.
dim (Union[int, tuple(int)], optional): The dimensions to sum.
`dim` must be in the range [-rank(input), rank(input)). Default: ``None``, which indicates summing all elements in the tensor.
keepdim (bool, optional): Whether the output Tensor keeps dimensions or not. Default: ``False``.
Keyword Args:
dtype (:class:`mindspore.dtype`, optional): The dtype of output Tensor. Default: ``None``.
Returns:
Tensor, the sum of input `input` in the given dimension dim, treating NaNs as zero.
- If dim is None, keepdim is False,
the output is a 0-D Tensor representing the sum of all elements in the input Tensor.
- If dim is int, set as 2, and keepdim is False,
the shape of output is :math:`(input_1, input_3, ..., input_R)`.
- If dim is tuple(int) or list(int), set as (2, 3), and keepdim is False,
the shape of output is :math:`(input_1, input_4, ..., input_R)`.
Raises:
TypeError: If `input` is not Tensor.
TypeError: If `keepdim` is not a bool.
TypeError: If the dtype of `input` or `dtype` is complex type.
ValueError: If `dim` not in [-rank(`input`), rank(`input`)).
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([[float("nan"), 2, 3], [1, 2, float("nan")]]), mindspore.float32)
>>> output1 = ops.nansum(x, dim=0, keepdim=False, dtype=mindspore.float32)
>>> output2 = ops.nansum(x, dim=0, keepdim=True, dtype=mindspore.float32)
>>> print(output1)
[1. 4. 3.]
>>> print(output2)
[[1. 4. 3.]]
"""
return nansum_op(input, dim, keepdim, dtype)
def narrow(input, dim, start, length):
r"""
Obtains a tensor of a specified length at a specified start position along a specified axis.
Args:
input (Tensor): the tensor to narrow.
dim (int): the axis along which to narrow.
start (int): the index at which to start the narrowed slice along `dim`.
length (int): the length of the narrowed slice.
Returns:
Tensor, the narrowed tensor.
Raises:
ValueError: If the rank of `input` is 0.
ValueError: If the value of `dim` is out of the range [-input.ndim, input.ndim).
ValueError: If the value of `start` is out of the range [-input.shape[dim], input.shape[dim]].
ValueError: If the value of `length` is out of the range [0, input.shape[dim]-start].
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import mint
>>> from mindspore import Tensor
>>> x = Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], mindspore.int32)
>>> output = mint.narrow(x, 0, 0, 2)
>>> print(output)
[[ 1 2 3]
[ 4 5 6]]
>>> output = mint.narrow(x, 1, 1, 2)
>>> print(output)
[[ 2 3]
[ 5 6]
[ 8 9]]
"""
return narrow_op(input, dim, start, length)
def neg(input):
r"""
Returns a tensor with negative values of the input tensor element-wise.
.. math::
out_{i} = - input_{i}
Args:
input (Tensor): The input tensor with a dtype of Number.
Returns:
Tensor, has the same shape and dtype as input.
Raises:
TypeError: If `input` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([1, 2, -1, 2, 0, -3.5]), mindspore.float32)
>>> output = ops.neg(input)
>>> print(output)
[-1. -2. 1. -2. 0. 3.5]
"""
return neg_op(input)
def nextafter(input, other):
r"""
Returns the next representable floating-point value after `input` towards `other` element-wise.
Say there are two float32 numbers :math:`a` and :math:`b`, and let :math:`eps` be the
representable delta of the float32 datatype. If :math:`a < b`,
then the next representable of :math:`a` towards :math:`b` is :math:`a+eps`,
and the next representable of :math:`b` towards :math:`a` is :math:`b-eps`.
This matches the behavior of the C standard library function ``nextafter``.
Args:
input (Tensor): The first input tensor. The shape of tensor is :math:`(N,*)` where :math:`*` means,
any number of additional dimensions. Must be one of the following types: float32, float64.
other (Tensor): The second input tensor. The shape of tensor is :math:`(N,*)` where :math:`*` means,
any number of additional dimensions. Must be one of the following types: float32, float64.
Returns:
Tensor, has the same shape and data type as `input`.
Raises:
TypeError: If neither `input` nor `other` is a Tensor.
TypeError: If the dtype of `input` and `other` is not one of: float32, float64.
TypeError: If the dtypes of `input` and `other` are not same.
ValueError: If `input`'s shape is not the same as `other`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_ = Tensor(np.asarray([0.0]), mindspore.float32)
>>> other_ = Tensor(np.asarray([0.1]), mindspore.float32)
>>> output_ = ops.nextafter(input_, other_)
>>> print(output_)
[1.e-45]
"""
return next_after_op(input, other)
def not_equal(input, other):
r"""
Alias for :func:`mindspore.ops.ne` .
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
"""
return not_equal_op(input, other)
ones_op = Ones()
def ones(shape, dtype=None):
r"""
Creates a tensor filled with value ones, whose shape and type are described by the first argument `shape` and second argument `dtype` respectively.
.. warning::
For argument `shape`, Tensor type input will be deprecated in the future version.
Args:
shape (Union[tuple[int], list[int], int, Tensor]): The specified shape of output tensor. Only positive integer or
tuple or Tensor containing positive integers are allowed. If it is a Tensor,
it must be a 0-D or 1-D Tensor with int32 or int64 dtypes.
dtype (:class:`mindspore.dtype`): The specified type of output tensor. If `dtype` is ``None`` ,
`mindspore.float32` will be used. Default: ``None`` .
Returns:
Tensor, whose dtype and size are defined by input.
Raises:
TypeError: If `shape` is neither an int nor a tuple/list/Tensor of int.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import ops
>>> output = ops.ones((2, 2), mindspore.float32)
>>> print(output)
[[1. 1.]
[1. 1.]]
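>>> # A single int shape creates a 1-D tensor; float32 is used when `dtype` is None (a sketch):
>>> output = ops.ones(3)
>>> print(output)
[1. 1. 1.]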
"""
return ones_op(shape, dtype)
def outer_ext(input, vec2):
r"""
Return the outer product of `input` and `vec2`. If `input` is a vector of size :math:`n`
and `vec2` is a vector of size :math:`m` , then the output is a matrix of shape :math:`(n, m)` .
.. warning::
This is an experimental API that is subject to change or deletion.
.. note::
This function does not broadcast.
Args:
input (Tensor): 1-D input vector.
vec2 (Tensor): 1-D input vector.
Returns:
Tensor, a 2-D matrix containing the outer product of the two vectors.
Raises:
TypeError: If `input` or `vec2` is not a Tensor.
TypeError: If the implicitly converted data types of `input` and `vec2` are not one of float16, float32, float64, bool, uint8, int8, int16, int32, int64, complex64, complex128, bfloat16.
ValueError: If the dimension of `input` or `vec2` is not equal to 1.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore import ops
>>> input = Tensor(np.array([7, 8, 9]), mindspore.int32)
>>> vec2 = Tensor(np.array([7, 10, 11]), mindspore.int32)
>>> out = ops.outer(input, vec2)
>>> print(out)
[[49 70 77]
[56 80 88]
[63 90 99]]
"""
return outer_op(input, vec2)
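# Editor's note: illustrative sketch, not generated code (Ascend backend assumed).
# `outer_ext` computes the same rank-1 outer product as NumPy's np.outer for 1-D inputs.
def _outer_ext_sketch():
    import numpy as np
    from mindspore import Tensor
    a = np.array([7, 8, 9], np.int32)
    b = np.array([7, 10, 11], np.int32)
    out = outer_ext(Tensor(a), Tensor(b))
    assert np.array_equal(out.asnumpy(), np.outer(a, b))  # shape (3, 3)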
[文档]def pow(input, exponent):
r"""
Calculates the `exponent` power of each element in `input`.
When `exponent` is a Tensor, the shapes of `input` and `exponent` must be broadcastable.
.. math::
out_{i} = input_{i} ^{ exponent_{i}}
Args:
input (Union[Tensor, Number]): The first input is a Number or a tensor whose data type is
`number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
`bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
exponent (Union[Tensor, Number]): The second input is a Number or a tensor whose data type is
`number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
`bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
Returns:
Tensor, the shape is the same as the one after broadcasting,
and the data type is the one with higher precision or higher digits among the two inputs.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
>>> exponent = 3.0
>>> output = ops.pow(input, exponent)
>>> print(output)
[ 1. 8. 64.]
>>>
>>> input = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
>>> exponent = Tensor(np.array([2.0, 4.0, 3.0]), mindspore.float32)
>>> output = ops.pow(input, exponent)
>>> print(output)
[ 1. 16. 64.]
"""
return pow_op(input, exponent)
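# Editor's note: illustrative sketch, not generated code. It demonstrates the broadcasting
# rule documented above: `input` and `exponent` only need broadcastable shapes.
def _pow_broadcast_sketch():
    import numpy as np
    from mindspore import Tensor
    base = np.array([[1.0], [2.0], [3.0]], np.float32)  # shape (3, 1)
    exp = np.array([1.0, 2.0, 3.0], np.float32)         # shape (3,)
    out = pow(Tensor(base), Tensor(exp))                # broadcasts to (3, 3)
    assert out.shape == (3, 3)
    assert np.allclose(out.asnumpy(), base ** exp)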
[文档]def prelu(input, weight):
r"""
Parametric Rectified Linear Unit activation function.
PReLU is described in the paper `Delving Deep into Rectifiers: Surpassing Human-Level Performance on
ImageNet Classification <https://arxiv.org/abs/1502.01852>`_. Defined as follows:
.. math::
prelu(x_i)= \max(0, x_i) + \min(0, w * x_i),
where :math:`x_i` is an element of a channel of the input, `w` is the weight of the channel.
PReLU Activation Function Graph:
.. image:: ../images/PReLU2.png
:align: center
.. note::
Channel dim is the 2nd dim of input. If `input` has fewer than 2 dimensions, there is
no channel dim and the number of channels is 1.
Args:
input (Tensor): The input Tensor of the activation function.
weight (Tensor): Weight Tensor. The size of the weight should be 1 or the number of channels of the Tensor `input`.
Returns:
Tensor, with the same shape and dtype as `input`.
For detailed information, please refer to :class:`mindspore.mint.nn.PReLU`.
Raises:
TypeError: If the `input` or the `weight` is not a Tensor.
ValueError: If the `weight` is not a 0-D or 1-D Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.arange(-6, 6).reshape((2, 3, 2)), mindspore.float32)
>>> weight = Tensor(np.array([0.1, 0.6, -0.3]), mindspore.float32)
>>> output = ops.prelu(x, weight)
>>> print(output)
[[[-0.60 -0.50]
[-2.40 -1.80]
[ 0.60 0.30]]
[[ 0.00 1.00]
[ 2.00 3.00]
[ 4.00 5.00]]]
"""
return prelu_op(input, weight)
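# Editor's note: illustrative sketch, not generated code. It reproduces the documented
# formula max(0, x) + min(0, w * x), with `w` broadcast over the channel dim (dim 1).
def _prelu_sketch():
    import numpy as np
    from mindspore import Tensor
    x = np.arange(-6, 6, dtype=np.float32).reshape((2, 3, 2))
    w = np.array([0.1, 0.6, -0.3], np.float32)  # one weight per channel
    out = prelu(Tensor(x), Tensor(w))
    ref = np.maximum(0, x) + np.minimum(0, w.reshape(1, 3, 1) * x)
    assert np.allclose(out.asnumpy(), ref)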
def prod_ext(input, axis=None, keep_dims=False, dtype=None):
r"""
By default, reduces all dimensions of a tensor by multiplying all of their elements. It can also
reduce a single dimension of `input` along the `axis`. Whether the output keeps the same number of
dimensions as the input is controlled by `keep_dims`.
Args:
input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
:math:`(N, *)` where :math:`*` means, any number of additional dimensions.
axis (int): The dimension to reduce. Default: ``None`` , reduce all dimensions.
Only constant value is allowed. Assume the rank of `input` is r, and the value range is [-r,r).
keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
If ``False`` , don't keep these dimensions. Default: ``False`` .
dtype (:class:`mindspore.dtype`): The desired data type of returned Tensor. Default: ``None`` .
Returns:
Tensor, has the same data type as input tensor.
- If `axis` is ``None`` , and `keep_dims` is ``False`` ,
the output is a 0-D tensor representing the product of all elements in the input tensor.
- If `axis` is int, such as 1, and `keep_dims` is ``False`` ,
the shape of output is :math:`(input_0, input_2, ..., input_R)`.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If `axis` is not one of the following: int or None.
TypeError: If `keep_dims` is not a bool.
ValueError: If `axis` is out of range.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> output = ops.auto_generate.prod_ext(x, 1, keep_dims=True)
>>> result = output.shape
>>> print(result)
(3, 1, 5, 6)
>>> # case 1: Reduces a dimension by multiplying all elements in the dimension.
>>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
>>> output = ops.auto_generate.prod_ext(x)
>>> print(output)
2.2833798e+33
>>> print(output.shape)
()
>>> # case 2: Reduces a dimension along axis 0.
>>> output = ops.auto_generate.prod_ext(x, 0, True)
>>> print(output)
[[[ 28. 28. 28. 28. 28. 28.]
[ 80. 80. 80. 80. 80. 80.]
[162. 162. 162. 162. 162. 162.]]]
>>> # case 3: Reduces a dimension along axis 1.
>>> output = ops.auto_generate.prod_ext(x, 1, True)
>>> print(output)
[[[ 6. 6. 6. 6. 6. 6.]]
[[120. 120. 120. 120. 120. 120.]]
[[504. 504. 504. 504. 504. 504.]]]
>>> # case 4: Reduces a dimension along axis 2.
>>> output = ops.auto_generate.prod_ext(x, 2, True)
>>> print(output)
[[[1.00000e+00]
[6.40000e+01]
[7.29000e+02]]
[[4.09600e+03]
[1.56250e+04]
[4.66560e+04]]
[[1.17649e+05]
[2.62144e+05]
[5.31441e+05]]]
"""
return prod_ext_op(input, axis, keep_dims, dtype)
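# Editor's note: illustrative sketch, not generated code. It checks the shape rules of
# `prod_ext` under `axis` / `keep_dims` against NumPy's prod.
def _prod_ext_sketch():
    import numpy as np
    from mindspore import Tensor
    x = np.full((3, 4, 5), 2.0, np.float32)
    t = Tensor(x)
    assert prod_ext(t).shape == ()                   # reduce all dims -> 0-D tensor
    assert prod_ext(t, 1).shape == (3, 5)            # reduced axis removed
    assert prod_ext(t, 1, True).shape == (3, 1, 5)   # reduced axis kept with length 1
    assert np.allclose(prod_ext(t, 1).asnumpy(), x.prod(axis=1))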
def prompt_k_v_cache(cache, update, valid_seq_len, batch_index, seq_len_axis, new_max_seq_len, cur_max_seq_len, align_mode='LEFT'):
r"""
The PromptKVCache operator is used to prefill the KVCache of a transformer network.
Args:
cache (Tensor): The cache tensor with data type of int8, uint8, int16, uint16, float16, float32 and int32.
When format is BHSD, cache tensor of shape
:math:`(cache\_batch\_size, num\_head, max\_seq\_length, size\_per\_head)`.
When format is BSD, cache tensor of shape
:math:`(cache\_batch\_size, max\_seq\_length, hidden\_size)`.
update (Tensor): The tensor which is used to update the cache tensor. Same data type as the cache tensor.
When format is BHSD, update tensor of shape
:math:`(update\_batch\_size, num\_head, max\_seq\_length, size\_per\_head)`.
When format is BSD, update tensor of shape
:math:`(update\_batch\_size, max\_seq\_length, hidden\_size)`.
valid_seq_len (Tensor): The valid_seq_len tensor with data type of int64.
Valid_seq_len tensor of shape :math:`(update\_batch\_size)`.
batch_index (Tensor): The batch_index tensor with data type of int64.
Batch_index tensor of shape :math:`(update\_batch\_size)`. Indicates which batches of the cache tensor are going to be updated.
seq_len_axis (Tensor): Indicates which axis is seq_len; set to '1' or '2'. Not available for now.
new_max_seq_len (Tensor): The new_max_seq_len tensor with data type of int64.
New_max_seq_len tensor of shape :math:`(1)`.
Indicates that the user wants to change the shape of the cache tensor from
:math:`(batch\_size, num\_head, max\_seq\_length, hidden\_size)` to
:math:`(batch\_size * max\_seq\_length / new\_max\_seq\_length, num\_head, new\_max\_seq\_length, hidden\_size)`
to update the cache tensor. This does not really change the shape of the `cache` tensor. Not available for now.
cur_max_seq_len (Tensor): The cur_max_seq_len tensor with data type of int64.
Cur_max_seq_len tensor of shape :math:`(1)`. Keeps the current seq_len of the cache tensor. Not available for now.
align_mode (str): Indicates whether the valid sequence in each batch is aligned to the left or the right of the cache. Default: ``'LEFT'`` .
Outputs:
With same data type and same shape as `cache` tensor.
Supported Platforms:
``Ascend``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore.ops.operations import _inner_ops
>>> b = 4
>>> h = 40
>>> max_s = 1024
>>> s = 256
>>> d = 128
>>> cache = Tensor(np.random.randn(b, h, max_s, d).astype(np.float16))
>>> update = Tensor(np.random.randn(b, h, s, d).astype(np.float16))
>>> valid_seq_len = Tensor(np.random.randint(-1, s, size=b).astype(np.int64))
>>> batch_index = Tensor(np.random.choice(np.arange(-1, b), size=b, replace=False).astype(np.int64))
>>> new_max_seq_len = Tensor(np.random.randn(1).astype(np.int64))
>>> cur_max_seq_len = Tensor(np.random.randn(1).astype(np.int64))
>>> prompt_kv_cache = _inner_ops.PromptKVCache(0)
>>> output = prompt_kv_cache(cache, update, valid_seq_len, batch_index, Tensor(2), new_max_seq_len, cur_max_seq_len)
>>> print(cache)
"""
prompt_k_v_cache_op = _get_cache_prim(PromptKVCache)(align_mode)
return prompt_k_v_cache_op(cache, update, valid_seq_len, batch_index, seq_len_axis, new_max_seq_len, cur_max_seq_len)
[文档]def randperm(n, seed=0, offset=0, dtype=mstype.int64):
r"""
Generates a random permutation of the integers from 0 to n-1.
Returns a tensor whose shape is inferred from `n` and whose element type is given by `dtype`.
.. warning::
- This is an experimental API that is subject to change or deletion.
- The Ascend backend does not support the reproducibility of random numbers, so
the `seed` parameter has no effect.
Args:
n (Union[Tensor, int]): The input n Tensor with shape: () or (1,) and with data type of int64.
The value of `n` must be greater than zero.
seed (int, optional): Random seed. Default: ``0`` . When `seed` is ``-1`` (the only allowed
negative value) and `offset` is ``0``, the seed is determined by the current time.
offset (int, optional): Offset to generate random numbers. Priority is higher than random seed.
Default: ``0`` . It must be non-negative.
dtype (mindspore.dtype, optional): The type of output.
Its value must be one of the following types: int32, int16, int8,
uint8, int64, float64, float32, float16. Default: mstype.int64.
Returns:
Tensor. Its shape is specified by the required argument `n`; its type is specified by `dtype`.
Raises:
TypeError: If `dtype` is not allowed.
ValueError: If `n` is negative or 0.
ValueError: If `seed` is negative.
ValueError: If `n` is larger than the maximal value representable by the given `dtype`.
Supported Platforms:
``CPU``
Examples:
>>> from mindspore import ops
>>> from mindspore import dtype as mstype
>>> n = 4
>>> seed = 0
>>> offset = 0
>>> output = ops.randperm(n, seed, offset, dtype=mstype.int64)
>>> print(output)
[0 2 1 3]
"""
randperm_v2_op = _get_cache_prim(RandpermV2)(seed, offset, dtype)
return randperm_v2_op(n)
[文档]def range(start, end, step, maxlen=1000000):
r"""
Creates a sequence of numbers that begins at `start` and extends by increments of
`step` up to but not including `end`.
The types of all 3 inputs must all be integers or all be floating-point numbers.
Args:
start (number): The first number in the sequence. Must have
type: int32, int64, float32 or float64.
end (number): Upper end of the sequence, exclusive. Must
have type: int32, int64, float32 or float64.
step (number): Number that increments `start`. Must have
type: int32, int64, float32 or float64.
maxlen (int, optional): Memory that can fit `maxlen` many elements
will be allocated for the output. Optional, must be positive. Default: 1000000.
If the output has more than `maxlen` elements, a runtime error will occur.
Returns:
A 1-D Tensor. If `start`, `end` and `step` are all integers, the type of output is int64.
If `start`, `end` and `step` are all floating-point numbers, the type of output is float32.
Raises:
TypeError: If `start`, `end` and `step` are a mix of integers and floating-point numbers.
TypeError: If datatype of `start`, `end` or `step` is not supported.
ValueError: If `step` = 0.
ValueError: If `start` >= `end` when `step` > 0.
ValueError: If `start` <= `end` when `step` < 0.
Supported Platforms:
``GPU`` ``CPU``
Examples:
>>> from mindspore import ops
>>> start = 0
>>> end = 10
>>> step = 4
>>> output = ops.range(start, end, step)
>>> print(output)
[0 4 8]
"""
range_op = _get_cache_prim(Range)(maxlen)
return range_op(start, end, step)
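# Editor's note: illustrative sketch, not generated code. It exercises the documented
# dtype rule: all-integer inputs produce int64, all-float inputs produce float32
# (GPU/CPU backends assumed, per the Supported Platforms above).
def _range_sketch():
    import mindspore
    out_int = range(0, 10, 4)  # module-level `range`, not the Python builtin
    assert out_int.dtype == mindspore.int64
    out_float = range(0.0, 1.0, 0.25)
    assert out_float.dtype == mindspore.float32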
[文档]def real(input):
r"""
Returns a Tensor that is the real part of the input. If input is real, it is returned unchanged.
Args:
input (Tensor): The input tensor.
Returns:
Tensor, the shape is the same as the input.
Raises:
TypeError: If input is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore as ms
>>> import mindspore.ops as ops
>>> import numpy as np
>>> input = ms.Tensor(np.asarray(1.3 + 0.4j), ms.complex64)
>>> output = ops.real(input)
>>> print(output)
1.3
"""
return real_op(input)
[文档]def all(input, axis=None, keep_dims=False):
r"""
By default, reduces all dimensions of `input` by the "logical AND" of their elements. It can also
reduce a dimension of `input` along the `axis`. Whether the output keeps the same number of
dimensions as the input is controlled by `keep_dims`.
Note:
The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
Args:
input (Tensor): Input Tensor, has the shape :math:`(N, *)` where :math:`*` means,
any number of additional dimensions.
axis (Union[int, tuple(int), list(int), Tensor], optional): The dimensions to reduce.
Suppose the rank of `input` is r, `axis` must be in the range [-rank(input), rank(input)).
Default: ``None`` , all dimensions are reduced.
keep_dims (bool, optional): If ``True`` , keep these reduced dimensions and the length is 1.
If ``False`` , don't keep these dimensions. Default : ``False`` .
Returns:
Tensor, the dtype is bool.
- If `axis` is ``None`` , and `keep_dims` is ``False`` ,
the output is a 0-D Tensor representing the "logical AND" of all elements in the input Tensor.
- If `axis` is int, such as 2, and `keep_dims` is ``False`` ,
the shape of output is :math:`(input_1, input_3, ..., input_R)`.
- If `axis` is tuple(int), such as (2, 3), and `keep_dims` is ``False`` ,
the shape of output is :math:`(input_1, input_4, ..., input_R)`.
- If `axis` is 1-D Tensor, such as [2, 3], and `keep_dims` is ``False`` ,
the shape of output is :math:`(input_1, input_4, ..., input_R)`.
Raises:
TypeError: If `keep_dims` is not a bool.
TypeError: If `input` is not a Tensor.
TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([[True, False], [True, True]]))
>>> # case 1: Reduces a dimension by the "logical AND" of all elements in the dimension.
>>> output = ops.all(x, keep_dims=True)
>>> print(output)
[[False]]
>>> print(output.shape)
(1, 1)
>>> # case 2: Reduces a dimension along axis 0.
>>> output = ops.all(x, axis=0)
>>> print(output)
[ True False]
>>> # case 3: Reduces a dimension along axis 1.
>>> output = ops.all(x, axis=1)
>>> print(output)
[False True]
"""
return reduce_all_impl(input, axis, keep_dims)
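# Editor's note: illustrative sketch, not generated code. `all` follows NumPy's np.all
# for the documented axis / keep_dims combinations.
def _all_sketch():
    import numpy as np
    from mindspore import Tensor
    x = np.array([[True, False], [True, True]])
    t = Tensor(x)
    assert np.array_equal(all(t, axis=0).asnumpy(), np.all(x, axis=0))  # module-level `all`
    assert all(t, keep_dims=True).shape == (1, 1)  # reduced dims kept with length 1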
[文档]def relu6(x):
r"""
Computes ReLU (Rectified Linear Unit) upper bounded by 6 of input tensors element-wise.
.. math::
\text{ReLU6}(x) = \min(\max(0,x), 6)
It returns :math:`\min(\max(0,x), 6)` element-wise.
ReLU6 Activation Function Graph:
.. image:: ../images/ReLU6.png
:align: center
Args:
x (Tensor): Tensor of shape :math:`(N, *)`, where :math:`*` means any number of additional dimensions.
Data type must be float16, float32.
Returns:
Tensor, with the same dtype and shape as the `x`.
Raises:
TypeError: If dtype of `x` is neither float16 nor float32.
TypeError: If `x` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
>>> result = ops.relu6(x)
>>> print(result)
[[0. 4. 0.]
[2. 0. 6.]]
"""
return relu6_op(x)
def relu(input):
r"""
Computes ReLU (Rectified Linear Unit activation function) of input tensors element-wise.
It returns :math:`\max(input,\ 0)` element-wise. That is, neurons with negative outputs
are suppressed and active neurons remain unchanged.
.. math::
ReLU(input) = (input)^+ = \max(0, input)
ReLU Activation Function Graph:
.. image:: ../images/ReLU.png
:align: center
Args:
input (Tensor): The input Tensor.
Returns:
Tensor, with the same dtype and shape as the `input`.
Raises:
TypeError: If dtype of `input` is not Number type.
TypeError: If `input` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
>>> output = ops.relu(input)
>>> print(output)
[[0. 4. 0.]
[2. 0. 9.]]
"""
return relu_op(input)
def reshape_and_cache(key, value, key_cache, value_cache, slot_mapping):
r"""
The ReshapeAndCache operator is used to update the block-wise KVCache of a transformer network.
Args:
key (Tensor): The key tensor with data type of float16.
:math:`(num\_tokens, num\_head, head\_dim)`.
value (Tensor): The value tensor with data type of float16.
:math:`(num\_tokens, num\_head, head\_dim)`.
key_cache (Tensor): The cache tensor with data type of float16.
:math:`(num\_blocks, block\_size, num\_head, head\_dim)`.
value_cache (Tensor): The cache tensor with data type of float16.
:math:`(num\_blocks, block\_size, num\_head, head\_dim)`.
slot_mapping (Tensor): The slot mapping tensor with data type of int32.
:math:`(num\_tokens,)`.
Outputs:
With same data type and same shape as `key` tensor.
Note:
No backend implementation in MindSpore; only used to export MindIR and run in MindSpore Lite.
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops.operations import _inner_ops
>>> num_tokens = 4
>>> num_head = 40
>>> head_dim = 128
>>> block_size = 16
>>> num_blocks = 128
>>> key = Tensor(np.random.randn(num_tokens, num_head, head_dim).astype(np.float16))
>>> value = Tensor(np.random.randn(num_tokens, num_head, head_dim).astype(np.float16))
>>> key_cache = Parameter(default_input=Tensor(np.random.randn(num_blocks, block_size, num_head, head_dim).astype(np.float16)))
>>> value_cache = Parameter(default_input=Tensor(np.random.randn(num_blocks, block_size, num_head, head_dim).astype(np.float16)))
>>> slot_mapping = Tensor(np.random.permutation(np.arange(num_tokens, dtype=np.int32)))
>>> reshape_and_cache = _inner_ops.ReshapeAndCache()
>>> output = reshape_and_cache(key, value, key_cache, value_cache, slot_mapping)
>>> print(key_cache)
"""
return reshape_and_cache_op(key, value, key_cache, value_cache, slot_mapping)
[文档]def reshape(input, shape):
r"""
Rearranges the input Tensor based on the given shape.
The `shape` can only have one -1 at most, in which case it's inferred from the remaining dimensions and
the number of elements in the input.
Args:
input (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
shape (Union[tuple[int], list[int], Tensor[int]]): If `shape` is a tuple or list, its elements should be
integers, and only constant value is allowed. i.e., :math:`(y_1, y_2, ..., y_S)`. If `shape` is a Tensor,
data type should be int32 or int64, and only one-dimensional tensor is supported.
Returns:
Tensor, If the given `shape` does not contain -1, the `shape` of tensor is :math:`(y_1, y_2, ..., y_S)`.
If the k-th position in the given `shape` is -1, the `shape` of tensor is :math:`(y_1, ..., y_{k-1},
\frac{\prod_{i=1}^{R}x_{i}}{y_1\times ...\times y_{k-1}\times y_{k+1}\times...\times y_S} , y_{k+1}, ..., y_S)`
Raises:
ValueError: The given `shape` contains more than one -1.
ValueError: The given `shape` contains elements less than -1.
ValueError: For scenarios where the given `shape` does not contain -1, the product of elements of the given
`shape` is not equal to the product of the input's `shape`,
:math:`\prod_{i=1}^{R}x_{i} \ne \prod_{i=1}^{S}y_{i}`, (Namely, it does not match the input's array size).
And for scenarios where the given `shape` contains -1, the product of elements other than -1 of the given
`shape` is an aliquant part of the product of the input's `shape` :math:`\prod_{i=1}^{R}x_{i}`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
>>> output = ops.reshape(input, (3, 2))
>>> print(output)
[[-0.1 0.3]
[ 3.6 0.4]
[ 0.5 -3.2]]
"""
return reshape_op(input, shape)
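# Editor's note: illustrative sketch, not generated code. A -1 entry in `shape` is
# inferred so that the element count is preserved: 12 / (3 * 2) = 2 below.
def _reshape_sketch():
    import numpy as np
    from mindspore import Tensor
    x = Tensor(np.arange(12, dtype=np.float32))
    assert reshape(x, (3, -1, 2)).shape == (3, 2, 2)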
def flip(input, axis):
r"""
Reverses specific dimensions of a tensor.
.. warning::
The value range of `axis` is [-dims, dims - 1], where `dims` is the dimension length of `input`.
Args:
input (Tensor): The target tensor.
The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
axis (Union[tuple(int), list(int)]): The indices of the dimensions to reverse.
Returns:
Tensor, has the same shape and type as `input`.
Raises:
TypeError: If `axis` is neither list nor tuple.
TypeError: If element of `axis` is not an int.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.int32)
>>> output = ops.flip(input_x, axis=[1])
>>> print(output)
[[4 3 2 1]
[8 7 6 5]]
>>> input_x = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]), mindspore.int32)
>>> output = ops.flip(input_x, axis=[1, 0])
>>> print(output)
[[8 7 6 5]
[4 3 2 1]]
"""
return reverse_v2_impl(input, axis)
def rfft2(input, s=None, dim=(-2, -1), norm=None):
r"""
Calculates the two dimensional discrete Fourier transform for real input `input`.
Note:
- `rfft2` is currently only used in `mindscience` scientific computing scenarios and
does not support other usage scenarios.
- `rfft2` is not supported on Windows platform yet.
Args:
input (Tensor): The input tensor.
Supported dtypes:
- Ascend/CPU: int16, int32, int64, float16, float32, float64.
s (tuple[int], optional): Length of the transformed `dim` of the result.
If given, the size of the `dim[i]` axis will be zero-padded or truncated to `s[i]` before calculating `rfft2`.
Default: ``None`` , which does not need to process `input`.
dim (tuple[int], optional): The dimension along which to take the one dimensional `rfft2`.
Default: ``(-2, -1)`` , which means transform the last two dimension of `input`.
norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
Three modes are defined as, where :math:`n = prod(s)`:
- ``"backward"`` (no normalization).
- ``"forward"`` (normalize by :math:`1/n`).
- ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
Returns:
Tensor, the result of the `rfft2()` function, where result.shape[dim[i]] is s[i], and for the last transformed dim,
result.shape[dim[-1]] is :math:`s[-1] // 2 + 1`.
When the input is int16, int32, int64, float16, float32, the return value type is complex64.
When the input is float64, the return value type is complex128.
Raises:
TypeError: If the `input` type is not Tensor.
TypeError: If the `input` data type is not one of: int32, int64, float32, float64.
TypeError: If the type/dtype of `s` and `dim` is not int.
ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
ValueError: If `dim` has duplicate values.
ValueError: If `s` is less than 1.
ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input = ops.ones((2, 2))
>>> ops.rfft2(input, s=(2, 2), dim=(0, 1), norm="backward")
Tensor(shape=[2, 2], dtype=Complex64, value=
[[4+0j, 0+0j],
[0+0j, 0+0j]])
"""
return rfft2_op(input, s, dim, norm)
def rfft(input, n=None, dim=-1, norm=None):
r"""
Calculates the one dimensional discrete Fourier transform for real input `input`.
Note:
- `rfft` is currently only used in `mindscience` scientific computing scenarios and
does not support other usage scenarios.
- `rfft` is not supported on Windows platform yet.
Args:
input (Tensor): The input tensor.
n (int, optional): Number of points along `dim` in the input to use.
If given, the input will either be zero-padded or trimmed to this length before computing `rfft`.
Default: ``None``.
dim (int, optional): The dimension along which to take the one dimensional `rfft`.
Default: ``-1``, which means transform the last dimension of `input`.
norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"``.
Three modes are defined as,
- ``"backward"`` (no normalization).
- ``"forward"`` (normalize by :math:`1/n`).
- ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
Returns:
Tensor, the result of the `rfft()` function; the dtype of the result is complex64/128 and result.shape[dim]
is :math:`n // 2 + 1`.
Raises:
TypeError: If the `input` type is not Tensor.
TypeError: If the `input` data type is not one of: int16, int32, int64, float32, float64.
TypeError: If `n` or `dim` type is not int.
ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
ValueError: If `n` is less than 1.
ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"``.
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input = Tensor([1, 2, 3, 4])
>>> y = ops.rfft(input, n=4, dim=-1, norm='backward')
>>> print(y)
[10.+0.j -2.+2.j -2.+0.j]
"""
return rfft_op(input, n, dim, norm)
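# Editor's note: illustrative sketch, not generated code. `rfft` keeps n // 2 + 1
# frequency bins and matches NumPy's np.fft.rfft under the "backward" normalization.
def _rfft_sketch():
    import numpy as np
    from mindspore import Tensor
    x = np.array([1.0, 2.0, 3.0, 4.0], np.float32)
    y = rfft(Tensor(x), n=4, dim=-1, norm="backward")
    assert y.shape == (4 // 2 + 1,)
    assert np.allclose(y.asnumpy(), np.fft.rfft(x, n=4))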
def rfftfreq(n, d=1.0, dtype=None):
r"""
Computes the sample frequencies for `rfft` with a signal of size `n`.
For instance, given a length `n` and a sample spacing `d` , the returned result `f` is:
.. math::
f = [0, 1, ..., n // 2] / (d * n)
Note:
- `rfftfreq` is currently only used in `mindscience` scientific computing scenarios and
does not support other usage scenarios.
- `rfftfreq` is not supported on Windows platform yet.
Args:
n (int): Window length.
d (float, optional): Sample spacing (inverse of the sampling rate). Default: ``1.0`` .
dtype (mindspore.dtype, optional): The dtype of the returned frequencies. Default: ``None`` represents float32.
Returns:
Tensor, array of length :math:`n // 2 + 1` containing the sample frequencies.
Raises:
ValueError: If `n` is less than 1.
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import ops
>>> out = ops.rfftfreq(n=4, d=1.0)
>>> print(out)
[0. 0.25 0.5 ]
"""
return rfftfreq_op(n, d, dtype)
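# Editor's note: illustrative sketch, not generated code. It checks the documented
# formula f = [0, 1, ..., n // 2] / (d * n) against NumPy's np.fft.rfftfreq.
def _rfftfreq_sketch():
    import numpy as np
    n, d = 4, 1.0
    out = rfftfreq(n, d)
    assert out.shape == (n // 2 + 1,)
    assert np.allclose(out.asnumpy(), np.fft.rfftfreq(n, d))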
def rfftn(input, s=None, dim=None, norm=None):
r"""
Computes the N dimensional discrete Fourier transform for real input `input`.
Note:
- `rfftn` is currently only used in `mindscience` scientific computing scenarios and
does not support other usage scenarios.
- `rfftn` is not supported on Windows platform yet.
Args:
input (Tensor): The input tensor.
Supported dtypes:
- Ascend/CPU: int16, int32, int64, float16, float32, float64.
s (tuple[int], optional): Length of the transformed `dim` of the result.
If given, the size of the `dim[i]` axis will be zero-padded or truncated to `s[i]` before calculating `rfftn`.
Default: ``None`` , which does not need to process `input`.
dim (tuple[int], optional): The dimension along which to take the one dimensional `rfftn`.
Default: ``None`` , which means transform all dimensions of `input`, or the last `len(s)` dimensions if `s` is given.
norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
Three modes are defined as, where :math:`n = prod(s)`:
- ``"backward"`` (no normalization).
- ``"forward"`` (normalize by :math:`1/n`).
- ``"ortho"`` (normalize by :math:`1/\sqrt{n}`).
Returns:
Tensor, the result of the `rfftn()` function, where result.shape[dim[i]] is s[i], and for the last transformed dim,
result.shape[dim[-1]] is :math:`s[-1] // 2 + 1`.
When the input is int16, int32, int64, float16 or float32, the return value type is complex64.
When the input is float64, the return value type is complex128.
Raises:
TypeError: If the `input` type is not Tensor.
TypeError: If the `input` data type is not one of: int32, int64, float32, float64.
TypeError: If the type/dtype of `s` and `dim` is not int.
ValueError: If `dim` is not in the range of "[ `-input.ndim` , `input.ndim` )".
ValueError: If `dim` has duplicate values.
ValueError: If `s` is less than 1.
ValueError: If `s` and `dim` are given but have different shapes.
ValueError: If `norm` is none of ``"backward"`` , ``"forward"`` or ``"ortho"`` .
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input = ops.ones((2, 2, 2))
>>> ops.rfftn(input, s=(2, 2, 2), dim=(0, 1, 2), norm="backward")
Tensor(shape=[2, 2, 2], dtype=Complex64, value=
[[[8+0j, 0+0j],
[0+0j, 0+0j]],
[[0+0j, 0+0j],
[0+0j, 0+0j]]])
"""
return rfftn_op(input, s, dim, norm)
[文档]def rms_norm(x, gamma, epsilon=1e-6):
r"""
The RmsNorm (Root Mean Square Layer Normalization) operator is a normalization operation. Compared to
LayerNorm, it retains scaling invariance and removes translation invariance. Its formula is:
.. math::
y_i=\frac{x_i}{\sqrt{\frac{1}{n}\sum_{i=1}^{n}x_i^2+\varepsilon}}\gamma_i
.. warning::
This is an experimental API that is subject to change or deletion. This API is only supported in Atlas A2
training series for now.
Args:
x (Tensor): Input data of RmsNorm. Support data type: float16, float32, bfloat16.
gamma (Tensor): Learnable parameter :math:`\gamma` . Support data type: float16, float32, bfloat16.
epsilon (float, optional): A float number ranged in (0, 1] to prevent division by 0. Default value is `1e-6`.
Returns:
- Tensor, denotes the normalized result, has the same type and shape as `x`.
- Tensor, with the float data type, denotes the reciprocal of the input standard deviation, used by gradient
calculation.
Raises:
TypeError: If data type of `x` is not one of the following: float16, float32, bfloat16.
TypeError: If data type of `gamma` is not one of the following: float16, float32, bfloat16.
TypeError: If data type of `x` is not the same with the data type of `gamma`.
ValueError: If `epsilon` is not a float between 0 and 1.
ValueError: If the rank of `gamma` is larger than the rank of `x`.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([[1, 2, 3], [1, 2, 3]]), mindspore.float32)
>>> gamma = Tensor(np.ones([3]), mindspore.float32)
>>> y, rstd = ops.rms_norm(x, gamma)
>>> print(y)
[[0.46290997 0.92581993 1.3887299]
[0.46290997 0.92581993 1.3887299]]
>>> print(rstd)
[[0.46290997]
[0.46290997]]
"""
return rms_norm_impl(x, gamma, epsilon)
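# Editor's note: illustrative sketch, not generated code (Ascend-only op, backend assumed).
# It reproduces the documented formula with NumPy: y_i = x_i * rstd * gamma_i, where
# rstd = 1 / sqrt(mean(x^2) + eps).
def _rms_norm_sketch():
    import numpy as np
    from mindspore import Tensor
    x = np.array([[1.0, 2.0, 3.0]], np.float32)
    gamma = np.ones(3, np.float32)
    y, rstd = rms_norm(Tensor(x), Tensor(gamma))
    ref_rstd = 1.0 / np.sqrt(np.mean(x * x, axis=-1, keepdims=True) + 1e-6)
    assert np.allclose(rstd.asnumpy(), ref_rstd, rtol=1e-5)
    assert np.allclose(y.asnumpy(), x * ref_rstd * gamma, rtol=1e-5)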
def rotary_position_embedding(x, cos, sin, mode=0):
r"""
Implements the Rotary Position Embedding algorithm.
Refer to paper `Enhanced Transformer with Rotary Position Embedding <https://arxiv.org/pdf/2104.09864.pdf>`_.
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
x (Tensor): 4D tensor, with float16, bfloat16 or float32 data type.
cos (Tensor): 4D constant, has the same type as `x` , in range of [-1, 1].
sin (Tensor): Same with `cos` .
mode (int): An optional attribute. Used to select a calculation mode. 0: rotate_half(GPT-NeoX style); 1: rotate_interleaved(GPT-J style). Defaults to ``0`` .
.. list-table:: Config layout constraints
:widths: 5 20 20
:header-rows: 1
* - Args
- RotateHalf(mode:0)
- RotateInterleaved(mode:1)
* - x
- Supported layout:
11SD, B1SD, BNSD; D < 896 and D is even; B, N < 1000;
- Supported layout: 11SD, B1SD, BNSD;
D < 896 and D is even;
B, N < 1000;
* - cos
- Support layout for different values of `x`:
`x` is BNSD: 11SD, B1SD, BNSD;
`x` is BSND: 1S1D, BS1D, BSND;
`x` is SBND: S11D, SB1D, SBND
- Support layout for different values of `x`:
`x` is BNSD: 11SD;
`x` is BSND: 1S1D;
`x` is SBND: S11D
* - sin
- Same with `cos` .
- Same with `cos` .
.. note::
When the layout is BNSD, B * N > 8S and D is 32-bytes aligned, the performance is poor, so this interface should not be called in that case.
Returns:
Tensor, has the same dtype and shape as the `x`.
Raises:
TypeError: If `x` is not a Tensor.
TypeError: If `cos` is not a Tensor.
TypeError: If `sin` is not a Tensor.
TypeError: If `mode` is not an int.
Supported Platforms:
``Ascend``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.random.uniform(-2, 2, (4, 8192, 4, 128)))
>>> cos = Tensor(np.random.uniform(-1, 1, (1, 8192, 1, 128)))
>>> sin = Tensor(np.random.uniform(-1, 1, (1, 8192, 1, 128)))
>>> output = ops.rotary_position_embedding(x, cos, sin, 0)
>>> print(output.shape)
(4, 8192, 4, 128)
"""
return rotary_position_embedding_op(x, cos, sin, mode)
[文档]def rsqrt(input):
r"""
Computes reciprocal of square root of input tensor element-wise.
.. math::
out_{i} = \frac{1}{\sqrt{input_{i}}}
Args:
input (Tensor): The input of rsqrt. Its each element must be a non-negative
number, if an element is negative, the calculation result is nan.
Returns:
Tensor, has the same shape and dtype as the `input`.
Raises:
TypeError: If `input` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore as ms
>>> from mindspore import ops
>>> input = ms.Tensor([-0.0370, 0.2970, 1.5420, -0.9105])
>>> output = ops.rsqrt(input)
>>> print(output)
[ nan 1.8349396 0.8053002 nan]
"""
return rsqrt_op(input)
def scalar_cast(input_x, input_y):
r"""
The interface is deprecated from version 2.3 and will be removed in a future version;
please use `int(x)` or `float(x)` instead.
Casts the input scalar to another type.
Args:
input_x (scalar): The input scalar. Only constant value is allowed.
input_y (mindspore.dtype): The type to be cast. Only constant value is allowed. And the value should only be mindspore.int64, mindspore.float64, or mindspore.bool_.
Returns:
Scalar. The type is the same as the python type corresponding to `input_y`.
Raises:
ValueError: If the value of `input_y` is invalid.
Supported Platforms:
Deprecated
Examples:
>>> import mindspore
>>> from mindspore import ops
>>> output = ops.scalar_cast(255.0, mindspore.int64)
>>> print(output)
255
"""
return scalar_cast_op(input_x, input_y)
scalar_to_tensor_op=ScalarToTensor()
def scalar_to_tensor(input_x, dtype=None):
r"""
"""
return scalar_to_tensor_op(input_x, dtype)
[文档]def scatter_nd(indices, updates, shape):
r"""
Scatters a tensor into a new tensor depending on the specified indices.
Creates an empty tensor with the given `shape`, and set values by scattering the update tensor
depending on indices. The empty tensor has rank :math:`P` and `indices` has rank :math:`Q`.
The `shape` is :math:`(s_0, s_1, ..., s_{P-1})`, where :math:`P \ge 1`.
`indices` has shape :math:`(i_0, i_1, ..., i_{Q-2}, N)`, where :math:`Q \ge 2` and :math:`N \le P`.
The last dimension of `indices` (with length :math:`N` ) indicates slices along the :math:`N` th dimension of the
empty tensor.
`updates` is a tensor of rank :math:`Q-1+P-N`, and
its shape is :math:`(i_0, i_1, ..., i_{Q-2}, s_N, s_{N+1}, ..., s_{P-1})`.
If `indices` contains duplicates, the duplicate `updates` are summed.
The following figure shows the calculation process of inserting two new value matrices into the first dimension
with rank-3:
.. image:: ScatterNd.png
Args:
indices (Tensor): Define the index of scattering in the new tensor with int32 or int64 data type.
The rank of `indices` must be at least 2 and `indices.shape[-1] <= len(shape)`.
updates (Tensor): Define the source Tensor to be updated.
It has shape `indices.shape[:-1] + shape[indices.shape[-1]:]`.
shape (tuple[int]): Define the shape of the output tensor, has the same data type as indices.
`shape` can not be empty, and the elements in `shape` must be greater than or equal to 1.
Returns:
Tensor, the new tensor, has the same type as `update` and the same shape as `shape`.
Raises:
TypeError: If `shape` is not a tuple.
ValueError: If any element of `shape` is less than 1.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> indices = Tensor(np.array([[0], [2]]), mindspore.int32)
>>> updates = Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2],
... [3, 3, 3, 3], [4, 4, 4, 4]],
... [[1, 1, 1, 1], [2, 2, 2, 2],
... [3, 3, 3, 3], [4, 4, 4, 4]]]), mindspore.float32)
>>> shape = (4, 4, 4)
>>> output = ops.scatter_nd(indices, updates, shape)
>>> print(output)
[[[1. 1. 1. 1.]
[2. 2. 2. 2.]
[3. 3. 3. 3.]
[4. 4. 4. 4.]]
[[0. 0. 0. 0.]
[0. 0. 0. 0.]
[0. 0. 0. 0.]
[0. 0. 0. 0.]]
[[1. 1. 1. 1.]
[2. 2. 2. 2.]
[3. 3. 3. 3.]
[4. 4. 4. 4.]]
[[0. 0. 0. 0.]
[0. 0. 0. 0.]
[0. 0. 0. 0.]
[0. 0. 0. 0.]]]
>>> indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32)
>>> updates = Tensor(np.array([3.2, 1.1]), mindspore.float32)
>>> shape = (3, 3)
>>> output = ops.scatter_nd(indices, updates, shape)
>>> # In order to facilitate understanding, explain the operator pseudo-operation process step by step:
>>> # Step 1: Generate an empty Tensor of the specified shape according to the shape
>>> # [
>>> # [0. 0. 0.]
>>> # [0. 0. 0.]
>>> # [0. 0. 0.]
>>> # ]
>>> # Step 2: Modify the data at the specified location according to the indicators
>>> # 0th row of indices is [0, 1], 0th row of updates is 3.2.
>>> # means that the empty tensor in the 0th row and 1st col set to 3.2
>>> # [
>>> # [0. 3.2. 0.]
>>> # [0. 0. 0.]
>>> # [0. 0. 0.]
>>> # ]
>>> # 1th row of indices is [1, 1], 1th row of updates is 1.1.
>>> # means that the empty tensor in the 1th row and 1st col set to 1.1
>>> # [
>>> # [0. 3.2. 0.]
>>> # [0. 1.1 0.]
>>> # [0. 0. 0.]
>>> # ]
>>> # The final result is as follows:
>>> print(output)
[[0. 3.2 0.]
[0. 1.1 0.]
[0. 0. 0.]]
"""
return scatter_nd_op(indices, updates, shape)
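# Editor's note: illustrative sketch, not generated code. It demonstrates the documented
# duplicate-index rule: updates scattered to the same position are summed.
def _scatter_nd_sketch():
    import numpy as np
    import mindspore
    from mindspore import Tensor
    indices = Tensor(np.array([[0, 1], [0, 1]]), mindspore.int32)  # same position twice
    updates = Tensor(np.array([3.2, 1.1]), mindspore.float32)
    out = scatter_nd(indices, updates, (2, 2))
    # Both updates land at [0, 1] and accumulate: 3.2 + 1.1 = 4.3.
    assert np.allclose(out.asnumpy(), np.array([[0.0, 4.3], [0.0, 0.0]], np.float32))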
def select_ext(input, dim, index):
r"""
Slices the input tensor along the selected dimension at the given index.
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
input (Tensor): the input tensor.
dim (int): the dimension to slice.
index (int): the index to select with.
Returns:
Tensor.
Raises:
TypeError: If input is not a Tensor.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, mint
>>> input = Tensor([[2, 3, 4, 5],[3, 2, 4, 5]])
>>> y = mint.select(input, 0, 0)
>>> print(y)
[2 3 4 5]
"""
return select_ext_op(input, dim, index)
[文档]def select(condition, input, other):
r"""
Each element of the conditional tensor determines whether the corresponding element in the output
is selected from `input` (if ``True``) or `other` (if ``False``).
It can be defined as:
.. math::
out_i = \begin{cases}
input_i, & \text{if } condition_i \\
other_i, & \text{otherwise}
\end{cases}
Args:
condition (Tensor[bool]): The condition tensor, decides which element is chosen.
The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
input (Union[Tensor, int, float]): The first Tensor to be selected.
If input is a Tensor, its shape should be or be broadcast to :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
If input is int or float, it will be cast to int32 or float32, and broadcast to the same shape as other.
There must be at least one Tensor between input and other.
other (Union[Tensor, int, float]): The second Tensor to be selected.
If other is a Tensor, its shape should be or be broadcast to :math:`(x_1, x_2, ..., x_N, ..., x_R)`.
If other is int or float, it will be cast to int32 or float32, and broadcast to the same shape as input.
There must be at least one Tensor between input and other.
Returns:
Tensor, has the same shape as `condition`.
Raises:
TypeError: If input or other is not a Tensor.
ValueError: The shape of inputs cannot be broadcast.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> # Both inputs are Tensor
>>> cond = Tensor([True, False])
>>> x = Tensor([2,3], mindspore.float32)
>>> y = Tensor([1,2], mindspore.float32)
>>> output = ops.select(cond, x, y)
>>> print(output)
[2. 2.]
"""
return select_op(condition, input, other)
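# Editor's note: illustrative sketch, not generated code. With three tensors, `select`
# behaves like NumPy's np.where.
def _select_sketch():
    import numpy as np
    from mindspore import Tensor
    cond = np.array([True, False])
    x = np.array([2.0, 3.0], np.float32)
    y = np.array([1.0, 2.0], np.float32)
    out = select(Tensor(cond), Tensor(x), Tensor(y))
    assert np.array_equal(out.asnumpy(), np.where(cond, x, y))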
def select_v2(condition, input, other):
r"""
"""
return select_v2_op(condition, input, other)
[文档]def selu_ext(input):
r"""
Activation function SELU (Scaled exponential Linear Unit).
The activation function is defined as:
.. math::
E_{i} =
scale *
\begin{cases}
x_{i}, &\text{if } x_{i} \geq 0; \cr
\text{alpha} * (\exp(x_i) - 1), &\text{otherwise.}
\end{cases}
where :math:`alpha` and :math:`scale` are pre-defined constants (:math:`alpha=1.67326324`
and :math:`scale=1.05070098`).
See more details in `Self-Normalizing Neural Networks <https://arxiv.org/abs/1706.02515>`_.
SELU Activation Function Graph:
.. image:: ../images/SeLU.png
:align: center
Args:
input (Tensor): Tensor of any dimension.
The data type is float16, float32, bfloat16.
Returns:
Tensor, with the same type and shape as the `input`.
Raises:
TypeError: If dtype of `input` is not float16, float32, bfloat16.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, mint
>>> import numpy as np
>>> input = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
>>> output = mint.nn.functional.selu(input)
>>> print(output)
[[-1.1113307 4.202804 -1.7575096]
[ 2.101402 -1.7462534 9.456309 ]]
"""
return selu_ext_op(input)
def selu_grad(gradient, result):
r"""
"""
return selu_grad_op(gradient, result)
def sequence_concat(x, axis=0):
r"""
Support sequence Concat operation.
.. note::
This is for internal use only.
Args:
axis (Int): The axis to be concat.
Inputs:
- **sequence** (Union[List, Tuple]) - A sequence of Tensor objects with the same shape and type.
Outputs:
The concatenation of all inputs.
Raises:
TypeError: If `sequence` is not a list or tuple.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
"""
sequence_concat_op = _get_cache_prim(SequenceConcat)(axis)
return sequence_concat_op(x)
def shard_identity(input):
r"""
An intermediate operator that is only created when using mindspore.shard or
Cell.shard during the parallel procedure. It is not exposed to users.
"""
return shard_identity_op(input)
[文档]def sigmoid(input):
r"""
Computes Sigmoid of input element-wise. The Sigmoid function is defined as:
.. math::
\text{sigmoid}(x_i) = \frac{1}{1 + \exp(-x_i)}
where :math:`x_i` is an element of `x`.
Sigmoid Function Graph:
.. image:: ../images/Sigmoid.png
:align: center
Args:
input (Tensor): `input` is :math:`x` in the preceding formula. Tensor of any dimension,
the data type is float16, float32, float64, complex64 or complex128.
Returns:
Tensor, with the same type and shape as the input.
Raises:
TypeError: If dtype of `input` is not float16, float32, float64, complex64 or complex128.
TypeError: If `input` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
>>> output = ops.sigmoid(input)
>>> print(output)
[0.7310586 0.880797 0.95257413 0.98201376 0.9933072 ]
"""
return sigmoid_op(input)
[文档]def sign(input):
r"""
Returns an element-wise indication of the sign of a number. Notice: When the input is NaN and dtype is float64, the output of this operator is NaN.
.. math::
\text{out}_{i} = \begin{cases}
-1 & \text{input}_{i} < 0 \\
0 & \text{input}_{i} = 0 \\
1 & \text{input}_{i} > 0
\end{cases}
Args:
input (Tensor): Input Tensor.
Returns:
Tensor, the sign of input.
Raises:
TypeError: If `input` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore as ms
>>> import mindspore.ops as ops
>>> input = ms.Tensor([[-1, 0, 2, 4, 6], [2, 3, 5, -6, 0]])
>>> output = ops.sign(input)
>>> print(output)
[[-1 0 1 1 1]
[ 1 1 1 -1 0]]
>>> ms.set_context(device_target="CPU")
>>> x = ms.Tensor([[-1, 0, float('inf'), 4, float('nan')], [2, 3, float('-inf'), -6, 0]])
>>> output = ops.sign(x)
>>> print(output)
[[-1. 0. 1. 1. 0.]
[ 1. 1. -1. -1. 0.]]
"""
return sign_op(input)
[文档]def silu(input):
r"""
Computes Sigmoid Linear Unit of input element-wise. The SiLU function is defined as:
.. math::
\text{SiLU}(x) = x * \sigma(x),
where :math:`x` is an element of the input and :math:`\sigma(x)` is the Sigmoid function:
.. math::
\sigma(x_i) = \frac{1}{1 + \exp(-x_i)},
SiLU Function Graph:
.. image:: ../images/SiLU.png
:align: center
Args:
input (Tensor): `input` is :math:`x` in the preceding formula. Input with the data type
float16 or float32.
Returns:
Tensor, with the same type and shape as the `input`.
Raises:
TypeError: If dtype of `input` is neither float16 nor float32.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> import numpy as np
>>> input = Tensor(np.array([-1, 2, -3, 2, -1]), mindspore.float16)
>>> output = ops.silu(input)
>>> print(output)
[-0.269 1.762 -0.1423 1.762 -0.269]
"""
return silu_op(input)
[文档]def sin(input):
r"""
Computes sine of the input element-wise.
.. math::
output_i = \sin(input_i)
Args:
input (Tensor): The shape of tensor is
:math:`(N,*)` where :math:`*` means, any number of additional dimensions.
Returns:
Tensor, has the same shape as the `input`.
The dtype of output is float32 when dtype of `input` is in
[bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as the `input`.
Raises:
TypeError: If `input` is not a Tensor.
TypeError:
* CPU/GPU: If dtype of `input` is not float16, float32, float64, complex64 or complex128.
* Ascend: If dtype of `input` is not bool, int8, uint8, int16, int32, int64, float16, float32, float64, complex64 or complex128.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32)
>>> output = ops.sin(input)
>>> print(output)
[0.58103514 0.27635565 0.4168708 0.58103514]
"""
return sin_op(input)
[文档]def sinc(input):
r"""
Computes the normalized sinc of input.
.. math::
out_i = \begin{cases} \frac{\sin(\pi input_i)}{\pi input_i} & input_i\neq 0\\
1 & input_i=0 \end{cases}
Args:
input (Tensor): The input Tensor.
Returns:
Tensor, has the same shape as the `input`. The dtype of output is float32 when dtype of `input` is in
[int, bool]. Otherwise output has the same dtype as the `input`.
Raises:
TypeError: If `input` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32)
>>> output = ops.sinc(input)
>>> print(output)
[0.47735003 0.8759357 0.7224278 0.47735003]
"""
return sinc_op(input)
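# Editor's note: illustrative sketch, not generated code. `sinc` is the normalized sinc;
# NumPy's np.sinc uses the same sin(pi*x)/(pi*x) convention, with out = 1 at x = 0.
def _sinc_sketch():
    import numpy as np
    from mindspore import Tensor
    x = np.array([0.62, 0.0, 0.43], np.float32)  # include 0 to hit the out_i = 1 branch
    out = sinc(Tensor(x))
    assert np.allclose(out.asnumpy(), np.sinc(x), rtol=1e-6)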
[文档]def sinh(input):
r"""
Computes hyperbolic sine of the input element-wise.
.. math::
output_i = \sinh(input_i)
Args:
input (Tensor): The input tensor of hyperbolic sine function.
Supported dtypes:
- GPU/CPU: float16, float32, float64, complex64 or complex128.
- Ascend: bool, int8, uint8, int16, int32, int64, float16, float32, float64, complex64, complex128 or bfloat16.
Returns:
Tensor, has the same shape as the `input`.
The dtype of output is float32 when dtype of `input` is in
[bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as the `input`.
Raises:
TypeError: If `input` is not a Tensor.
TypeError:
* CPU/GPU: If dtype of `input` is not float16, float32, float64, complex64 or complex128.
* Ascend: If dtype of `input` is not bool, int8, uint8, int16, int32, int64, float16, float32, float64, complex64, complex128 or bfloat16.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32)
>>> output = ops.sinh(input)
>>> print(output)
[0.6604918 0.28367308 0.44337422 0.6604918 ]
"""
return sinh_op(input)
[文档]def softplus_ext(input, beta=1, threshold=20):
r"""
Applies softplus function to `input` element-wise.
The softplus function is shown as follows, x is the element of `input` :
.. math::
\text{output} = \frac{1}{\beta}\log(1 + \exp(\beta \times x))
When :math:`input \times \beta > threshold`, the implementation converts to the linear function to ensure numerical stability.
Args:
input (Tensor): Tensor of any dimension. Supported dtypes:
- Ascend: float16, float32, bfloat16.
beta (number.Number, optional): Scaling parameters in the softplus function. Default: ``1`` .
threshold (number.Number, optional): For numerical stability, the softplus function is converted
to a threshold parameter of a linear function. Default: ``20`` .
Returns:
Tensor, with the same type and shape as the input.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If dtype of `input` is not float16, float32, bfloat16.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([0.1, 0.2, 30, 25]), mindspore.float32)
>>> output = ops.auto_generate.softplus_ext(input)
>>> print(output)
[0.74439657 0.7981388 30. 25.]
"""
return softplus_ext_op(input, beta, threshold)
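# Editor's note: illustrative sketch, not generated code. Below the threshold, softplus_ext
# follows log1p(exp(beta * x)) / beta; above it, the identity function is used.
def _softplus_ext_sketch():
    import numpy as np
    from mindspore import Tensor
    x = np.array([0.1, 0.2, 30.0, 25.0], np.float32)
    beta, threshold = 1.0, 20.0
    out = softplus_ext(Tensor(x), beta, threshold)
    ref = np.where(x * beta > threshold, x, np.log1p(np.exp(beta * x)) / beta)
    assert np.allclose(out.asnumpy(), ref, rtol=1e-5)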
def softshrink_grad(input_grad, input_x, lambd=0.5):
r"""
Computes gradients for SoftShrinkGrad operation.
Args:
input_grad (Tensor): the gradients of loss to output of SoftShrink function. Supported dtypes:
- Ascend: float16, float32, bfloat16.
- CPU/GPU: float16, float32.
input_x (Tensor): Must be the input `input` of the forward operator SoftShrink. Supported dtypes:
- Ascend: float16, float32, bfloat16.
- CPU/GPU: float16, float32.
lambd (float): the lambda value for the Softshrink formulation. Default: ``0.5`` .
Returns:
backprops, a Tensor with the same shape and data type as `input_x`.
Raises:
ValueError: If `lambd` is not a float.
ValueError: If shape of `input_grad` is not the same as `input_x`.
TypeError: If dtype of `input_grad` is not the same as `input_x`.
TypeError: If dtype of `input_grad` or `input_x` is not float16, float32 or bfloat16.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
"""
return softshrink_grad_impl(input_grad, input_x, lambd)
[文档]def softshrink(input, lambd=0.5):
r"""
Soft Shrink activation function. Calculates the output according to the input elements.
The formula is defined as follows:
.. math::
\text{SoftShrink}(x) =
\begin{cases}
x - \lambda, & \text{ if } x > \lambda \\
x + \lambda, & \text{ if } x < -\lambda \\
0, & \text{ otherwise }
\end{cases}
SoftShrink Activation Function Graph:
.. image:: ../images/Softshrink.png
:align: center
Args:
input (Tensor): The input of Soft Shrink. Supported dtypes:
- Ascend: float16, float32, bfloat16.
- CPU/GPU: float16, float32.
lambd (number, optional): The threshold :math:`\lambda` defined by the Soft Shrink formula.
It should be greater than or equal to 0, default: ``0.5`` .
Returns:
Tensor, has the same data type and shape as the input `input`.
Raises:
TypeError: If `lambd` is not a float, int or bool.
TypeError: If `input` is not a tensor.
TypeError: If dtype of `input` is not float16, float32 or bfloat16.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import Tensor
>>> from mindspore import ops
>>> import numpy as np
>>> x = Tensor(np.array([[ 0.5297, 0.7871, 1.1754], [ 0.7836, 0.6218, -1.1542]]), mindspore.float32)
>>> output = ops.softshrink(x)
>>> print(output)
[[ 0.02979 0.287 0.676 ]
[ 0.2837 0.1216 -0.6543 ]]
"""
return softshrink_impl(input, lambd)
def solve_triangular(a, b, trans=0, lower=False, unit_diagonal=False):
r"""
Solve the linear system :math:`a x = b` for `x`, assuming `a` is a triangular matrix.
Note:
- `solve_triangular` is currently only used in `mindscience` scientific computing scenarios and
does not support other usage scenarios.
- `solve_triangular` is not supported on Windows platform yet.
Args:
a (Tensor): A triangular matrix of shape :math:`(*, M, M)` where :math:`*` is zero or more batch dimensions.
b (Tensor): A Tensor of shape :math:`(*, M)` or :math:`(*, M, N)`. Right-hand side matrix in :math:`a x = b`.
trans (Union[int, str], optional): Type of system to solve. Default: ``0``.
======== =========
trans system
======== =========
0 or 'N' a x = b
1 or 'T' a^T x = b
2 or 'C' a^H x = b
======== =========
lower (bool, optional): Use only data contained in the lower triangle of `a`. Default: ``False``.
unit_diagonal (bool, optional): If ``True``, diagonal elements of :math:`a` are assumed to be 1 and
will not be referenced. Default: ``False``.
Returns:
Tensor of shape :math:`(*, M)` or :math:`(*, M, N)`,
which is the solution to the system :math:`a x = b`.
Shape of :math:`x` matches :math:`b`.
Raises:
ValueError: If `a` has fewer than 2 dimensions.
ValueError: If `a` is not a square matrix.
TypeError: If dtype of `a` and `b` are not the same.
ValueError: If the shape of `a` and `b` are not matched.
ValueError: If `trans` is not in set {0, 1, 2, 'N', 'T', 'C'}.
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> import numpy as onp
>>> import mindspore
>>> from mindspore import Tensor
>>> from mindspore.ops import solve_triangular
>>> a = Tensor(onp.array([[3, 0, 0, 0], [2, 1, 0, 0], [1, 0, 1, 0], [1, 1, 1, 1]], onp.float32))
>>> b = Tensor(onp.array([3, 1, 3, 4], onp.float32))
>>> x = solve_triangular(a, b, lower=True, unit_diagonal=False, trans='N')
>>> print(x)
[ 1. -1. 2. 2.]
>>> print(a @ x) # Check the result
[3. 1. 3. 4.]
"""
return solve_triangular_op(a, b, trans, lower, unit_diagonal)
def sort_ext(input, dim=-1, descending=False, stable=False):
r"""
"""
return sort_ext_op(input, dim, descending, stable)
[文档]def sqrt(x):
r"""
Returns sqrt of a tensor element-wise.
.. math::
out_{i} = \sqrt{x_{i}}
Args:
x (Tensor): The input tensor with a dtype of number.Number.
Returns:
Tensor, has the same shape as the `x`.
Raises:
TypeError: If `x` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([1.0, 4.0, 9.0]), mindspore.float32)
>>> output = ops.sqrt(x)
>>> print(output)
[1. 2. 3.]
"""
return sqrt_op(x)
[文档]def square(input):
r"""
Returns square of a tensor element-wise.
.. math::
y_i = input_i ^ 2
Args:
input (Tensor): The input tensor with a dtype of Number.
Returns:
Tensor, has the same shape and dtype as the `input`.
Raises:
TypeError: If `input` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
>>> output = ops.square(input)
>>> print(output)
[1. 4. 9.]
"""
return square_op(input)
[文档]def stack_ext(tensors, dim=0):
r"""
Stacks a list of tensors in specified dim.
Stacks the list of input tensors with the same rank `R`, output is a tensor of rank `(R+1)`.
Given input tensors of shape :math:`(x_1, x_2, ..., x_R)`. Set the number of input tensors as `N`.
If :math:`dim \ge 0`, the shape of the output tensor is
:math:`(x_1, x_2, ..., x_{dim}, N, x_{dim+1}, ..., x_R)`.
Args:
tensors (Union[tuple, list]): A Tuple or list of Tensor objects with the same shape and type.
dim (int): Dimension to stack. The range is [-(R+1), R+1). Default: ``0`` .
Returns:
Tensor. A stacked Tensor with the same type as `tensors`.
Raises:
TypeError: If the data types of elements in `tensors` are not the same.
ValueError: If `dim` is out of the range [-(R+1), R+1);
or if the shapes of elements in tensors are not the same.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> import numpy as np
>>> data1 = Tensor(np.array([0, 1]).astype(np.float32))
>>> data2 = Tensor(np.array([2, 3]).astype(np.float32))
>>> output = ops.auto_generate.stack_ext([data1, data2], 0)
>>> print(output)
[[0. 1.]
[2. 3.]]
"""
return stack_ext_impl(tensors, dim)
def strided_slice(input_x, begin, end, strides, begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0):
r"""
Extracts a strided slice of a Tensor based on `begin/end` index and `strides`.
This operation extracts a fragment of size (end-begin)/strides from the given `input_x`.
Starting from the beginning position, strides are repeatedly added to the index until,
in every dimension, the index reaches the ending position.
Note:
- `begin` , `end` and `strides` must have the same shape.
- `begin` , `end` and `strides` are all 1-D Tensors, and their shape sizes
must not be greater than the dim of `input_x`.
During the slicing process, the fragment (end-begin)/strides is extracted from each dimension.
Example: For Tensor `input_x` with shape :math:`(5, 6, 7)`,
set `begin`, `end` and `strides` to (1, 3, 2), (3, 5, 6),
(1, 1, 2) respectively, then elements from index 1 to 3 are extracted for dim 0, index 3 to 5
are extracted for dim 1 and index 2 to 6 with a stride of 2 are extracted for dim 2; this
process is equivalent to the pythonic slice `input_x[1:3, 3:5, 2:6:2]`.
If the length of `begin`, `end` and `strides` is smaller than the dim of `input_x`,
then all elements are extracted from the missing dims; it behaves as if the missing
entries of `begin`, `end` and `strides` were padded with zeros, the size of that dim, and ones, respectively.
Example: For Tensor `input_x` with shape :math:`(5, 6, 7)`,
set `begin`, `end` and `strides` to (1, 3),
(3, 5), (1, 1) respectively, then elements from index 1 to 3 are extracted
for dim 0, index 3 to 5 are extracted for dim 1 and all elements from index 0 to 7 are extracted
for dim 2; this process is equivalent to the pythonic slice `input_x[1:3, 3:5, 0:7]`.
Here's how a mask works:
For each specific mask, it will be converted to a binary representation internally, and the
result is then reversed before the calculation starts. For Tensor `input_x` with
shape :math:`(5, 6, 7)`, given a mask value of 3, which
can be represented as 0b011, reversing it gives 0b110, which means the first and second dims of the
original Tensor will be affected by this mask. In the examples below, for simplicity, all masks
mentioned are given in their reversed binary form:
- `begin_mask` and `end_mask`
If the ith bit of `begin_mask` is 1, `begin[i]` is ignored and the fullest
possible range in that dimension is used instead. `end_mask` is analogous,
except with the end range. For Tensor `input_x` with shape :math:`(5, 6, 7, 8)`, if `begin_mask`
is 0b110, `end_mask` is 0b011, the slice `input_x[0:3, 0:6, 2:7:2]` is produced.
- `ellipsis_mask`
If the ith bit of `ellipsis_mask` is 1, as many unspecified dimensions as needed
will be inserted between other dimensions. Only one non-zero bit is allowed
in `ellipsis_mask`. For Tensor `input_x` with shape :math:`(5, 6, 7, 8)`, `input_x[2:,...,:6]`
is equivalent to `input_x[2:5,:,:,0:6]` , `input_x[2:,...]` is equivalent
to `input_x[2:5,:,:,:]`.
- `new_axis_mask`
If the ith bit of `new_axis_mask` is 1, `begin`, `end` and `strides` are
ignored and a new length 1 dimension is added at the specified position
in the output Tensor. For Tensor `input_x` with shape :math:`(5, 6, 7)`, if `new_axis_mask`
is 0b110, a new dim is added to the second dim, which will produce
a Tensor with shape :math:`(5, 1, 6, 7)`.
- `shrink_axis_mask`
If the ith bit of `shrink_axis_mask` is 1, `begin`, `end` and `strides`
are ignored and dimension i is shrunk away (removed from the output shape).
For Tensor `input_x` with shape :math:`(5, 6, 7)`,
if `shrink_axis_mask` is 0b010, it is equivalent to slice `x[:, 5, :]`
and results in an output shape of :math:`(5, 7)`.
Note:
`new_axis_mask` and `shrink_axis_mask` are not recommended to be
used at the same time; doing so might produce unexpected results.
Args:
input_x (Tensor): The input Tensor to be extracted from.
begin (tuple[int]): A tuple which represents the location where to start.
end (tuple[int]): A tuple which represents the maximum location where to end.
strides (tuple[int]): A tuple which represents the stride that is repeatedly added to the index
until the maximum location is reached. Only ints are allowed; a stride can be negative,
which results in reversed slicing.
begin_mask (int, optional): Starting index of the slice. Default: ``0`` .
end_mask (int, optional): Ending index of the slice. Default: ``0`` .
ellipsis_mask (int, optional): An int mask, ignore slicing operation when set to 1. Default: ``0`` .
new_axis_mask (int, optional): An int mask for adding new dims. Default: ``0`` .
shrink_axis_mask (int, optional): An int mask for shrinking dims. Default: ``0`` .
Returns:
Tensor, the strided slice extracted from `input_x` based on the `begin`/`end` indices and `strides`.
Raises:
TypeError: If `begin_mask`, `end_mask`, `ellipsis_mask`, `new_axis_mask` or
`shrink_axis_mask` is not an int.
TypeError: If `begin`, `end` or `strides` is not tuple[int].
ValueError: If `begin_mask`, `end_mask`, `ellipsis_mask`, `new_axis_mask` or
`shrink_axis_mask` is less than 0.
ValueError: If `begin`, `end` and `strides` have different shapes.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input_x = Tensor([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]],
... [[5, 5, 5], [6, 6, 6]]], mindspore.float32)
>>> output = ops.strided_slice(input_x, (1, 0, 2), (3, 1, 3), (1, 1, 1))
>>> # Take this " output = strided_slice(input_x, (1, 0, 2), (3, 1, 3), (1, 1, 1)) " as an example,
>>> # start = [1, 0, 2] , end = [3, 1, 3], strides = [1, 1, 1], Find a segment of (start, end),
>>> # note that end is an open interval
>>> # To facilitate understanding, this operator can be divided into three steps:
>>> # Step 1: Calculation of the first dimension:
>>> # start = 1, end = 3, strides = 1, so the 1st and 2nd rows can be taken, giving the intermediate output:
>>> # output_1st =
>>> # [
>>> # [
>>> # [3,3,3]
>>> # [4,4,4]
>>> # ]
>>> # [
>>> # [5,5,5]
>>> # [6,6,6]
>>> # ]
>>> # ]
>>> # Step 2: Calculation of the second dimension
>>> # 2nd dimension: start = 0, end = 1, strides = 1, so only the 0th row
>>> # can be taken, giving the intermediate output:
>>> # output_2nd =
>>> # [
>>> # [
>>> # [3,3,3]
>>> # ]
>>> # [
>>> # [5,5,5]
>>> # ]
>>> # ]
>>> # Step 3: Calculation of the third dimension
>>> # 3rd dimension: start = 2, end = 3, strides = 1, so the 2nd column
>>> # can be taken, giving the final output:
>>> # output_3rd =
>>> # [
>>> # [
>>> # [3]
>>> # ]
>>> # [
>>> # [5]
>>> # ]
>>> # ]
>>> # The final output after finishing is:
>>> print(output)
[[[3.]]
[[5.]]]
>>> # Another example:
>>> output = ops.strided_slice(input_x, (1, 0, 0), (2, 1, 3), (1, 1, 1))
>>> print(output)
[[[3. 3. 3.]]]
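>>> # A hedged mask illustration: begin_mask=0b1 causes begin[0] to be ignored,
>>> # so dim 0 uses its full range and the slice becomes input_x[0:3, 0:1, 0:3].
>>> output = ops.strided_slice(input_x, (1, 0, 0), (3, 1, 3), (1, 1, 1), begin_mask=1)
>>> print(output)
[[[1. 1. 1.]]
 [[3. 3. 3.]]
 [[5. 5. 5.]]]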
"""
strided_slice_op = _get_cache_prim(StridedSlice)(begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask)
return strided_slice_op(input_x, begin, end, strides)
def sub_ext(input, other, alpha=1):
r"""
Subtracts the scaled `other` value from the `input` Tensor.
.. math::
out_{i} = input_{i} - alpha \times other_{i}
Note:
- When the two inputs have different shapes,
they must be able to broadcast to a common shape.
- The two inputs and alpha comply with the implicit type conversion rules to make the data types
consistent.
Args:
input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
a bool or a tensor whose data type is
`number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
`bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
other (Union[Tensor, number.Number, bool]): The second input, is a number.Number or
a bool or a tensor whose data type is
`number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
`bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
alpha (number.Number): A scaling factor applied to `other`. Default: ``1`` .
Returns:
Tensor with a shape that is the same as the broadcasted shape of the input `input` and `other`,
and the data type is the one with higher precision or higher digits among the two inputs and alpha.
Raises:
TypeError: If the type of `input`, `other`, or `alpha` is not one of the following: Tensor, number.Number, bool.
TypeError: If `alpha` is of type float but `input` and `other` are not of type float.
TypeError: If `alpha` is of type bool but `input` and `other` are not of type bool.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor
>>> from mindspore import ops
>>> x = Tensor(np.array([4, 5, 6]).astype(np.float32))
>>> y = Tensor(1, mindspore.int32)
>>> alpha = 0.5
>>> output = ops.auto_generate.sub_ext(x, y, alpha)
>>> print(output)
[3.5 4.5 5.5]
>>> # the data type of x is float32, the data type of y is int32,
>>> # alpha is a float, and the output is the data format of higher precision float32.
>>> print(output.dtype)
Float32
"""
return sub_ext_op(input, other, alpha)
def sub(input, other):
r"""
Subtracts the second input tensor from the first input tensor element-wise.
.. math::
out_{i} = input_{i} - other_{i}
Note:
- When the two inputs have different shapes, they must be able to broadcast to a common shape.
- The two inputs can not be bool type at the same time,
[True, Tensor(True, bool\_), Tensor(np.array([True]), bool\_)] are all considered bool type.
- The two inputs comply with the implicit type conversion rules to make the data types
consistent.
Args:
input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
a bool or a tensor whose data type is
`number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
`bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
other (Union[Tensor, number.Number, bool]): The second input, when the first input is a Tensor,
the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool.
Returns:
Tensor with a shape that is the same as the broadcasted shape of the input `input` and `other`,
and the data type is the one with higher precision or higher digits among the two inputs.
Raises:
TypeError: If `input` and `other` are not number.Number or bool or Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> other = Tensor(np.array([4, 5, 6]), mindspore.int32)
>>> output = ops.sub(input, other)
>>> print(output)
[-3 -3 -3]
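>>> # The second input may also be a scalar number (per the Args above).
>>> output = ops.sub(input, 1)
>>> print(output)
[0 1 2]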
"""
return sub_op(input, other)
def swiglu_grad(grad_output, input, dim=-1):
r"""
"""
return swiglu_grad_op(grad_output, input, dim)
def swiglu(input, dim=-1):
r"""
Computes SwiGLU (Swish-Gated Linear Unit activation function) of input tensor.
SwiGLU is a variant of the :class:`mindspore.ops.GLU` activation function, it is defined as:
.. math::
{SwiGLU}(a, b)= Swish(a) \otimes b
where :math:`a` is the first half of the `input` matrix and :math:`b` is the second half,
:math:`Swish(a)=a\sigma(a)`, :math:`\sigma` is the :func:`mindspore.ops.sigmoid` activation function
and :math:`\otimes` is the Hadamard product.
Args:
input (Tensor): Tensor to be split. It has shape :math:`(\ast_1, N, \ast_2)`
where `*` means, any number of additional dimensions. :math:`N` must be divisible by 2.
dim (int, optional): the axis to split the input. It must be int. Default: ``-1`` , the last axis of `input`.
Returns:
Tensor, the same dtype as the `input`, with the shape :math:`(\ast_1, M, \ast_2)` where :math:`M=N/2`.
Raises:
TypeError: If dtype of `input` is not float16, float32 or bfloat16.
TypeError: If `input` is not a Tensor.
RuntimeError: If the size of the dimension specified by `dim` is not divisible by 2.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input = Tensor([[-0.12, 0.123, 31.122], [2.1223, 4.1212121217, 0.3123]], dtype=mindspore.float32)
>>> output = ops.swiglu(input, 0)
>>> print(output)
[[-0.11970687 0.2690224 9.7194 ]]
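>>> # A second, illustrative call using the default dim=-1: the size along the split axis halves.
>>> input2 = Tensor([[1.0, 2.0, 3.0, 4.0]], dtype=mindspore.float32)
>>> output2 = ops.swiglu(input2)
>>> print(output2.shape)
(1, 2)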
"""
return swiglu_op(input, dim)
def t_ext(input):
r"""
Transpose the input tensor.
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
input (Tensor): The input tensor.
Returns:
Tensor, the transposed 2-D tensor; a 1-D tensor is returned as is.
Raises:
ValueError: If the dimension of `input` is greater than 2.
ValueError: If `input` is empty.
TypeError: If `input` is not a tensor.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([[1, 2, 3], [4, 5, 6]]), mindspore.float32)
>>> output = ops.t_ext(input)
>>> print(output)
[[ 1. 4.]
[ 2. 5.]
[ 3. 6.]]
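>>> # Per the Returns note, a 1-D tensor is returned unchanged (illustrative check).
>>> input_1d = Tensor(np.array([1, 2, 3]), mindspore.float32)
>>> output_1d = ops.t_ext(input_1d)
>>> print(output_1d)
[1. 2. 3.]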
"""
return t_ext_op(input)
def tan(input):
r"""
Computes tangent of `input` element-wise.
.. math::
out_i = \tan(input_i)
Args:
input (Tensor): The input Tensor, valid for any dimensions.
Returns:
Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
Raises:
TypeError: If `input` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([-1.0, 0.0, 1.0]), mindspore.float32)
>>> output = ops.tan(input)
>>> print(output)
[-1.5574077 0. 1.5574077]
"""
return tan_op(input)
def tanh(input):
r"""
Computes hyperbolic tangent of input element-wise. The Tanh function is defined as:
.. math::
tanh(x_i) = \frac{\exp(x_i) - \exp(-x_i)}{\exp(x_i) + \exp(-x_i)} = \frac{\exp(2x_i) - 1}{\exp(2x_i) + 1},
where :math:`x_i` is an element of the input Tensor.
Tanh Activation Function Graph:
.. image:: ../images/Tanh.png
:align: center
Args:
input (Tensor): Input of Tanh.
Returns:
Tensor, with the same type and shape as the `input`.
Raises:
TypeError: If `input` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
>>> output = ops.tanh(input)
>>> print(output)
[0.7615941 0.9640276 0.9950547 0.9993293 0.9999092]
"""
return tanh_op(input)
def tensor_scatter_elements(data, indices, updates, axis=0, reduce='none'):
r"""
"""
tensor_scatter_elements_op = _get_cache_prim(TensorScatterElements)(axis, reduce)
return tensor_scatter_elements_op(data, indices, updates)
def topk_ext(input, k, dim=-1, largest=True, sorted=True):
r"""
Finds values and indices of the `k` largest or smallest entries along a given dimension.
.. warning::
- If sorted is set to False, due to different memory layout and traversal methods on different platforms,
the display order of calculation results may be inconsistent when `sorted` is False.
If the `input` is a one-dimensional Tensor, finds the `k` largest or smallest entries in the Tensor,
and outputs its value and index as a Tensor. values[`k`] is the `k` largest item in `input`,
and its index is indices [`k`].
For a multi-dimensional matrix,
calculates the first or last `k` entries in a given dimension, therefore:
.. math::
values.shape = indices.shape
If the two compared elements are the same, the one with the smaller index value is returned first.
Args:
input (Tensor): Input to be computed.
k (int): The number of top or bottom elements to be computed along the given dimension.
dim (int, optional): The dimension to sort along. Default: ``-1`` .
largest (bool, optional): If largest is ``False`` then the k smallest elements are returned.
Default: ``True`` .
sorted (bool, optional): If ``True`` , the obtained elements will be sorted by the values in descending
order or ascending order according to `largest`. If ``False`` , the obtained elements will not be
sorted. Default: ``True`` .
Returns:
A tuple consisting of `values` and `indices`.
- values (Tensor) - The `k` largest or smallest elements in each slice of the given dimension.
- indices (Tensor) - The indices of the values within the given dimension of the input.
Raises:
TypeError: If `sorted` is not a bool.
TypeError: If `input` is not a Tensor.
TypeError: If `k` is not an int.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore as ms
>>> from mindspore import ops
>>> x = ms.Tensor([[0.5368, 0.2447, 0.4302, 0.9673],
... [0.4388, 0.6525, 0.4685, 0.1868],
... [0.3563, 0.5152, 0.9675, 0.8230]], dtype=ms.float32)
>>> output = ops.topk_ext(x, 2, dim=1)
>>> print(output)
(Tensor(shape=[3, 2], dtype=Float32, value=
[[ 9.67299998e-01, 5.36800027e-01],
[ 6.52499974e-01, 4.68499988e-01],
[ 9.67499971e-01, 8.23000014e-01]]), Tensor(shape=[3, 2], dtype=Int32, value=
[[3, 0],
[1, 2],
[2, 3]]))
>>> output2 = ops.topk_ext(x, 2, dim=1, largest=False)
>>> print(output2)
(Tensor(shape=[3, 2], dtype=Float32, value=
[[ 2.44700000e-01, 4.30200011e-01],
[ 1.86800003e-01, 4.38800007e-01],
[ 3.56299996e-01, 5.15200019e-01]]), Tensor(shape=[3, 2], dtype=Int32, value=
[[1, 2],
[3, 0],
[0, 1]]))
"""
return topk_ext_op(input, k, dim, largest, sorted)
def topkrouter(input, capacity, expert_num, drop_type=0):
r"""
TopkRouter implementation in MOE.
Inputs:
- **x** (Tensor) - 3-D input Tensor. Supported dtypes: int32, int64.
- **capacity** (int) - The maximum number of tokens each expert can handle.
- **expert_num** (int) - The number of experts.
- **drop_type** (int) - S-Drop or K-Drop: 0 means S-Drop, 1 means K-Drop. Default: ``0`` .
Outputs:
tuple(Tensor), a tuple of 2 tensors, `dispatch_index` and `combine_index`.
- dispatch_index (Tensor) - The token IDs processed by each expert.
- combine_index (Tensor) - The combine index of each token.
Supported Platforms:
``Ascend``
"""
return topkrouter_op(input, capacity, expert_num, drop_type)
def trace_ext(input):
r"""
Returns a new tensor containing the sum of the elements on the main diagonal of `input`.
Note:
Input must be a tensor.
Args:
input (Tensor): 2-D Tensor.
Returns:
Tensor, when the data type of `input` is integer or bool, its data type is int64, otherwise it is the same as `input`, and size equals to 1.
Raises:
TypeError: If `input` is not a Tensor.
ValueError: If the dimension of `input` is not equal to 2.
TypeError: If the dtype of `input` is not one of float16, float32, float64, bool, uint8, int8, int16, int32, int64, complex64, complex128, bfloat16.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([[10, 11, 12], [13, 14, 15], [16, 17, 18]]), mindspore.float32)
>>> output = ops.trace_ext(input)
>>> print(output)
42.0
>>> input = Tensor(np.arange(1, 13).reshape(3, 4), mindspore.float32)
>>> output = ops.trace_ext(input)
>>> print(output)
18.0
>>> input = Tensor(np.arange(12, 0, -1).reshape(4, 3), mindspore.float32)
>>> output = ops.trace_ext(input)
>>> print(output)
24.0
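>>> # Per the Returns note, integer or bool inputs produce an int64 result (illustrative dtype check).
>>> input = Tensor(np.array([[1, 2], [3, 4]]), mindspore.int32)
>>> output = ops.trace_ext(input)
>>> print(output.dtype)
Int64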
"""
return trace_ext_op(input)
def trace(input):
r"""
Returns a new tensor containing the sum of the elements on the main diagonal of `input`.
Note:
Input must be a matrix; complex numbers are not supported at present.
Args:
input (Tensor): A matrix to be calculated. The matrix must be two dimensional.
Returns:
Tensor, with the same data type as input `input`, and size equals to 1.
Raises:
TypeError: If `input` is not a Tensor.
ValueError: If the dimension of `input` is not equal to 2.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([[10, 11, 12], [13, 14, 15], [16, 17, 18]]), mindspore.float32)
>>> output = ops.trace(input)
>>> print(output)
42.0
>>> input = Tensor(np.arange(1, 13).reshape(3, 4), mindspore.float32)
>>> output = ops.trace(input)
>>> print(output)
18.0
>>> input = Tensor(np.arange(12, 0, -1).reshape(4, 3), mindspore.float32)
>>> output = ops.trace(input)
>>> print(output)
24.0
"""
return trace_op(input)
def transpose_ext(input, dim0, dim1):
r"""
Interchange two axes of a tensor.
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
input(Tensor): Input tensor.
dim0 (int): First axis.
dim1 (int): Second axis.
Returns:
Transposed tensor, has the same data type as `input`.
Raises:
TypeError: If argument `input` is not Tensor.
TypeError: If `dim0` or `dim1` is not integer.
ValueError: If `dim0` or `dim1` is not in the range of :math:`[-ndim, ndim-1]`.
Supported Platforms:
``Ascend``
Examples:
>>> import numpy as np
>>> from mindspore import mint
>>> from mindspore import Tensor
>>> input = Tensor(np.ones((2,3,4), dtype=np.float32))
>>> output = mint.transpose(input, 0, 2)
>>> print(output.shape)
(4, 3, 2)
"""
return transpose_ext_op(input, dim0, dim1)
def transpose(input, input_perm):
r"""
Permutes the dimensions of the input tensor according to input permutation.
For a 1-D array this has no effect, as a transposed vector is simply the same vector.
To convert a 1-D array into a 2D column vector please refer to :func:`mindspore.ops.expand_dims`.
For a 2-D array, this is a standard matrix transpose. For an n-D array, if axes are given,
their order indicates how the axes are permuted (see Examples).
If axes are not provided and a.shape is :math:`(i[0], i[1], ... i[n-2], i[n-1])`,
then a.transpose().shape is :math:`(i[n-1], i[n-2], ... i[1], i[0])`.
Note:
On GPU and CPU, if the value of `input_perm` is negative, its actual value is `input_perm[i] + rank(input)`.
Negative value of `input_perm` is not supported on Ascend.
Args:
input (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
input_perm (tuple[int]): The permutation to be converted. The elements in `input_perm` are composed of
the indexes of each dimension of `input`. The length of `input_perm` and the shape of `input` must be
the same. Only constant value is allowed. Must be in the range [-rank(input), rank(input)).
Returns:
Tensor, the type of output tensor is the same as `input` and the shape of output tensor is decided by the
shape of `input` and the value of `input_perm`.
Raises:
TypeError: If `input_perm` is not a tuple.
ValueError: If length of shape of `input` is not equal to length of shape of `input_perm`.
ValueError: If the same element exists in `input_perm`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]), mindspore.float32)
>>> input_perm = (0, 2, 1)
>>> output = ops.transpose(input, input_perm)
>>> print(output)
[[[ 1. 4.]
[ 2. 5.]
[ 3. 6.]]
[[ 7. 10.]
[ 8. 11.]
[ 9. 12.]]]
"""
return transpose_op(input, input_perm)
def tril_ext(input, diagonal=0):
r"""
"""
return tril_ext_impl(input, diagonal)
def triu(input, diagonal=0):
r"""
Returns the upper triangular part of `input` (the elements on and above the diagonal),
and sets the other elements to zero.
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
input (Tensor): The input tensor with shape :math:`(M, N, *)` where * means any number of additional dimensions.
diagonal (int, optional): An optional attribute that indicates the diagonal to consider. Default: ``0``,
indicating the main diagonal.
Returns:
Tensor, a tensor has the same shape and data type as `input`.
Raises:
TypeError: If `diagonal` is not an int.
TypeError: If `input` is not a Tensor.
ValueError: If the dimension of `input` is less than 2.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([[ 1, 2, 3, 4],
... [ 5, 6, 7, 8],
... [10, 11, 12, 13],
... [14, 15, 16, 17]]))
>>> result = ops.triu(x)
>>> print(result)
[[ 1 2 3 4]
[ 0 6 7 8]
[ 0 0 12 13]
[ 0 0 0 17]]
>>> x = Tensor(np.array([[ 1, 2, 3, 4],
... [ 5, 6, 7, 8],
... [10, 11, 12, 13],
... [14, 15, 16, 17]]))
>>> result = ops.triu(x, diagonal=1)
>>> print(result)
[[ 0 2 3 4]
[ 0 0 7 8]
[ 0 0 0 13]
[ 0 0 0 0]]
>>> x = Tensor(np.array([[ 1, 2, 3, 4],
... [ 5, 6, 7, 8],
... [10, 11, 12, 13],
... [14, 15, 16, 17]]))
>>> result = ops.triu(x, diagonal=-1)
>>> print(result)
[[ 1 2 3 4]
[ 5 6 7 8]
[ 0 11 12 13]
[ 0 0 16 17]]
"""
return triu_impl(input, diagonal)
def trunc(input):
r"""
Returns a new tensor with the truncated integer values of the elements of the input tensor.
Args:
input (Tensor): The input tensor.
Returns:
Tensor, the same shape and data type as the input.
Raises:
TypeError: If `input` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([3.4742, 0.5466, -0.8008, -3.9079]),mindspore.float32)
>>> output = ops.trunc(x)
>>> print(output)
[3. 0. 0. -3.]
"""
return trunc_op(input)
def tuple_to_tensor(input_tuple, dtype=None):
r"""
"""
return tuple_to_tensor_op(input_tuple, dtype)
def type_as(input, tensor):
r"""
Returns the first input tensor cast to the data type of the second input tensor.
.. warning::
This is an experimental API that is subject to change or deletion.
Note:
When converting complex numbers to boolean type, the imaginary part of the complex number is not
taken into account. As long as the real part is non-zero, it returns True; otherwise, it returns False.
Args:
input (Tensor): The shape of tensor is :math:`(x_0, x_1, ..., x_R)`. The tensor whose data type is to be converted.
tensor (Tensor): The shape of tensor is :math:`(x_0, x_1, ..., x_R)`. The tensor whose data type is specified.
Returns:
Tensor, the shape of tensor is the same as `input`, :math:`(x_0, x_1, ..., x_R)`.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If `tensor` is not a Tensor.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)
>>> input = Tensor(input_np)
>>> other_np = np.random.randn(2, 3, 4).astype(np.int32)
>>> other = Tensor(other_np)
>>> output = ops.type_as(input, other)
>>> print(output.dtype)
Int32
>>> print(output.shape)
(2, 3, 4, 5)
"""
return type_as_op(input, tensor)
def unsorted_segment_sum(input_x, segment_ids, num_segments):
r"""
Computes the sum of a tensor along segments.
Calculates a tensor such that :math:`\text{output}[i] = \sum_{segment\_ids[j] == i} \text{data}[j, \ldots]`, where
:math:`j,...` is a tuple describing the index of element in data.
`segment_ids` selects which elements in data to sum
up. Segment_ids does not need to be sorted, and it does not need to cover all values in the entire valid value
range.
The following figure shows the calculation process of unsorted_segment_sum:
.. image:: UnsortedSegmentSum.png
Note:
- If the segment_id i is absent in the segment_ids, then output[i] will be filled with 0.
- On Ascend, if the value of segment_id is less than 0 or greater than the length of the input data shape, an
execution error will occur.
If the segment corresponding to a given segment_id :math:`i` is empty, then :math:`\text{output}[i] = 0`. If a given
segment_id is negative, the value will be ignored. `num_segments` must be equal to the number of different segment_ids.
Args:
input_x (Tensor): Input Tensor contains the data to be summed.
The shape is :math:`(x_1, x_2, ..., x_R)`.
segment_ids (Tensor): The label indicates the segment to which each element belongs.
Set the shape as :math:`(x_1, x_2, ..., x_N)`, where 0 < N <= R.
num_segments (Union[int, Tensor]): The number of segments, denoted :math:`z`. It can be an int or a 0-D Tensor.
Returns:
Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`.
Raises:
TypeError: If `num_segments` is not an int or 0-D Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import Tensor
>>> from mindspore import ops
>>> input_x = Tensor([1, 2, 3, 4], mindspore.float32)
>>> segment_ids = Tensor([0, 0, 1, 2], mindspore.int32)
>>> num_segments = 4
>>> output = ops.unsorted_segment_sum(input_x, segment_ids, num_segments)
>>> print(output)
[3. 3. 4. 0.]
>>> input_x = Tensor([1, 2, 3, 4, 2, 5], mindspore.float32)
>>> segment_ids = Tensor([0, 0, 1, 2, 3, 4], mindspore.int32)
>>> num_segments = 6
>>> output = ops.unsorted_segment_sum(input_x, segment_ids, num_segments)
>>> print(output)
[3. 3. 4. 2. 5. 0.]
"""
return unsorted_segment_sum_op(input_x, segment_ids, num_segments)
def view_as(input, other):
r"""
Change the shape of the input tensor based on the shape of other.
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
input (Tensor): The input tensor.
other (Tensor): The tensor whose shape the returned tensor will match.
Returns:
Tensor, which has the same shape as `other`.
Raises:
TypeError: If `input` is not a tensor.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([[1, 2, 3], [2, 3, 4]], dtype=np.float32))
>>> other = Tensor(np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32))
>>> output = ops.view_as(input, other)
>>> print(output)
[[1. 2.]
[3. 2.]
[3. 4.]]
"""
return view_as_op(input, other)
def view(input, shape):
r"""
Reshape the tensor according to the input shape. It's the same as :func:`mindspore.Tensor.reshape`,
implemented by the underlying reshape operator.
Args:
input (Tensor): The input tensor.
shape (Union[tuple(int), int]): Dimension of the output tensor.
Returns:
Tensor, whose shape is given by `shape`.
Examples:
>>> from mindspore import Tensor
>>> import numpy as np
>>> a = Tensor(np.array([[1, 2, 3], [2, 3, 4]], dtype=np.float32))
>>> output = a.view((3, 2))
>>> print(output)
[[1. 2.]
[3. 2.]
[3. 4.]]
"""
return view_op(input, shape)
zeros_op=Zeros()
def zeros(size, dtype=None):
r"""
Creates a tensor filled with zeros, whose shape and type are described by the first argument `size` and the second argument `dtype` respectively.
.. warning::
For argument `size`, Tensor type input will be deprecated in the future version.
Args:
size (Union[tuple[int], list[int], int, Tensor]): The specified shape of the output tensor. Only positive integers, or
tuples/lists/Tensors containing positive integers, are allowed. If it is a Tensor,
it must be a 0-D or 1-D Tensor with int32 or int64 dtype.
dtype (:class:`mindspore.dtype`, optional): The specified type of output tensor. If `dtype` is ``None`` ,
mindspore.float32 will be used. Default: ``None`` .
Returns:
Tensor, whose dtype and size are defined by input.
Raises:
TypeError: If `size` is neither an int nor a tuple/list/Tensor of ints.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import ops
>>> output = ops.zeros((2, 2), mindspore.float32)
>>> print(output)
[[0. 0.]
[0. 0.]]
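>>> # When dtype is None, mindspore.float32 is used (per the Args above).
>>> output = ops.zeros((2, 2))
>>> print(output.dtype)
Float32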
"""
return zeros_op(size, dtype)
def grouped_matmul(x, weight, bias=None, scale=None, offset=None, antiquant_scale=None, antiquant_offset=None, group_list=None, split_item=0, group_type=-1):
r"""
Grouped matrix multiplication.
**Non-Quant:**
.. math::
y_i = x_i \times weight_i + bias_i
**Antiquant-Quant:**
.. math::
y_i = (x_i \times (weight_i + antiquant\_offset_i)) \times antiquant\_scale_i + bias_i
Args:
split_item (int): Splitting input mode. Only supports 0 and 3: 0 represents multiple Tensors, and 3 represents a single Tensor.
group_type (int): The axis to be split. Only supports -1 and 0. For a matrix multiplication A[m,k] x B[k,n] = C[m,n]:
-1 means no grouping; 0 means grouping on the m-axis.
Inputs:
x (TensorList): TensorList, including 2D-6D Tensors. Supported dtypes: Float16, Float32.
The shape of the tensor in tensorlist is :math:`(M, N)` or :math:`(..., M, N)`.
weight (TensorList): TensorList, including 2D-3D Tensors. Supported dtypes: Float16, Float32, int8.
The shape of the tensor in tensorlist is :math:`(N, K)` or :math:`(E, N, K)`.
bias (TensorList, optional): TensorList, including 1D-2D Tensors. Supported dtypes: Float16, Float32. If not used, None.
Length is the same as the weight length. The shape of the tensor is :math:`(N)` or :math:`(E, N)`.
scale (TensorList, optional): TensorList, scale factor of quant(A8W8) parameters. Supported dtypes: UInt64.
Length is the same as the weight length. Currently not supported, use None.
offset (TensorList, optional): TensorList, offset of quant(A8W8) parameters. Supported dtypes: Float32.
Length is the same as the weight length. Currently not supported, use None.
antiquant_scale (TensorList, optional): TensorList, scale factor of antiquant(A16W8) parameters. Supported dtypes: Float16.
Length is the same as the weight length. Only use in antiquant. If not used, None.
antiquant_offset (TensorList, optional): TensorList, offset factor of antiquant(A16W8) parameters. Supported dtypes: Float16.
Length is the same as the weight length. Only use in antiquant. If not used, None.
group_list (Tensor, optional): Grouping positions for the M-axis of input x. Supported dtypes: Int64
Parameter limitations 1
=========== ============ =========== ====================================================================================================
split_item group_type group_list notes
=========== ============ =========== ====================================================================================================
0 -1 None The length of x is n, tensor in x must be 2D-6D. The length of weight is n, tensor in weight must be 2D.
3 0 1D Tensor The length of x is 1, tensor in x must be 2D. The length of weight is 1, tensor in weight must be 3D.
(group_list.shape)[0] must be equal to (weight.shape)[0]
The last number in group_list must be equal to the first dimension (the m-axis size) of the tensor in x.
=========== ============ =========== ====================================================================================================
Parameter limitations 2
Non-quant type table
========= ========= ========= ========= ========= ================ ================= =========
x weight bias scale offset antiquant_scale antiquant_offset y
========= ========= ========= ========= ========= ================ ================= =========
Float16 Float16 Float16 None None None None Float16
========= ========= ========= ========= ========= ================ ================= =========
Parameter limitations 3
Only in split_item=3, group_type=0
========= ========= ========= ========= ========= ================ ================= =========
x weight bias scale offset antiquant_scale antiquant_offset y
========= ========= ========= ========= ========= ================ ================= =========
Float32 Float32 Float32 None None None None Float32
========= ========= ========= ========= ========= ================ ================= =========
Outputs:
y (TensorList): TensorList, including 2D Tensors. The shape of the tensor is :math:`(M, K)`.
Raises:
TypeError: If `split_item` is not 0 or 3.
TypeError: If `group_type` is not -1 or 0.
TypeError: If `split_item` is 0 but `group_type` is not -1.
TypeError: If `split_item` is 3 but `group_type` is not 0.
TypeError: If `split_item` is 3 but `group_list` is None.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore as ms
>>> import numpy as np
>>> from mindspore import nn, context
>>> from mindspore.ops.auto_generate import GroupedMatmul
>>> class Net(nn.Cell):
... def __init__(self, split_item=3, group_type=0):
... super(Net, self).__init__()
... self.gmm = GroupedMatmul(split_item, group_type)
...
... def construct(self, x, weight, bias, scale, offset, antiquant_scale, antiquant_offset, group_list):
... result = self.gmm(x, weight, bias, scale, offset, antiquant_scale, antiquant_offset, group_list)
... return result
...
>>> context.set_context(device_target="Ascend", mode=ms.GRAPH_MODE)
>>> x = [ms.Tensor(np.array([[0, 0, 0, 0],
... [1, 1, 1, 1],
... [2, 2, 2, 2],
... [2, 2, 2, 2],
... [1, 1, 1, 1],
... [1, 1, 1, 1]]), ms.float16)]
>>> weight = [ms.Tensor(np.arange(32).reshape((4, 4, 2)), ms.float16)]
>>> bias = None
>>> scale = None
>>> offset = None
>>> antiquant_scale = None
>>> antiquant_offset = None
>>> group_list = ms.Tensor([1, 3, 4, 6], ms.int64)
>>> net = Net()
>>> output = net(x, weight, bias, scale, offset, antiquant_scale, antiquant_offset, group_list)
>>> print(output[0])
[[0 0 ]
[44 48 ]
[88 96 ]
[152 160]
[108 112]
[108 112]]
"""
return grouped_matmul_impl(x, weight, bias, scale, offset, antiquant_scale, antiquant_offset, group_list, split_item, group_type)
def kv_cache_scatter_update(var, indices, updates, axis, reduce='none'):
r"""
Update var with updates and indices along sequence axis.
Args:
var (Tensor): 4-D tensor, the target tensor.
indices (Tensor): 1-D tensor, the index tensor.
updates (Tensor): 4-D tensor, the tensor doing the update operation.
axis (int): Which axis to scatter on; can be -1 or -2.
reduce (str): Scatter mode. Default: ``'none'``; can also be ``'update'``.
Returns:
Tensor, has the same data type and shape as original `var`.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> from mindspore.ops.operations._infer_ops import KVCacheScatterUpdate
>>> kv_cache_scatter_update_op = KVCacheScatterUpdate()
>>> var_shape = [1, 5, 128, 4096]
>>> var = np.random.uniform(low=1, high=10, size=var_shape).astype(np.float32)
>>> indices_shape = [1]
>>> indices = np.random.randint(low=1, high=10, size=indices_shape).astype(np.int64)
>>> updates_shape = [1, 5, 128, 1]
>>> updates = np.random.uniform(low=1, high=10, size=updates_shape).astype(np.float32)
>>> output = kv_cache_scatter_update_op(Tensor(var), Tensor(indices), Tensor(updates), -1, 'update')
>>> print(output.shape)
(1, 5, 128, 4096)
"""
return kv_cache_scatter_update_op(var, indices, updates, axis, reduce)
def moe_finalize_routing(expanded_x, x1, x2=None, bias=None, scales=None, expanded_row_idx=None, expanded_expert_idx=None):
r"""
In MoE calculation, merges the results output by the FFN and rearranges the output from expert order back to the original token order.
Note:
- E: The number of experts, such as 8.
- K: The number of experts selected by a token, such as 1 or 2.
- N: The number of rows in x1, which is the number of original tokens.
- H: The number of cols in x1, which is the hiddens of tokens.
.. math::
expertid = expanded\_expert\_idx_{i,k}
out_{i,j} = x1_{i,j} + x2_{i,j} + \sum_{k=0}^{K-1} scales_{i,k} \times (expanded\_x_{expanded\_row\_idx_{i+k \times N},j} + bias_{expertid,j})
Inputs:
expanded_x (Tensor): The output of MoE FFN. The tensor must be 2D tensor. The shape of the tensor must be :math:`(K*N, H)`.
Supported dtypes: Float16, Float32.
x1 (Tensor): The output of attention. The tensor must be 2D tensor. The shape of the tensor must be :math:`(N, H)`.
Data type requirements should be consistent with expanded_x.
If not used, pass a tensor of all zeros with the required shape.
x2 (Tensor, optional): The output of attention. The tensor must be 2D tensor. The shape of the tensor must be :math:`(N, H)`. If not used, None.
Data type requirements should be consistent with expanded_x.
bias (Tensor): The bias of the last matmul in MoE FFN. The tensor must be 2D tensor. The shape of the tensor must be :math:`(E, H)`.
Data type requirements should be consistent with expanded_x.
scales (Tensor): Weighted expanded when each token corresponds to multiple experts. The tensor must be 2D tensor.
The shape of the tensor must be :math:`(N, K)`. Data type requirements should be consistent with expanded_x.
If not used, pass a tensor of all ones with the required shape.
expanded_row_idx (Tensor): The index in time order. The tensor must be 1D tensor. The shape of the tensor must be :math:`(K*N)`. Supported dtypes: Int32.
The value in Tensor must be between 0 and K*N, and the value cannot be repeated.
expanded_expert_idx (Tensor): The experts selected for each token are used to find the bias of which experts need to be accumulated.
The tensor must be 2D tensor. The shape of the tensor must be :math:`(N, K)`. Supported dtypes: Int32.
Outputs:
Tensor, the merged and sorted results. The tensor is 2D tensor. The shape of the tensor is :math:`(N, H)`. Data type consistent with expanded_x.
Raises:
TypeError: If the data type of input Tensor does not match the description in args.
ShapeError: If the shape of input Tensor does not match the description in args.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore as ms
>>> import numpy as np
>>> from mindspore import Tensor, nn, context
>>> from mindspore.ops.auto_generate import MoeFinalizeRouting
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.moe_finalize_routing = MoeFinalizeRouting()
...
... def construct(self, expanded_x, x1, x2, bias, scales, expanded_row_idx, expanded_expert_idx):
... result = self.moe_finalize_routing(expanded_x, x1, x2, bias, scales, expanded_row_idx, expanded_expert_idx)
... return result
...
>>> context.set_context(device_target="Ascend", mode=ms.GRAPH_MODE)
>>> # E = 4, K = 2, N = 3, H = 4
>>> expanded_x = ms.Tensor(np.array([[0.1, 0.1, 0.1, 0.1],
... [0.2, 0.2, 0.2, 0.2],
... [0.3, 0.3, 0.3, 0.3],
... [0.1, 0.1, 0.1, 0.1],
... [0.2, 0.2, 0.2, 0.2],
... [0.3, 0.3, 0.3, 0.3]]), ms.float16)
>>> x1 = ms.Tensor(np.array([[1, 1, 1, 1],
... [0.2, 0.2, 0.2, 0.2],
... [0.3, 0.3, 0.3, 0.3]]), ms.float16)
>>> x2 = None
>>> bias = ms.Tensor(np.array([[0.1, 0.1, 0.1, 0.1],
... [0.2, 0.2, 0.2, 0.2],
... [0.3, 0.3, 0.3, 0.3],
... [0.4, 0.4, 0.4, 0.4]]), ms.float16)
>>> scales = ms.Tensor(np.array([[0.7, 0.3],
... [0.8, 0.2],
... [0.8, 0.2]]), ms.float16)
>>> expanded_row_idx = ms.Tensor(np.array([2, 3, 1, 0, 5, 4]), ms.int32)
>>> expanded_expert_idx = ms.Tensor(np.array([[0, 1],
... [0, 2],
... [1, 3]]), ms.int32)
>>> net = Net()
>>> output = net(expanded_x, x1, x2, bias, scales, expanded_row_idx, expanded_expert_idx)
>>> print(output)
[[1.37 1.37 1.37 1.37]
[0.48 0.48 0.48 0.48]
[0.74 0.74 0.74 0.74]]
"""
return moe_finalize_routing_op(expanded_x, x1, x2, bias, scales, expanded_row_idx, expanded_expert_idx)
def quant_batch_matmul(x1, x2, scale, offset=None, bias=None, pertokenScaleOptional=None, transpose_x1=False, transpose_x2=False, dtype=mstype.float16):
r"""
"""
return quant_batch_matmul_impl(x1, x2, scale, offset, bias, pertokenScaleOptional, transpose_x1, transpose_x2, dtype)
def weight_quant_batch_matmul(x, weight, antiquant_scale, antiquant_offset=None, quant_scale=None, quant_offset=None, bias=None, transpose_x=False, transpose_weight=False, antiquant_group_size=0):
r"""
"""
return weight_quant_batch_matmul_impl(x, weight, antiquant_scale, antiquant_offset, quant_scale, quant_offset, bias, transpose_x, transpose_weight, antiquant_group_size)