# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Operators definition generated by gen_ops.py, includes primitive classes."""
from mindspore.ops.primitive import Primitive, prim_arg_register
from mindspore.ops import signature as sig
from mindspore.common import dtype as mstype
from mindspore.common._decorator import deprecated
from mindspore.ops._primitive_cache import _get_cache_prim
from mindspore.ops.auto_generate.gen_arg_dtype_cast import type_it
from mindspore.ops.auto_generate.gen_arg_handler import *
from mindspore._c_expression import OpDtype
from mindspore.common._stub_tensor import _convert_stub
from mindspore._c_expression import pyboost_abs
from mindspore._c_expression import pyboost_adam_weight_decay_ext
from mindspore._c_expression import pyboost_add_ext
from mindspore._c_expression import pyboost_arange
from mindspore._c_expression import pyboost_argmax_ext
from mindspore._c_expression import pyboost_argmax_with_value
from mindspore._c_expression import pyboost_argmin_with_value
from mindspore._c_expression import pyboost_avg_pool2d_grad
from mindspore._c_expression import pyboost_avg_pool2d
from mindspore._c_expression import pyboost_batch_mat_mul
from mindspore._c_expression import pyboost_batch_norm_ext
from mindspore._c_expression import pyboost_batch_norm_grad_ext
from mindspore._c_expression import pyboost_bmm_ext
from mindspore._c_expression import pyboost_broadcast_to
from mindspore._c_expression import pyboost_cast
from mindspore._c_expression import pyboost_ceil
from mindspore._c_expression import pyboost_chunk
from mindspore._c_expression import pyboost_clamp_scalar
from mindspore._c_expression import pyboost_clamp_tensor
from mindspore._c_expression import pyboost_concat
from mindspore._c_expression import pyboost_constant_pad_nd
from mindspore._c_expression import pyboost_contiguous
from mindspore._c_expression import pyboost_convolution_grad
from mindspore._c_expression import pyboost_convolution
from mindspore._c_expression import pyboost_copy
from mindspore._c_expression import pyboost_cos
from mindspore._c_expression import pyboost_dense
from mindspore._c_expression import pyboost_div
from mindspore._c_expression import pyboost_divmod
from mindspore._c_expression import pyboost_dot
from mindspore._c_expression import pyboost_dropout_do_mask_ext
from mindspore._c_expression import pyboost_dropout_ext
from mindspore._c_expression import pyboost_dropout_gen_mask_ext
from mindspore._c_expression import pyboost_dropout_grad_ext
from mindspore._c_expression import pyboost_embedding_dense_backward
from mindspore._c_expression import pyboost_embedding
from mindspore._c_expression import pyboost_equal
from mindspore._c_expression import pyboost_erf
from mindspore._c_expression import pyboost_erfinv
from mindspore._c_expression import pyboost_exp
from mindspore._c_expression import pyboost_ffn_ext
from mindspore._c_expression import pyboost_fill_scalar
from mindspore._c_expression import pyboost_fill_tensor
from mindspore._c_expression import pyboost_flash_attention_score_grad
from mindspore._c_expression import pyboost_flash_attention_score
from mindspore._c_expression import pyboost_flatten_ext
from mindspore._c_expression import pyboost_gather_d_grad_v2
from mindspore._c_expression import pyboost_gather_d
from mindspore._c_expression import pyboost_gelu_grad
from mindspore._c_expression import pyboost_gelu
from mindspore._c_expression import pyboost_greater_equal
from mindspore._c_expression import pyboost_greater
from mindspore._c_expression import pyboost_grid_sampler_2d_grad
from mindspore._c_expression import pyboost_grid_sampler_2d
from mindspore._c_expression import pyboost_grid_sampler_3d_grad
from mindspore._c_expression import pyboost_grid_sampler_3d
from mindspore._c_expression import pyboost_group_norm_grad
from mindspore._c_expression import pyboost_group_norm
from mindspore._c_expression import pyboost_isfinite
from mindspore._c_expression import pyboost_layer_norm_ext
from mindspore._c_expression import pyboost_layer_norm_grad_ext
from mindspore._c_expression import pyboost_leaky_relu_ext
from mindspore._c_expression import pyboost_leaky_relu_grad_ext
from mindspore._c_expression import pyboost_less_equal
from mindspore._c_expression import pyboost_less
from mindspore._c_expression import pyboost_lin_space_ext
from mindspore._c_expression import pyboost_log
from mindspore._c_expression import pyboost_logical_and
from mindspore._c_expression import pyboost_logical_not
from mindspore._c_expression import pyboost_logical_or
from mindspore._c_expression import pyboost_masked_fill
from mindspore._c_expression import pyboost_matmul_ext
from mindspore._c_expression import pyboost_matmul
from mindspore._c_expression import pyboost_max
from mindspore._c_expression import pyboost_max_pool_grad_with_indices
from mindspore._c_expression import pyboost_max_pool_grad_with_mask
from mindspore._c_expression import pyboost_max_pool_with_indices
from mindspore._c_expression import pyboost_max_pool_with_mask
from mindspore._c_expression import pyboost_maximum
from mindspore._c_expression import pyboost_mean_ext
from mindspore._c_expression import pyboost_min
from mindspore._c_expression import pyboost_minimum
from mindspore._c_expression import pyboost_mul
from mindspore._c_expression import pyboost_mv
from mindspore._c_expression import pyboost_neg
from mindspore._c_expression import pyboost_norm
from mindspore._c_expression import pyboost_normal_ext
from mindspore._c_expression import pyboost_not_equal
from mindspore._c_expression import pyboost_one_hot_ext
from mindspore._c_expression import pyboost_ones_like_ext
from mindspore._c_expression import pyboost_ones
from mindspore._c_expression import pyboost_pow
from mindspore._c_expression import pyboost_prod_ext
from mindspore._c_expression import pyboost_quant_batch_matmul
from mindspore._c_expression import pyboost_reciprocal
from mindspore._c_expression import pyboost_reduce_all
from mindspore._c_expression import pyboost_reduce_any
from mindspore._c_expression import pyboost_reflection_pad_1d_grad
from mindspore._c_expression import pyboost_reflection_pad_1d
from mindspore._c_expression import pyboost_reflection_pad_2d_grad
from mindspore._c_expression import pyboost_reflection_pad_2d
from mindspore._c_expression import pyboost_reflection_pad_3d_grad
from mindspore._c_expression import pyboost_reflection_pad_3d
from mindspore._c_expression import pyboost_relu_grad
from mindspore._c_expression import pyboost_relu
from mindspore._c_expression import pyboost_repeat_interleave
from mindspore._c_expression import pyboost_replication_pad_1d_grad
from mindspore._c_expression import pyboost_replication_pad_1d
from mindspore._c_expression import pyboost_replication_pad_2d_grad
from mindspore._c_expression import pyboost_replication_pad_2d
from mindspore._c_expression import pyboost_replication_pad_3d_grad
from mindspore._c_expression import pyboost_replication_pad_3d
from mindspore._c_expression import pyboost_reverse_v2
from mindspore._c_expression import pyboost_rsqrt
from mindspore._c_expression import pyboost_scatter_add_ext
from mindspore._c_expression import pyboost_scatter
from mindspore._c_expression import pyboost_select
from mindspore._c_expression import pyboost_sigmoid_grad
from mindspore._c_expression import pyboost_sigmoid
from mindspore._c_expression import pyboost_silu_grad
from mindspore._c_expression import pyboost_silu
from mindspore._c_expression import pyboost_sin
from mindspore._c_expression import pyboost_slice_ext
from mindspore._c_expression import pyboost_softmax_backward
from mindspore._c_expression import pyboost_softmax
from mindspore._c_expression import pyboost_softplus_ext
from mindspore._c_expression import pyboost_softplus_grad_ext
from mindspore._c_expression import pyboost_split_tensor
from mindspore._c_expression import pyboost_split_with_size
from mindspore._c_expression import pyboost_sqrt
from mindspore._c_expression import pyboost_square
from mindspore._c_expression import pyboost_stack_ext
from mindspore._c_expression import pyboost_sub_ext
from mindspore._c_expression import pyboost_sum_ext
from mindspore._c_expression import pyboost_tanh_grad
from mindspore._c_expression import pyboost_tanh
from mindspore._c_expression import pyboost_tile
from mindspore._c_expression import pyboost_topk_ext
from mindspore._c_expression import pyboost_transpose
from mindspore._c_expression import pyboost_tril
from mindspore._c_expression import pyboost_triu
from mindspore._c_expression import pyboost_uniform_ext
from mindspore._c_expression import pyboost_upsample_bilinear2d_grad
from mindspore._c_expression import pyboost_upsample_bilinear2d
from mindspore._c_expression import pyboost_upsample_linear1d_grad
from mindspore._c_expression import pyboost_upsample_linear1d
from mindspore._c_expression import pyboost_upsample_nearest1d_grad
from mindspore._c_expression import pyboost_upsample_nearest1d
from mindspore._c_expression import pyboost_upsample_nearest2d_grad
from mindspore._c_expression import pyboost_upsample_nearest2d
from mindspore._c_expression import pyboost_upsample_nearest3d_grad
from mindspore._c_expression import pyboost_upsample_nearest3d
from mindspore._c_expression import pyboost_upsample_trilinear3d_grad
from mindspore._c_expression import pyboost_upsample_trilinear3d
from mindspore._c_expression import pyboost_weight_quant_batch_matmul
from mindspore._c_expression import pyboost_zeros_like_ext
from mindspore._c_expression import pyboost_zeros
class ACosGrad(Primitive):
r"""
Computes ACosGrad of input element-wise.
Returns:
Tensor, has the same type as input.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, dout):
return super().__call__(x, dout)
a_cos_grad_op=ACosGrad()
class AbsGrad(Primitive):
r"""
Computes gradients for abs operation.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, dout):
return super().__call__(x, dout)
abs_grad_op=AbsGrad()
class Abs(Primitive):
r"""
.. code-block::
prim = ops.Abs()
out = prim(input)
is equivalent to
.. code-block::
ops.abs(input)
Refer to :func:`mindspore.ops.abs` for more details.
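Examples:
A minimal usage sketch (the printed values are the mathematically expected results):
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input = Tensor([-1.0, 2.0, -3.0], mindspore.float32)
>>> output = ops.Abs()(input)
>>> print(output)
[1. 2. 3.]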
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return _convert_stub(pyboost_abs(self, [input]))
abs_op=Abs()
class ACos(Primitive):
r"""
.. code-block::
prim = ops.ACos()
out = prim(input)
is equivalent to
.. code-block::
ops.acos(input)
Refer to :func:`mindspore.ops.acos` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
acos_op=ACos()
class AcoshGrad(Primitive):
r"""
Performs grad of Acosh operation.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, out, dout):
return super().__call__(out, dout)
acosh_grad_op=AcoshGrad()
class Acosh(Primitive):
r"""
.. code-block::
prim = ops.Acosh()
out = prim(input)
is equivalent to
.. code-block::
ops.acosh(input)
Refer to :func:`mindspore.ops.acosh` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
acosh_op=Acosh()
class AdamWeightDecayExt(Primitive):
r"""
Implements Adam Weight Decay algorithm.
.. math::
\begin{aligned}
&\textbf{input} : \gamma \text{(lr)}, \: \beta_1, \beta_2
\text{(betas)}, \: \theta_0 \text{(params)}, \: f(\theta) \text{(objective)},
\: \epsilon \text{ (epsilon)} \\
&\hspace{13mm} \lambda \text{(weight decay)}, \: \textit{amsgrad},
\: \textit{maximize} \\
&\textbf{initialize} : m_0 \leftarrow 0 \text{ (first moment)}, v_0 \leftarrow 0
\text{ ( second moment)}, \: \widehat{v_0}^{max}\leftarrow 0 \\[-1.ex]
&\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\
&\hspace{5mm}\textbf{if} \: \textit{maximize}: \\
&\hspace{10mm}g_t \leftarrow -\nabla_{\theta} f_t (\theta_{t-1}) \\
&\hspace{5mm}\textbf{else} \\
&\hspace{10mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\
&\hspace{5mm} \theta_t \leftarrow \theta_{t-1} - \gamma \lambda \theta_{t-1} \\
&\hspace{5mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\
&\hspace{5mm}v_t \leftarrow \beta_2 v_{t-1} + (1-\beta_2) g^2_t \\
&\hspace{5mm}\widehat{m_t} \leftarrow m_t/\big(1-\beta_1^t \big) \\
&\hspace{5mm}\widehat{v_t} \leftarrow v_t/\big(1-\beta_2^t \big) \\
&\hspace{5mm}\textbf{if} \: amsgrad \\
&\hspace{10mm}\widehat{v_t}^{max} \leftarrow \mathrm{max}(\widehat{v_t}^{max},
\widehat{v_t}) \\
&\hspace{10mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/
\big(\sqrt{\widehat{v_t}^{max}} + \epsilon \big) \\
&\hspace{5mm}\textbf{else} \\
&\hspace{10mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/
\big(\sqrt{\widehat{v_t}} + \epsilon \big) \\
&\bf{return} \: \theta_t \\[-1.ex]
\end{aligned}
.. warning::
This is an experimental optimizer API that is subject to change.
This module must be used with lr scheduler module in `LRScheduler Class
<https://www.mindspore.cn/docs/en/r2.3.0rc2/api_python/mindspore.experimental.html#lrscheduler-class>`_ .
Inputs:
- **var** (Parameter) - Weights to be updated. The shape is :math:`(N, *)` where :math:`*` means,
any number of additional dimensions. The data type can be float16 or float32.
- **m** (Parameter) - The 1st moment vector in the updating formula,
it should have the same shape as `var`. The data type can be float16 or float32.
- **v** (Parameter) - The 2nd moment vector in the updating formula,
it should have the same shape as `m`.
- **max_v** (Parameter) - The 2nd moment vector in the updating formula,
it should have the same shape as `m`.
- **gradient** (Tensor) - Gradient, has the same shape as `var`
- **step** (float, int) - step
- **lr** (float) - :math:`lr` in the updating formula. The paper suggested value is :math:`10^{-3}`,
the data type should be float32.
- **beta1** (float) - The exponential decay rate for the 1st moment estimations,
the data type should be float32. The paper suggested value is :math:`0.9`
- **beta2** (float) - The exponential decay rate for the 2nd moment estimations,
the data type should be float32. The paper suggested value is :math:`0.999`
- **decay** (float) - weight decay (L2 penalty), must be a scalar tensor with float32 data type.
- **eps** (float) - Term added to the denominator to improve numerical stability,
the data type should be float32.
- **amsgrad** (bool) - whether to use the AMSGrad algorithm. Default: ``False``.
- **maximize** (bool) - maximize the params based on the objective, instead of minimizing.
Default: ``False``.
Outputs:
Tuple of 3 Tensor, the updated parameters.
- **var** (Tensor) - The same shape and data type as `var`.
- **m** (Tensor) - The same shape and data type as `m`.
- **v** (Tensor) - The same shape and data type as `v`.
Supported Platforms:
``Ascend``
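Examples:
A minimal usage sketch on Ascend (the hyper-parameter values below are illustrative assumptions; only the output shape is shown):
>>> import numpy as np
>>> import mindspore as ms
>>> from mindspore import Tensor, Parameter, ops
>>> var = Parameter(Tensor(np.ones([2, 2]), ms.float32), name="var")
>>> m = Parameter(Tensor(np.zeros([2, 2]), ms.float32), name="m")
>>> v = Parameter(Tensor(np.zeros([2, 2]), ms.float32), name="v")
>>> max_v = Parameter(Tensor(np.zeros([2, 2]), ms.float32), name="max_v")
>>> grad = Tensor(np.ones([2, 2]), ms.float32)
>>> op = ops.auto_generate.AdamWeightDecayExt()
>>> output = op(var, m, v, max_v, grad, 1, 1e-3, 0.9, 0.999, 1e-2, 1e-8)
>>> print(output[0].shape)
(2, 2)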
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T1),
sig.make_sig('v', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T1),
sig.make_sig('max_v', dtype=sig.sig_dtype.T1),
sig.make_sig('gradient', dtype=sig.sig_dtype.T),
sig.make_sig('step', dtype=sig.sig_dtype.T2),
sig.make_sig('lr', dtype=sig.sig_dtype.T3),
sig.make_sig('beta1', dtype=sig.sig_dtype.T3),
sig.make_sig('beta2', dtype=sig.sig_dtype.T3),
sig.make_sig('decay', dtype=sig.sig_dtype.T3),
sig.make_sig('eps', dtype=sig.sig_dtype.T3),
sig.make_sig('amsgrad', dtype=sig.sig_dtype.T4, default=False),
sig.make_sig('maximize', dtype=sig.sig_dtype.T5, default=False),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, var, m, v, max_v, gradient, step, lr, beta1, beta2, decay, eps, amsgrad=False, maximize=False):
return _convert_stub(pyboost_adam_weight_decay_ext(self, [var, m, v, max_v, gradient, step, lr, beta1, beta2, decay, eps, amsgrad, maximize]))
adam_weight_decay_ext_op=AdamWeightDecayExt()
class AdamWeightDecay(Primitive):
r"""
Updates gradients by the Adaptive Moment Estimation algorithm with weight decay (AdamWeightDecay).
The Adam algorithm is proposed in `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_.
The AdamWeightDecay variant was proposed in `Decoupled Weight Decay Regularization
<https://arxiv.org/abs/1711.05101>`_.
The updating formulas are as follows,
.. math::
\begin{array}{ll} \\
m = \beta_1 * m + (1 - \beta_1) * g \\
v = \beta_2 * v + (1 - \beta_2) * g * g \\
update = \frac{m}{\sqrt{v} + \epsilon} \\
update =
\begin{cases}
update + weight\_decay * w
& \text{ if } weight\_decay > 0 \\
update
& \text{ otherwise }
\end{cases} \\
w = w - lr * update
\end{array}
:math:`m` represents the 1st moment vector, :math:`v` represents the 2nd moment vector, :math:`g` represents
`gradient`, :math:`\beta_1, \beta_2` represent `beta1` and `beta2`,
:math:`lr` represents `learning_rate`, :math:`w` represents `var`, :math:`decay` represents `weight_decay`,
:math:`\epsilon` represents `epsilon`.
Args:
use_locking (bool): Whether to enable a lock to protect variable tensors from being updated.
If ``True`` , updates of the var, m, and v tensors will be protected by a lock.
If ``False`` , the result is unpredictable. Default: ``False`` .
Inputs:
- **var** (Parameter) - Weights to be updated. The shape is :math:`(N, *)` where :math:`*` means,
any number of additional dimensions. The data type can be float16 or float32.
- **m** (Parameter) - The 1st moment vector in the updating formula,
it should have the same shape as `var`. The data type can be float16 or float32.
- **v** (Parameter) - The 2nd moment vector in the updating formula,
it should have the same shape as `m`.
- **lr** (float) - :math:`lr` in the updating formula. The paper suggested value is :math:`10^{-3}`,
the data type should be float32.
- **beta1** (float) - The exponential decay rate for the 1st moment estimations,
the data type should be float32. The paper suggested value is :math:`0.9`
- **beta2** (float) - The exponential decay rate for the 2nd moment estimations,
the data type should be float32. The paper suggested value is :math:`0.999`
- **epsilon** (float) - Term added to the denominator to improve numerical stability,
the data type should be float32.
- **decay** (float) - The weight decay value, must be a scalar tensor with float32 data type.
Default: ``0.0`` .
- **gradient** (Tensor) - Gradient, has the same shape as `var`.
Outputs:
Tuple of 3 Tensor, the updated parameters.
- **var** (Tensor) - The same shape and data type as `var`.
- **m** (Tensor) - The same shape and data type as `m`.
- **v** (Tensor) - The same shape and data type as `v`.
Raises:
TypeError: If `use_locking` is not a bool.
TypeError: If `lr`, `beta1`, `beta2`, `epsilon` or `decay` is not a float32.
TypeError: If `var`, `m` or `v` is not a Parameter with dtype float16 or float32.
TypeError: If `gradient` is not a Tensor.
ValueError: If `epsilon` <= 0.
ValueError: If `beta1`, `beta2` is not in range (0.0,1.0).
ValueError: If `decay` < 0.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter, ops
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.adam_weight_decay = ops.AdamWeightDecay()
... self.var = Parameter(Tensor(np.ones([2, 2]).astype(np.float32)), name="var")
... self.m = Parameter(Tensor(np.ones([2, 2]).astype(np.float32)), name="m")
... self.v = Parameter(Tensor(np.ones([2, 2]).astype(np.float32)), name="v")
... def construct(self, lr, beta1, beta2, epsilon, decay, grad):
... out = self.adam_weight_decay(self.var, self.m, self.v, lr, beta1, beta2,
... epsilon, decay, grad)
... return out
>>> net = Net()
>>> gradient = Tensor(np.ones([2, 2]).astype(np.float32))
>>> output = net(0.001, 0.9, 0.999, 1e-8, 0.0, gradient)
>>> print(net.var.asnumpy())
[[0.999 0.999]
[0.999 0.999]]
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T1),
sig.make_sig('v', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T1),
sig.make_sig('lr', dtype=sig.sig_dtype.T2),
sig.make_sig('beta1', dtype=sig.sig_dtype.T2),
sig.make_sig('beta2', dtype=sig.sig_dtype.T2),
sig.make_sig('epsilon', dtype=sig.sig_dtype.T2),
sig.make_sig('decay', dtype=sig.sig_dtype.T2),
sig.make_sig('gradient', dtype=sig.sig_dtype.T),
)
@prim_arg_register
def __init__(self, use_locking=False):
self._set_prim_arg("use_locking", use_locking)
self.add_prim_attr("side_effect_mem", True)
def __call__(self, var, m, v, lr, beta1, beta2, epsilon, decay, gradient):
return super().__call__(var, m, v, lr, beta1, beta2, epsilon, decay, gradient, self.use_locking)
class AddExt(Primitive):
r"""
.. code-block::
prim = ops.AddExt()
out = prim(input, other, alpha)
is equivalent to
.. code-block::
ops.add_ext(input, other, alpha)
Refer to :func:`mindspore.ops.add_ext` for more details.
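Examples:
A minimal usage sketch, assuming `alpha` scales `other` before the addition as in :func:`mindspore.ops.add_ext`:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input = Tensor([1.0, 2.0], mindspore.float32)
>>> other = Tensor([3.0, 4.0], mindspore.float32)
>>> output = ops.auto_generate.AddExt()(input, other, alpha=2)
>>> print(output)
[ 7. 10.]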
"""
__mindspore_signature__ = (
sig.make_sig('input', dtype=sig.sig_dtype.T),
sig.make_sig('other', dtype=sig.sig_dtype.T),
sig.make_sig('alpha', dtype=sig.sig_dtype.T1, default=1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other, alpha=1):
return _convert_stub(pyboost_add_ext(self, [input, other, alpha]))
add_ext_op=AddExt()
class Add(Primitive):
r"""
.. code-block::
prim = ops.Add()
out = prim(input, other)
is equivalent to
.. code-block::
ops.add(input, other)
Refer to :func:`mindspore.ops.add` for more details.
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
return super().__call__(input, other)
add_op=Add()
class Addcdiv(Primitive):
r"""
Adds the element-wise division of `x1` by `x2`, multiplied by `value` to `input_data`.
It computes the following operation:
.. math::
y[i] = input\_data[i] + value[i] * (x1[i] / x2[i])
Inputs:
- **input_data** (Tensor) - The tensor to be added.
- **x1** (Tensor) - The numerator tensor.
- **x2** (Tensor) - The denominator tensor.
- **value** (Tensor) - The multiplier for tensor x1/x2.
Outputs:
Tensor, has the same shape and dtype as x1/x2.
Raises:
TypeError: If `x1`, `x2`, `value` or `input_data` is not a Tensor.
TypeError: If dtypes of `x1`, `x2`, `value` and `input_data` are not the same.
ValueError: If `x1` could not be broadcast to `x2`.
ValueError: If `value` could not be broadcast to `x1/x2`.
ValueError: If `input_data` could not be broadcast to `value*(x1/x2)`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_data = Tensor(np.array([1, 1, 1, 1]), mindspore.float32)
>>> x1 = Tensor(np.array([1, 2, 3, 4]), mindspore.float32)
>>> x2 = Tensor(np.array([4, 3, 2, 1]), mindspore.float32)
>>> value = Tensor([1], mindspore.float32)
>>> addcdiv = ops.Addcdiv()
>>> y = addcdiv(input_data, x1, x2, value)
>>> print(y)
[1.25 1.6666667 2.5 5. ]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, tensor1, tensor2, value):
return super().__call__(input, tensor1, tensor2, value)
addcdiv_op=Addcdiv()
class Addcmul(Primitive):
r"""
Adds the element-wise product of `x1` by `x2`, multiplied by `value` to `input_data`.
It computes the following operation:
.. math::
output[i] = input\_data[i] + value[i] * (x1[i] * x2[i])
Inputs:
- **input_data** (Tensor) - The tensor to be added.
- **x1** (Tensor) - The tensor to be multiplied.
- **x2** (Tensor) - The tensor to be multiplied.
- **value** (Tensor) - The multiplier for tensor x1*x2.
Outputs:
Tensor, has the same shape and dtype as x1*x2.
Raises:
TypeError: If `x1`, `x2`, `value` or `input_data` is not a Tensor.
TypeError: If dtypes of `x1`, `x2`, `value` and `input_data` are not the same.
ValueError: If `x1` could not be broadcast to `x2`.
ValueError: If `value` could not be broadcast to `x1` * `x2`.
ValueError: If `input_data` could not be broadcast to `value*(x1*x2)`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_data = Tensor(np.array([1, 1, 1]), mindspore.float32)
>>> x1 = Tensor(np.array([[1], [2], [3]]), mindspore.float32)
>>> x2 = Tensor(np.array([[1, 2, 3]]), mindspore.float32)
>>> value = Tensor([1], mindspore.float32)
>>> addcmul = ops.Addcmul()
>>> y = addcmul(input_data, x1, x2, value)
>>> print(y)
[[ 2. 3. 4.]
[ 3. 5. 7.]
[ 4. 7. 10.]]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, tensor1, tensor2, value):
return super().__call__(input, tensor1, tensor2, value)
addcmul_op=Addcmul()
class AddN(Primitive):
r"""
.. code-block::
prim = ops.AddN()
out = prim(x)
is equivalent to
.. code-block::
ops.addn(x)
Refer to :func:`mindspore.ops.addn` for more details.
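Examples:
A minimal usage sketch (the input is a tuple of tensors; the printed values are the expected element-wise sums):
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([1, 2, 3]), mindspore.float32)
>>> y = Tensor(np.array([4, 5, 6]), mindspore.float32)
>>> output = ops.AddN()((x, y))
>>> print(output)
[5. 7. 9.]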
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x):
return super().__call__(x)
addn_op=AddN()
class Angle(Primitive):
r"""
.. code-block::
prim = ops.Angle()
out = prim(input)
is equivalent to
.. code-block::
ops.angle(input)
Refer to :func:`mindspore.ops.angle` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
angle_op=Angle()
class ApplyCamePart1(Primitive):
r"""
Computes Part 1 of the CAME Optimizer.
Args:
- **grad** (Tensor) - The shape = 2D :math:`(..., n, m)`.
A Tensor of types: float16, float32, bfloat16.
- **eps** (float) - data type must be float.
Returns:
- **sum_grad_r** (Tensor) - A Tensor of shape :math:`(..., n)`
- **sum_grad_c** (Tensor) - A Tensor of shape :math:`(..., m)`
- **sum_grad_rc** (Tensor) - A Tensor of shape :math:`(..., m)`
Raises:
TypeError: If `grad` is not a Tensor.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore as ms
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore.ops.operations import _inner_ops as P
>>> grad = Tensor(np.ones([1024, 64]), dtype=ms.float32)
>>> apply_came_part1 = P.ApplyCamePart1()
>>> output = apply_came_part1(grad, 1.1)
>>> print(output[0].shape)
(1024,)
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, grad, eps):
return super().__call__(grad, eps)
apply_came_part1_op=ApplyCamePart1()
class ApplyCamePart2(Primitive):
r"""
Computes Part 2 of the CAME Optimizer.
Args:
- **grad** (Tensor) - The shape = 2D :math:`(..., n, m)`.
A Tensor of types: float16, float32, bfloat16.
- **sum_grad_r** (Tensor) - The shape = 1D :math:`(..., n)`.
A Tensor of types: float32.
- **sum_grad_c** (Tensor) - The shape = 1D :math:`(..., m)`.
A Tensor of types: float32.
- **sum_grad_rc** (Tensor) - The shape = 1D :math:`(...)`.
A Tensor of types: float32.
- **r** (Tensor) - The shape = 1D :math:`(..., n)`.
The Tensor has the same data type as `grad`.
- **c** (Tensor) - The shape = 1D :math:`(..., m)`.
The Tensor has the same data type as `grad`.
- **beta2** (float) - data type must be float.
- **sum_r** (Tensor) - The shape = 1D :math:`(..., 1)`.
'None' is currently supported. A Tensor of types: float32.
- **global_shape** (Tensor) - the shape = 1D :math:`(2)`.
'None' is currently supported. A Tensor of types: int64.
Returns:
- **r** (Tensor) - A Tensor of shape :math:`(..., n)`
- **c** (Tensor) - A Tensor of shape :math:`(..., m)`
- **u** (Tensor) - A Tensor of shape :math:`(..., n, m)`
- **sum_square_u** (Tensor) - A Tensor of shape :math:`(1)`
Raises:
TypeError: If `grad` is not a Tensor.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore as ms
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore.ops.operations import _inner_ops as P
>>> apply_came_part2 = P.ApplyCamePart2()
>>> grad = Tensor(np.ones([1024, 64]), dtype=ms.float32)
>>> sum_grad_r = Tensor(np.ones([1024]), dtype=ms.float32)
>>> sum_grad_c = Tensor(np.ones([64]), dtype=ms.float32)
>>> sum_grad_rc = Tensor(np.array([64]), dtype=ms.float32)
>>> r = Tensor(np.ones([1024]), dtype=ms.float32)
>>> c = Tensor(np.ones([64]), dtype=ms.float32)
>>> beta2 = 0.5
>>> sum_r = Tensor(np.array([128]), dtype=ms.float32)
>>> global_shape = (1024, 64)
>>> output = apply_came_part2(grad, sum_grad_r, sum_grad_c, sum_grad_rc, r, c, beta2, sum_r, global_shape)
>>> print(output[0].shape)
(1024,)
"""
__mindspore_signature__ = (
sig.make_sig('grad'),
sig.make_sig('sum_grad_r'),
sig.make_sig('sum_grad_c'),
sig.make_sig('sum_grad_rc'),
sig.make_sig('r'),
sig.make_sig('c'),
sig.make_sig('beta2'),
sig.make_sig('sum_r', default=None),
sig.make_sig('global_shape', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, grad, sum_grad_r, sum_grad_c, sum_grad_rc, r, c, beta2, sum_r=None, global_shape=None):
return super().__call__(grad, sum_grad_r, sum_grad_c, sum_grad_rc, r, c, beta2, sum_r, global_shape)
apply_came_part2_op=ApplyCamePart2()
class ApplyCamePart3(Primitive):
r"""
Computes Part 3 of the CAME Optimizer.
Args:
- **u** (Parameter) - The shape = 2D :math:`(..., n, m)`.
A Tensor of types: float16, float32, bfloat16.
- **m** (Parameter) - The shape = 2D :math:`(..., n, m)`.
A Tensor of types: float16, float32, bfloat16.
- **eps** (float) - data type must be float.
- **beta1** (float) - data type must be float.
- **clip_threshold** (float) - data type must be float.
- **sum_square_u** (Tensor) - The shape = 1D :math:`(1)`.
A Tensor of types: float32.
- **global_shape** (Tensor) - the shape = 1D :math:`(2)`.
'None' is currently supported. A Tensor of types: int64.
- **use_first_moment** (bool).
Returns:
- **m** (Tensor) - A Tensor of shape :math:`(..., n, m)`
- **sum_u_r** (Tensor) - A Tensor of shape :math:`(..., n)`
- **sum_u_c** (Tensor) - A Tensor of shape :math:`(..., m)`
- **sum_u_rc** (Tensor) - A Tensor of shape :math:`(...)`
Raises:
TypeError: If `u` is not a Tensor.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore as ms
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore.ops.operations import _inner_ops as P
>>> apply_came_part3 = P.ApplyCamePart3()
>>> u = Tensor(np.ones([1024, 64]), dtype=ms.float32)
>>> m = Tensor(np.ones([1024, 64]), dtype=ms.float32)
>>> eps = 0.8
>>> beta1 = 0.5
>>> clip_threshold = 0.5
>>> sum_square_u = Tensor(np.array([128]), dtype=ms.float32)
>>> global_shape = (1024, 64)
>>> use_first_moment = False
>>> output = apply_came_part3(u, m, eps, beta1, clip_threshold, sum_square_u, global_shape, use_first_moment)
>>> print(output[0].shape)
(1024, 64)
"""
__mindspore_signature__ = (
sig.make_sig('u'),
sig.make_sig('m'),
sig.make_sig('eps'),
sig.make_sig('beta1'),
sig.make_sig('clip_threshold'),
sig.make_sig('sum_square_u'),
sig.make_sig('global_shape', default=None),
sig.make_sig('use_first_moment', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, u, m, eps, beta1, clip_threshold, sum_square_u, global_shape=None, use_first_moment=False):
return super().__call__(u, m, eps, beta1, clip_threshold, sum_square_u, global_shape, use_first_moment)
apply_came_part3_op=ApplyCamePart3()
class ApplyCamePart4(Primitive):
r"""
Computes Part 4 of the CAME Optimizer.
Args:
- **param** (Parameter) - The shape = 2D :math:`(..., n, m)`.
A Tensor of types: float16, float32, bfloat16.
- **m** (Parameter) - The shape = 2D :math:`(..., n, m)`.
The Tensor has the same data type as `param`.
- **r** (Tensor) - The shape = 1D :math:`(..., n)`.
The Tensor has the same data type as `param`.
- **c** (Tensor) - The shape = 1D :math:`(..., m)`.
The Tensor has the same data type as `param`.
- **weight_decay** (Tensor) - The shape = 1D :math:`(1)`.
A Tensor of types: float32.
- **lr** (Tensor) - The shape = 1D :math:`(1)`.
A Tensor of types: float32.
- **beta3** (float) - data type must be float.
- **sum_r** (Tensor) - The shape = 1D :math:`(..., 1)`.
'None' is currently supported. A Tensor of types: float32.
- **sum_u_r** (Tensor) - The shape = 1D :math:`(..., n)`.
A Tensor of types: float32.
- **sum_u_c** (Tensor) - The shape = 1D :math:`(..., m)`.
A Tensor of types: float32.
- **sum_u_rc** (Tensor) - The shape = 1D :math:`(...)`.
A Tensor of types: float32.
- **global_shape** (Tensor) - the shape = 1D :math:`(2)`.
'None' is currently supported. A Tensor of types: int64.
Returns:
- **param** (Tensor) - A Tensor of shape :math:`(..., n, m)`
- **r** (Tensor) - A Tensor of shape :math:`(..., n)`
- **c** (Tensor) - A Tensor of shape :math:`(..., m)`
Raises:
TypeError: If `param` is not a Tensor.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore as ms
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore.ops.operations import _inner_ops as P
>>> apply_came_part4 = P.ApplyCamePart4()
>>> param = Tensor(np.ones([1024, 64]), dtype=ms.float32)
>>> m = Tensor(np.ones([1024, 64]), dtype=ms.float32)
>>> r = Tensor(np.ones([1024]), dtype=ms.float32)
>>> c = Tensor(np.ones([64]), dtype=ms.float32)
>>> weight_decay = Tensor([0.8])
>>> lr = Tensor([0.5])
>>> beta3 = 0.5
>>> sum_r = Tensor(np.array([128.]), dtype=ms.float32)
>>> sum_u_r = Tensor(np.ones([1024]), dtype=ms.float32)
>>> sum_u_c = Tensor(np.ones([64]), dtype=ms.float32)
>>> sum_u_rc = Tensor(np.array([128.]), dtype=ms.float32)
>>> global_shape = (1024, 64)
>>> output = apply_came_part4(param, m, r, c, weight_decay, lr, beta3, \
... sum_r, sum_u_r, sum_u_c, sum_u_rc, global_shape)
>>> print(output[0].shape)
(1024, 64)
"""
__mindspore_signature__ = (
sig.make_sig('param'),
sig.make_sig('m'),
sig.make_sig('r'),
sig.make_sig('c'),
sig.make_sig('weight_decay'),
sig.make_sig('lr'),
sig.make_sig('beta3'),
sig.make_sig('sum_r'),
sig.make_sig('sum_u_r'),
sig.make_sig('sum_u_c'),
sig.make_sig('sum_u_rc'),
sig.make_sig('global_shape', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, param, m, r, c, weight_decay, lr, beta3, sum_r, sum_u_r, sum_u_c, sum_u_rc, global_shape=None):
return super().__call__(param, m, r, c, weight_decay, lr, beta3, sum_r, sum_u_r, sum_u_c, sum_u_rc, global_shape)
apply_came_part4_op=ApplyCamePart4()
class ApplyRotaryPosEmb(Primitive):
r"""
.. code-block::
prim = ops.ApplyRotaryPosEmb(cos_format)
out = prim(query, key, cos, sin, position_ids)
is equivalent to
.. code-block::
ops.apply_rotary_pos_emb_(query, key, cos, sin, position_ids, cos_format)
Refer to :func:`mindspore.ops.apply_rotary_pos_emb_` for more details.
"""
@prim_arg_register
def __init__(self, cos_format=0):
self._set_prim_arg("cos_format", cos_format)
def __call__(self, query, key, cos, sin, position_ids):
return super().__call__(query, key, cos, sin, position_ids, self.cos_format)
class Arange(Primitive):
r"""
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, start, end, step):
return _convert_stub(pyboost_arange(self, [start, end, step]))
arange_op=Arange()
class ArgMaxExt(Primitive):
r"""
.. code-block::
prim = ops.ArgMaxExt()
out = prim(input, dim, keepdim)
is equivalent to
.. code-block::
ops.argmax(input, dim, keepdim)
Refer to :func:`mindspore.ops.argmax` for more details.
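Examples:
A minimal usage sketch (the printed indices are the expected argmax positions along `dim`):
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([[1, 20, 5], [67, 8, 9]]), mindspore.float32)
>>> output = ops.auto_generate.ArgMaxExt()(input, dim=1)
>>> print(output)
[1 0]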
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('dim', default=None),
sig.make_sig('keepdim', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim=None, keepdim=False):
return _convert_stub(pyboost_argmax_ext(self, [input, dim, keepdim]))
argmax_ext_op=ArgMaxExt()
class Argmax(Primitive):
r"""
Returns the indices of the maximum value along a specified `axis` of a Tensor.
Refer to :func:`mindspore.ops.argmax` for more details.
Args:
axis (int): Axis where the Argmax operation applies to. Default: ``-1`` .
output_type (:class:`mindspore.dtype`): Output data type.
Supported types: ``mstype.int32`` , ``mstype.int64`` . Default: ``mstype.int32`` .
Inputs:
- **input_x** (Tensor) - The input tensor. :math:`(N, *)` where :math:`*` means, any number of additional
dimensions.
Outputs:
Tensor, indices of the max value of input tensor across the axis.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
>>> output = ops.Argmax(output_type=mindspore.int32)(input_x)
>>> print(output)
[1 0 0]
"""
@prim_arg_register
def __init__(self, axis=-1, output_type=mstype.int32):
self._set_prim_arg("axis", axis)
self._set_prim_arg_with_handler("output_type", output_type, dtype_to_type_id)
def __call__(self, input_x):
return super().__call__(input_x, self.axis, self.output_type)
class ArgMaxWithValue(Primitive):
r"""
Calculates the maximum value along with the given axis for the input tensor, and returns the maximum values and
indices.
Note:
In auto_parallel and semi_auto_parallel mode, the first output index can not be used.
.. warning::
- If there are multiple maximum values, the index of the first maximum value is used.
- The value range of `axis` is [-dims, dims - 1]. "dims" is the dimension length of `input`.
Also see :func:`mindspore.ops.max`.
Args:
axis (int): The dimension to reduce. Default: ``0`` .
keep_dims (bool): Whether to keep the reduced dimension. If ``True`` , the output keeps the same number of
dimensions as the input; if ``False`` , the reduced dimension is removed. Default: ``False`` .
Inputs:
- **input** (Tensor) - The input tensor, can be any dimension. Set the shape of input tensor as
:math:`(input_1, input_2, ..., input_N)`.
Outputs:
tuple (Tensor), tuple of 2 tensors, containing the corresponding index and the maximum value of the input
tensor.
- **index** (Tensor) - The index for the maximum value of the input tensor, with dtype int64. If `keep_dims`
is ``True`` , the shape of output tensors is :math:`(input_1, input_2, ..., input_{axis-1}, 1, input_{axis+1}, ..., input_N)`.
Otherwise, the shape is :math:`(input_1, input_2, ..., input_{axis-1}, input_{axis+1}, ..., input_N)` .
- **values** (Tensor) - The maximum value of input tensor, with the same shape as `index`, and same dtype as `input`.
Raises:
TypeError: If `keep_dims` is not a bool.
TypeError: If `axis` is not an int.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
>>> index, output = ops.ArgMaxWithValue()(input_x)
>>> print(index, output)
3 0.7
>>> index, output = ops.ArgMaxWithValue(keep_dims=True)(input_x)
>>> print(index, output)
[3] [0.7]
"""
@prim_arg_register
def __init__(self, axis=0, keep_dims=False):
self._set_prim_arg("axis", axis)
self._set_prim_arg("keep_dims", keep_dims)
def __call__(self, input):
return _convert_stub(pyboost_argmax_with_value(self, [input, self.axis, self.keep_dims]))
class Argmin(Primitive):
r"""
Returns the indices of the minimum value along a specified `axis` of a Tensor.
If the shape of input tensor is :math:`(x_1, ..., x_N)`, the shape of the output tensor is
:math:`(x_1, ..., x_{axis-1}, x_{axis+1}, ..., x_N)`.
Args:
axis (int): Axis where the Argmin operation applies to. Default: ``-1`` .
output_type (:class:`mindspore.dtype`): Output data type.
Supported types: ``mstype.int32`` , ``mstype.int64`` . Default: ``mstype.int32`` .
Inputs:
- **input_x** (Tensor) - Input tensor.
The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
Outputs:
Tensor, which is the minimum index in the specified axis of input Tensor.
Raises:
TypeError: If `axis` is not an int.
TypeError: If `output_type` is neither int32 nor int64.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.array([2.0, 3.1, 1.2]), mindspore.float32)
>>> index = ops.Argmin()(input_x)
>>> print(index)
2
"""
@prim_arg_register
def __init__(self, axis=-1, output_type=mstype.int32):
self._set_prim_arg("axis", axis)
self._set_prim_arg_with_handler("output_type", output_type, dtype_to_type_id)
def __call__(self, x):
return super().__call__(x, self.axis, self.output_type)
class ArgMinWithValue(Primitive):
r"""
Calculates the minimum value along with the given axis for the input tensor, and returns the minimum values and
indices.
Note:
In auto_parallel and semi_auto_parallel mode, the first output index can not be used.
.. warning::
- If there are multiple minimum values, the index of the first minimum value is used.
- The value range of `axis` is [-dims, dims - 1]. "dims" is the dimension length of `input`.
Also see :func:`mindspore.ops.min`.
Args:
axis (int): The dimension to reduce. Default: ``0`` .
keep_dims (bool): Whether to keep the reduced dimension. If ``True`` , the output keeps the same number of
dimensions as the input; if ``False`` , the reduced dimension is removed. Default: ``False`` .
Inputs:
- **input** (Tensor) - The input tensor, can be any dimension. Set the shape of input tensor as
:math:`(input_1, input_2, ..., input_N)`. Complex tensors are not supported.
Outputs:
tuple (Tensor), tuple of 2 tensors, containing the corresponding index and the minimum value of the input
tensor.
- **index** (Tensor) - The index for the minimum value of the input tensor, with dtype int64. If `keep_dims`
is ``True`` , the shape of output tensors is :math:`(input_1, input_2, ..., input_{axis-1}, 1, input_{axis+1}, ..., input_N)`.
Otherwise, the shape is :math:`(input_1, input_2, ..., input_{axis-1}, input_{axis+1}, ..., input_N)` .
- **values** (Tensor) - The minimum value of input tensor, with the same
shape as `index`, and same dtype as `input`.
Raises:
TypeError: If `input` is not Tensor.
TypeError: If `keep_dims` is not a bool.
TypeError: If `axis` is not an int.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
>>> index, output = ops.ArgMinWithValue()(x)
>>> print(index, output)
0 0.0
>>> index, output = ops.ArgMinWithValue(keep_dims=True)(x)
>>> print(index, output)
[0] [0.0]
"""
@prim_arg_register
def __init__(self, axis=0, keep_dims=False):
self._set_prim_arg("axis", axis)
self._set_prim_arg("keep_dims", keep_dims)
def __call__(self, input):
return _convert_stub(pyboost_argmin_with_value(self, [input, self.axis, self.keep_dims]))
class AsinGrad(Primitive):
r"""
Computes AsinGrad of input element-wise.
Returns:
Tensor, has the same type as input.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, dout):
return super().__call__(x, dout)
asin_grad_op=AsinGrad()
class Asin(Primitive):
r"""
.. code-block::
prim = ops.Asin()
out = prim(input)
is equivalent to
.. code-block::
ops.asin(input)
Refer to :func:`mindspore.ops.asin` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
asin_op=Asin()
class AsinhGrad(Primitive):
r"""
Performs grad of Asinh operation.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, out, dout):
return super().__call__(out, dout)
asinh_grad_op=AsinhGrad()
class Asinh(Primitive):
r"""
.. code-block::
prim = ops.Asinh()
out = prim(input)
is equivalent to
.. code-block::
ops.asinh(input)
Refer to :func:`mindspore.ops.asinh` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
asinh_op=Asinh()
class AssignAdd(Primitive):
r"""
.. code-block::
prim = ops.AssignAdd()
out = prim(variable, value)
is equivalent to
.. code-block::
ops.assign_add(variable, value)
Refer to :func:`mindspore.ops.assign_add` for more details.
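Examples:
A minimal usage sketch (the printed value is the expected in-place sum of `variable` and `value`):
>>> import mindspore
>>> from mindspore import Tensor, Parameter, ops
>>> variable = Parameter(Tensor([1.0], mindspore.float32), name="variable")
>>> value = Tensor([2.0], mindspore.float32)
>>> output = ops.AssignAdd()(variable, value)
>>> print(variable.asnumpy())
[3.]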
"""
__mindspore_signature__ = (
sig.make_sig('variable', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('value', dtype=sig.sig_dtype.T),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, variable, value):
return super().__call__(variable, value)
assign_add_op=AssignAdd()
class Assign(Primitive):
r"""
.. code-block::
prim = ops.Assign()
out = prim(variable, value)
is equivalent to
.. code-block::
ops.assign(variable, value)
Refer to :func:`mindspore.ops.assign` for more details.
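Examples:
A minimal usage sketch (the printed value is the newly assigned value):
>>> import mindspore
>>> from mindspore import Tensor, Parameter, ops
>>> variable = Parameter(Tensor([1.0], mindspore.float32), name="variable")
>>> value = Tensor([2.0], mindspore.float32)
>>> output = ops.Assign()(variable, value)
>>> print(variable.asnumpy())
[2.]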
"""
__mindspore_signature__ = (
sig.make_sig('variable', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('value', dtype=sig.sig_dtype.T),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, variable, value):
return super().__call__(variable, value)
assign_op=Assign()
class Atan2(Primitive):
r"""
.. code-block::
prim = ops.Atan2()
out = prim(input, other)
is equivalent to
.. code-block::
ops.atan2(input, other)
Refer to :func:`mindspore.ops.atan2` for more details.
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
return super().__call__(input, other)
atan2_op=Atan2()
class AtanGrad(Primitive):
r"""
Computes AtanGrad of input element-wise.
Returns:
Tensor, has the same type as input.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, dout):
return super().__call__(x, dout)
atan_grad_op=AtanGrad()
class Atan(Primitive):
r"""
.. code-block::
prim = ops.Atan()
out = prim(input)
is equivalent to
.. code-block::
ops.atan(input)
Refer to :func:`mindspore.ops.atan` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
atan_op=Atan()
class Atanh(Primitive):
r"""
.. code-block::
prim = ops.Atanh()
out = prim(input)
is equivalent to
.. code-block::
ops.atanh(input)
Refer to :func:`mindspore.ops.atanh` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
atanh_op=Atanh()
class AvgPool2DGrad(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('grad'),
sig.make_sig('image'),
sig.make_sig('kernel_size'),
sig.make_sig('stride'),
sig.make_sig('padding', default=0),
sig.make_sig('ceil_mode', default=False),
sig.make_sig('count_include_pad', default=True),
sig.make_sig('divisor_override', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, grad, image, kernel_size, stride, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None):
return _convert_stub(pyboost_avg_pool2d_grad(self, [grad, image, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override]))
avg_pool2d_grad_op=AvgPool2DGrad()
class AvgPool2D(Primitive):
r"""
Applies a 2D average pooling over an input Tensor which can be regarded as a composition of 2D input planes.
Typically the input is of shape :math:`(N, C, H_{in}, W_{in})`, outputs regional average in the
:math:`(H_{in}, W_{in})`-dimension. Given kernel size :math:`(k_{H}, k_{W})` and `stride` , the operation
is as follows.
.. math::
\text{output}(N_i, C_j, h, w) = \frac{1}{k_{H} * k_{W}} \sum_{m=0}^{k_{H}-1} \sum_{n=0}^{k_{W}-1}
\text{input}(N_i, C_j, stride[0] \times h + m, stride[1] \times w + n)
Inputs:
input (Tensor): Tensor of shape :math:`(N, C, H_{in}, W_{in})`.
kernel_size (Union[int, tuple[int], list[int]]): The size of kernel used to take the average value. Can be
a single number or a tuple (kH, kW).
stride (Union[int, tuple[int], list[int]]): The distance of kernel moving. Can be a single number or
a tuple (sH, sW).
padding (Union(int, tuple[int], list[int])): Implicit zero padding to be added on both sides. Can be a single
number or a tuple (padH, padW). Default: 0.
ceil_mode (bool): If True, apply ceil instead of floor to compute the output shape. Default: ``False``.
count_include_pad (bool): If True, include the zero-padding in the averaging calculation. Default: ``True`` .
divisor_override (int): If specified, it will be used as divisor in the averaging calculation, otherwise
`kernel_size` will be used. Default: ``None``.
Outputs:
Tensor, with shape :math:`(N, C, H_{out}, W_{out})`.
.. math::
H_{out} = \frac{H_{in} + 2 \times padding[0] - kernel_size[0]}{stride[0]} + 1
W_{out} = \frac{W_{in} + 2 \times padding[1] - kernel_size[1]}{stride[1]} + 1
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If `kernel_size` or `stride` is neither int nor tuple.
TypeError: If `ceil_mode` or `count_include_pad` is not a bool.
TypeError: If `divisor_override` is not an int.
ValueError: If length of shape of `input` is not equal to `4` or `3`.
ValueError: If `kernel_size` or `stride` is less than 1.
ValueError: If `kernel_size` or `stride` is a tuple whose length is not equal to `2` or `1`.
ValueError: If `padding` is neither a int nor a tuple whose length is equal to `2` or `1`.
ValueError: If value of `padding` is less than `0`.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.arange(1 * 3 * 3 * 4).reshape(1, 3, 3, 4), mindspore.float32)
>>> output = ops.auto_generate.AvgPool2D()(x, 2, 1)
>>> print(output)
[[[[ 2.5 3.5 4.5]
[ 6.5 7.5 8.5]]
[[14.5 15.5 16.5]
[18.5 19.5 20.5]]
[[26.5 27.5 28.5]
[30.5 31.5 32.5]]]]
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('kernel_size'),
sig.make_sig('stride'),
sig.make_sig('padding', default=0),
sig.make_sig('ceil_mode', default=False),
sig.make_sig('count_include_pad', default=True),
sig.make_sig('divisor_override', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, kernel_size, stride, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None):
return _convert_stub(pyboost_avg_pool2d(self, [input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override]))
avg_pool2d_op=AvgPool2D()
class AvgPoolGrad(Primitive):
r"""
Gradients of the avg pool operation.
"""
@prim_arg_register
def __init__(self, kernel_size=1, strides=1, pad_mode='VALID', data_format='NCHW'):
self._set_prim_arg_with_handler("kernel_size", kernel_size, to_kernel_size)
self._set_prim_arg_with_handler("strides", strides, to_strides)
self._set_prim_arg_with_handler("pad_mode", pad_mode, str_to_enum)
self._set_prim_arg_with_handler("data_format", data_format, str_to_enum)
def __call__(self, x, out, dout):
return super().__call__(x, out, dout, self.kernel_size, self.strides, self.pad_mode, self.data_format)
class AvgPool(Primitive):
r"""
Average pooling operation.
Refer to :func:`mindspore.ops.avg_pool2d` for more details.
Args:
kernel_size (Union[int, tuple[int]]): The size of kernel used to take the average value,
is an int number that represents height and width of the kernel, or a tuple
of two int numbers that represent height and width respectively. Default: ``1`` .
strides (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
the height and width of movement are both strides, or a tuple of two int numbers that
represent height and width of movement respectively. Default: ``1`` .
pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
``"SAME"`` or ``"VALID"`` . Default: ``"VALID"`` .
- ``"SAME"``: Pad the input around its edges so that the shape of input and output
are the same when `stride` is set to ``1``.
The amount of padding to be added is calculated by the operator internally. If the amount is even, it is
uniformly distributed around the input; if it is odd, the excess amount goes to the right/bottom side.
- ``"VALID"``: No padding is applied to the input, and the output returns the maximum
possible height and width. Extra pixels that could not complete a full stride will
be discarded.
data_format (str, optional): The format of input and output data. It should be ``'NHWC'`` or ``'NCHW'`` .
Default: ``'NCHW'`` .
Inputs:
- **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
Supported dtypes: float16, float32, float64.
Outputs:
Tensor, with shape :math:`(N, C_{out}, H_{out}, W_{out})`.
Raises:
TypeError: If `kernel_size` or `strides` is neither int nor tuple.
TypeError: If dtype of `x` is not float16, float32 or float64.
ValueError: If `kernel_size` or `strides` is less than 1.
ValueError: If `pad_mode` is neither 'VALID' nor 'SAME' (case-insensitive).
ValueError: If `data_format` is neither 'NCHW' nor 'NHWC'.
ValueError: If length of shape of `x` is not equal to 4.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops, nn
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.avgpool_op = ops.AvgPool(pad_mode='VALID', kernel_size=2, strides=1)
...
... def construct(self, x):
... result = self.avgpool_op(x)
... return result
...
>>> x = Tensor(np.arange(1 * 3 * 3 * 4).reshape(1, 3, 3, 4), mindspore.float32)
>>> net = Net()
>>> output = net(x)
>>> print(output)
[[[[ 2.5 3.5 4.5]
[ 6.5 7.5 8.5]]
[[14.5 15.5 16.5]
[18.5 19.5 20.5]]
[[26.5 27.5 28.5]
[30.5 31.5 32.5]]]]
"""
@prim_arg_register
def __init__(self, kernel_size=1, strides=1, pad_mode='VALID', data_format='NCHW'):
self._set_prim_arg_with_handler("kernel_size", kernel_size, to_kernel_size)
self._set_prim_arg_with_handler("strides", strides, to_strides)
self._set_prim_arg_with_handler("pad_mode", pad_mode, str_to_enum)
self._set_prim_arg_with_handler("data_format", data_format, str_to_enum)
def __call__(self, x):
return super().__call__(x, self.kernel_size, self.strides, self.pad_mode, self.data_format)
class BatchMatMul(Primitive):
r"""
Computes matrix multiplication between two tensors by batch.
.. math::
\text{output}[..., :, :] = \text{matrix}(x[..., :, :]) * \text{matrix}(y[..., :, :])
The rank of both two input tensors must be same and not less than `2`.
Args:
transpose_a (bool): If ``True`` , the last two dimensions of `x` is transposed before multiplication.
Default: ``False`` .
transpose_b (bool): If ``True`` , the last two dimensions of `y` is transposed before multiplication.
Default: ``False`` .
Inputs:
- **x** (Tensor) - The first tensor to be multiplied. The shape of the tensor is :math:`(*B, N, C)`,
where :math:`*B` represents the batch size which can be multidimensional, :math:`N` and :math:`C` are the
size of the last two dimensions. If `transpose_a` is ``True`` , its shape must be :math:`(*B, C, N)`.
- **y** (Tensor) - The second tensor to be multiplied. The shape of the tensor is :math:`(*B, C, M)`. If
`transpose_b` is ``True`` , its shape must be :math:`(*B, M, C)`.
Outputs:
Tensor, the shape of the output tensor is :math:`(*B, N, M)`.
Raises:
TypeError: If `transpose_a` or `transpose_b` is not a bool.
ValueError: If length of shape of `x` is not equal to length of shape of `y` or
length of shape of inputs is less than 2.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.ones(shape=[2, 4, 1, 3]), mindspore.float32)
>>> y = Tensor(np.ones(shape=[2, 4, 3, 4]), mindspore.float32)
>>> batmatmul = ops.BatchMatMul()
>>> output = batmatmul(x, y)
>>> print(output.shape)
(2, 4, 1, 4)
>>> x = Tensor(np.ones(shape=[2, 4, 3, 1]), mindspore.float32)
>>> y = Tensor(np.ones(shape=[2, 4, 3, 4]), mindspore.float32)
>>> batmatmul = ops.BatchMatMul(transpose_a=True)
>>> output = batmatmul(x, y)
>>> print(output.shape)
(2, 4, 1, 4)
"""
@prim_arg_register
def __init__(self, transpose_a=False, transpose_b=False):
self._set_prim_arg("transpose_a", transpose_a)
self._set_prim_arg("transpose_b", transpose_b)
def __call__(self, x, y):
return _convert_stub(pyboost_batch_mat_mul(self, [x, y, self.transpose_a, self.transpose_b]))
class BatchNormExt(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('weight'),
sig.make_sig('bias'),
sig.make_sig('running_mean'),
sig.make_sig('running_var'),
sig.make_sig('training', default=False),
sig.make_sig('momentum', default=0.1),
sig.make_sig('epsilon', default=1e-5),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, weight, bias, running_mean, running_var, training=False, momentum=0.1, epsilon=1e-5):
return _convert_stub(pyboost_batch_norm_ext(self, [input, weight, bias, running_mean, running_var, training, momentum, epsilon]))
batch_norm_ext_op=BatchNormExt()
class BatchNormGradExt(Primitive):
r"""
"""
@prim_arg_register
def __init__(self, training=False, eps=1e-5):
self._set_prim_arg("training", training)
self._set_prim_arg("eps", eps)
def __call__(self, dout, input, weight, running_mean, running_var, saved_mean, saved_rstd):
return _convert_stub(pyboost_batch_norm_grad_ext(self, [dout, input, weight, running_mean, running_var, saved_mean, saved_rstd, self.training, self.eps]))
class BatchNormGradGrad(Primitive):
r"""
Performs grad of BatchNormGrad operation.
"""
@prim_arg_register
def __init__(self, is_training=False, epsilon=1e-5, data_format='NCHW'):
self._set_prim_arg("is_training", is_training)
self._set_prim_arg("epsilon", epsilon)
self._set_prim_arg_with_handler("data_format", data_format, str_to_enum)
def __call__(self, x, dy, scale, saved_mean, saved_variance, dout_dx, dout_dscale, dout_dbias):
return super().__call__(x, dy, scale, saved_mean, saved_variance, dout_dx, dout_dscale, dout_dbias, self.is_training, self.epsilon, self.data_format)
class BatchNormGrad(Primitive):
r"""
Performs grad of BatchNorm operation.
"""
@prim_arg_register
def __init__(self, is_training=False, epsilon=1e-5, data_format='NCHW'):
self._set_prim_arg("is_training", is_training)
self._set_prim_arg("epsilon", epsilon)
self._set_prim_arg_with_handler("data_format", data_format, str_to_enum)
def __call__(self, dout, x, scale, saved_mean, saved_variance, reserve):
return super().__call__(dout, x, scale, saved_mean, saved_variance, reserve, self.is_training, self.epsilon, self.data_format)
class Betainc(Primitive):
r"""
Calculates the regularized incomplete beta function
:math:`I_{x}(a, b)`. It is defined as the ratio of the incomplete beta function
to the complete beta function:
.. math::
I_{x}(a, b)=\frac{B(x ; a, b)}{B(a, b)}
where
.. math::
B(x ; a, b)=\int_{0}^{x} t^{a-1}(1-t)^{b-1} dt
is the incomplete beta function and
.. math::
B(a, b) = \int_0^1 t^{a-1} (1-t)^{b-1} dt
is the complete beta function.
Inputs:
- **a** (Tensor) - Peak location of beta distribution.
A Tensor of types: float32, float64.
- **b** (Tensor) - Spread of the beta distribution.
A Tensor, must have the same dtype and shape as `a` .
- **x** (Tensor) - Upper limit of integration of the incomplete beta function.
A Tensor, must have the same dtype and shape as `a` .
Outputs:
A Tensor, has the same dtype and shape as `a` .
Raises:
TypeError: If dtype of `a` is not float32 nor float64.
TypeError: If the dtype of `b` or `x` is not the same as that of `a`.
ValueError: If the shape of `b` or `x` is not the same as that of `a`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> a = Tensor(np.array([0.3, 0.1, 0.4]), mindspore.float32)
>>> b = Tensor(np.array([0.4, 0.5, 0.9]), mindspore.float32)
>>> x = Tensor(np.array([0.2, 0.6, 0.5]), mindspore.float32)
>>> betainc = ops.Betainc()
>>> print(betainc(a, b, x))
[0.41462693 0.8706035 0.7298298 ]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, a, b, x):
return super().__call__(a, b, x)
betainc_op=Betainc()
class BiasAddGrad(Primitive):
r"""
Computes gradients of BiasAdd.
"""
@prim_arg_register
def __init__(self, data_format='NCHW'):
self._set_prim_arg_with_handler("data_format", data_format, str_to_enum)
def __call__(self, dout):
return super().__call__(dout, self.data_format)
class BiasAdd(Primitive):
r"""
Returns the sum of the input Tensor and the bias Tensor. Before adding, the bias Tensor will be broadcasted to be
consistent with the shape of the input Tensor.
Args:
data_format (str, optional): The format of input and output data.
It should be ``"NHWC"`` , ``"NCHW"`` or ``"NCDHW"`` .
Default is ``"NCHW"`` .
Inputs:
- **input_x** (Tensor) - The input tensor. The shape can be 2-5 dimensions. Supported dtypes:
- Ascend/CPU: all Number type.
- GPU: float16, float32, int8.
- **bias** (Tensor) - The bias tensor, with shape :math:`(C)`. C must be the same as channel dimension C of
`input_x`. It has the same type as `input_x`.
Outputs:
Tensor, with the same shape and data type as `input_x`.
Raises:
TypeError: If `data_format` is not a str.
ValueError: If value of `data_format` is not in the range of ['NHWC','NCHW','NCDHW'].
TypeError: If `input_x` or `bias` is not a Tensor.
TypeError: If dtype of `input_x` or `bias` is inconsistent.
TypeError: If dimension of `input_x` is not in the range [2, 5].
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.arange(6).reshape((2, 3)), mindspore.float32)
>>> bias = Tensor(np.random.random(3).reshape((3,)), mindspore.float32)
>>> bias_add = ops.BiasAdd()
>>> output = bias_add(input_x, bias)
>>> print(output.shape)
(2, 3)
"""
@prim_arg_register
def __init__(self, data_format='NCHW'):
self._set_prim_arg_with_handler("data_format", data_format, str_to_enum)
def __call__(self, input_x, bias):
return super().__call__(input_x, bias, self.data_format)
class BatchMatMulExt(Primitive):
r"""
.. code-block::
prim = ops.BatchMatMulExt()
out = prim(input, mat2)
is equivalent to
.. code-block::
ops.bmm_ext(input, mat2)
Refer to :func:`mindspore.ops.bmm_ext` for more details.
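A hand-written illustrative sketch (not auto-generated); it assumes ops.BatchMatMulExt is accessible as shown in the code block above and only checks the output shape:
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.ones([2, 3, 4]).astype(np.float32))
>>> mat2 = Tensor(np.ones([2, 4, 5]).astype(np.float32))
>>> output = ops.BatchMatMulExt()(input, mat2)
>>> print(output.shape)
(2, 3, 5)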
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, mat2):
return _convert_stub(pyboost_bmm_ext(self, [input, mat2]))
bmm_ext_op=BatchMatMulExt()
class BoolNot(Primitive):
r"""
Returns the logical negation (`not`) of the bool input.
.. note::
The input can be a constant or variable value. Usage is the same as 'not' in Python.
This primitive only has a 'CPU' implementation; on other platforms, it runs heterogeneously.
Inputs:
- **x** (Scalar) - A constant or variable scalar, the type can be bool.
Outputs:
Scalar, the type is bool.
Raises:
TypeError: If `x` is not a bool scalar.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x):
return super().__call__(x)
bool_not_op=BoolNot()
class BroadcastTo(Primitive):
r"""
.. code-block::
prim = ops.BroadcastTo(shape)
out = prim(input)
is equivalent to
.. code-block::
ops.broadcast_to(input, shape)
Refer to :func:`mindspore.ops.broadcast_to` for more details.
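A hand-written illustrative example (not auto-generated):
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> shape = (2, 3)
>>> x = Tensor(np.array([1, 2, 3]).astype(np.float32))
>>> output = ops.BroadcastTo(shape)(x)
>>> print(output)
[[1. 2. 3.]
[1. 2. 3.]]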
"""
@prim_arg_register
def __init__(self, shape):
self._set_prim_arg("shape", type_it('BroadcastTo', 'shape', shape, (OpDtype.DT_LIST_INT, OpDtype.DT_TENSOR), OpDtype.DT_TUPLE_INT))
def __call__(self, input):
return _convert_stub(pyboost_broadcast_to(self, [input, self.shape]))
class Ceil(Primitive):
r"""
.. code-block::
prim = ops.Ceil()
out = prim(input)
is equivalent to
.. code-block::
ops.ceil(input)
Refer to :func:`mindspore.ops.ceil` for more details.
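A hand-written illustrative example (not auto-generated):
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([1.1, 2.5, -1.5]), mindspore.float32)
>>> output = ops.Ceil()(x)
>>> print(output)
[ 2. 3. -1.]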
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return _convert_stub(pyboost_ceil(self, [input]))
ceil_op=Ceil()
class CeLU(Primitive):
r"""
.. code-block::
prim = ops.CeLU(alpha)
out = prim(x)
is equivalent to
.. code-block::
ops.celu(x, alpha)
Refer to :func:`mindspore.ops.celu` for more details.
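A hand-written illustrative example (not auto-generated; the commented output values are approximate):
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([-2.0, -1.0, 1.0, 2.0]), mindspore.float32)
>>> output = ops.CeLU(alpha=1.0)(x)
>>> # output is approximately [-0.8646647, -0.6321206, 1.0, 2.0]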
"""
@prim_arg_register
def __init__(self, alpha=1.0):
self._set_prim_arg("alpha", alpha)
def __call__(self, x):
return super().__call__(x, self.alpha)
class CholeskyGrad(Primitive):
r"""
Computes the reverse mode backpropagated gradient of the Cholesky algorithm.
Inputs:
- **x** (Tensor) - A tensor with float32 or float64 data type.
- **grad** (Tensor) - A tensor with float32 or float64 data type. `grad` should have
the same dtype with `x`.
Outputs:
Tensor, has the same dtype as `x` and `grad`.
Raises:
TypeError: If x is not Tensor.
TypeError: If grad is not Tensor.
TypeError: If dtype of input `x` and `grad` is neither float64 nor float32.
TypeError: If `x` has a different dtype from `grad`.
ValueError: If the input tensor's last two dims are not equal.
ValueError: If the shapes of `x` and `grad` mismatch.
Supported Platforms:
``Ascend``
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, grad):
return super().__call__(x, grad)
cholesky_grad_op=CholeskyGrad()
class CholeskyInverse(Primitive):
r"""
Returns the inverse of the positive-definite matrix, given its Cholesky factor, using Cholesky matrix factorization.
Refer to :func:`mindspore.ops.cholesky_inverse` for more details.
Args:
upper(bool, optional): Whether to return a lower or upper triangular matrix. Default: ``False`` .
Inputs:
- **x** (Tensor) - The input tensor whose rank is 2. Supported dtypes: float32, float64.
Outputs:
Tensor, has the same shape and dtype as `x`.
Supported Platforms:
``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([[1, 1], [1, 2]]), mindspore.float32)
>>> y = ops.CholeskyInverse()(x)
>>> print(y)
[[ 5.0 -3.0 ]
[-3.0 2.0 ]]
"""
@prim_arg_register
def __init__(self, upper=False):
self._set_prim_arg("upper", upper)
def __call__(self, input_x):
return super().__call__(input_x, self.upper)
class Cholesky(Primitive):
r"""
Performs the Cholesky decomposition on a single or a batch of symmetric positive-definite matrices.
.. warning::
This is an experimental API that is subject to change or deletion.
Refer to :func:`mindspore.ops.cholesky` for more details.
Args:
upper (bool, optional): Flag that indicates whether to return an upper or lower triangular matrix.
Default: ``False`` .
Inputs:
- **input_x** (Tensor) - Tensor of shape :math:`(*, N, N)`, where :math:`*` is zero or more batch dimensions
consisting of symmetric positive-definite matrices, with float32 or float64 data type.
Outputs:
Tensor, has the same shape and data type as `input_x`.
Supported Platforms:
``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.array([[1.0, 1.0], [1.0, 2.0]]), mindspore.float32)
>>> output = ops.Cholesky()(input_x)
>>> print(output)
[[1. 0.]
[1. 1.]]
"""
@prim_arg_register
def __init__(self, upper=False):
self._set_prim_arg("upper", upper)
def __call__(self, input_x):
return super().__call__(input_x, self.upper)
class Chunk(Primitive):
r"""
Cut the input Tensor into `chunks` sub-tensors along the specified axis.
Note:
This function may return fewer than the specified number of chunks!
Inputs:
input (Tensor): A Tensor to be cut.
chunks (int): Number of sub-tensors to cut.
dim (int, optional): Specify the dimensions that you want to split. Default: ``0`` .
Returns:
A tuple of sub-tensors.
Raises:
TypeError: If argument `input` is not Tensor.
TypeError: If argument `chunks` is not an int.
TypeError: If argument `dim` is not int.
ValueError: If argument `dim` is out of range of :math:`[-input.ndim, input.ndim)` .
ValueError: If argument `chunks` is not a positive number.
Supported Platforms:
``Ascend``
Examples:
>>> import numpy as np
>>> from mindspore import ops, Tensor
>>> input_x = np.arange(9).astype("float32")
>>> output = ops.Chunk()(Tensor(input_x), 3)
>>> print(output)
(Tensor(shape=[3], dtype=Float32, value= [ 0.00000000e+00, 1.00000000e+00, 2.00000000e+00]),
Tensor(shape=[3], dtype=Float32, value= [ 3.00000000e+00, 4.00000000e+00, 5.00000000e+00]),
Tensor(shape=[3], dtype=Float32, value= [ 6.00000000e+00, 7.00000000e+00, 8.00000000e+00]))
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('chunks'),
sig.make_sig('dim', default=0),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, chunks, dim=0):
return _convert_stub(pyboost_chunk(self, [input, chunks, dim]))
chunk_op=Chunk()
class ClampScalar(Primitive):
r"""
.. code-block::
prim = ops.ClampScalar()
out = prim(input, min, max)
is equivalent to
.. code-block::
ops.clamp_scalar(input, min, max)
Refer to :func:`mindspore.ops.clamp_scalar` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('min', default=None),
sig.make_sig('max', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, min=None, max=None):
return _convert_stub(pyboost_clamp_scalar(self, [input, min, max]))
clamp_scalar_op=ClampScalar()
class ClampTensor(Primitive):
r"""
.. code-block::
prim = ops.ClampTensor()
out = prim(input, min, max)
is equivalent to
.. code-block::
ops.clamp_tensor(input, min, max)
Refer to :func:`mindspore.ops.clamp_tensor` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('min', default=None),
sig.make_sig('max', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, min=None, max=None):
return _convert_stub(pyboost_clamp_tensor(self, [input, min, max]))
clamp_tensor_op=ClampTensor()
class Complex(Primitive):
r"""
Returns a complex Tensor from the real part and the imag part.
.. warning::
This is an experimental API that is subject to change or deletion.
Inputs:
- **real** (Tensor) - The real input tensor. types: float32, float64.
- **imag** (Tensor) - The imag input tensor. types: float32, float64.
Outputs:
Tensor, has the complex type.
Raises:
TypeError: If the dtype of input is not one of: float32, float64.
TypeError: If the dtypes of the two inputs are not the same.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> real = Tensor(np.array([1]), mindspore.float32)
>>> imag = Tensor(np.array([2]), mindspore.float32)
>>> complex = ops.Complex()
>>> output = complex(real, imag)
>>> print(output)
[1.+2.j]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, real, imag):
return super().__call__(real, imag)
complex_op=Complex()
class Concat(Primitive):
r"""
.. code-block::
prim = ops.Concat(axis)
out = prim(tensors)
is equivalent to
.. code-block::
ops.cat(tensors, axis)
Refer to :func:`mindspore.ops.cat` for more details.
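A hand-written illustrative example (not auto-generated):
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x1 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
>>> x2 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
>>> output = ops.Concat(axis=0)((x1, x2))
>>> print(output)
[[0. 1.]
[2. 1.]
[0. 1.]
[2. 1.]]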
"""
@prim_arg_register
def __init__(self, axis=0):
self._set_prim_arg("axis", axis)
def __call__(self, tensors):
return _convert_stub(pyboost_concat(self, [tensors, self.axis]))
class Conj(Primitive):
r"""
.. code-block::
prim = ops.Conj()
out = prim(input)
is equivalent to
.. code-block::
ops.conj(input)
Refer to :func:`mindspore.ops.conj` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
conj_op=Conj()
class ConstantPadND(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('padding'),
sig.make_sig('value', default=0.0),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, padding, value=0.0):
return _convert_stub(pyboost_constant_pad_nd(self, [input, padding, value]))
constant_pad_nd_op=ConstantPadND()
class Contiguous(Primitive):
r"""
.. code-block::
prim = ops.Contiguous()
out = prim(input)
is equivalent to
.. code-block::
ops.contiguous(input)
Refer to :func:`mindspore.ops.contiguous` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return _convert_stub(pyboost_contiguous(self, [input]))
contiguous_op=Contiguous()
class ConvolutionGrad(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('dout'),
sig.make_sig('input'),
sig.make_sig('weight'),
sig.make_sig('bias', default=None),
)
@prim_arg_register
def __init__(self, stride=1, padding=0, dilation=1, transposed=False, output_padding=0, groups=1, output_mask=()):
self._set_prim_arg_with_handler("stride", stride, to_strides)
self._set_prim_arg_with_handler("padding", padding, to_2d_paddings)
self._set_prim_arg_with_handler("dilation", dilation, to_dilations)
self._set_prim_arg("transposed", transposed)
self._set_prim_arg_with_handler("output_padding", output_padding, to_output_padding)
self._set_prim_arg("groups", groups)
self._set_prim_arg("output_mask", output_mask)
def __call__(self, dout, input, weight, bias=None):
return _convert_stub(pyboost_convolution_grad(self, [dout, input, weight, bias, self.stride, self.padding, self.dilation, self.transposed, self.output_padding, self.groups, self.output_mask]))
class Convolution(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('weight'),
sig.make_sig('bias', default=None),
)
@prim_arg_register
def __init__(self, stride=1, padding=0, dilation=1, transposed=False, output_padding=0, groups=1):
self._set_prim_arg_with_handler("stride", stride, to_strides)
self._set_prim_arg_with_handler("padding", padding, to_2d_paddings)
self._set_prim_arg_with_handler("dilation", dilation, to_dilations)
self._set_prim_arg("transposed", transposed)
self._set_prim_arg_with_handler("output_padding", output_padding, to_output_padding)
self._set_prim_arg("groups", groups)
def __call__(self, input, weight, bias=None):
return _convert_stub(pyboost_convolution(self, [input, weight, bias, self.stride, self.padding, self.dilation, self.transposed, self.output_padding, self.groups]))
class Copy(Primitive):
r"""
.. code-block::
prim = ops.Copy()
out = prim(input)
is equivalent to
.. code-block::
ops.copy(input)
Refer to :func:`mindspore.ops.copy` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return _convert_stub(pyboost_copy(self, [input]))
copy_op=Copy()
class Correlate(Primitive):
r"""
.. code-block::
prim = ops.Correlate(mode)
out = prim(a, v)
is equivalent to
.. code-block::
ops.correlate(a, v, mode)
Refer to :func:`mindspore.ops.correlate` for more details.
"""
@prim_arg_register
def __init__(self, mode='valid'):
self._set_prim_arg_with_handler("mode", mode, str_to_enum)
def __call__(self, a, v):
return super().__call__(a, v, self.mode)
class Cos(Primitive):
r"""
.. code-block::
prim = ops.Cos()
out = prim(input)
is equivalent to
.. code-block::
ops.cos(input)
Refer to :func:`mindspore.ops.cos` for more details.
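A hand-written illustrative example (not auto-generated; the commented output values are approximate):
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([0.0, 0.5, 1.0]), mindspore.float32)
>>> output = ops.Cos()(x)
>>> # output is approximately [1.0, 0.8775826, 0.5403023]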
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return _convert_stub(pyboost_cos(self, [input]))
cos_op=Cos()
class Cosh(Primitive):
r"""
.. code-block::
prim = ops.Cosh()
out = prim(input)
is equivalent to
.. code-block::
ops.cosh(input)
Refer to :func:`mindspore.ops.cosh` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
cosh_op=Cosh()
class CumProd(Primitive):
r"""
Computes the cumulative product of the tensor x along axis.
For example, if input is a vector of size N, the result will also be a vector of size N, with elements.
.. math::
y_i = x_1 * x_2 * x_3 * ... * x_i
Args:
exclusive (bool): If ``True`` , perform exclusive cumulative product. Default: ``False`` .
reverse (bool): If ``True`` , reverse the result along axis. Default: ``False`` .
Inputs:
- **x** (Tensor[Number]) - The input Tensor with shape
:math:`(N, *)` where :math:`*` means any number of additional dimensions.
- **axis** (int) - The dimensions to compute the cumulative product.
Only constant value is allowed.
Outputs:
Tensor, has the same shape and dtype as the `x`.
Raises:
TypeError: If `exclusive` or `reverse` is not a bool.
TypeError: If `axis` is not an int.
ValueError: If `axis` is None.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> a, b, c, = 1, 2, 3
>>> x = Tensor(np.array([a, b, c]).astype(np.float32))
>>> op0 = ops.CumProd()
>>> output0 = op0(x, 0) # output=[a, a * b, a * b * c]
>>> op1 = ops.CumProd(exclusive=True)
>>> output1 = op1(x, 0) # output=[1, a, a * b]
>>> op2 = ops.CumProd(reverse=True)
>>> output2 = op2(x, 0) # output=[a * b * c, b * c, c]
>>> op3 = ops.CumProd(exclusive=True, reverse=True)
>>> output3 = op3(x, 0) # output=[b * c, c, 1]
>>> print(output0)
[1. 2. 6.]
>>> print(output1)
[1. 1. 2.]
>>> print(output2)
[6. 6. 3.]
>>> print(output3)
[6. 3. 1.]
>>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [5, 3, 5]]).astype(np.float32))
>>> output4 = op0(x, 0)
>>> output5 = op0(x, 1)
>>> print(output4)
[[ 1. 2. 3.]
[ 4. 10. 18.]
[20. 30. 90.]]
>>> print(output5)
[[ 1. 2. 6.]
[ 4. 20. 120.]
[ 5. 15. 75.]]
"""
@prim_arg_register
def __init__(self, exclusive=False, reverse=False):
self._set_prim_arg("exclusive", exclusive)
self._set_prim_arg("reverse", reverse)
def __call__(self, x, axis):
return super().__call__(x, axis, self.exclusive, self.reverse)
class CumSum(Primitive):
r"""
Computes the cumulative sum of input tensor along axis.
.. math::
y_i = x_1 + x_2 + x_3 + ... + x_i
Args:
exclusive (bool): By default, this op performs an inclusive cumsum, which means that the first
element of the input is identical to the first element of the output. Default: ``False`` .
reverse (bool): If ``True`` , perform inverse cumulative sum. Default: ``False`` .
Inputs:
- **input** (Tensor) - The input Tensor with shape
:math:`(N, *)` where :math:`*` means any number of additional dimensions.
- **axis** (int) - The axis to accumulate the tensor's value. Only constant value is allowed.
Must be in the range [-rank(input), rank(input)).
Outputs:
Tensor, the shape of the output tensor is consistent with the input tensor's.
Raises:
TypeError: If `exclusive` or `reverse` is not a bool.
TypeError: If `axis` is not an int.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float32))
>>> cumsum = ops.CumSum()
>>> # case 1: along the axis 0
>>> y = cumsum(x, 0)
>>> print(y)
[[ 3. 4. 6. 10.]
[ 4. 10. 13. 19.]
[ 8. 13. 21. 26.]
[ 9. 16. 28. 35.]]
>>> # case 2: along the axis 1
>>> y = cumsum(x, 1)
>>> print(y)
[[ 3. 7. 13. 23.]
[ 1. 7. 14. 23.]
[ 4. 7. 15. 22.]
[ 1. 4. 11. 20.]]
>>> # Next demonstrate exclusive and reverse, along axis 1
>>> # case 3: exclusive = True
>>> cumsum = ops.CumSum(exclusive=True)
>>> y = cumsum(x, 1)
>>> print(y)
[[ 0. 3. 7. 13.]
[ 0. 1. 7. 14.]
[ 0. 4. 7. 15.]
[ 0. 1. 4. 11.]]
>>> # case 4: reverse = True
>>> cumsum = ops.CumSum(reverse=True)
>>> y = cumsum(x, 1)
>>> print(y)
[[23. 20. 16. 10.]
[23. 22. 16. 9.]
[22. 18. 15. 7.]
[20. 19. 16. 9.]]
"""
@prim_arg_register
def __init__(self, exclusive=False, reverse=False):
self._set_prim_arg("exclusive", exclusive)
self._set_prim_arg("reverse", reverse)
def __call__(self, input, axis):
return super().__call__(input, axis, self.exclusive, self.reverse)
class Cummax(Primitive):
r"""
.. code-block::
prim = ops.Cummax(axis)
out = prim(input)
is equivalent to
.. code-block::
ops.cummax(input, axis)
Refer to :func:`mindspore.ops.cummax` for more details.
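A hand-written illustrative example (not auto-generated):
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([1, 3, 2, 5, 4]), mindspore.int32)
>>> values, indices = ops.Cummax(axis=0)(x)
>>> print(values)
[1 3 3 5 5]
>>> print(indices)
[0 1 1 3 3]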
"""
@prim_arg_register
def __init__(self, axis):
self._set_prim_arg("axis", axis)
def __call__(self, input):
return super().__call__(input, self.axis)
class Cummin(Primitive):
r"""
Returns the cumulative minimum of elements and the index.
.. warning::
This is an experimental API that is subject to change or deletion.
Refer to :func:`mindspore.ops.cummin` for more details.
Args:
axis (int): The axis to accumulate the tensor's value. Must be in the range [-rank(input), rank(input)).
Inputs:
- **input** (Tensor) - The input tensor.
Outputs:
A tuple of 2 Tensors(values, indices), containing the cumulative minimum of elements and the index,
the shape of each output tensor is the same as input `input`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> from mindspore import Tensor, ops
>>> import mindspore
>>> a = Tensor([-0.2284, -0.6628, 0.0975, 0.2680, -1.3298, -0.4220], mindspore.float32)
>>> axis = 0
>>> output = ops.Cummin(axis)(a)
>>> print(output[0])
[-0.2284 -0.6628 -0.6628 -0.6628 -1.3298 -1.3298]
>>> print(output[1])
[0 1 1 1 4 4]
"""
@prim_arg_register
def __init__(self, axis):
self._set_prim_arg("axis", axis)
def __call__(self, input):
return super().__call__(input, self.axis)
class DCT(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('type'),
sig.make_sig('n'),
sig.make_sig('axis', default=-1),
sig.make_sig('norm', default='BACKWARD'),
sig.make_sig('forward', default=True),
sig.make_sig('grad', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, type, n, axis=-1, norm='BACKWARD', forward=True, grad=False):
return super().__call__(x, type, n, axis, str_to_enum('DCT', 'norm', norm), forward, grad)
dct_op=DCT()
class DecoderKVCache(Primitive):
r"""
.. code-block::
prim = ops.DecoderKVCache()
out = prim(cache, update, valid_seq_len, batch_index, seq_len_axis, new_max_seq_len, cur_max_seq_len)
is equivalent to
.. code-block::
ops.decoder_k_v_cache(cache, update, valid_seq_len, batch_index, seq_len_axis, new_max_seq_len, cur_max_seq_len)
Refer to :func:`mindspore.ops.decoder_k_v_cache` for more details.
"""
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, cache, update, valid_seq_len, batch_index, seq_len_axis, new_max_seq_len, cur_max_seq_len):
return super().__call__(cache, update, valid_seq_len, batch_index, seq_len_axis, new_max_seq_len, cur_max_seq_len)
decoder_k_v_cache_op=DecoderKVCache()
class Dense(Primitive):
r"""
.. code-block::
prim = ops.Dense()
out = prim(input, weight, bias)
is equivalent to
.. code-block::
ops.dense(input, weight, bias)
Refer to :func:`mindspore.ops.dense` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('weight'),
sig.make_sig('bias', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, weight, bias=None):
return _convert_stub(pyboost_dense(self, [input, weight, bias]))
dense_op=Dense()
class Diag(Primitive):
r"""
.. code-block::
prim = ops.Diag()
out = prim(input)
is equivalent to
.. code-block::
ops.diag(input)
Refer to :func:`mindspore.ops.diag` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
diag_op=Diag()
class Diagonal(Primitive):
r"""
.. code-block::
prim = ops.Diagonal(offset, dim1, dim2)
out = prim(input)
is equivalent to
.. code-block::
ops.diagonal(input, offset, dim1, dim2)
Refer to :func:`mindspore.ops.diagonal` for more details.
"""
@prim_arg_register
def __init__(self, offset=0, dim1=0, dim2=1):
self._set_prim_arg("offset", offset)
self._set_prim_arg("dim1", dim1)
self._set_prim_arg("dim2", dim2)
def __call__(self, input):
return super().__call__(input, self.offset, self.dim1, self.dim2)
class Div(Primitive):
r"""
Computes the quotient of dividing the first input tensor by the second input tensor element-wise.
Refer to :func:`mindspore.ops.div` for more details.
Note:
- One of the two inputs must be a Tensor, when the two inputs have different shapes,
they must be able to broadcast to a common shape.
- The two inputs can not be bool type at the same time,
[True, Tensor(True, bool\_), Tensor(np.array([True]), bool\_)] are all considered bool type.
- The two inputs comply with the implicit type conversion rules to make the data types
consistent.
Inputs:
- **x** (Union[Tensor, number.Number, bool]) - The first input is a number.Number or
a bool or a tensor whose data type is
`number <https://www.mindspore.cn/docs/en/r2.3.0rc2/api_python/mindspore.html#mindspore.dtype>`_ or
`bool_ <https://www.mindspore.cn/docs/en/r2.3.0rc2/api_python/mindspore.html#mindspore.dtype>`_.
- **y** (Union[Tensor, number.Number, bool]) - The second input is a number.Number or
a bool or a tensor whose data type is
`number <https://www.mindspore.cn/docs/en/r2.3.0rc2/api_python/mindspore.html#mindspore.dtype>`_ or
`bool_ <https://www.mindspore.cn/docs/en/r2.3.0rc2/api_python/mindspore.html#mindspore.dtype>`_.
Outputs:
Tensor, the shape is the same as the one of the input `x` , `y` after broadcasting,
and the data type is the one with higher precision or higher digits among the two inputs.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> # case 1 :has same data type and shape of the two inputs
>>> x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.float32)
>>> y = Tensor(np.array([3.0, 2.0, 3.0]), mindspore.float32)
>>> div = ops.Div()
>>> output = div(x, y)
>>> print(output)
[-1.3333334 2.5 2. ]
>>> # case 2 : different data type and shape of the two inputs
>>> x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.float32)
>>> y = Tensor(2, mindspore.int32)
>>> output = div(x, y)
>>> print(output)
[-2. 2.5 3.]
>>> print(output.dtype)
Float32
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, y):
return _convert_stub(pyboost_div(self, [x, y]))
div_op=Div()
class DivMod(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('x', dtype=sig.sig_dtype.T),
sig.make_sig('y', dtype=sig.sig_dtype.T),
sig.make_sig('rounding_mode', dtype=sig.sig_dtype.T1, default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, y, rounding_mode=None):
return _convert_stub(pyboost_divmod(self, [x, y, rounding_mode if rounding_mode is None else str_to_enum('DivMod', 'rounding_mode', rounding_mode)]))
divmod_op=DivMod()
class Dot(Primitive):
r"""
.. code-block::
prim = ops.Dot()
out = prim(input, other)
is equivalent to
.. code-block::
ops.dot(input, other)
Refer to :func:`mindspore.ops.dot` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
return _convert_stub(pyboost_dot(self, [input, other]))
dot_op=Dot()
class DropoutDoMaskExt(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, mask, p):
return _convert_stub(pyboost_dropout_do_mask_ext(self, [input, mask, p]))
dropout_do_mask_ext_op=DropoutDoMaskExt()
class DropoutExt(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('p', default=0.5),
sig.make_sig('seed', default=0),
sig.make_sig('offset', default=0),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_hidden", True)
def __call__(self, input, p=0.5, seed=0, offset=0):
return _convert_stub(pyboost_dropout_ext(self, [input, p, seed, offset]))
dropout_ext_op=DropoutExt()
class DropoutGenMaskExt(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_hidden", True)
def __call__(self, shape, p, seed, offset, dtype):
return _convert_stub(pyboost_dropout_gen_mask_ext(self, [shape, p, seed, offset, dtype_to_type_id('DropoutGenMaskExt', 'dtype', dtype)]))
dropout_gen_mask_ext_op=DropoutGenMaskExt()
class DropoutGradExt(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, mask, p):
return _convert_stub(pyboost_dropout_grad_ext(self, [x, mask, p]))
dropout_grad_ext_op=DropoutGradExt()
class Dropout(Primitive):
r"""
During training, randomly zeroes some of the elements of the input tensor
with probability :math:`1 - keep\_prob` from a Bernoulli distribution. It plays the
role of reducing neuron correlation and avoiding overfitting.
Refer to :func:`mindspore.ops.dropout` for more details.
Args:
keep_prob (float, optional): The keep rate, between 0 and 1, e.g. keep_prob = 0.9,
means dropping out 10% of input units. Default: ``0.5`` .
Seed0 (int, optional): Seed0 value for random number generation. Default: ``0`` .
Seed1 (int, optional): Seed1 value for random number generation. Default: ``0`` .
Inputs:
- **x** (Tensor) - The input Tensor of shape :math:`(*, N)`, with data type of float16, float32 or float64.
Outputs:
- **output** (Tensor) - With the same shape and data type as `x`.
- **mask** (Tensor) - The mask applied to `x`.
- On GPU and CPU, `mask` has the same shape and data type as `x`.
- On Ascend, to achieve a better performance, it is denoted as a 1-D Tensor
with Uint8 data type. It has shape :math:`(byte\_counts, )` where :math:`byte\_counts` is the
number of bytes needed to mask the input `x`, :math:`byte\_counts` is calculated using the
following formula:
.. math::
byte\_counts = \text{ceil}(\text{cumprod}(x.shape) / 128) * 16
If shape of `x` is :math:`(2, 3, 4, 5, 6)`, the shape of `mask` will be :math:`(96, )`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> dropout = ops.Dropout(keep_prob=0.5)
>>> x = Tensor(np.ones([1, 2, 3, 4, 5]), mindspore.float32)
>>> output, mask = dropout(x)
>>> print(output.shape, mask.shape, mask.dtype)
(1, 2, 3, 4, 5) (16,) UInt8
"""
@prim_arg_register
def __init__(self, keep_prob=0.5, Seed0=0, Seed1=0):
self._set_prim_arg("keep_prob", keep_prob)
self._set_prim_arg("Seed0", Seed0)
self._set_prim_arg("Seed1", Seed1)
self.add_prim_attr("side_effect_hidden", True)
def __call__(self, x):
return super().__call__(x, self.keep_prob, self.Seed0, self.Seed1)
class Eig(Primitive):
r"""
Computes the eigenvalues and eigenvectors of a square matrix(batch square matrices).
Args:
compute_v (bool, optional): If ``True`` , compute both eigenvalues and eigenvectors;
If `False`, just eigenvalues will be computed. Default: ``False`` .
Inputs:
- **x** (Tensor) - Square matrices of shape :math:`(*, N, N)`, with float32, float64, complex64 or
complex128 data type.
Outputs:
- **eigen_values** (Tensor) - Shape :math:`(*, N)`. Each inner most vector represents eigenvalues of
the corresponding matrix. The eigenvalues are not necessarily ordered.
- **eigen_vectors** (Tensor) - If `compute_v` is `False`, it's an empty tensor. Otherwise, this tensor has
shape :math:`(*, N, N)`, whose columns represent normalized (unit length) eigenvectors of corresponding
eigenvalues.
Raises:
TypeError: If `compute_v` is not a bool.
TypeError: If dtype of `x` is not one of: float64, float32, complex64 or complex128.
TypeError: If `x` is not a Tensor.
ValueError: If `x` is not square (or a batch of square matrices).
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([[1.0, 0.0], [0.0, 2.0]]), mindspore.float32)
>>> u, v = ops.Eig(True)(x)
>>> print(u)
[1.+0.j 2.+0.j]
>>> print(v)
[[1.+0.j 0.+0.j]
[0.+0.j 1.+0.j]]
"""
@prim_arg_register
def __init__(self, compute_v=False):
self._set_prim_arg("compute_v", compute_v)
def __call__(self, x):
return super().__call__(x, self.compute_v)
class EluGrad(Primitive):
r"""
Gradients of Elu operation.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, dout, out):
return super().__call__(dout, out)
elu_grad_op=EluGrad()
class Elu(Primitive):
r"""
.. code-block::
prim = ops.Elu(alpha)
out = prim(input_x)
is equivalent to
.. code-block::
ops.elu(input_x, alpha)
Refer to :func:`mindspore.ops.elu` for more details.
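A hand-written illustrative example (not auto-generated; the commented output values are approximate):
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.array([-1.0, 0.0, 2.0]), mindspore.float32)
>>> output = ops.Elu()(input_x)
>>> # output is approximately [-0.6321206, 0.0, 2.0]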
"""
@prim_arg_register
def __init__(self, alpha=1.0):
self._set_prim_arg("alpha", alpha)
def __call__(self, input_x):
return super().__call__(input_x, self.alpha)
class EmbeddingDenseBackward(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('grad'),
sig.make_sig('indices'),
sig.make_sig('num_weights'),
sig.make_sig('padding_idx', default=None),
sig.make_sig('scale_grad_by_freq', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, grad, indices, num_weights, padding_idx=None, scale_grad_by_freq=False):
return _convert_stub(pyboost_embedding_dense_backward(self, [grad, indices, num_weights, padding_idx, scale_grad_by_freq]))
embedding_dense_backward_op=EmbeddingDenseBackward()
class Embedding(Primitive):
r"""
Retrieve the word embeddings in `weight` using indices specified in `input`.
.. warning::
On Ascend, the behavior is unpredictable when the value of `input` is invalid.
Args:
input (Tensor): The indices used to lookup in the `weight`. The data type must be mindspore.int32 or mindspore.int64,
and the value should be in range `[0, weight.shape[0])`.
weight (Parameter): The matrix where to lookup from. The shape must be 2D.
padding_idx (int, optional): If the value is not None, the corresponding row of `weight` will not be updated in training.
The value should be in range `[-weight.shape[0], weight.shape[0])` if it's not ``None``. Default ``None``.
max_norm (float, optional): If not None, first compute the p-norm of the `weight` rows specified by `input`, where p is given by `norm_type`;
if the result is larger than `max_norm`, update the `weight` in-place with :math:`\frac{max\_norm}{result+1e^{-7}}`. Default ``None``.
norm_type (float, optional): Indicates the value of p in p-norm. Default ``2.0``.
scale_grad_by_freq (bool, optional): If ``True`` the gradients will be scaled by the inverse of frequency of the index in `input`. Default ``False``.
Returns:
Tensor, has the same data type as `weight`, the shape is :math:`(*input.shape, weight.shape[1])`.
Raises:
ValueError: If `padding_idx` is out of valid range.
ValueError: If the shape of `weight` is invalid.
TypeError: If `weight` is not a :class:`mindspore.Parameter`.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, Parameter, ops
>>> input = Tensor([[1, 0, 1, 1], [0, 0, 1, 0]])
>>> weight = Parameter(np.random.randn(3, 3).astype(np.float32))
>>> output = ops.auto_generate.Embedding()(input, weight, max_norm=0.4)
>>> print(output)
[[[ 5.49015924e-02, 3.47811311e-01, -1.89771220e-01],
[ 2.09307984e-01, -2.24846993e-02, 3.40124398e-01],
[ 5.49015924e-02, 3.47811311e-01, -1.89771220e-01],
[ 5.49015924e-02, 3.47811311e-01, -1.89771220e-01]],
[[ 2.09307984e-01, -2.24846993e-02, 3.40124398e-01],
[ 2.09307984e-01, -2.24846993e-02, 3.40124398e-01],
[ 5.49015924e-02, 3.47811311e-01, -1.89771220e-01],
[ 2.09307984e-01, -2.24846993e-02, 3.40124398e-01]]]
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('weight', sig.sig_rw.RW_WRITE),
sig.make_sig('padding_idx', default=None),
sig.make_sig('max_norm', default=None),
sig.make_sig('norm_type', default=2.0),
sig.make_sig('scale_grad_by_freq', default=False),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, weight, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False):
return _convert_stub(pyboost_embedding(self, [input, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq]))
embedding_op=Embedding()
class Equal(Primitive):
r"""
.. code-block::
prim = ops.Equal()
out = prim(input, other)
is equivalent to
.. code-block::
ops.equal(input, other)
Refer to :func:`mindspore.ops.equal` for more details.
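A hand-written illustrative example (not auto-generated):
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> y = Tensor(np.array([1, 2, 4]), mindspore.int32)
>>> output = ops.Equal()(x, y)
>>> print(output)
[ True True False]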
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
return _convert_stub(pyboost_equal(self, [input, other]))
equal_op=Equal()
class Erf(Primitive):
r"""
.. code-block::
prim = ops.Erf()
out = prim(input)
is equivalent to
.. code-block::
ops.erf(input)
Refer to :func:`mindspore.ops.erf` for more details.
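A hand-written illustrative example (not auto-generated; the commented output values are approximate):
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([-1.0, 0.0, 1.0]), mindspore.float32)
>>> output = ops.Erf()(x)
>>> # output is approximately [-0.8427008, 0.0, 0.8427008]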
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return _convert_stub(pyboost_erf(self, [input]))
erf_op=Erf()
class Erfc(Primitive):
r"""
.. code-block::
prim = ops.Erfc()
out = prim(input)
is equivalent to
.. code-block::
ops.erfc(input)
Refer to :func:`mindspore.ops.erfc` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
erfc_op=Erfc()
class Erfinv(Primitive):
r"""
.. code-block::
prim = ops.Erfinv()
out = prim(input)
is equivalent to
.. code-block::
ops.erfinv(input)
Refer to :func:`mindspore.ops.erfinv` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return _convert_stub(pyboost_erfinv(self, [input]))
erfinv_op=Erfinv()
class Exp(Primitive):
r"""
.. code-block::
prim = ops.Exp()
out = prim(input)
is equivalent to
.. code-block::
ops.exp(input)
Refer to :func:`mindspore.ops.exp` for more details.
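A hand-written illustrative example (not auto-generated; the commented output values are approximate):
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([0.0, 1.0, 2.0]), mindspore.float32)
>>> output = ops.Exp()(x)
>>> # output is approximately [1.0, 2.7182817, 7.389056]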
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return _convert_stub(pyboost_exp(self, [input]))
exp_op=Exp()
class ExpandDims(Primitive):
r"""
.. code-block::
prim = ops.ExpandDims()
out = prim(input_x, axis)
is equivalent to
.. code-block::
ops.expand_dims(input_x, axis)
Refer to :func:`mindspore.ops.expand_dims` for more details.
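A hand-written illustrative example (not auto-generated):
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
>>> output = ops.ExpandDims()(input_x, 0)
>>> print(output.shape)
(1, 2, 2)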
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input_x, axis):
return super().__call__(input_x, axis)
expand_dims_op=ExpandDims()
class Expm1(Primitive):
r"""
.. code-block::
prim = ops.Expm1()
out = prim(input)
is equivalent to
.. code-block::
ops.expm1(input)
Refer to :func:`mindspore.ops.expm1` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
expm1_op=Expm1()
class ExtractImagePatches(Primitive):
r"""
.. code-block::
prim = ops.ExtractImagePatches(ksizes, strides, rates, padding)
out = prim(input_x)
is equivalent to
.. code-block::
ops.extract_image_patches(input_x, ksizes, strides, rates, padding)
Refer to :func:`mindspore.ops.extract_image_patches` for more details.
"""
@prim_arg_register
def __init__(self, ksizes, strides, rates, padding='VALID'):
self._set_prim_arg_with_handler("ksizes", type_it('ExtractImagePatches', 'ksizes', ksizes, OpDtype.DT_LIST_INT, OpDtype.DT_TUPLE_INT), to_kernel_size)
self._set_prim_arg_with_handler("strides", type_it('ExtractImagePatches', 'strides', strides, OpDtype.DT_LIST_INT, OpDtype.DT_TUPLE_INT), to_strides)
self._set_prim_arg_with_handler("rates", type_it('ExtractImagePatches', 'rates', rates, OpDtype.DT_LIST_INT, OpDtype.DT_TUPLE_INT), to_rates)
self._set_prim_arg_with_handler("padding", padding, str_to_enum)
def __call__(self, input_x):
return super().__call__(input_x, self.ksizes, self.strides, self.rates, self.padding)
class Eye(Primitive):
r"""
Creates a tensor with ones on the diagonal and zeros in the rest.
Refer to :func:`mindspore.ops.eye` for more details.
Note:
The data type of returned tensor can be float16, float32, int8, int16, int32, int64, uint8 or bool on Ascend platforms.
Inputs:
- **n** (int) - The number of rows of returned tensor. Constant value only.
- **m** (int) - The number of columns of returned tensor. Constant value only.
- **t** (mindspore.dtype) - MindSpore's dtype, the data type of the returned tensor.
Default: ``None`` , the data type of the returned tensor is mindspore.float32.
Outputs:
Tensor, a tensor with ones on the diagonal and the rest of elements are zero. The shape of `output` depends on
the user's Inputs `n` and `m`. And the data type depends on Inputs `t`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import ops
>>> eye = ops.Eye()
>>> output = eye(2, 2, mindspore.int32)
>>> print(output)
[[1 0]
[0 1]]
>>> print(output.dtype)
Int32
>>> output = eye(1, 2, mindspore.float32)
>>> print(output)
[[1. 0.]]
>>> print(output.dtype)
Float32
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, n, m, dtype):
return super().__call__(n, m, dtype_to_type_id('Eye', 'dtype', dtype))
eye_op=Eye()
class FastGeLUGrad(Primitive):
r"""
Gradients of FastGeLU operation.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, dy, x):
return super().__call__(dy, x)
fast_gelu_grad_op=FastGeLUGrad()
class FastGeLU(Primitive):
r"""
.. code-block::
prim = ops.FastGeLU()
out = prim(x)
is equivalent to
.. code-block::
ops.fast_gelu(x)
Refer to :func:`mindspore.ops.fast_gelu` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x):
return super().__call__(x)
fast_gelu_op=FastGeLU()
class FFNExt(Primitive):
r"""
.. code-block::
prim = ops.FFNExt(activation, inner_precise)
out = prim(x, weight1, weight2, expertTokens, bias1, bias2, scale, offset, deqScale1, deqScale2, antiquant_scale1, antiquant_scale2, antiquant_offset1, antiquant_offset2)
is equivalent to
.. code-block::
ops.ffn_ext(x, weight1, weight2, expertTokens, bias1, bias2, scale, offset, deqScale1, deqScale2, antiquant_scale1, antiquant_scale2, antiquant_offset1, antiquant_offset2, activation, inner_precise)
Refer to :func:`mindspore.ops.ffn_ext` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('weight1'),
sig.make_sig('weight2'),
sig.make_sig('expertTokens', default=None),
sig.make_sig('bias1', default=None),
sig.make_sig('bias2', default=None),
sig.make_sig('scale', default=None),
sig.make_sig('offset', default=None),
sig.make_sig('deqScale1', default=None),
sig.make_sig('deqScale2', default=None),
sig.make_sig('antiquant_scale1', default=None),
sig.make_sig('antiquant_scale2', default=None),
sig.make_sig('antiquant_offset1', default=None),
sig.make_sig('antiquant_offset2', default=None),
)
@prim_arg_register
def __init__(self, activation='fastgelu', inner_precise=0):
self._set_prim_arg_with_handler("activation", activation, str_to_enum)
self._set_prim_arg("inner_precise", inner_precise)
def __call__(self, x, weight1, weight2, expertTokens=None, bias1=None, bias2=None, scale=None, offset=None, deqScale1=None, deqScale2=None, antiquant_scale1=None, antiquant_scale2=None, antiquant_offset1=None, antiquant_offset2=None):
return _convert_stub(pyboost_ffn_ext(self, [x, weight1, weight2, expertTokens, bias1, bias2, scale, offset, deqScale1, deqScale2, antiquant_scale1, antiquant_scale2, antiquant_offset1, antiquant_offset2, self.activation, self.inner_precise]))
class FFT2(Primitive):
r"""
.. code-block::
prim = ops.FFT2()
out = prim(input, s, dim, norm)
is equivalent to
.. code-block::
ops.fft2(input, s, dim, norm)
Refer to :func:`mindspore.ops.fft2` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('s', default=None),
sig.make_sig('dim', default=(-2, -1)),
sig.make_sig('norm', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, s=None, dim=(-2, -1), norm=None):
return super().__call__(input, s, dim, norm if norm is None else str_to_enum('FFT2', 'norm', norm))
fft2_op=FFT2()
class FFT(Primitive):
r"""
.. code-block::
prim = ops.FFT()
out = prim(input, n, dim, norm)
is equivalent to
.. code-block::
ops.fft(input, n, dim, norm)
Refer to :func:`mindspore.ops.fft` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('n', default=None),
sig.make_sig('dim', default=-1),
sig.make_sig('norm', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, n=None, dim=-1, norm=None):
return super().__call__(input, n, dim, norm if norm is None else str_to_enum('FFT', 'norm', norm))
fft_op=FFT()
class FFTShapeCopy(Primitive):
r"""
Truncate or zero-fill the gradient of an fft operation.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, dout, shape):
return super().__call__(dout, shape)
fft_shapecopy_op=FFTShapeCopy()
class FFTWithSize(Primitive):
r"""
Fourier transform, which can be adjusted by parameters to perform FFT/IFFT/RFFT/IRFFT.
For fft, it computes the following expression:
.. math::
X[\omega_1, \dots, \omega_d] =
\sum_{n_1=0}^{N_1-1} \dots \sum_{n_d=0}^{N_d-1} x[n_1, \dots, n_d]
e^{-j\ 2 \pi \sum_{i=0}^d \frac{\omega_i n_i}{N_i}},
where :math:`d` = `signal_ndim` is the number of dimensions of the
signal, and :math:`N_i` is the size of signal dimension :math:`i`.
For ifft, it computes the following expression:
.. math::
X[\omega_1, \dots, \omega_d] =
\frac{1}{\prod_{i=1}^d N_i} \sum_{n_1=0}^{N_1-1} \dots \sum_{n_d=0}^{N_d-1} x[n_1, \dots, n_d]
e^{\ j\ 2 \pi \sum_{i=0}^d \frac{\omega_i n_i}{N_i}},
where :math:`d` = `signal_ndim` is the number of dimensions of the
signal, and :math:`N_i` is the size of signal dimension :math:`i`.
Note:
- FFT/IFFT requires complex64 or complex128 inputs, return complex64 or complex128 outputs.
- RFFT requires bool, uint8, int8, int16, int32, int64, float32 and float64 inputs,
return complex64 or complex128 outputs.
- IRFFT requires complex64 or complex128 inputs, return float32 or float64 outputs.
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
signal_ndim (int): The number of dimensions in each signal, this controls how many dimensions
of the fourier transform are realized, can only be 1, 2 or 3.
inverse (bool): Whether it is the inverse transformation, used to select from FFT and RFFT or IFFT and IRFFT.
- when set to ``True``: IFFT and IRFFT.
- when set to ``False``: FFT and RFFT.
real (bool): Whether it is the real transformation, combines with `inverse` to select a specific
transformation mode:
- `inverse` is ``False`` , `real` is ``False`` : corresponds to FFT.
- `inverse` is ``True`` , `real` is ``False`` : corresponds to IFFT.
- `inverse` is ``False`` , `real` is ``True`` : corresponds to RFFT.
- `inverse` is ``True`` , `real` is ``True`` : corresponds to IRFFT.
norm (str, optional): The normalization, optional values: [ ``"backward"`` , ``"forward"`` , ``"ortho"`` ].
Default value: ``"backward"`` .
- ``"backward"`` has the direct transforms unscaled and the inverse transforms scaled by :math:`1/n`,
where n is the input x's element numbers.
- ``"ortho"`` has both direct and inverse transforms are scaled by :math:`1/\sqrt n`.
- ``"forward"`` has the direct transforms scaled by :math:`1/n` and the inverse transforms unscaled.
onesided (bool, optional): Controls whether the input is halved to avoid redundancy. Default: ``True`` .
signal_sizes (tuple, optional): Size of the original signal (the signal before rfft, without the batch dimension).
This parameter is required only in IRFFT mode with `onesided` set to ``True``, and the following conditions must be
satisfied. Default: ``()`` .
- The length of `signal_sizes` is equal to the signal_ndim of the IRFFT:
:math:`len(signal\_sizes)=signal\_ndim`.
- The last dimension of `signal_sizes` divided by 2 is equal to
the last dimension of the IRFFT input: :math:`signal\_size[-1]/2+1=x.shape[-1]`.
- `signal_sizes` has exactly the same dimensions as the input shape
except for the last dimension: :math:`signal\_sizes[:-1]=x.shape[:-1]`.
Inputs:
- **x** (Tensor) - The dimension of the input tensor must be greater than or equal to signal_ndim.
Outputs:
A tensor containing the complex-to-complex, real-to-complex or complex-to-real Fourier transform result.
Raises:
TypeError: If the input type of FFT/IFFT/IRFFT is not one of: complex64, complex128.
TypeError: If the input type is not Tensor.
ValueError: If `x` dimension is less than signal_ndim.
ValueError: If signal_ndim is greater than 3 or less than 1.
ValueError: If norm is none of "backward", "forward" or "ortho".
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> # case FFT: signal_ndim: 1, inverse: False, real: False.
>>> fft_in = Tensor(np.array([2, 1, 2]), mindspore.complex64)
>>> fft_net = ops.FFTWithSize(signal_ndim=1, inverse=False, real=False)
>>> fft_output = fft_net(fft_in)
>>> print(fft_output)
[5. +0.j 0.5 +0.86602545j 0.50000006-0.8660255j ]
>>> # case IFFT: signal_ndim: 1, inverse: True, real: False.
>>> ifft_in = fft_output
>>> ifft_net = ops.FFTWithSize(signal_ndim=1, inverse=True, real=False)
>>> ifft_output = ifft_net(ifft_in)
>>> print(ifft_output)
[2. -1.9868216e-08j 0.99999994+0.0000000e+00j
1.9999999 +7.9472862e-08j]
>>> # case RFFT2D: signal_ndim: 2, inverse: False, real: True.
>>> rfft_in = Tensor(np.array([[2, 1, 2], [3, 1, 6]]), mindspore.float32)
>>> rfft_net = ops.FFTWithSize(signal_ndim=2, inverse=False, real=True)
>>> rfft_output = rfft_net(rfft_in)
>>> print(rfft_output)
[[ 1.5000000e+01+1.1920929e-07j -2.3841858e-07+5.1961522e+00j]
[-5.0000000e+00-2.9802322e-08j 9.9999988e-01-3.4641016e+00j]]
>>> # case IRFFT2D: signal_ndim: 2, inverse: True, real: True.
>>> irfft_in = rfft_output
>>> irfft_net = ops.FFTWithSize(signal_ndim=2, inverse=True, real=True, signal_sizes=rfft_in.shape)
>>> irfft_output = irfft_net(irfft_in)
>>> print(irfft_output)
[[2. 1. 2. ]
[3. 0.99999994 5.9999995 ]]
"""
@prim_arg_register
def __init__(self, signal_ndim, inverse, real, norm='backward', onesided=True, signal_sizes=()):
self._set_prim_arg("signal_ndim", signal_ndim)
self._set_prim_arg("inverse", inverse)
self._set_prim_arg("real", real)
self._set_prim_arg_with_handler("norm", norm, str_to_enum)
self._set_prim_arg("onesided", onesided)
self._set_prim_arg("signal_sizes", signal_sizes)
def __call__(self, x):
return super().__call__(x, self.signal_ndim, self.inverse, self.real, self.norm, self.onesided, self.signal_sizes)
class FFTN(Primitive):
r"""
.. code-block::
prim = ops.FFTN()
out = prim(input, s, dim, norm)
is equivalent to
.. code-block::
ops.fftn(input, s, dim, norm)
Refer to :func:`mindspore.ops.fftn` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('s', default=None),
sig.make_sig('dim', default=None),
sig.make_sig('norm', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, s=None, dim=None, norm=None):
return super().__call__(input, s, dim, norm if norm is None else str_to_enum('FFTN', 'norm', norm))
fftn_op=FFTN()
class FFTShift(Primitive):
r"""
.. code-block::
prim = ops.FFTShift()
out = prim(input, dim)
is equivalent to
.. code-block::
ops.fftshift(input, dim)
Refer to :func:`mindspore.ops.fftshift` for more details.
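A hand-written illustrative example (not auto-generated; it uses the functional interface referenced above):
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.arange(10))
>>> output = ops.fftshift(x)
>>> print(output)
[5 6 7 8 9 0 1 2 3 4]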
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('dim', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim=None):
return super().__call__(input, dim)
fftshift_op=FFTShift()
class FillScalar(Primitive):
r"""
Create a Tensor of the specified shape and fill it with the specified scalar value.
Args:
size (Union(tuple[int], list[int])): The specified shape of output tensor.
fill_value (number.Number): Value to fill the returned tensor. Complex numbers are not supported for now.
Keyword Args:
dtype (mindspore.dtype): The specified type of output tensor. `bool_` and `number` are supported, for
details, please refer to :class:`mindspore.dtype` . Default: ``None`` .
Returns:
Tensor.
Raises:
TypeError: If `size` is not a tuple or list.
ValueError: If an element in `size` is less than 0.
Supported Platforms:
``Ascend``
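A hand-written illustrative sketch (not auto-generated); it assumes the primitive is instantiated directly from this module and run on an Ascend backend:
Examples:
>>> import mindspore
>>> fill = FillScalar()
>>> output = fill((2, 3), 5, mindspore.float32)
>>> print(output)
[[5. 5. 5.]
[5. 5. 5.]]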
"""
__mindspore_signature__ = (
sig.make_sig('size'),
sig.make_sig('fill_value'),
sig.make_sig('dtype', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, size, fill_value, dtype=None):
return _convert_stub(pyboost_fill_scalar(self, [size, fill_value, dtype if dtype is None else dtype_to_type_id('FillScalar', 'dtype', dtype)]))
fill_scalar_op=FillScalar()
class FillTensor(Primitive):
r"""
Create a Tensor of the specified shape and fill it with the specified tensor value.
Args:
size (Union(tuple[int], list[int])): The specified shape of output tensor.
fill_value (Tensor): Value to fill the returned tensor. Complex numbers are not supported for now. Must be
scalar Tensor or 1-D Tensor with shape of [1].
Keyword Args:
dtype (mindspore.dtype): The specified type of output tensor. `bool_` and `number` are supported, for
details, please refer to :class:`mindspore.dtype` . Default: ``None`` .
Returns:
Tensor.
Raises:
TypeError: If `size` is not a tuple or list.
ValueError: If an element in `size` is less than 0.
Supported Platforms:
``Ascend``
"""
__mindspore_signature__ = (
sig.make_sig('size'),
sig.make_sig('fill_value'),
sig.make_sig('dtype', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, size, fill_value, dtype=None):
return _convert_stub(pyboost_fill_tensor(self, [size, fill_value, dtype if dtype is None else dtype_to_type_id('FillTensor', 'dtype', dtype)]))
fill_tensor_op=FillTensor()
class FlashAttentionScoreGrad(Primitive):
r"""
Calculates the gradient of FlashAttentionScore operation.
.. warning::
This is an experimental API that is subject to change or deletion.
Supported Platforms:
``Ascend``
"""
__mindspore_signature__ = (
sig.make_sig('query'),
sig.make_sig('key'),
sig.make_sig('value'),
sig.make_sig('dy'),
sig.make_sig('pse_shift', default=None),
sig.make_sig('drop_mask', default=None),
sig.make_sig('padding_mask', default=None),
sig.make_sig('atten_mask', default=None),
sig.make_sig('softmax_max', default=None),
sig.make_sig('softmax_sum', default=None),
sig.make_sig('softmax_in', default=None),
sig.make_sig('attention_in', default=None),
sig.make_sig('prefix', default=None),
sig.make_sig('actual_seq_qlen', default=None),
sig.make_sig('actual_seq_kvlen', default=None),
)
@prim_arg_register
def __init__(self, head_num, keep_prob=1.0, scale_value=1.0, pre_tokens=65536, next_tokens=65536, inner_precise=1, input_layout='BSH', sparse_mode=0):
self._set_prim_arg("head_num", head_num)
self._set_prim_arg("keep_prob", keep_prob)
self._set_prim_arg("scale_value", scale_value)
self._set_prim_arg("pre_tokens", pre_tokens)
self._set_prim_arg("next_tokens", next_tokens)
self._set_prim_arg("inner_precise", inner_precise)
self._set_prim_arg_with_handler("input_layout", input_layout, str_to_enum)
self._set_prim_arg("sparse_mode", sparse_mode)
def __call__(self, query, key, value, dy, pse_shift=None, drop_mask=None, padding_mask=None, atten_mask=None, softmax_max=None, softmax_sum=None, softmax_in=None, attention_in=None, prefix=None, actual_seq_qlen=None, actual_seq_kvlen=None):
return _convert_stub(pyboost_flash_attention_score_grad(self, [query, key, value, dy, pse_shift, drop_mask, padding_mask, atten_mask, softmax_max, softmax_sum, softmax_in, attention_in, prefix, actual_seq_qlen, actual_seq_kvlen, self.head_num, self.keep_prob, self.scale_value, self.pre_tokens, self.next_tokens, self.inner_precise, self.input_layout, self.sparse_mode]))
class FlashAttentionScore(Primitive):
r"""
FlashAttentionScore.
.. math::
\begin{array}{ll} \\
y = Dropout(Softmax(Mask(scale\_value \cdot (real\_shift + query \cdot key), attn\_mask), -1), keep\_prob) \\
\cdot value \\
\end{array}
B -- Batch size. Value range 1 to 2k.
S1 -- Sequence length of query. Value range 1 to 512k.
S2 -- Sequence length of key and value. Value range 1 to 512k.
N1 -- Num heads of query. Value range 1 to 256.
N2 -- Num heads of key and value, and N2 must be a factor of N1.
D -- Head size. The value must be a multiple of 16, with a maximum of 512.
H1 -- Hidden size of query, which equals to N1 * D.
H2 -- Hidden size of key and value, which equals to N2 * D.
.. warning::
This is an experimental API that is subject to change or deletion. Only supported on the Atlas A2 training series.
Args:
head_num (int): The head num of query, equal to N1.
keep_prob (float): The keep probability of dropout. Value range is (0.0, 1.0]. Default: 1.0. When keep_prob
is 1.0, drop_mask should be none.
scale_value (float): The scale factor of score. Generally, the value is 1.0 / (D ** 0.5). Default: 1.0.
pre_tokens (int): Parameter for sparse computation, represents how many tokens are counted forward.
When sparse_mode is set to 1, 2, 3, or 5, this parameter does not take effect. Default: 2147483647.
next_tokens (int): Parameter for sparse computation, represents how many tokens are counted backward.
When sparse_mode is set to 1, 2, 3, or 5, this parameter does not take effect. Default: 2147483647.
The value of pre_tokens corresponds to S1, and the value of next_tokens corresponds to S2. They define the
valid area on the attn_mask matrix. It must ensure that the band is not empty.
The following values are not allowed:
- pre_tokens < 0 and next_tokens < 0.
- (pre_tokens < 0 and next_tokens >= 0) and (next_tokens < abs(pre_tokens) or abs(pre_tokens) >= S2).
- (pre_tokens >= 0 and next_tokens < 0) and (abs(next_tokens) > pre_tokens or abs(next_tokens) >= S1).
inner_precise (int): The parameter is reserved and not implemented yet. Default: 0.
input_layout (str): Specifies the layout of input `query`, key and value. The value can be "BSH", "BNSD", "SBH",
"BSND" or "TND". "TND" is an experimental format. Default: "BSH".
When input_layout is "TND", the following restrictions must be met.
There are two lists that represent the length of the input sequence: list_seq_q and list_seq_k. Each
value in the list indicates the length of the sequence in the batch. For example, list_seq_q = [4, 2, 6],
list_seq_k = [10, 3, 9]. Each element of the lists indicates an S value. T1 is sum(list_seq_q) = 12, T2 is
sum(list_seq_k) = 22.
max_seqlen_q = max(list_seq_q), max_seqlen_k = max(list_seq_k).
qk_pointer = sum(list_seq_q * list_seq_k), which is the sum of the element-wise products.
- The lengths of the two lists are the same, and the size of each list is the batch size, which is less than or equal to 1024.
- When input_layout is "TND", actual_seq_qlen and actual_seq_kvlen must not be none.
Otherwise, they are none.
- The actual_seq_qlen and actual_seq_kvlen are the cumulative sums of the query and key/value sequence
lengths, respectively, so they must be non-decreasing.
- If real_shift is not none, list_seq_q and list_seq_k must be the same. The maximum value of list_seq_q and
list_seq_k is greater than 1024. Real_shift should be `(B, N1, 1024, S2)` and `(1, N1, 1024, S2)`, and
S2 is equal to max_seqlen_k.
- Attn mask must be a lower triangular matrix, so sparse_mode should be 2 or 3. The shape of attn_mask
should be `(2048, 2048)`.
- The shape of drop_mask is (qk_pointer * N1 // 8,).
- Prefix is none.
- Next_tokens is 0, and pre_tokens is not less than max_seqlen_q.
- When sparse_mode is 3, S1 of each batch should be less than or equal to S2.
- 0 should not exist in list_seq_k.
sparse_mode (int): Indicates sparse mode. Default 0.
- 0: Indicates the defaultMask mode. If attn_mask is not passed, the mask operation is not performed,
and preTokens and nextTokens (internally assigned as INT_MAX) are ignored. If passed in, the full attn_mask
matrix (S1 * S2) needs to be passed in, indicating that the part between preTokens and nextTokens needs to
be calculated.
- 1: Represents allMask, that is, passing in the complete attn_mask matrix.
- 2: Representing the leftUpCausal mode corresponds to the lower triangle scenario divided by the left
vertex, and the optimized attn_mask matrix (2048*2048) is required.
- 3: Representing the rightDownCausal mode corresponds to the lower triangle scenario divided by the lower
right vertex, and the optimized attn_mask matrix (2048*2048) is required.
- 4: Represents the band scenario, that is, the part between counting preTokens and nextTokens, and the
optimized attn_mask matrix (2048*2048) is required.
- 5: Represents the prefix scenario, that is, on the basis of rightDownCausal, a matrix with length S1 and
width N is added to the left side. The value of N is obtained by the new input prefix, and the N value of
each Batch axis is different, not implemented yet.
- 6: Represents the global scenario, not implemented yet.
- 7: Represents the dilated scenario, not implemented yet.
- 8: Represents the block_local scenario, not implemented yet.
Inputs:
- **query** (Tensor[float16, bfloat16]) - The query tensor.
Input tensor of shape :math:`(B, S1, H1)`, `(B, N1, S1, D)`, `(S1, B, H1)`, `(B, S1, N1, D)` or `(T1, N1, D)`.
- **key** (Tensor[float16, bfloat16]) - The key tensor.
Input tensor of shape :math:`(B, S2, H2)`, `(B, N2, S2, D)`, `(S2, B, H2)`, `(B, S2, N2, D)` or `(T2, N2, D)`.
- **value** (Tensor[float16, bfloat16]) - The value tensor.
Input tensor of shape :math:`(B, S2, H2)`, `(B, N2, S2, D)`, `(S2, B, H2)`, `(B, S2, N2, D)` or `(T2, N2, D)`.
The key and value have the same shape.
- **real_shift** (Union[Tensor[float16, bfloat16], None]) - Also known as pse. The position embedding code. If S
is greater than 1024 and the mask of the lower triangle is used, only the last 1024 rows of the lower
triangle are passed in for memory optimization. Input tensor of shape :math:`(B, N1, S1, S2)`,
`(1, N1, S1, S2)`, `(B, N1, 1024, S2)`, `(1, N1, 1024, S2)`.
- ALiBi scenario: real_shift must meet the ALiBi rule, and sparse_mode is 2 or 3 for the lower triangle.
In this scenario, real_shift is `(B, N1, 1024, S2)`, `(1, N1, 1024, S2)`.
- Non-ALiBi scenario: real_shift is `(B, N1, S1, S2)`, `(1, N1, S1, S2)`.
The shape of `real_shift` should be `(B, N1, 1024, S2)` and `(1, N1, 1024, S2)` when input_layout is `TND`.
- **drop_mask** (Union[Tensor[uint8], None]) - The dropout mask tensor.
Input tensor of shape :math:`(B, N1, S1, S2 // 8)` or None. S2 is a multiple of 8 when not None.
- **padding_mask** (None) - Reserved parameter. Not implemented yet.
- **attn_mask** (Union[Tensor[uint8], Tensor[bool], None]) - The attention mask tensor. For each element, 0
indicates retention and 1 indicates discard. Input tensor of shape :math:`(B, N1, S1, S2)`, `(B, 1, S1, S2)`,
`(S1, S2)` or `(2048, 2048)`. In the compression scenario (sparse_mode 2, 3, or 4), attn_mask must be
`(2048, 2048)`. When sparse_mode is 5, attn_mask must be `(B, N1, S1, S2)`, `(B, 1, S1, S2)`. When sparse_mode
is 0 or 1, attn_mask should be `(B, N1, S1, S2)`, `(B, 1, S1, S2)`, `(S1, S2)`.
- **prefix** (Union[List[int64], Tuple[int64], None]) - N value of each Batch in the prefix sparse calculation
scenario. Input tensor of shape :math:`(B,)`. B max value 32. Not none only when sparse_mode is 5.
If S1 > S2, N ranges from 0 to S2. If S1 <= S2, N ranges from S2 - S1 to S2.
- **actual_seq_qlen** (Union[List[int64], Tuple[int64], None]) - Size of query corresponding to each batch, array
with increasing values and the last value equal to T1.
- **actual_seq_kvlen** (Union[List[int64], Tuple[int64], None]) - Size of key and value corresponding to each
batch, array with increasing values and the last value equal to T2.
Outputs:
- **softmax_max** (Tensor[float32]) - (B, N1, S1, 8) when input_layout is not `TND` else (T1, N1, D)
- **softmax_sum** (Tensor[float32]) - (B, N1, S1, 8) when input_layout is not `TND` else (T1, N1, D)
- **softmax_out** (Tensor[float16, bfloat16]) - Useless output, ignore it. Output tensor of shape : `()`
- **attention_out** (Tensor[float16, bfloat16]) - The output of attention, its shape, and data type
are the same as the query.
Supported Platforms:
``Ascend``
"""
__mindspore_signature__ = (
sig.make_sig('query'),
sig.make_sig('key'),
sig.make_sig('value'),
sig.make_sig('real_shift', default=None),
sig.make_sig('drop_mask', default=None),
sig.make_sig('padding_mask', default=None),
sig.make_sig('attn_mask', default=None),
sig.make_sig('prefix', default=None),
sig.make_sig('actual_seq_qlen', default=None),
sig.make_sig('actual_seq_kvlen', default=None),
)
@prim_arg_register
def __init__(self, head_num, keep_prob=1.0, scale_value=1.0, pre_tokens=2147483647, next_tokens=2147483647, inner_precise=0, input_layout='BSH', sparse_mode=0):
self._set_prim_arg("head_num", head_num)
self._set_prim_arg("keep_prob", keep_prob)
self._set_prim_arg("scale_value", scale_value)
self._set_prim_arg("pre_tokens", pre_tokens)
self._set_prim_arg("next_tokens", next_tokens)
self._set_prim_arg("inner_precise", inner_precise)
self._set_prim_arg_with_handler("input_layout", input_layout, str_to_enum)
self._set_prim_arg("sparse_mode", sparse_mode)
def __call__(self, query, key, value, real_shift=None, drop_mask=None, padding_mask=None, attn_mask=None, prefix=None, actual_seq_qlen=None, actual_seq_kvlen=None):
return _convert_stub(pyboost_flash_attention_score(self, [query, key, value, real_shift, drop_mask, padding_mask, attn_mask, prefix, actual_seq_qlen, actual_seq_kvlen, self.head_num, self.keep_prob, self.scale_value, self.pre_tokens, self.next_tokens, self.inner_precise, self.input_layout, self.sparse_mode]))
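# Illustrative usage sketch (not part of the generated operator definitions): a hedged example of
# FlashAttentionScore assuming an Ascend (Atlas A2) device, the "BSH" layout, and the shape
# conventions from the docstring above (H1 = N1 * D, scale_value = 1 / sqrt(D)). Shapes and values
# are illustrative only; `_example_flash_attention_score` is hypothetical and never called at import time.
def _example_flash_attention_score():
    import numpy as np
    from mindspore import Tensor
    B, S, N1, D = 1, 128, 8, 64          # batch, sequence length, query heads, head size
    H = N1 * D                           # hidden size for the "BSH" layout
    q = Tensor(np.random.randn(B, S, H).astype(np.float16))
    k = Tensor(np.random.randn(B, S, H).astype(np.float16))
    v = Tensor(np.random.randn(B, S, H).astype(np.float16))
    fa = FlashAttentionScore(head_num=N1, scale_value=1.0 / D ** 0.5, input_layout='BSH')
    # Outputs: softmax_max, softmax_sum, softmax_out (unused) and attention_out.
    _, _, _, attention_out = fa(q, k, v)
    return attention_out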
class FlattenExt(Primitive):
r"""
.. code-block::
prim = ops.FlattenExt()
out = prim(input, start_dim, end_dim)
is equivalent to
.. code-block::
ops.flatten_ext(input, start_dim, end_dim)
Refer to :func:`mindspore.ops.flatten_ext` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('start_dim', default=0),
sig.make_sig('end_dim', default=-1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, start_dim=0, end_dim=-1):
return _convert_stub(pyboost_flatten_ext(self, [input, start_dim, end_dim]))
flatten_ext_op=FlattenExt()
class Flatten(Primitive):
r"""
Flattens a tensor without changing its batch size on the 0-th axis.
Refer to :func:`mindspore.ops.flatten` for more details.
Inputs:
- **input_x** (Tensor) - Tensor of shape :math:`(N, \ldots)` to be flattened, where :math:`N` is batch size.
Outputs:
Tensor, the shape of the output tensor is :math:`(N, X)`, where :math:`X` is
the product of the remaining dimensions.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.ones(shape=[1, 2, 3, 4]), mindspore.float32)
>>> flatten = ops.Flatten()
>>> output = flatten(input_x)
>>> print(output.shape)
(1, 24)
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input_x):
return super().__call__(input_x)
flatten_op=Flatten()
class FloorDiv(Primitive):
r"""
.. code-block::
prim = ops.FloorDiv()
out = prim(input, other)
is equivalent to
.. code-block::
ops.floor_divide(input, other)
Refer to :func:`mindspore.ops.floor_divide` for more details.
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
return super().__call__(input, other)
floor_div_op=FloorDiv()
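# Illustrative usage sketch (not part of the generated operator definitions): FloorDiv / ops.floor_divide
# performs element-wise division rounded toward negative infinity. Assumes MindSpore is installed;
# `_example_floor_div` is a hypothetical helper and is never called at import time.
def _example_floor_div():
    import mindspore as ms
    from mindspore import Tensor
    x = Tensor([2, 4, -1], ms.int32)
    y = Tensor([3, 3, 3], ms.int32)
    # Expected result with floor division: [0, 1, -1]
    return floor_div_op(x, y)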
class FloorMod(Primitive):
r"""
.. code-block::
prim = ops.FloorMod()
out = prim(x, y)
is equivalent to
.. code-block::
ops.floor_mod(x, y)
Refer to :func:`mindspore.ops.floor_mod` for more details.
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, y):
return super().__call__(x, y)
floor_mod_op=FloorMod()
class Floor(Primitive):
r"""
.. code-block::
prim = ops.Floor()
out = prim(input)
is equivalent to
.. code-block::
ops.floor(input)
Refer to :func:`mindspore.ops.floor` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
floor_op=Floor()
class GatherDGradV2(Primitive):
r"""
Computes gradient for the GatherD operation. Note that the operator "GatherDGrad" has been deprecated.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, dim, index, dout):
return _convert_stub(pyboost_gather_d_grad_v2(self, [x, dim, index, dout]))
gather_d_grad_v2_op=GatherDGradV2()
class GatherD(Primitive):
r"""
.. code-block::
prim = ops.GatherD()
out = prim(x, dim, index)
is equivalent to
.. code-block::
ops.gather_d(x, dim, index)
Refer to :func:`mindspore.ops.gather_d` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, dim, index):
return _convert_stub(pyboost_gather_d(self, [x, dim, index]))
gather_d_op=GatherD()
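# Illustrative usage sketch (not part of the generated operator definitions): GatherD / ops.gather_d
# gathers values along `dim` using an index tensor of the same rank as `x`. Assumes MindSpore is
# installed; `_example_gather_d` is a hypothetical helper and is never called at import time.
def _example_gather_d():
    import mindspore as ms
    from mindspore import Tensor
    x = Tensor([[1, 2], [3, 4]], ms.int32)
    index = Tensor([[0, 0], [1, 0]], ms.int32)
    # For dim=1, out[i][j] = x[i][index[i][j]], so the expected result is [[1, 1], [4, 3]].
    return gather_d_op(x, 1, index)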
class GatherNd(Primitive):
r"""
.. code-block::
prim = ops.GatherNd()
out = prim(input_x, indices)
is equivalent to
.. code-block::
ops.gather_nd(input_x, indices)
Refer to :func:`mindspore.ops.gather_nd` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input_x, indices):
return super().__call__(input_x, indices)
gather_nd_op=GatherNd()
class Gather(Primitive):
r"""
.. code-block::
prim = ops.Gather(batch_dims)
out = prim(input_params, input_indices, axis)
is equivalent to
.. code-block::
ops.gather(input_params, input_indices, axis, batch_dims)
Refer to :func:`mindspore.ops.gather` for more details.
"""
@prim_arg_register
def __init__(self, batch_dims=0):
self._set_prim_arg("batch_dims", batch_dims)
def __call__(self, input_params, input_indices, axis):
return super().__call__(input_params, input_indices, axis, self.batch_dims)
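# Illustrative usage sketch (not part of the generated operator definitions): Gather with the default
# batch_dims=0 selects slices of `input_params` along `axis` by index. Assumes MindSpore is installed;
# `_example_gather` is a hypothetical helper and is never called at import time.
def _example_gather():
    import mindspore as ms
    from mindspore import Tensor
    params = Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], ms.float32)
    indices = Tensor([0, 2], ms.int32)
    # Gathering rows 0 and 2 along axis 0 yields [[1, 2, 3], [7, 8, 9]].
    return Gather()(params, indices, 0)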
class Gcd(Primitive):
r"""
.. code-block::
prim = ops.Gcd()
out = prim(input, other)
is equivalent to
.. code-block::
ops.gcd(input, other)
Refer to :func:`mindspore.ops.gcd` for more details.
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
return super().__call__(input, other)
gcd_op=Gcd()
class GeLUGrad(Primitive):
r"""
Gradients of GeLU operation.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, dy, x, y):
return _convert_stub(pyboost_gelu_grad(self, [dy, x, y]))
gelu_grad_op=GeLUGrad()
class GeLU(Primitive):
r"""
Gaussian Error Linear Units activation function.
GeLU is described in the paper `Gaussian Error Linear Units (GELUs) <https://arxiv.org/abs/1606.08415>`_.
And also please refer to `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding
<https://arxiv.org/abs/1810.04805>`_.
GeLU is defined as follows:
.. math::
GELU(x_i) = x_i*P(X < x_i)
where :math:`P` is the cumulative distribution function of the standard Gaussian distribution,
:math:`x_i` is the input element.
Inputs:
- **x** (Tensor) - The input of the activation function GeLU, the data type is float16, float32 or float64.
Outputs:
Tensor, with the same type and shape as `x`.
Raises:
TypeError: If `x` is not a Tensor.
TypeError: If dtype of `x` is not float16, float32 or float64.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
>>> result = ops.GeLU()(x)
>>> print(result)
[0.841192 1.9545976 2.9963627]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return _convert_stub(pyboost_gelu(self, [input]))
gelu_op=GeLU()
class Geqrf(Primitive):
r"""
.. code-block::
prim = ops.Geqrf()
out = prim(input)
is equivalent to
.. code-block::
ops.geqrf(input)
Refer to :func:`mindspore.ops.geqrf` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
geqrf_op=Geqrf()
class GreaterEqual(Primitive):
r"""
.. code-block::
prim = ops.GreaterEqual()
out = prim(input, other)
is equivalent to
.. code-block::
ops.greater_equal(input, other)
Refer to :func:`mindspore.ops.greater_equal` for more details.
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
return _convert_stub(pyboost_greater_equal(self, [input, other]))
greater_equal_op=GreaterEqual()
class Greater(Primitive):
r"""
.. code-block::
prim = ops.Greater()
out = prim(input, other)
is equivalent to
.. code-block::
ops.greater(input, other)
Refer to :func:`mindspore.ops.greater` for more details.
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
return _convert_stub(pyboost_greater(self, [input, other]))
greater_op=Greater()
class GridSampler2DGrad(Primitive):
r"""
Computes gradients for GridSampler2D operation.
Args:
- **grad** (Tensor) - A 4-D tensor whose dtype is float16 or float32 and whose shape is :math:`(N, C,
H_{out}, W_{out})`. The shape must be consistent with the shape of the output of the forward computation.
- **input_x** (Tensor) - A 4-D tensor whose dtype is the same as `grad` and whose shape is :math:`(N, C,
H_{in}, W_{in})`.
- **grid** (Tensor) - A 4-D tensor whose dtype is the same as `grad` and whose
shape is :math:`(N, H_{out}, W_{out}, 2)`.
interpolation_mode (str): An optional string specifying the interpolation method. The optional values are
"bilinear" or "nearest". Default: "bilinear".
padding_mode (str): An optional string specifying the pad method. The optional values are "zeros", "border" or
"reflection". Default: "zeros".
align_corners (bool): An optional bool. If "true", the centers of the corner pixels of the input and output
tensors are aligned. Defaults to "false".
Returns:
- **dx** (Tensor) - A 4-D tensor whose dtype and shape are the same as `input_x`.
- **dgrid** (Tensor) - A 4-D tensor whose dtype and shape are the same as `grid`.
Raises:
TypeError: If `grad`, `input_x` or `grid` is not a Tensor.
TypeError: If the dtypes of `grad`, `input_x` and `grid` are inconsistent.
TypeError: If the dtype of `grad`, `input_x` or `grid` is not a valid type.
TypeError: If `align_corners` is not a boolean value.
ValueError: If the rank of `grad`, `input_x` or `grid` is not equal to 4.
ValueError: If the first dimension of `grad`, `input_x` and `grid` are inconsistent.
ValueError: If the last dimension of `grid` is not equal to 2.
ValueError: If `interpolation_mode` is not a string, or is neither "bilinear" nor "nearest".
ValueError: If `padding_mode` is not a string, or is not one of "zeros", "border" or "reflection".
ValueError: If the shape of `grad` is inconsistent with the shape of the output result of forward calculation.
Supported Platforms:
``GPU`` ``CPU``
"""
@prim_arg_register
def __init__(self, interpolation_mode='bilinear', padding_mode='zeros', align_corners=False):
self._set_prim_arg_with_handler("interpolation_mode", interpolation_mode, str_to_enum)
self._set_prim_arg_with_handler("padding_mode", padding_mode, str_to_enum)
self._set_prim_arg("align_corners", align_corners)
def __call__(self, grad, input_x, grid):
return _convert_stub(pyboost_grid_sampler_2d_grad(self, [grad, input_x, grid, self.interpolation_mode, self.padding_mode, self.align_corners]))
class GridSampler2D(Primitive):
r"""
This operation samples 2d `input_x` by using interpolation based on flow field grid,
which is usually generated by :func:`mindspore.ops.affine_grid`.
.. warning::
This is an experimental API that is subject to change or deletion.
Refer to :func:`mindspore.ops.grid_sample` for more details.
Args:
interpolation_mode (str, optional): An optional string specifying the interpolation method.
The optional values are
``"bilinear"`` or ``"nearest"`` . Default: ``"bilinear"`` .
- ``"nearest"``: Nearest neighbor interpolation. Each output pixel is assigned the value of the
nearest input pixel. This method is simple and fast but can result in blocky or pixelated outputs.
- ``"bilinear"``: Bilinear interpolation. Each output pixel is a weighted average of the four nearest input
pixels, computed using bilinear interpolation. This method produces smoother results compared
to nearest neighbor interpolation.
padding_mode (str, optional): An optional string specifying the pad method.
The optional values are ``"zeros"`` , ``"border"`` or ``"reflection"`` . Default: ``"zeros"`` .
When the sampling grid is outside input's bounds, effects of various padding modes are as follows:
- ``"zeros"``: Pads the input tensor with zeros.
- ``"border"``: Pads the input tensor with the values of the pixels on the border of the tensor.
- ``"reflection"``: Pads the input tensor by reflecting the values of the pixels at the
boundary of the tensor.
align_corners (bool, optional): An optional bool. When set to ``True`` ,
the centers of the corner pixels of the input
and output tensors are aligned. When set to ``False`` , it is not aligned. Default: ``False`` .
Inputs:
- **input_x** (Tensor) - A 4-D tensor with shape
:math:`(N, C, H_{in}, W_{in})`. Supported dtypes:
- Ascend: float16, float32.
- GPU/CPU: float16, float32, float64.
- **grid** (Tensor) - A 4-D tensor whose dtype is the same as `input_x` and whose shape is
:math:`(N, H_{out}, W_{out}, 2)`.
Used to specify the sampling pixel locations normalized by the input spatial
dimensions.
Outputs:
A 4-D Tensor whose dtype is the same as `input_x` and whose shape is :math:`(N, C, H_{out}, W_{out})`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> gridsampler = ops.GridSampler2D(interpolation_mode='bilinear', padding_mode='zeros', align_corners=True)
>>> input_x = Tensor(np.arange(16).reshape((2, 2, 2, 2)).astype(np.float32))
>>> grid = Tensor(np.arange(-9, 9, 0.5).reshape((2, 3, 3, 2)).astype(np.float32))
>>> output = gridsampler(input_x, grid)
>>> print(output)
[[[[ 0. 0. 0. ]
[ 0. 0. 0. ]
[ 0. 0. 0.5 ]]
[[ 0. 0. 0. ]
[ 0. 0. 0. ]
[ 0. 1.5 4.5 ]]]
[[[10. 8.25 1.375]
[ 0. 0. 0. ]
[ 0. 0. 0. ]]
[[14. 11.25 1.875]
[ 0. 0. 0. ]
[ 0. 0. 0. ]]]]
"""
@prim_arg_register
def __init__(self, interpolation_mode='bilinear', padding_mode='zeros', align_corners=False):
self._set_prim_arg_with_handler("interpolation_mode", interpolation_mode, str_to_enum)
self._set_prim_arg_with_handler("padding_mode", padding_mode, str_to_enum)
self._set_prim_arg("align_corners", align_corners)
def __call__(self, input_x, grid):
return _convert_stub(pyboost_grid_sampler_2d(self, [input_x, grid, self.interpolation_mode, self.padding_mode, self.align_corners]))
class GridSampler3DGrad(Primitive):
r"""
Computes gradients for GridSampler3D operation.
Args:
- **grad** (Tensor) - A 5-D tensor whose dtype is float32 or float64 and whose shape is :math:`(N, C, D_{out},
H_{out}, W_{out})`. The shape must be consistent with the shape of the output of the forward computation.
- **input_x** (Tensor) - A 5-D tensor whose dtype is the same as `grad` and whose shape is :math:`(N, C,
D_{in}, H_{in}, W_{in})`.
- **grid** (Tensor) - A 5-D tensor whose dtype is the same as `grad` and whose shape is :math:`(N, D_{out},
H_{out}, W_{out}, 3)`.
interpolation_mode (str): An optional string specifying the interpolation method. The optional values are
"bilinear" or "nearest". Default: "bilinear".
padding_mode (str): An optional string specifying the pad method. The optional values are "zeros", "border" or
"reflection". Default: "zeros".
align_corners (bool): An optional bool. If "true", the centers of the corner pixels of the input and output
tensors are aligned. Defaults to "false".
Returns:
- **dx** (Tensor) - A 5-D tensor whose dtype and shape are the same as `input_x`.
- **dgrid** (Tensor) - A 5-D tensor whose dtype and shape are the same as `grid`.
Raises:
TypeError: If `grad`, `input_x` or `grid` is not a Tensor.
TypeError: If the dtypes of `grad`, `input_x` and `grid` are inconsistent.
TypeError: If the dtype of `grad`, `input_x` or `grid` is not a valid type.
TypeError: If `align_corners` is not a boolean value.
ValueError: If the rank of `grad`, `input_x` or `grid` is not equal to 5.
ValueError: If the first dimension of `grad`, `input_x` and `grid` are inconsistent.
ValueError: If the last dimension of `grid` is not equal to 3.
ValueError: If `interpolation_mode` is not a string, or is neither "bilinear" nor "nearest".
ValueError: If `padding_mode` is not a string, or is not one of "zeros", "border" or "reflection".
ValueError: If the shape of `grad` is inconsistent with the shape of the output result of forward calculation.
Supported Platforms:
``GPU`` ``CPU``
"""
@prim_arg_register
def __init__(self, interpolation_mode='bilinear', padding_mode='zeros', align_corners=False):
self._set_prim_arg_with_handler("interpolation_mode", interpolation_mode, str_to_enum)
self._set_prim_arg_with_handler("padding_mode", padding_mode, str_to_enum)
self._set_prim_arg("align_corners", align_corners)
def __call__(self, grad, input_x, grid):
return _convert_stub(pyboost_grid_sampler_3d_grad(self, [grad, input_x, grid, self.interpolation_mode, self.padding_mode, self.align_corners]))
class GridSampler3D(Primitive):
r"""
Given an input and a grid, the output is calculated using the input values
and pixel positions in the grid. Only volume (5-D) input is supported.
.. warning::
This is an experimental API that is subject to change or deletion.
Refer to :func:`mindspore.ops.grid_sample` for more details.
Args:
interpolation_mode (str, optional): An optional string specifying the interpolation method.
The optional values are ``"bilinear"`` or ``"nearest"`` . Default: ``"bilinear"`` .
- ``"nearest"``: Nearest neighbor interpolation. Each output pixel is assigned the value of the
nearest input pixel. This method is simple and fast but can result in blocky or pixelated outputs.
- ``"bilinear"``: Bilinear interpolation. Each output pixel is a weighted average of the four nearest input
pixels, computed using bilinear interpolation. This method produces smoother results compared
to nearest neighbor interpolation.
padding_mode (str, optional): An optional string specifying the pad method.
The optional values are ``"zeros"`` , ``"border"`` or ``"reflection"`` . Default: ``"zeros"`` .
When the sampling grid is outside input's bounds, effects of various padding modes are as follows:
- ``"zeros"``: Pads the input tensor with zeros.
- ``"border"``: Pads the input tensor with the values of the pixels on the border of the tensor.
- ``"reflection"``: Pads the input tensor by reflecting the values of the pixels at the
boundary of the tensor.
align_corners (bool, optional): An optional bool specifying alignment method. If set to ``True`` ,
the extrema (-1 and 1) are considered as referring to
the center points of the input's corner pixels. If set to ``False`` , they are instead considered as
referring to the corner points of the input's corner pixels, making the sampling more resolution agnostic.
Default: ``False`` .
Inputs:
- **input_x** (Tensor) - A 5-D tensor with dtype of float16, float32 or float64
and shape of :math:`(N, C, D_{in}, H_{in}, W_{in})`.
- **grid** (Tensor) - A 5-D tensor whose dtype is the same as `input_x` and whose shape is :math:`(N, D_{out},
H_{out}, W_{out}, 3)`.
Outputs:
A 5-D Tensor whose dtype is the same as `input_x` and whose shape is :math:`(N, C, D_{out}, H_{out}, W_{out})`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> gridsampler = ops.GridSampler3D(interpolation_mode='bilinear', padding_mode='zeros', align_corners=True)
>>> input_x = Tensor(np.arange(32).reshape((2, 2, 2, 2, 2)).astype(np.float32))
>>> grid = Tensor(np.arange(-0.2, 1, 0.1).reshape((2, 2, 1, 1, 3)).astype(np.float32))
>>> output = gridsampler(input_x, grid)
>>> print(output)
[[[[[ 3.3 ]]
[[ 4.35 ]]]
[[[11.300001]]
[[12.349999]]]]
[[[[21.4 ]]
[[22.449999]]]
[[[29.4 ]]
[[30.449999]]]]]
"""
@prim_arg_register
def __init__(self, interpolation_mode='bilinear', padding_mode='zeros', align_corners=False):
self._set_prim_arg_with_handler("interpolation_mode", interpolation_mode, str_to_enum)
self._set_prim_arg_with_handler("padding_mode", padding_mode, str_to_enum)
self._set_prim_arg("align_corners", align_corners)
def __call__(self, input_x, grid):
return _convert_stub(pyboost_grid_sampler_3d(self, [input_x, grid, self.interpolation_mode, self.padding_mode, self.align_corners]))
class GroupNormGrad(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('dy'),
sig.make_sig('x'),
sig.make_sig('mean'),
sig.make_sig('rstd'),
sig.make_sig('gamma_opt'),
sig.make_sig('num_groups'),
sig.make_sig('dx_is_require', default=True),
sig.make_sig('dgamma_is_require', default=True),
sig.make_sig('dbeta_is_require', default=True),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, dy, x, mean, rstd, gamma_opt, num_groups, dx_is_require=True, dgamma_is_require=True, dbeta_is_require=True):
return _convert_stub(pyboost_group_norm_grad(self, [dy, x, mean, rstd, gamma_opt, num_groups, dx_is_require, dgamma_is_require, dbeta_is_require]))
group_norm_grad_op=GroupNormGrad()
class GroupNorm(Primitive):
r"""
Group Normalization over a mini-batch of inputs.
It normalizes the input as described
in the paper `Group Normalization <https://arxiv.org/pdf/1803.08494.pdf>`_. Group Normalization
divides the channels into groups and computes within each group the mean and variance for normalization,
and it remains stable over a wide range of batch sizes. :math:`\gamma` and :math:`\beta` are trainable scale
and shift.
It can be described using the following formula:
.. math::
y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
where :math:`\gamma` is `weight`, :math:`\beta` is `bias`, :math:`\epsilon` is `eps`.
Args:
input (Tensor): The input feature with shape :math:`(N, C, *)` where :math:`*` means, any number of
additional dimensions.
num_groups (int): The number of groups to be divided along the channel dimension.
weight (Tensor, optional): The shape :math:`(C,)`, Default: ``None``, has the same data type with `input`.
bias (Tensor, optional): The shape :math:`(C,)`, Default: ``None``, has the same data type with `input`.
eps (float, optional): A value added to the denominator for numerical stability. Default: ``1e-5`` .
Returns:
Tensor, the normalized and scaled offset tensor, has the same shape and data type as the `input`.
Raises:
TypeError: If `num_groups` is not an int.
TypeError: If `eps` is not a float.
ValueError: If `num_groups` is less than 1.
ValueError: If `C` (the channel dimension of `input`) is not divisible by `num_groups`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore as ms
>>> import numpy as np
>>> from mindspore import ops
>>> x = ms.Tensor(np.ones([1, 2, 4, 4], np.float32))
>>> group_norm_op = ops.GroupNorm()
>>> output = group_norm_op(x, 2)[0]
>>> print(output)
[[[[0. 0. 0. 0.]
[0. 0. 0. 0.]
[0. 0. 0. 0.]
[0. 0. 0. 0.]]
[[0. 0. 0. 0.]
[0. 0. 0. 0.]
[0. 0. 0. 0.]
[0. 0. 0. 0.]]]]
"""
__mindspore_signature__ = (
sig.make_sig('input', dtype=sig.sig_dtype.T),
sig.make_sig('num_groups', dtype=sig.sig_dtype.T1),
sig.make_sig('weight', dtype=sig.sig_dtype.T, default=None),
sig.make_sig('bias', dtype=sig.sig_dtype.T, default=None),
sig.make_sig('eps', dtype=sig.sig_dtype.T2, default=1e-5),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, num_groups, weight=None, bias=None, eps=1e-5):
return _convert_stub(pyboost_group_norm(self, [input, num_groups, weight, bias, eps]))
group_norm_op=GroupNorm()
class HShrinkGrad(Primitive):
r"""
Computes gradients for HShrinkGrad operation.
Args:
Gradients (Tensor) - The gradients of the loss with respect to the output of the HShrink function.
Currently gradients data type only support float16 and float32.
Features (Tensor) - Must be the input `input_x` of the forward operator HShrink.
Currently features data type only support float16 and float32.
lambd (float): the lambda value for the Hardshrink formulation. Default: 0.5
Returns:
backprops - Tensor, with the same shape and data type as `features`.
Raises:
ValueError: If `lambd` is not a float.
ValueError: If shape of `gradients` is not the same as `features`.
TypeError: If dtype of `gradients` is not the same as `features`.
TypeError: If dtype of `gradients` or `features` is neither float16 nor float32.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
"""
@prim_arg_register
def __init__(self, lambd=0.5):
self._set_prim_arg("lambd", lambd)
def __call__(self, gradients, features):
return super().__call__(gradients, features, self.lambd)
class HShrink(Primitive):
r"""
Hard Shrink activation function.
Refer to :func:`mindspore.ops.hardshrink` for more details.
Args:
lambd (float, optional): The threshold :math:`\lambda` defined by the Hard Shrink formula. Default: ``0.5`` .
Inputs:
- **input_x** (Tensor) - The input of Hard Shrink with data type of float16 or float32.
Outputs:
Tensor, the same shape and data type as the input.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore as ms
>>> import mindspore.ops as ops
>>> from mindspore import Tensor, nn
>>> import numpy as np
>>> input_x = Tensor(np.array([[0.5, 1, 2.0], [0.0533, 0.0776, -2.1233]]), ms.float32)
>>> hshrink = ops.HShrink()
>>> output = hshrink(input_x)
>>> print(output)
[[ 0. 1. 2. ]
[ 0. 0. -2.1233]]
"""
@prim_arg_register
def __init__(self, lambd=0.5):
self._set_prim_arg("lambd", lambd)
def __call__(self, x):
return super().__call__(x, self.lambd)
class HSigmoidGrad(Primitive):
r"""
Gets the gradient of HSigmoid operation.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, grads, input_x):
return super().__call__(grads, input_x)
hsigmoid_grad_op=HSigmoidGrad()
class HSigmoid(Primitive):
r"""
Hard sigmoid activation function.
Refer to :func:`mindspore.ops.hardsigmoid` for more details.
Inputs:
- **input_x** (Tensor) - The input Tensor.
Outputs:
Tensor, with the same type and shape as the `input_x`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> hsigmoid = ops.HSigmoid()
>>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
>>> result = hsigmoid(input_x)
>>> print(result)
[0.3333 0.1666 0.5 0.8335 0.6665]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input_x):
return super().__call__(input_x)
hsigmoid_op=HSigmoid()
class HSwishGrad(Primitive):
r"""
Gets the gradient of HSwish operation.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, y_grad, x):
return super().__call__(y_grad, x)
hswish_grad_op=HSwishGrad()
class HSwish(Primitive):
r"""
Hard swish activation function.
Refer to :func:`mindspore.ops.hardswish` for more details.
Inputs:
- **input_x** (Tensor) - The input Tensor.
Outputs:
Tensor, with the same type and shape as the `input_x`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> hswish = ops.HSwish()
>>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
>>> result = hswish(input_x)
>>> print(result)
[-0.3333 -0.3333 0 1.666 0.6665]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x):
return super().__call__(x)
hswish_op=HSwish()
class Identity(Primitive):
r"""
.. code-block::
prim = ops.Identity()
out = prim(input_x)
is equivalent to
.. code-block::
ops.deepcopy(input_x)
Refer to :func:`mindspore.ops.deepcopy` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input_x):
return super().__call__(input_x)
identity_op=Identity()
class IFFT2(Primitive):
r"""
.. code-block::
prim = ops.IFFT2()
out = prim(input, s, dim, norm)
is equivalent to
.. code-block::
ops.ifft2(input, s, dim, norm)
Refer to :func:`mindspore.ops.ifft2` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('s', default=None),
sig.make_sig('dim', default=(-2, -1)),
sig.make_sig('norm', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, s=None, dim=(-2, -1), norm=None):
return super().__call__(input, s, dim, norm if norm is None else str_to_enum('IFFT2', 'norm', norm))
ifft2_op=IFFT2()
class IFFT(Primitive):
r"""
.. code-block::
prim = ops.IFFT()
out = prim(input, n, dim, norm)
is equivalent to
.. code-block::
ops.ifft(input, n, dim, norm)
Refer to :func:`mindspore.ops.ifft` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('n', default=None),
sig.make_sig('dim', default=-1),
sig.make_sig('norm', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, n=None, dim=-1, norm=None):
return super().__call__(input, n, dim, norm if norm is None else str_to_enum('IFFT', 'norm', norm))
ifft_op=IFFT()
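# Illustrative usage sketch (not part of the generated operator definitions): with the default
# ("backward") normalization the inverse DFT of a unit impulse is a constant sequence of value 1/n.
# This assumes ops.ifft follows the usual DFT convention (as numpy.fft.ifft does); `_example_ifft`
# is a hypothetical helper and is never called at import time.
def _example_ifft():
    import mindspore as ms
    from mindspore import Tensor
    x = Tensor([1.0, 0.0, 0.0, 0.0], ms.float32)
    # Expected result (as a complex tensor): [0.25, 0.25, 0.25, 0.25]
    return ifft_op(x)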
class IFFTN(Primitive):
r"""
.. code-block::
prim = ops.IFFTN()
out = prim(input, s, dim, norm)
is equivalent to
.. code-block::
ops.ifftn(input, s, dim, norm)
Refer to :func:`mindspore.ops.ifftn` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('s', default=None),
sig.make_sig('dim', default=None),
sig.make_sig('norm', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, s=None, dim=None, norm=None):
return super().__call__(input, s, dim, norm if norm is None else str_to_enum('IFFTN', 'norm', norm))
ifftn_op=IFFTN()
class IFFTShift(Primitive):
r"""
.. code-block::
prim = ops.IFFTShift()
out = prim(input, dim)
is equivalent to
.. code-block::
ops.ifftshift(input, dim)
Refer to :func:`mindspore.ops.ifftshift` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('dim', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim=None):
return super().__call__(input, dim)
ifftshift_op=IFFTShift()
class IRFFTGrad(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input1'),
sig.make_sig('input2'),
sig.make_sig('n', default=None),
sig.make_sig('dim', default=-1),
sig.make_sig('norm', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input1, input2, n=None, dim=-1, norm=None):
return super().__call__(input1, input2, n, dim, norm if norm is None else str_to_enum('IRFFTGrad', 'norm', norm))
irfft_grad_op=IRFFTGrad()
class IRFFT(Primitive):
r"""
.. code-block::
prim = ops.IRFFT()
out = prim(input, n, dim, norm)
is equivalent to
.. code-block::
ops.irfft(input, n, dim, norm)
Refer to :func:`mindspore.ops.irfft` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('n', default=None),
sig.make_sig('dim', default=-1),
sig.make_sig('norm', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, n=None, dim=-1, norm=None):
return super().__call__(input, n, dim, norm if norm is None else str_to_enum('IRFFT', 'norm', norm))
irfft_op=IRFFT()
class IsFinite(Primitive):
r"""
.. code-block::
prim = ops.IsFinite()
out = prim(x)
is equivalent to
.. code-block::
ops.isfinite(x)
Refer to :func:`mindspore.ops.isfinite` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x):
return _convert_stub(pyboost_isfinite(self, [x]))
isfinite_op=IsFinite()
class LayerNormExt(Primitive):
r"""
Applies the Layer Normalization to the input tensor.
This operator will normalize the input tensor on given axis. LayerNorm is described in the paper
`Layer Normalization <https://arxiv.org/abs/1607.06450>`_.
.. math::
y = \frac{x - mean}{\sqrt{variance + \epsilon}} * \gamma + \beta
where :math:`\gamma` is weight, :math:`\beta` is bias, :math:`\epsilon` is eps.
Args:
input (Tensor): Tensor of shape :math:`(N, \ldots)`. The input of LayerNorm.
normalized_shape (Union(tuple[int], list[int])): The normalized shape of `input` for LayerNorm.
weight (Tensor, optional): Learnable parameter :math:`\gamma` . Tensor of shape `normalized_shape`. Default: ``None`` .
bias (Tensor, optional): Learnable parameter :math:`\beta` . Tensor of shape `normalized_shape`. Default: ``None`` .
eps (float, optional): A value added to the denominator for numerical stability(:math:`\epsilon`). Default: ``1e-5`` .
Returns:
tuple[Tensor], tuple of 3 tensors, the normalized input and the updated parameters.
- **output_x** (Tensor) - The normalized input, has the same type and shape as the `input_x`.
- **mean** (Tensor) - The first `begin_norm_axis` (The begin axis of the `input_x` to apply LayerNorm) dimensions of `mean` shape is the same as `input_x`,
and the remaining dimensions are 1. Suppose the shape of the `input_x` is :math:`(x_1, x_2, \ldots, x_R)`,
the shape of the `mean` is :math:`(x_1, \ldots, x_{begin_params_axis}, 1, \ldots, 1)`
(when `begin_params_axis=0`, the shape of `mean` is :math:`(1, \ldots, 1)` ).
- **rstd** (Tensor) - Shape is the same as `mean` .
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If `normalized_shape` is not an integer, a list or a tuple.
TypeError: If `eps` is not a float.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.array([[1, 2, 3], [1, 2, 3]]), mindspore.float32)
>>> normalized_shape = (3,)
>>> gamma = Tensor(np.ones(normalized_shape), mindspore.float32)
>>> beta = Tensor(np.zeros(normalized_shape), mindspore.float32)
>>> eps = 1e-7
>>> layer_norm = ops.LayerNormExt()
>>> output, mean, rstd = layer_norm(input_x, normalized_shape, gamma, beta, eps)
>>> print(output)
[[-1.2247448 0. 1.2247448]
[-1.2247448 0. 1.2247448]]
>>> print(mean)
[[2.]
[2.]]
>>> print(rstd)
[[1.2247447]
[1.2247447]]
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('normalized_shape'),
sig.make_sig('weight', default=None),
sig.make_sig('bias', default=None),
sig.make_sig('eps', default=1e-5),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, normalized_shape, weight=None, bias=None, eps=1e-5):
return _convert_stub(pyboost_layer_norm_ext(self, [input, normalized_shape, weight, bias, eps]))
layer_norm_ext_op=LayerNormExt()
class LayerNormGradExt(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, dy, x, normalized_shape, mean, variance, gamma, beta):
return _convert_stub(pyboost_layer_norm_grad_ext(self, [dy, x, normalized_shape, mean, variance, gamma, beta]))
layer_norm_grad_ext_op=LayerNormGradExt()
class LayerNormGradGrad(Primitive):
r"""
Gets the gradient of LayerNormGrad operation.
Inputs:
- **x** (Tensor) - The input tensor to be normalized, float32 or float16.
- **dy** (Tensor) - The gradient of LayerNorm's output y, float32 or float16.
- **variance** (Tensor) - The variance of x, float32 or float16.
- **mean** (Tensor) - The mean of x, float32 or float16.
- **gamma** (Tensor) - The original value of weight gamma initialized in LayerNorm, float32 or float16.
Default: 'ones'.
- **d_dx** (Tensor) - The gradient of dx, where dx is the gradient of LayerNorm's input x, float32 or float16.
- **d_dg** (Tensor) - The gradient of dg, where dg is the gradient of LayerNorm's weight gamma,
float32 or float16.
- **d_db** (Tensor) - The gradient of db, where db is the gradient of LayerNorm's weight beta,
float32 or float16.
- **begin_norm_axis** (int) - The begin axis for the input to apply layernorm. Default: 1.
- **begin_params_axis** (int) - The begin axis for the parameter input to apply layernorm. Default: 1.
Outputs:
Tuple[Tensor], tuple of 3 Tensors (the gradients of layernormgrad x, dy, gamma).
Raises:
TypeError: If the 8 inputs don't have the same dtype.
ValueError: If x, dy, d_dx don't have the same shape.
ValueError: If variance, mean don't have the same shape.
ValueError: If gamma, d_dg, d_db don't have the same shape.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
"""
@prim_arg_register
def __init__(self, begin_norm_axis=1, begin_params_axis=1):
self._set_prim_arg("begin_norm_axis", begin_norm_axis)
self._set_prim_arg("begin_params_axis", begin_params_axis)
def __call__(self, x, dy, variance, mean, gamma, d_dx, d_dg, d_db):
return super().__call__(x, dy, variance, mean, gamma, d_dx, d_dg, d_db, self.begin_norm_axis, self.begin_params_axis)
class LayerNormGrad(Primitive):
r"""
Applies the layer Normalization to the input array.
This operator will calculate the input gradients of layernorm.
Inputs:
x (Tensor): The inputs of layer norm op.
dy (Tensor): The gradient of outputs of layer norm op.
variance (Tensor): The variance of x.
mean (Tensor): The mean of x.
gamma (Tensor): The weights of normalized elements.
begin_norm_axis (int): The begin axis for the input to apply layernorm. Default: 1.
begin_params_axis (int): The begin axis for the parameter input to apply layernorm. Default: 1.
Outputs:
tuple[Tensor], tuple of 3 tensors (the gradients of layernorm input, gamma, beta).
pd_x (Tensor): the gradients of layernorm input x.
pd_gamma (Tensor): the gradients of gamma.
pd_beta (Tensor): the gradients of beta.
"""
@prim_arg_register
def __init__(self, begin_norm_axis=1, begin_params_axis=1):
self._set_prim_arg("begin_norm_axis", begin_norm_axis)
self._set_prim_arg("begin_params_axis", begin_params_axis)
def __call__(self, x, dy, variance, mean, gamma):
return super().__call__(x, dy, variance, mean, gamma, self.begin_norm_axis, self.begin_params_axis)
class LayerNormGradV3(Primitive):
r"""
Applies the layer Normalization to the input array.
This operator will calculate the input gradients of LayerNormV3.
Inputs:
x (Tensor): The inputs of layer norm operator.
dy (Tensor): The gradient of outputs of layer norm operator.
rstd (Tensor): The rstd of x.
mean (Tensor): The mean of x.
gamma (Tensor): The weights of normalized elements.
begin_norm_axis (int): The begin axis for the input to apply LayerNormV3. Default: 1.
begin_params_axis (int): The begin axis for the parameter input to apply LayerNormV3. Default: 1.
Outputs:
tuple[Tensor], tuple of 3 tensors (the gradients of LayerNormV3 input, gamma, beta).
pd_x (Tensor): the gradients of LayerNormV3 input x.
pd_gamma (Tensor): the gradients of gamma.
pd_beta (Tensor): the gradients of beta.
"""
@prim_arg_register
def __init__(self, begin_norm_axis=1, begin_params_axis=1):
self._set_prim_arg("begin_norm_axis", begin_norm_axis)
self._set_prim_arg("begin_params_axis", begin_params_axis)
def __call__(self, x, dy, variance, mean, gamma):
return super().__call__(x, dy, variance, mean, gamma, self.begin_norm_axis, self.begin_params_axis)
class LayerNorm(Primitive):
r"""
Applies the Layer Normalization to the input tensor.
This operator will normalize the input tensor on given axis. LayerNorm is described in the paper
`Layer Normalization <https://arxiv.org/abs/1607.06450>`_.
.. math::
y = \frac{x - mean}{\sqrt{variance + \epsilon}} * \gamma + \beta
where :math:`\gamma` is scale, :math:`\beta` is bias, :math:`\epsilon` is epsilon.
Args:
begin_norm_axis (int): The begin axis of the `input_x` to apply LayerNorm,
the value must be in [-1, rank(input_x)). Default: ``1`` .
begin_params_axis (int): The begin axis of the parameter input (`gamma`, `beta`) to
apply LayerNorm, the value must be in [-1, rank(input_x)). Default: ``1`` .
epsilon (float): A value added to the denominator for numerical stability(:math:`\epsilon`). Default: ``1e-7`` .
Inputs:
- **input_x** (Tensor) - Tensor of shape :math:`(N, \ldots)`.
The input of LayerNorm. Supported dtypes: float16, float32, float64.
- **gamma** (Tensor) - Learnable parameter :math:`\gamma` . Tensor of shape `input_x_shape[begin_params_axis:]`. Supported dtypes: float16, float32, float64.
- **beta** (Tensor) - Learnable parameter :math:`\beta` . Tensor of shape `input_x_shape[begin_params_axis:]`. Supported dtypes: float16, float32, float64.
Outputs:
tuple[Tensor], tuple of 3 tensors, the normalized input and the updated parameters.
- **output_x** (Tensor) - The normalized input, has the same type and shape as the `input_x`.
- **mean** (Tensor) - The first `begin_norm_axis` dimensions of `mean` shape is the same as `input_x`,
and the remaining dimensions are 1. Suppose the shape of the `input_x` is :math:`(x_1, x_2, \ldots, x_R)`,
the shape of the `mean` is :math:`(x_1, \ldots, x_{begin\_norm\_axis}, 1, \ldots, 1)`
(when `begin_norm_axis=0`, the shape of `mean` is :math:`(1, \ldots, 1)` ).
- **rstd** (Tensor) - The reciprocal of the input standard deviation. Shape is the same as `mean` .
Raises:
TypeError: If `begin_norm_axis` or `begin_params_axis` is not an int.
TypeError: If `epsilon` is not a float.
TypeError: If `input_x`, `gamma` or `beta` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.array([[1, 2, 3], [1, 2, 3]]), mindspore.float32)
>>> gamma = Tensor(np.ones([3]), mindspore.float32)
>>> beta = Tensor(np.ones([3]), mindspore.float32)
>>> layer_norm = ops.LayerNorm()
>>> output, _, _ = layer_norm(input_x, gamma, beta)
>>> print(output)
[[-0.2247448 1. 2.2247448]
[-0.2247448 1. 2.2247448]]
"""
@prim_arg_register
def __init__(self, begin_norm_axis=1, begin_params_axis=1, epsilon=1e-7):
self._set_prim_arg("begin_norm_axis", begin_norm_axis)
self._set_prim_arg("begin_params_axis", begin_params_axis)
self._set_prim_arg("epsilon", epsilon)
def __call__(self, input_x, gamma, beta):
return super().__call__(input_x, gamma, beta, self.begin_norm_axis, self.begin_params_axis, self.epsilon)
class LayerNormV3(Primitive):
r"""
Applies the Layer Normalization to the input tensor.
This operator will normalize the input tensor on given axis. LayerNormV3 is described in the paper
`Layer Normalization <https://arxiv.org/abs/1607.06450>`_.
.. math::
y = \frac{x - mean}{\sqrt{variance + \epsilon}} * \gamma + \beta
where :math:`\gamma` is scale, :math:`\beta` is bias, :math:`\epsilon` is epsilon.
Args:
begin_norm_axis (int): The begin axis of the `input_x` to apply LayerNormV3,
the value must be in [-1, rank(input_x)). Default: ``1`` .
begin_params_axis (int): The begin axis of the parameter input (`gamma`, `beta`) to
apply LayerNormV3, the value must be in [-1, rank(input_x)). Default: ``1`` .
epsilon (float): A value added to the denominator for numerical stability(:math:`\epsilon`). Default: ``1e-7`` .
Inputs:
- **input_x** (Tensor) - Tensor with shape :math:`(N, \ldots)`.
The input of LayerNormV3. Supported dtypes: float16, float32, bfloat16.
- **gamma** (Tensor) - Tensor with shape `input_x_shape[begin_params_axis:]`.
- **beta** (Tensor) - Tensor with shape `input_x_shape[begin_params_axis:]`.
Outputs:
tuple[Tensor], tuple of 3 tensors, the normalized input and the updated parameters.
- **output_x** (Tensor) - The normalized input, has the same type and shape as the `input_x`.
- **mean** (Tensor) - The first `begin_norm_axis` dimensions of `mean` shape is the same as `input_x`,
and the remaining dimensions are 1. Suppose the shape of the `input_x` is :math:`(x_1, x_2, \ldots, x_R)`,
the shape of the `mean` is :math:`(x_1, \ldots, x_{begin_params_axis}, 1, \ldots, 1)`
(when `begin_params_axis=0`, the shape of `mean` is :math:`(1, \ldots, 1)` ).
- **rstd** (Tensor) - Shape is the same as `mean` .
Raises:
TypeError: If `begin_norm_axis` or `begin_params_axis` is not an int.
TypeError: If `epsilon` is not a float.
TypeError: If `input_x`, `gamma` or `beta` is not a Tensor.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.array([[1, 2, 3], [1, 2, 3]]), mindspore.float32)
>>> gamma = Tensor(np.ones([3]), mindspore.float32)
>>> beta = Tensor(np.ones([3]), mindspore.float32)
>>> layer_norm = ops.LayerNormV3()
>>> output, mean, variance = layer_norm(input_x, gamma, beta)
>>> print(output)
[[-0.22474468 1. 2.22474468]
[-0.22474468 1. 2.22474468]]
>>> print(mean)
[[2.]
[2.]]
>>> print(variance)
[[1.2247447]
[1.2247447]]
"""
@prim_arg_register
def __init__(self, begin_norm_axis=1, begin_params_axis=1, epsilon=1e-7):
self._set_prim_arg("begin_norm_axis", begin_norm_axis)
self._set_prim_arg("begin_params_axis", begin_params_axis)
self._set_prim_arg("epsilon", epsilon)
def __call__(self, input_x, gamma, beta):
return super().__call__(input_x, gamma, beta, self.begin_norm_axis, self.begin_params_axis, self.epsilon)
class LeakyReLUExt(Primitive):
r"""
.. code-block::
prim = ops.LeakyReLUExt()
out = prim(input, negative_slope)
is equivalent to
.. code-block::
ops.leaky_relu_ext(input, negative_slope)
Refer to :func:`mindspore.ops.leaky_relu_ext` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('negative_slope', default=0.01),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, negative_slope=0.01):
return _convert_stub(pyboost_leaky_relu_ext(self, [input, negative_slope]))
leaky_relu_ext_op=LeakyReLUExt()
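# Illustrative usage sketch (not part of the generated operator definitions): leaky ReLU keeps
# positive values and scales negative values by `negative_slope`. Assumes MindSpore is installed;
# `_example_leaky_relu_ext` is a hypothetical helper and is never called at import time.
def _example_leaky_relu_ext():
    import mindspore as ms
    from mindspore import Tensor
    x = Tensor([-2.0, 0.0, 3.0], ms.float32)
    # With negative_slope=0.1 the expected result is [-0.2, 0.0, 3.0].
    return leaky_relu_ext_op(x, 0.1)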
class LeakyReLUGradExt(Primitive):
r"""
Computes gradient for the LeakyReLU activation.
Args:
dy (Tensor): Input gradients tensor, has the same dtype and shape as `input`.
input (Tensor): Origin input tensor.
negative_slope (Scalar): Origin negative_slope
is_result(bool): Output input if True.
Returns:
Tensor, has the same dtype and shape as `input`.
"""
__mindspore_signature__ = (
sig.make_sig('dy'),
sig.make_sig('input'),
sig.make_sig('negative_slope', default=0.01),
sig.make_sig('is_result', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, dy, input, negative_slope=0.01, is_result=False):
return _convert_stub(pyboost_leaky_relu_grad_ext(self, [dy, input, negative_slope, is_result]))
leaky_relu_grad_ext_op=LeakyReLUGradExt()
class LessEqual(Primitive):
r"""
.. code-block::
prim = ops.LessEqual()
out = prim(input, other)
is equivalent to
.. code-block::
ops.less_equal(input, other)
Refer to :func:`mindspore.ops.less_equal` for more details.
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
return _convert_stub(pyboost_less_equal(self, [input, other]))
less_equal_op=LessEqual()
class Less(Primitive):
r"""
.. code-block::
prim = ops.Less()
out = prim(input, other)
is equivalent to
.. code-block::
ops.less(input, other)
Refer to :func:`mindspore.ops.less` for more details.
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
return _convert_stub(pyboost_less(self, [input, other]))
less_op=Less()
class LinSpaceExt(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('start'),
sig.make_sig('end'),
sig.make_sig('steps'),
sig.make_sig('dtype', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, start, end, steps, dtype=None):
return _convert_stub(pyboost_lin_space_ext(self, [start, end, steps, dtype if dtype is None else dtype_to_type_id('LinSpaceExt', 'dtype', dtype)]))
lin_space_ext_op=LinSpaceExt()
class LinSpace(Primitive):
r"""
Returns a Tensor of `num` evenly spaced values in the interval from `start` to `stop` (including `start` and
`stop`); the length of the output Tensor is `num`.
Refer to :func:`mindspore.ops.linspace` for more details.
Inputs:
- **start** (Tensor) - Start value of interval, 0-D Tensor with dtype float32 or float64.
- **stop** (Tensor) - Last value of interval, 0-D Tensor with dtype float32 or float64.
- **num** (Union[int, Tensor]) - Number of ticks in the interval, inclusive of `start` and `stop`.
Must be a positive integer. When the input is Tensor, it must be a 0-D Tensor with dtype int32 or int64.
Outputs:
Tensor, has the same shape and dtype as `start`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> start = Tensor(1, mindspore.float32)
>>> stop = Tensor(10, mindspore.float32)
>>> num = 5
>>> output = ops.LinSpace()(start, stop, num)
>>> print(output)
[ 1. 3.25 5.5 7.75 10. ]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, start, stop, num):
return super().__call__(start, stop, num)
lin_space_op=LinSpace()
class Log1p(Primitive):
r"""
.. code-block::
prim = ops.Log1p()
out = prim(input)
is equivalent to
.. code-block::
ops.log1p(input)
Refer to :func:`mindspore.ops.log1p` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
log1p_op=Log1p()
class LogMatrixDeterminant(Primitive):
r"""
Computes the sign and the log of the absolute value of the determinant of one or more square matrices.
Note:
The output type is always real-valued, even if `input` is complex.
Args:
input (Tensor): A matrix to be calculated, its shape is :math:`(..., M, M)`.
The matrix must be at least two dimensions, and the last two
dimensions must be the same size. Data type must be float32, float64, complex64 or complex128.
Returns:
Tensor. The signs of the log determinants. The shape is :math:`input.shape[:-2]`.
Tensor. The absolute values of the log determinants. The shape is :math:`input.shape[:-2]`.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If dtype of `input` not float32, float64, complex64 or complex128.
ValueError: If the last two dimensions of `input` are not the same size.
ValueError: If the dimension of `input` is less than 2.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.array([[[-4.5, -1.5], [7.0, 6.0]], [[2.5, 0.5], [3.0, 9.0]]]), mindspore.float32)
>>> sign, output = ops.LogMatrixDeterminant()(input_x)
>>> print(sign)
[-1. 1.]
>>> print(output)
[2.80336046e+00 3.04452229e+00]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
log_matrix_determinant_op=LogMatrixDeterminant()
class Log(Primitive):
r"""
.. code-block::
prim = ops.Log()
out = prim(input)
is equivalent to
.. code-block::
ops.log(input)
Refer to :func:`mindspore.ops.log` for more details.
"""
@prim_arg_register
def __init__(self):
self.add_prim_attr("cust_aicpu", 'Log')
self.add_prim_attr("base", -1.0)
self.add_prim_attr("scale", 1.0)
self.add_prim_attr("shift", 0.0)
def __call__(self, input):
return _convert_stub(pyboost_log(self, [input]))
log_op=Log()
class LogSoftmaxGrad(Primitive):
r"""
Computes gradient for the Log Softmax activation.
"""
@prim_arg_register
def __init__(self, axis=-1):
self._set_prim_arg("axis", axis)
def __call__(self, logits, grad):
return super().__call__(logits, grad, self.axis)
class LogSoftmax(Primitive):
r"""
.. code-block::
prim = ops.LogSoftmax(axis)
out = prim(logits)
is equivalent to
.. code-block::
ops.log_softmax(logits, axis)
Refer to :func:`mindspore.ops.log_softmax` for more details.
"""
@prim_arg_register
def __init__(self, axis=-1):
self._set_prim_arg("axis", axis)
def __call__(self, logits):
return super().__call__(logits, self.axis)
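# Illustrative sketch (added note, not generated code): log_softmax along `axis`
# subtracts the log-sum-exp over that axis from the logits. A NumPy equivalent of
# the math for a 1-D input:
#
#     import numpy as np
#     logits = np.array([1.0, 2.0, 3.0], dtype=np.float32)
#     out = logits - np.log(np.exp(logits).sum())  # approx. [-2.4076, -1.4076, -0.4076]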
class LogicalAnd(Primitive):
r"""
Computes the "logical AND" of two tensors element-wise.
Refer to :func:`mindspore.ops.logical_and` for more details.
Inputs:
- **x** (Union[Tensor, bool]) - The first input is a bool or a tensor whose data type can be implicitly
converted to bool.
- **y** (Union[Tensor, bool]) - The second input is a bool when the first input is a tensor or
a tensor whose data type can be implicitly converted to bool.
Outputs:
Tensor, the shape is the same as the `x` and `y` after broadcasting, and the data type is bool.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([True, False, True]), mindspore.bool_)
>>> y = Tensor(np.array([True, True, False]), mindspore.bool_)
>>> logical_and = ops.LogicalAnd()
>>> output = logical_and(x, y)
>>> print(output)
[ True False False]
>>> x = Tensor(1, mindspore.bool_)
>>> y = Tensor(0, mindspore.bool_)
>>> output = ops.LogicalAnd()(x, y)
>>> print(output)
False
>>> x = True
>>> y = Tensor(0, mindspore.bool_)
>>> output = ops.LogicalAnd()(x, y)
>>> print(output)
False
>>> x = True
>>> y = Tensor(np.array([True, False]), mindspore.bool_)
>>> output = ops.LogicalAnd()(x, y)
>>> print(output)
[True False]
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, y):
return _convert_stub(pyboost_logical_and(self, [x, y]))
logical_and_op=LogicalAnd()
class LogicalNot(Primitive):
r"""
Computes the "logical NOT" of a tensor element-wise.
Refer to :func:`mindspore.ops.logical_not` for more details.
Inputs:
- **x** (Tensor) - The input tensor.
Outputs:
Tensor, the shape is the same as the `x`, and the dtype is bool.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([True, False, True]), mindspore.bool_)
>>> logical_not = ops.LogicalNot()
>>> output = logical_not(x)
>>> print(output)
[False True False]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return _convert_stub(pyboost_logical_not(self, [input]))
logical_not_op=LogicalNot()
class LogicalOr(Primitive):
r"""
Computes the "logical OR" of two tensors element-wise.
Refer to :func:`mindspore.ops.logical_or` for more details.
Inputs:
- **x** (Union[Tensor, bool]) - The first input is a bool or a tensor whose data type can be implicitly
converted to bool.
- **y** (Union[Tensor, bool]) - The second input is a bool when the first input is a tensor or
a tensor whose data type can be implicitly converted to bool.
Outputs:
Tensor, the shape is the same as the `x` and `y` after broadcasting, and the data type is bool.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([True, False, True]), mindspore.bool_)
>>> y = Tensor(np.array([True, True, False]), mindspore.bool_)
>>> logical_or = ops.LogicalOr()
>>> output = logical_or(x, y)
>>> print(output)
[ True True True]
>>> x = Tensor(1, mindspore.bool_)
>>> y = Tensor(0, mindspore.bool_)
>>> output = ops.LogicalOr()(x, y)
>>> print(output)
True
>>> x = True
>>> y = Tensor(0, mindspore.bool_)
>>> output = ops.LogicalOr()(x, y)
>>> print(output)
True
>>> x = True
>>> y = Tensor(np.array([True, False]), mindspore.bool_)
>>> output = ops.LogicalOr()(x, y)
>>> print(output)
[True True]
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, y):
return _convert_stub(pyboost_logical_or(self, [x, y]))
logical_or_op=LogicalOr()
class LogicalXor(Primitive):
r"""
Computes the "logical XOR" of two tensors element-wise.
.. warning::
This is an experimental API that is subject to change or deletion.
Refer to :func:`mindspore.ops.logical_xor` for more details.
Inputs:
- **x** (Union[Tensor, bool]) - The first input is a bool or a tensor whose data type can be implicitly
converted to bool.
- **y** (Union[Tensor, bool]) - The second input is a bool when the first input is a tensor or
a tensor whose data type can be implicitly converted to bool.
Outputs:
Tensor, the shape is the same as the `x` and `y` after broadcasting, and the data type is bool.
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([True, False, True]), mindspore.bool_)
>>> y = Tensor(np.array([True, True, False]), mindspore.bool_)
>>> logical_xor = ops.LogicalXor()
>>> output = logical_xor(x, y)
>>> print(output)
[ False True True]
>>> x = Tensor(1, mindspore.bool_)
>>> y = Tensor(0, mindspore.bool_)
>>> output = ops.LogicalXor()(x, y)
>>> print(output)
True
>>> x = True
>>> y = Tensor(0, mindspore.bool_)
>>> output = ops.LogicalXor()(x, y)
>>> print(output)
True
>>> x = True
>>> y = Tensor(np.array([True, False]), mindspore.bool_)
>>> output = ops.LogicalXor()(x, y)
>>> print(output)
[False True]
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
return super().__call__(input, other)
logical_xor_op=LogicalXor()
class LogitGrad(Primitive):
r"""
Computes LogitGrad of input element-wise.
Returns:
Tensor, has the same type as input.
"""
@prim_arg_register
def __init__(self, eps=-1.0):
self._set_prim_arg("eps", eps)
def __call__(self, grad, input):
return super().__call__(grad, input, self.eps)
class Logit(Primitive):
r"""
Calculate the logit of a tensor element-wise. Element in `x` is clamped to [eps, 1-eps].
.. warning::
This is an experimental API that is subject to change or deletion.
Refer to :func:`mindspore.ops.logit` for more details.
Args:
eps (float, optional): The epsilon. The input clamp bound is defined as [eps, 1-eps]. Default: ``-1.0`` .
Inputs:
- **x** (Tensor) - The input tensor of type float16, float32 or float64.
Outputs:
Tensor, with the same shape and dtype as the `x`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([0.1, 0.2, 0.3]).astype(np.float32))
>>> op = ops.Logit(eps=1e-5)
>>> output = op(x)
>>> print(output)
[-2.1972246 -1.3862944 -0.8472978]
"""
@prim_arg_register
def __init__(self, eps=-1.0):
self._set_prim_arg("eps", eps)
def __call__(self, input):
return super().__call__(input, self.eps)
class MaskedFill(Primitive):
r"""
.. code-block::
prim = ops.MaskedFill()
out = prim(input_x, mask, value)
is equivalent to
.. code-block::
ops.masked_fill(input_x, mask, value)
Refer to :func:`mindspore.ops.masked_fill` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input_x, mask, value):
return _convert_stub(pyboost_masked_fill(self, [input_x, mask, value]))
masked_fill_op=MaskedFill()
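# Illustrative sketch (added note, not generated code): masked_fill replaces the
# positions where `mask` is True with `value` and leaves the rest unchanged.
# A NumPy equivalent:
#
#     import numpy as np
#     x = np.array([1.0, 2.0, 3.0], dtype=np.float32)
#     mask = np.array([True, False, True])
#     out = np.where(mask, np.float32(0.5), x)  # [0.5, 2.0, 0.5]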
class MatMulExt(Primitive):
r"""
.. code-block::
prim = ops.MatMulExt()
out = prim(input, mat2)
is equivalent to
.. code-block::
ops.matmul_ext(input, mat2)
Refer to :func:`mindspore.ops.matmul_ext` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, mat2):
return _convert_stub(pyboost_matmul_ext(self, [input, mat2]))
matmul_ext_op=MatMulExt()
class MatMul(Primitive):
r"""
Multiplies matrix `a` and matrix `b`.
.. math::
Output_{i j}=\sum_{k=1}^{p} a_{i k} b_{k j}=a_{i 1} b_{1 j}+a_{i 2} b_{2 j}+\cdots+a_{i p} b_{p j}, p\in N
where the :math:`i,j` indicates the output of the i-th row and j-th column element.
Note:
- If :math:`N * M` cannot be divided by 16, the performance will be poor in the Ascend environment.
- The dtypes of the inputs must be the same.
- On Ascend, float64 is not supported.
Args:
transpose_a (bool): If ``True`` , `a` is transposed before multiplication. Default: ``False`` .
transpose_b (bool): If ``True`` , `b` is transposed before multiplication. Default: ``False`` .
Inputs:
- **a** (Tensor) - The first tensor to be multiplied. The shape of the tensor is :math:`(N, C)`. If
`transpose_a` is ``True`` , its shape must be :math:`(C, N)` after transpose.
- **b** (Tensor) - The second tensor to be multiplied. The shape of the tensor is :math:`(C, M)`. If
`transpose_b` is ``True`` , its shape must be :math:`(M, C)` after transpose.
Outputs:
Tensor, the shape of the output tensor is :math:`(N, M)`.
Raises:
TypeError: If `transpose_a` or `transpose_b` is not a bool.
TypeError: If the dtype of `a` and the dtype of `b` are not the same.
ValueError: If the column of matrix dimensions of `a` is not equal to
the row of matrix dimensions of `b`.
ValueError: If length of shape of `a` or `b` is not equal to 2.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> a = Tensor(np.ones(shape=[1, 3]), mindspore.float32)
>>> b = Tensor(np.ones(shape=[3, 4]), mindspore.float32)
>>> matmul = ops.MatMul()
>>> output = matmul(a, b)
>>> print(output)
[[3. 3. 3. 3.]]
"""
@prim_arg_register
def __init__(self, transpose_a=False, transpose_b=False):
self._set_prim_arg("transpose_a", transpose_a)
self._set_prim_arg("transpose_b", transpose_b)
def __call__(self, input, mat2):
return _convert_stub(pyboost_matmul(self, [input, mat2, self.transpose_a, self.transpose_b]))
class MatrixDeterminant(Primitive):
r"""
Calculates the value of the determinant for one or more square matrices.
Refer to :func:`mindspore.ops.det` for more details.
Inputs:
- **x** (Tensor) - A matrix to be calculated. The matrix must be at least two dimensions, and the last two
dimensions must be the same size.
Outputs:
Tensor, the shape is `x_shape[:-2]`, and the dtype is the same as `x`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.array([[[-4.5, -1.5], [7.0, 6.0]], [[2.5, 0.5], [3.0, 9.0]]]), mindspore.float32)
>>> op = ops.MatrixDeterminant()
>>> output = op(input_x)
>>> print(output)
[-16.5 21. ]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
matrix_determinant_op=MatrixDeterminant()
class MatrixExp(Primitive):
r"""
.. code-block::
prim = ops.MatrixExp()
out = prim(input)
is equivalent to
.. code-block::
ops.matrix_exp(input)
Refer to :func:`mindspore.ops.matrix_exp` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
matrix_exp_op=MatrixExp()
class Max(Primitive):
r"""
.. code-block::
prim = ops.Max()
out = prim(input)
is equivalent to
.. code-block::
ops.max_(input)
Refer to :func:`mindspore.ops.max_` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return _convert_stub(pyboost_max(self, [input]))
max_op=Max()
class MaxPoolGradWithIndices(Primitive):
r"""
Gradients of the MaxPoolWithIndices operation.
"""
@prim_arg_register
def __init__(self, kernel_size, strides=None, pads=0, dilation=(1, 1), ceil_mode=False, argmax_type=mstype.int64):
self._set_prim_arg_with_handler("kernel_size", kernel_size, to_kernel_size)
self._set_prim_arg_with_handler("strides", strides, to_strides)
self._set_prim_arg_with_handler("pads", pads, to_output_padding)
self._set_prim_arg_with_handler("dilation", dilation, to_dilations)
self._set_prim_arg("ceil_mode", ceil_mode)
self._set_prim_arg_with_handler("argmax_type", argmax_type, dtype_to_type_id)
def __call__(self, x, grad, argmax):
return _convert_stub(pyboost_max_pool_grad_with_indices(self, [x, grad, argmax, self.kernel_size, self.strides, self.pads, self.dilation, self.ceil_mode, self.argmax_type]))
class MaxPoolGradWithMask(Primitive):
r"""
Gradients of the MaxPoolWithMask operation.
"""
@prim_arg_register
def __init__(self, kernel_size, strides=None, pads=0, dilation=(1, 1), ceil_mode=False, argmax_type=mstype.int64):
self._set_prim_arg_with_handler("kernel_size", kernel_size, to_kernel_size)
self._set_prim_arg_with_handler("strides", strides, to_strides)
self._set_prim_arg_with_handler("pads", pads, to_output_padding)
self._set_prim_arg_with_handler("dilation", dilation, to_dilations)
self._set_prim_arg("ceil_mode", ceil_mode)
self._set_prim_arg_with_handler("argmax_type", argmax_type, dtype_to_type_id)
def __call__(self, x, grad, mask):
return _convert_stub(pyboost_max_pool_grad_with_mask(self, [x, grad, mask, self.kernel_size, self.strides, self.pads, self.dilation, self.ceil_mode, self.argmax_type]))
class MaxPoolWithIndices(Primitive):
r"""
Performs max pooling on the input Tensor and returns both max values and indices.
Typically the input is of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})`, MaxPool outputs
regional maximum in the :math:`(H_{in}, W_{in})`-dimension. Given kernel size
:math:`(h_{ker}, w_{ker})` and stride :math:`(s_0, s_1)`, the operation is as follows:
.. math::
\text{output}(N_i, C_j, h, w) = \max_{m=0, \ldots, h_{ker}-1} \max_{n=0, \ldots, w_{ker}-1}
\text{input}(N_i, C_j, s_0 \times h + m, s_1 \times w + n)
.. warning::
This is an experimental API that is subject to change or deletion. Only support on Atlas training series.
Args:
kernel_size (Union[int, tuple[int]]): The size of kernel used to take the maximum value and argmax
value, is an int number that represents height and width of the kernel, or a tuple of
two int numbers that represent height and width respectively.
strides (Union[int, tuple[int]], optional): The distance of kernel moving, an int number that represents
not only the height of movement but also the width of movement, or a tuple of two int numbers that
represent height and width of movement respectively. Default: ``None`` , meaning that
`strides = kernel_size`.
pads (Union[int, tuple[int]], optional): The amount of implicit padding added to both sides of the input:
an int number that pads the height and width equally, or a tuple of two int numbers that
specify the height and width padding respectively.
Default: 0.
dilation (Union[int, tuple[int]], optional): Control the stride of elements in the kernel. Default: ``(1, 1)`` .
ceil_mode (bool, optional): Whether to use ceil instead of floor to calculate output shape. Default: ``False`` .
argmax_type (mindspore.dtype, optional): The dtype for argmax.
Default: ``mstype.int64`` . [Disabled in Ascend.]
Inputs:
- **x** (Tensor) - Tensor of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})` with data type of float32 in Ascend.
Outputs:
Tuple of 2 Tensors, representing the maxpool result and the indices where the maximum values are located.
- **output** (Tensor) - Maxpooling result, with shape :math:`(N_{out}, C_{out}, H_{out}, W_{out})`.
It has the same data type as `x`.
.. math::
H_{out} = \left\lfloor\frac{H_{in} + 2 * \text{pads[0]} - \text{dilation[0]}
\times (\text{kernel_size[0]} - 1) - 1}{\text{strides[0]}} + 1\right\rfloor
.. math::
W_{out} = \left\lfloor\frac{W_{in} + 2 * \text{pads[1]} - \text{dilation[1]}
\times (\text{kernel_size[1]} - 1) - 1}{\text{strides[1]}} + 1\right\rfloor
- **argmax** (Tensor) - Index corresponding to the maximum value. Data type is int32 in Ascend.
Raises:
TypeError: If `x` is not a Tensor.
ValueError: If length of shape of `x` is not equal to 4.
TypeError: If `kernel_size` , `strides` , `pads` or `dilation` is not int or tuple.
ValueError: If `kernel_size`, `strides` or `dilation` is less than 1.
ValueError: If `pads` is less than 0.
ValueError: If `pads` is more than half of `kernel_size`.
TypeError: If `ceil_mode` is not bool.
Supported Platforms:
``Ascend``
"""
@prim_arg_register
def __init__(self, kernel_size, strides=None, pads=0, dilation=(1, 1), ceil_mode=False, argmax_type=mstype.int64):
self._set_prim_arg_with_handler("kernel_size", kernel_size, to_kernel_size)
self._set_prim_arg_with_handler("strides", strides, to_strides)
self._set_prim_arg_with_handler("pads", pads, to_output_padding)
self._set_prim_arg_with_handler("dilation", dilation, to_dilations)
self._set_prim_arg("ceil_mode", ceil_mode)
self._set_prim_arg_with_handler("argmax_type", argmax_type, dtype_to_type_id)
def __call__(self, x):
return _convert_stub(pyboost_max_pool_with_indices(self, [x, self.kernel_size, self.strides, self.pads, self.dilation, self.ceil_mode, self.argmax_type]))
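# Illustrative sketch (added note, not generated code): the output height/width
# follow the floor formula in the docstring above. For example, with H_in=32,
# kernel_size=3, strides=2, pads=1 and dilation=1:
#
#     import math
#     h_in, k, s, p, d = 32, 3, 2, 1, 1
#     h_out = math.floor((h_in + 2 * p - d * (k - 1) - 1) / s + 1)  # 16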
class MaxPoolWithMask(Primitive):
r"""
Performs max pooling on the input Tensor and returns both max values and mask.
Typically the input is of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})`, MaxPool outputs
regional maximum in the :math:`(H_{in}, W_{in})`-dimension. Given kernel size
:math:`(h_{ker}, w_{ker})` and stride :math:`(s_0, s_1)`, the operation is as follows:
.. math::
\text{output}(N_i, C_j, h, w) = \max_{m=0, \ldots, h_{ker}-1} \max_{n=0, \ldots, w_{ker}-1}
\text{input}(N_i, C_j, s_0 \times h + m, s_1 \times w + n)
.. warning::
This is an experimental API that is subject to change or deletion. Only support on Atlas training series.
Args:
kernel_size (Union[int, tuple[int]]): The size of kernel used to take the maximum value and argmax
value, is an int number that represents height and width of the kernel, or a tuple of
two int numbers that represent height and width respectively.
strides (Union[int, tuple[int]], optional): The distance of kernel moving, an int number that represents
not only the height of movement but also the width of movement, or a tuple of two int numbers that
represent height and width of movement respectively. Default: ``None`` , meaning that `strides = kernel_size`.
pads (Union[int, tuple[int]], optional): The amount of implicit padding added to both sides of the input:
an int number that pads the height and width equally, or a tuple of two int numbers that
specify the height and width padding respectively.
Default: 0.
dilation (Union[int, tuple[int]], optional): Control the stride of elements in the kernel.
Default: ``(1, 1)`` .
ceil_mode (bool, optional): Whether to use ceil instead of floor to calculate output shape.
Default: ``False`` .
argmax_type (mindspore.dtype, optional): The dtype for argmax.
Default: ``mstype.int64`` . [Disabled in Ascend.]
Inputs:
- **x** (Tensor) - Tensor of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})` with data type of float16
and float32 in Ascend.
Outputs:
Tuple of 2 Tensors, representing the maxpool result and the corresponding mask.
- **output** (Tensor) - Maxpooling result, with shape :math:`(N_{out}, C_{out}, H_{out}, W_{out})`.
It has the same data type as `x`.
.. math::
H_{out} = \left\lfloor\frac{H_{in} + 2 * \text{pads[0]} - \text{dilation[0]}
\times (\text{kernel_size[0]} - 1) - 1}{\text{strides[0]}} + 1\right\rfloor
.. math::
W_{out} = \left\lfloor\frac{W_{in} + 2 * \text{pads[1]} - \text{dilation[1]}
\times (\text{kernel_size[1]} - 1) - 1}{\text{strides[1]}} + 1\right\rfloor
- **mask** (Tensor) - Maxpooling mask. Data type is int8 in Ascend.
Raises:
TypeError: If `x` is not a Tensor.
ValueError: If length of shape of `x` is not equal to 4.
TypeError: If `kernel_size` , `strides` , `pads` or `dilation` is not int or tuple.
ValueError: If `kernel_size`, `strides` or `dilation` is less than 1.
ValueError: If `pads` is less than 0.
ValueError: If `pads` is more than half of `kernel_size`.
TypeError: If `ceil_mode` is not bool.
Supported Platforms:
``Ascend``
"""
@prim_arg_register
def __init__(self, kernel_size, strides=None, pads=0, dilation=(1, 1), ceil_mode=False, argmax_type=mstype.int64):
self._set_prim_arg_with_handler("kernel_size", kernel_size, to_kernel_size)
self._set_prim_arg_with_handler("strides", strides, to_strides)
self._set_prim_arg_with_handler("pads", pads, to_output_padding)
self._set_prim_arg_with_handler("dilation", dilation, to_dilations)
self._set_prim_arg("ceil_mode", ceil_mode)
self._set_prim_arg_with_handler("argmax_type", argmax_type, dtype_to_type_id)
def __call__(self, x):
return _convert_stub(pyboost_max_pool_with_mask(self, [x, self.kernel_size, self.strides, self.pads, self.dilation, self.ceil_mode, self.argmax_type]))
class MaximumGradGrad(Primitive):
r"""
Grad for maximum grad.
"""
@prim_arg_register
def __init__(self, grad_x=True, grad_y=True):
self._set_prim_arg("grad_x", grad_x)
self._set_prim_arg("grad_y", grad_y)
def __call__(self, x, y, dx, dy):
return super().__call__(x, y, dx, dy, self.grad_x, self.grad_y)
class MaximumGrad(Primitive):
r"""
Grad for maximum.
"""
@prim_arg_register
def __init__(self, grad_x=True, grad_y=True):
self._set_prim_arg("grad_x", grad_x)
self._set_prim_arg("grad_y", grad_y)
def __call__(self, x, y, grads):
return super().__call__(x, y, grads, self.grad_x, self.grad_y)
class Maximum(Primitive):
r"""
.. code-block::
prim = ops.Maximum()
out = prim(input, other)
is equivalent to
.. code-block::
ops.maximum(input, other)
Refer to :func:`mindspore.ops.maximum` for more details.
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
return _convert_stub(pyboost_maximum(self, [input, other]))
maximum_op=Maximum()
class MeanExt(Primitive):
r"""
.. code-block::
prim = ops.MeanExt()
out = prim(input, axis, keep_dims, dtype)
is equivalent to
.. code-block::
ops.mean_ext(input, axis, keep_dims, dtype)
Refer to :func:`mindspore.ops.mean_ext` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('axis', default=None),
sig.make_sig('keep_dims', default=False),
sig.make_sig('dtype', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, axis=None, keep_dims=False, dtype=None):
return _convert_stub(pyboost_mean_ext(self, [input, axis, keep_dims, dtype if dtype is None else dtype_to_type_id('MeanExt', 'dtype', dtype)]))
mean_ext_op=MeanExt()
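# Illustrative sketch (added note, not generated code): MeanExt reduces over `axis`
# (all axes when axis is None) and keeps the reduced axis when keep_dims=True.
# A NumPy equivalent of the reduction:
#
#     import numpy as np
#     x = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
#     np.mean(x)                         # 2.5
#     np.mean(x, axis=1, keepdims=True)  # [[1.5], [3.5]]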
class Min(Primitive):
r"""
.. code-block::
prim = ops.Min()
out = prim(input)
is equivalent to
.. code-block::
ops.min_(input)
Refer to :func:`mindspore.ops.min_` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return _convert_stub(pyboost_min(self, [input]))
min_op=Min()
class MinimumGrad(Primitive):
r"""
Grad for minimum.
"""
@prim_arg_register
def __init__(self, grad_x=True, grad_y=True):
self._set_prim_arg("grad_x", grad_x)
self._set_prim_arg("grad_y", grad_y)
def __call__(self, x1, x2, grads):
return super().__call__(x1, x2, grads, self.grad_x, self.grad_y)
class Minimum(Primitive):
r"""
.. code-block::
prim = ops.Minimum()
out = prim(input, other)
is equivalent to
.. code-block::
ops.minimum(input, other)
Refer to :func:`mindspore.ops.minimum` for more details.
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
return _convert_stub(pyboost_minimum(self, [input, other]))
minimum_op=Minimum()
class MoeFinalizeRouting(Primitive):
r"""
.. code-block::
prim = ops.MoeFinalizeRouting()
out = prim(expanded_x, x1, x2, bias, scales, expanded_row_idx, expanded_expert_idx)
is equivalent to
.. code-block::
ops.moe_finalize_routing(expanded_x, x1, x2, bias, scales, expanded_row_idx, expanded_expert_idx)
Refer to :func:`mindspore.ops.moe_finalize_routing` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('expanded_x'),
sig.make_sig('x1'),
sig.make_sig('x2', default=None),
sig.make_sig('bias', default=None),
sig.make_sig('scales', default=None),
sig.make_sig('expanded_row_idx', default=None),
sig.make_sig('expanded_expert_idx', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, expanded_x, x1, x2=None, bias=None, scales=None, expanded_row_idx=None, expanded_expert_idx=None):
return super().__call__(expanded_x, x1, x2, bias, scales, expanded_row_idx, expanded_expert_idx)
moe_finalize_routing_op=MoeFinalizeRouting()
class Mul(Primitive):
r"""
.. code-block::
prim = ops.Mul()
out = prim(input, other)
is equivalent to
.. code-block::
ops.mul(input, other)
Refer to :func:`mindspore.ops.mul` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_READ, dtype=sig.sig_dtype.T),
sig.make_sig('other', sig.sig_rw.RW_READ, dtype=sig.sig_dtype.T),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
return _convert_stub(pyboost_mul(self, [input, other]))
mul_op=Mul()
class Mv(Primitive):
r"""
.. code-block::
prim = ops.Mv()
out = prim(input, vec)
is equivalent to
.. code-block::
ops.mv(input, vec)
Refer to :func:`mindspore.ops.mv` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, vec):
return _convert_stub(pyboost_mv(self, [input, vec]))
mv_op=Mv()
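# Illustrative sketch (added note, not generated code): Mv multiplies a 2-D matrix
# by a 1-D vector. A NumPy equivalent:
#
#     import numpy as np
#     mat = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=np.float32)
#     vec = np.array([1.0, 0.0, -1.0], dtype=np.float32)
#     out = mat @ vec  # [-2.0, -2.0]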
class NanToNum(Primitive):
r"""
Replaces `NaN`, positive infinity and negative infinity values in the input Tensor with the values
specified by `nan`, `posinf` and `neginf` respectively.
.. warning::
This is an experimental API that is subject to change or deletion.
Refer to :func:`mindspore.ops.nan_to_num` for more details.
Args:
nan (float, optional): The value to replace `NaN`. Default value is ``None`` .
posinf (float, optional): If a Number, the value to replace positive infinity values with. If None, positive
infinity values are replaced with the greatest finite value representable by `x`'s dtype.
Default value is ``None`` .
neginf (float, optional): If a Number, the value to replace negative infinity values with. If None, negative
infinity values are replaced with the lowest finite value representable by `x`'s dtype.
Default value is ``None`` .
Inputs:
- **x** (Tensor) - Input Tensor of any dimensions. Supported data types: float32 or float16.
Outputs:
Tensor, has the same shape and dtype as the `x`.
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> nan_to_num = ops.NanToNum()
>>> x = Tensor(np.array([float('nan'), float('inf'), -float('inf'), 3.14]), mindspore.float32)
>>> output = nan_to_num(x)
>>> print(output)
[ 0.0000000e+00 3.4028235e+38 -3.4028235e+38 3.1400001e+00]
"""
@prim_arg_register
def __init__(self, nan=None, posinf=None, neginf=None):
self._set_prim_arg("nan", nan)
self._set_prim_arg("posinf", posinf)
self._set_prim_arg("neginf", neginf)
def __call__(self, x):
return super().__call__(x, self.nan, self.posinf, self.neginf)
class Neg(Primitive):
r"""
.. code-block::
prim = ops.Neg()
out = prim(input)
is equivalent to
.. code-block::
ops.neg(input)
Refer to :func:`mindspore.ops.neg` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return _convert_stub(pyboost_neg(self, [input]))
neg_op=Neg()
class NextAfter(Primitive):
r"""
.. code-block::
prim = ops.NextAfter()
out = prim(input, other)
is equivalent to
.. code-block::
ops.nextafter(input, other)
Refer to :func:`mindspore.ops.nextafter` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
return super().__call__(input, other)
next_after_op=NextAfter()
class NLLLossGrad(Primitive):
r"""
Computes the gradients of `NLLLoss`.
"""
@prim_arg_register
def __init__(self, reduction='mean', ignore_index=-100):
self._set_prim_arg_with_handler("reduction", reduction, str_to_enum)
self._set_prim_arg("ignore_index", ignore_index)
def __call__(self, logits, loss_grad, labels, weight, total_weight):
return super().__call__(logits, loss_grad, labels, weight, total_weight, self.reduction, self.ignore_index)
class NLLLoss(Primitive):
r"""
Gets the negative log likelihood loss between logits and labels.
The nll loss with :math:`reduction = none` can be described as:
.. math::
\ell(x, t)=L=\left\{l_{1}, \ldots, l_{N}\right\}^{\top},
\quad l_{n}=-w_{t_{n}} x_{n, t_{n}},
\quad w_{c}=\text { weight }[c] \cdot 1
where :math:`x` is the logits, :math:`t` is the labels, :math:`w` is the weight,
N is the batch size, :math:`c` belonging to [0, C-1] is class index, where :math:`C` is the number of classes.
If :math:`reduction \neq none` (default ``'mean'`` ), then
.. math::
\ell(x, t)=\left\{\begin{array}{ll}
\sum_{n=1}^{N} \frac{1}{\sum_{n=1}^{N} w_{t n}} l_{n}, & \text { if reduction }=\text { 'mean'; } \\
\sum_{n=1}^{N} l_{n}, & \text { if reduction }=\text { 'sum' }
\end{array}\right.
Args:
reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
``'sum'`` . Default: ``'mean'`` .
- ``'none'``: no reduction will be applied.
- ``'mean'``: compute and return the weighted mean of elements in the output.
- ``'sum'``: the output elements will be summed.
ignore_index (int): Specifies a target value that is ignored
and does not contribute to the input gradient. Default: ``-100`` .
Inputs:
- **logits** (Tensor) - Input logits, with shape :math:`(N, C)`. Data type only supports float32 or float16.
- **labels** (Tensor) - Ground truth labels, with shape :math:`(N,)`, where each value belong to
:math:`[0, C-1]`. Data type only supports int32 or int64.
- **weight** (Tensor) - The rescaling weight to each class, with shape :math:`(C,)` and data type only
supports float32 or float16.
Outputs:
Tuple of 2 tensors composed with `loss` and `total_weight`.
- **loss** (Tensor) - When `reduction` is ``'none'`` and `logits` is a 2D tensor,
the `loss` shape is :math:`(N,)`. Otherwise, the `loss` is a scalar.
The data type is the same as that of `logits`.
- **total_weight** (Tensor) - The `total_weight` is a scalar. The data type is the same as that of `weight`.
Raises:
TypeError: If dtype of `logits` or `weight` is neither float16 nor float32.
TypeError: If dtype of `labels` is neither int32 nor int64.
ValueError: If `logits` is not a one- or two-dimensional tensor, or if `labels` and `weight` are not
one-dimensional tensors.
When `logits` is a two-dimensional tensor, its first dimension must equal the length of `labels`,
and its second dimension must equal the length of `weight`.
When `logits` is a one-dimensional tensor, the dimensions of `logits`, `labels`
and `weight` must be equal to each other.
ValueError: If the value of `labels` exceed :math:`[0, C-1]`, where :math:`C` is the number of classes.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> logits = Tensor(np.array([[0.5488135, 0.71518934],
... [0.60276335, 0.5448832],
... [0.4236548, 0.6458941]]).astype(np.float32))
>>> labels = Tensor(np.array([0, 0, 0]).astype(np.int32))
>>> weight = Tensor(np.array([0.3834415, 0.79172504]).astype(np.float32))
>>> nll_loss = ops.NLLLoss(reduction="mean")
>>> loss, weight = nll_loss(logits, labels, weight)
>>> print(loss)
-0.52507716
>>> print(weight)
1.1503246
"""
@prim_arg_register
def __init__(self, reduction='mean', ignore_index=-100):
self._set_prim_arg_with_handler("reduction", reduction, str_to_enum)
self._set_prim_arg("ignore_index", ignore_index)
def __call__(self, logits, labels, weight):
return super().__call__(logits, labels, weight, self.reduction, self.ignore_index)
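# Illustrative sketch (added note, not generated code): the 'mean' reduction in the
# docstring example above can be reproduced by hand; each sample contributes
# -weight[label_n] * logits[n, label_n], and the sum is divided by the total weight:
#
#     import numpy as np
#     picked = np.array([0.5488135, 0.60276335, 0.4236548], dtype=np.float32)  # logits[n, 0]
#     w = np.float32(0.3834415)                                                # weight[0]
#     loss = -(w * picked).sum() / (3 * w)  # approx. -0.5250772
#     total_weight = 3 * w                  # approx. 1.1503245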
class NonZero(Primitive):
r"""
.. code-block::
prim = ops.NonZero()
out = prim(input)
is equivalent to
.. code-block::
ops.nonzero(input)
Refer to :func:`mindspore.ops.nonzero` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
non_zero_op=NonZero()
class Norm(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input_x'),
sig.make_sig('ord', default=None),
sig.make_sig('dim', default=None),
sig.make_sig('keepdim', default=False),
sig.make_sig('dtype', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input_x, ord=None, dim=None, keepdim=False, dtype=None):
return _convert_stub(pyboost_norm(self, [input_x, ord, dim, keepdim, dtype if dtype is None else dtype_to_type_id('Norm', 'dtype', dtype)]))
norm_op=Norm()
class NormalExt(Primitive):
r"""
Generates random numbers according to the Normal random number distribution.
Inputs:
- **mean** (Union[float, Tensor]) - The mean is a tensor with the mean of each output element's normal distribution.
- **std** (Union[float, Tensor]) - The tensor of per-element standard deviations.
- **generator** (Generator, optional) - Mindspore generator.
Outputs:
- **output** (Tensor) - Has the same type and shape as `mean`.
Raises:
TypeError: If `mean` or `std` is not Union[float, Tensor].
Supported Platforms:
``Ascend``
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, mean, std, seed, offset):
return _convert_stub(pyboost_normal_ext(self, [mean, std, seed, offset]))
normal_ext_op=NormalExt()
class NotEqual(Primitive):
r"""
.. code-block::
prim = ops.NotEqual()
out = prim(input, other)
is equivalent to
.. code-block::
ops.not_equal(input, other)
Refer to :func:`mindspore.ops.not_equal` for more details.
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
return _convert_stub(pyboost_not_equal(self, [input, other]))
not_equal_op=NotEqual()
class NPUClearFloatStatusV2(Primitive):
r"""
Compare with `NPUClearStatus`.
Clear the flag for storage overflow status. This flag is located in a register at a
fixed address on the `Ascend` device, and overflow information is automatically
written to this register.
The flag is a one-dimensional Tensor with shape :math:`(8,)` and data type `mindspore.dtype.int32`.
If the value of flag is zero, no overflow has occurred, otherwise, overflow.
When performing overflow detection on the network, you should first call `NPUClearFloatStatusV2` to
reset the register before the detection, and then call `NPUGetFloatStatusV2` to get the register
status after the network execution is completed.
Note:
- In order to avoid mis-optimization by the compiler, additional input and output are added to
this operator. The input and output are defined as Tensors with shape :math:`(8,)` and data type
`mindspore.dtype.int32`; they are meaningless.
- Since this op lacks contextual dependencies with parameters in the network,
:class:`mindspore.ops.Depend` needs to be used to ensure order of execution.
Inputs:
- **input** Tensor, an additional input created to avoid compiler optimization, is specified as shape :math:`(8,)`,
data type is `mindspore.dtype.int32`, and has no actual meaning.
Outputs:
- **output** Tensor, shape and data type are the same as input, meaningless.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore as ms
>>> import numpy as np
>>> from mindspore import ops, nn, Tensor
>>> from mindspore.ops import NPUGetFloatStatusV2, NPUClearFloatStatusV2
>>> class Net(nn.Cell):
... def __init__(self):
... super().__init__()
... self.clear_status = NPUClearFloatStatusV2()
... self.get_status = NPUGetFloatStatusV2()
... self.sub = ops.Sub()
... self.neg = ops.Neg()
... self.equal = ops.Equal()
... self.reduce_all = ops.ReduceAll(keep_dims=False)
... self.base = Tensor([0], dtype=ms.int32)
... self.logic_not = ops.LogicalNot()
...
... def construct(self, x):
... init = Tensor([0]*8, dtype=ms.int32)
... clear_status = self.clear_status(init)
... x = ops.depend(x, clear_status)
... res = self.sub(x, self.neg(x))
... init = ops.depend(init, res)
... get_status = self.get_status(init)
... flag = self.equal(self.base, get_status)
... overall_finite = self.reduce_all(flag)
... overflow = self.logic_not(overall_finite)
... return overflow
...
>>> value = 65504
>>> data = np.full((2, 3), value, dtype=np.float16)
>>> x = Tensor(data, dtype=ms.float16)
>>> net = Net()
>>> res = net(x)
>>> print(res)
True
>>> value = 10
>>> data = np.full((2, 3), value, dtype=np.float16)
>>> x = Tensor(data, dtype=ms.float16)
>>> net = Net()
>>> res = net(x)
>>> print(res)
False
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
npu_clear_float_status_v2_op=NPUClearFloatStatusV2()
class NPUGetFloatStatusV2(Primitive):
r"""
Get the flag for storage overflow status. This flag is located in a register at a
fixed address on the `Ascend` device, and overflow information is automatically
written to this register.
The flag is a one-dimensional Tensor with shape :math:`(8,)` and data type `mindspore.dtype.int32`.
If the value of flag is zero, no overflow has occurred, otherwise, overflow.
When performing overflow detection on the network, you should first call `NPUClearFloatStatusV2` to
reset the register before the detection, and then call `NPUGetFloatStatusV2` to get the register
status after the network execution is completed.
Note:
- In order to avoid mis-optimization by the compiler, additional input is added to
this operator. The input is defined as a Tensor with shape :math:`(8,)` and data type
`mindspore.dtype.int32`; it is meaningless.
- Since this op lacks contextual dependencies with parameters in the network,
:class:`mindspore.ops.Depend` needs to be used to ensure order of execution.
Inputs:
- **input** Tensor, an additional input created to avoid compiler optimization, is specified as shape :math:`(8,)`,
data type is `mindspore.dtype.int32`, and has no actual meaning.
Usually use the output of `NPUClearFloatStatusV2`.
Outputs:
- **output** Tensor, shape and data type are the same as input. If all are zero, it means no overflow, otherwise, overflow.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore as ms
>>> import numpy as np
>>> from mindspore import ops, nn, Tensor
>>> from mindspore.ops import NPUGetFloatStatusV2, NPUClearFloatStatusV2
>>> class Net(nn.Cell):
... def __init__(self):
... super().__init__()
... self.clear_status = NPUClearFloatStatusV2()
... self.get_status = NPUGetFloatStatusV2()
... self.sub = ops.Sub()
... self.neg = ops.Neg()
... self.equal = ops.Equal()
... self.reduce_all = ops.ReduceAll(keep_dims=False)
... self.base = Tensor([0], dtype=ms.int32)
... self.logic_not = ops.LogicalNot()
...
... def construct(self, x):
... init = Tensor([0]*8, dtype=ms.int32)
... clear_status = self.clear_status(init)
... x = ops.depend(x, clear_status)
... res = self.sub(x, self.neg(x))
... init = ops.depend(init, res)
... get_status = self.get_status(init)
... flag = self.equal(self.base, get_status)
... overall_finite = self.reduce_all(flag)
... overflow = self.logic_not(overall_finite)
... return overflow
...
>>> value = 65504
>>> data = np.full((2, 3), value, dtype=np.float16)
>>> x = Tensor(data, dtype=ms.float16)
>>> net = Net()
>>> res = net(x)
>>> print(res)
True
>>> value = 10
>>> data = np.full((2, 3), value, dtype=np.float16)
>>> x = Tensor(data, dtype=ms.float16)
>>> net = Net()
>>> res = net(x)
>>> print(res)
False
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
npu_get_float_status_v2_op=NPUGetFloatStatusV2()
class OneHotExt(Primitive):
r"""
Computes a one-hot tensor.
The locations represented by indices in `tensor` take value `1`, while all
other locations take value `0`.
Args:
- **tensor** (Tensor) - A tensor of indices. Tensor of shape :math:`(X_0, \ldots, X_n)`.
Data type must be int32 or int64.
- **num_classes** (int) - A scalar defining the depth of the one-hot dimension.
Returns:
Tensor, one-hot tensor.
Raises:
TypeError: If `num_classes` is not an int.
TypeError: If dtype of `tensor` is not int32 or int64.
ValueError: If `num_classes` is less than 0.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> import mindspore.ops as ops
>>> from mindspore import Tensor
>>> tensor = Tensor(np.array([0, 1, 2]), mindspore.int32)
>>> num_classes = 3
>>> output = ops.extend.one_hot(tensor, num_classes)
>>> print(output)
[[1. 0. 0.]
[0. 1. 0.]
[0. 0. 1.]]
"""
@prim_arg_register
def __init__(self, axis=-1):
self._set_prim_arg("axis", axis)
def __call__(self, tensor, num_classes, on_value, off_value):
return _convert_stub(pyboost_one_hot_ext(self, [tensor, num_classes, on_value, off_value, self.axis]))
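# Illustrative sketch (added note, not generated code): the indices in `tensor`
# select which position along the new axis receives `on_value`; the rest receive
# `off_value`. A NumPy equivalent for indices [0, 1, 2], num_classes=3, on=1.0, off=0.0:
#
#     import numpy as np
#     indices = np.array([0, 1, 2])
#     out = np.where(np.arange(3) == indices[:, None], 1.0, 0.0)  # the 3x3 identity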
class OneHot(Primitive):
r"""
Computes a one-hot tensor.
The locations represented by indices in `indices` take value `on_value`, while all
other locations take value `off_value`.
Note:
If the input indices is rank `N`, the output will have rank `N+1`. The new axis is created at dimension `axis`.
On Ascend, if `on_value` is Int64 dtype, `indices` must be Int64 dtype, and the value for `on_value` and
`off_value` can only be 1 and 0.
Args:
axis (int): Position to insert the value. e.g. If shape of `indices` is :math:`(N, C)`, and `axis` is -1,
the output shape will be :math:`(N, C, D)`, If `axis` is 0, the output shape will be :math:`(D, N, C)`.
Default: ``-1`` .
Inputs:
- **indices** (Tensor) - A tensor of indices. Tensor of shape :math:`(X_0, \ldots, X_n)`.
Data type must be int32 or int64.
- **depth** (Union[int, Tensor]) - A scalar defining the depth of the one-hot dimension.
- **on_value** (Tensor) - A value to fill in output when `indices[j] = i`.
- **off_value** (Tensor) - A value to fill in output when `indices[j] != i`.
It has the same data type as `on_value`.
Outputs:
Tensor, one-hot tensor. Tensor of shape :math:`(X_0, \ldots, X_{axis}, \text{depth} ,X_{axis+1}, \ldots, X_n)`.
Raises:
TypeError: If `axis` or `depth` is not an int.
TypeError: If dtype of `on_value` is not int32, int64, float16 or float32.
TypeError: If dtype of `indices` is not int32 or int64.
TypeError: If `indices`, `on_value` or `off_value` is not a Tensor.
ValueError: If `axis` is not in range [-1, len(indices_shape)].
ValueError: If `depth` is less than 0.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> indices = Tensor(np.array([0, 1, 2]), mindspore.int32)
>>> depth, on_value, off_value = 3, Tensor(1.0, mindspore.float32), Tensor(0.0, mindspore.float32)
>>> onehot = ops.OneHot()
>>> output = onehot(indices, depth, on_value, off_value)
>>> print(output)
[[1. 0. 0.]
[0. 1. 0.]
[0. 0. 1.]]
"""
@prim_arg_register
def __init__(self, axis=-1):
self._set_prim_arg("axis", axis)
def __call__(self, indices, depth, on_value, off_value):
return super().__call__(indices, depth, on_value, off_value, self.axis)
class OnesLikeExt(Primitive):
r"""
Returns a Tensor filled with the value 1, with the same shape and data type as the input.
Refer to :func:`mindspore.ops.ones_like` for more details.
Args:
- **input_x** (Tensor) - Tensor of any dimension.
Returns:
Tensor, has the same shape and type as `input_x` but filled with ones.
Supported Platforms:
``Ascend``
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('dtype', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dtype=None):
return _convert_stub(pyboost_ones_like_ext(self, [input, dtype if dtype is None else dtype_to_type_id('OnesLikeExt', 'dtype', dtype)]))
ones_like_ext_op=OnesLikeExt()
class OnesLike(Primitive):
r"""
Returns a Tensor filled with the value 1, with the same shape and data type as the input.
Refer to :func:`mindspore.ops.ones_like` for more details.
Inputs:
- **input_x** (Tensor) - Tensor of any dimension.
Outputs:
Tensor, has the same shape and type as `input_x` but filled with ones.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
>>> output = ops.OnesLike()(input_x)
>>> print(output)
[[1 1]
[1 1]]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x):
return super().__call__(x)
ones_like_op=OnesLike()
class PagedAttentionMask(Primitive):
r"""
.. code-block::
prim = ops.PagedAttentionMask(head_num, scale_value, kv_head_num)
out = prim(query, key_cache, value_cache, block_tables, context_lens, alibi_mask)
is equivalent to
.. code-block::
ops.paged_attention_mask(query, key_cache, value_cache, block_tables, context_lens, alibi_mask, head_num, scale_value, kv_head_num)
Refer to :func:`mindspore.ops.paged_attention_mask` for more details.
"""
@prim_arg_register
def __init__(self, head_num, scale_value, kv_head_num):
self._set_prim_arg("head_num", head_num)
self._set_prim_arg("scale_value", scale_value)
self._set_prim_arg("kv_head_num", kv_head_num)
def __call__(self, query, key_cache, value_cache, block_tables, context_lens, alibi_mask):
return super().__call__(query, key_cache, value_cache, block_tables, context_lens, alibi_mask, self.head_num, self.scale_value, self.kv_head_num)
class PagedAttention(Primitive):
r"""
.. code-block::
prim = ops.PagedAttention(head_num, scale_value, kv_head_num)
out = prim(query, key_cache, value_cache, block_tables, context_lens)
is equivalent to
.. code-block::
ops.paged_attention(query, key_cache, value_cache, block_tables, context_lens, head_num, scale_value, kv_head_num)
Refer to :func:`mindspore.ops.paged_attention` for more details.
"""
@prim_arg_register
def __init__(self, head_num, scale_value, kv_head_num):
self._set_prim_arg("head_num", head_num)
self._set_prim_arg("scale_value", scale_value)
self._set_prim_arg("kv_head_num", kv_head_num)
def __call__(self, query, key_cache, value_cache, block_tables, context_lens):
return super().__call__(query, key_cache, value_cache, block_tables, context_lens, self.head_num, self.scale_value, self.kv_head_num)
class Pow(Primitive):
r"""
.. code-block::
prim = ops.Pow()
out = prim(input, exponent)
is equivalent to
.. code-block::
ops.pow(input, exponent)
Refer to :func:`mindspore.ops.pow` for more details.
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, exponent):
return _convert_stub(pyboost_pow(self, [input, exponent]))
pow_op=Pow()
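# Illustrative sketch (added note, not generated code): Pow raises `input` to
# `exponent` element-wise, broadcasting the two inputs. A NumPy equivalent:
#
#     import numpy as np
#     np.power(np.array([1.0, 2.0, 3.0], dtype=np.float32), 2.0)  # [1. 4. 9.]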
class PReLUGrad(Primitive):
r"""
Gradients of PReLU operation.
Note:
1-dimensional input_x is not supported.
Inputs:
- **dy** (Tensor) - Representing the backprop of the next layer.
- **x** (Tensor) - Must be the input `x` of forward operator PRelu.
- **weight** (Tensor) - Float Tensor, w > 0, must be the input `weight` of forward operator PRelu.
Outputs:
- **dx** (Tensor), with the same type as `x`.
- **dw** (Tensor), with the same type as `weight`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, dy, x, weight):
return super().__call__(dy, x, weight)
prelu_grad_op=PReLUGrad()
class PReLU(Primitive):
r"""
.. code-block::
prim = ops.PReLU()
out = prim(x, weight)
is equivalent to
.. code-block::
ops.prelu(x, weight)
Refer to :func:`mindspore.ops.prelu` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, weight):
return super().__call__(x, weight)
prelu_op=PReLU()
class ProdExt(Primitive):
r"""
Reduces a dimension of a tensor by multiplying all elements in the dimension, by default. It can also
reduce a dimension of `input` along the given `axis`. Whether the output keeps the reduced
dimension is controlled by `keep_dims`.
Args:
input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
:math:`(N, *)` where :math:`*` means, any number of additional dimensions.
axis (int): The dimensions to reduce. Default: ``None`` , reduce all dimensions.
Only constant value is allowed. Assume the rank of `input` is r, and the value range is [-r,r).
keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
If ``False`` , don't keep these dimensions. Default: ``False`` .
dtype (:class:`mindspore.dtype`): The desired data type of returned Tensor. Default: ``None`` .
Returns:
Tensor, has the same data type as input tensor.
- If `axis` is ``None`` , and `keep_dims` is ``False`` ,
the output is a 0-D tensor representing the product of all elements in the input tensor.
- If `axis` is int, set as 1, and `keep_dims` is ``False`` ,
the shape of output is :math:`(input_0, input_2, ..., input_R)`.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If `axis` is not one of the following: int or None.
TypeError: If `keep_dims` is not a bool.
ValueError: If `axis` is out of range.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> output = ops.ProdExt()(x, 1, keep_dims=True)
>>> result = output.shape
>>> print(result)
(3, 1, 5, 6)
>>> # case 1: Reduces a dimension by multiplying all elements in the dimension.
>>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
>>> output = ops.ProdExt()(x)
>>> print(output)
2.2833798e+33
>>> print(output.shape)
()
>>> # case 2: Reduces a dimension along axis 0.
>>> output = ops.ProdExt()(x, 0, True)
>>> print(output)
[[[ 28. 28. 28. 28. 28. 28.]
[ 80. 80. 80. 80. 80. 80.]
[162. 162. 162. 162. 162. 162.]]]
>>> # case 3: Reduces a dimension along axis 1.
>>> output = ops.ProdExt()(x, 1, True)
>>> print(output)
[[[ 6. 6. 6. 6. 6. 6.]]
[[120. 120. 120. 120. 120. 120.]]
[[504. 504. 504. 504. 504. 504.]]]
>>> # case 4: Reduces a dimension along axis 2.
>>> output = ops.ProdExt()(x, 2, True)
>>> print(output)
[[[1.00000e+00]
[6.40000e+01]
[7.29000e+02]]
[[4.09600e+03]
[1.56250e+04]
[4.66560e+04]]
[[1.17649e+05]
[2.62144e+05]
[5.31441e+05]]]
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('axis', default=None),
sig.make_sig('keep_dims', default=False),
sig.make_sig('dtype', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, axis=None, keep_dims=False, dtype=None):
return _convert_stub(pyboost_prod_ext(self, [input, axis, keep_dims, dtype if dtype is None else dtype_to_type_id('ProdExt', 'dtype', dtype)]))
prod_ext_op=ProdExt()
class PromptKVCache(Primitive):
r"""
.. code-block::
prim = ops.PromptKVCache(align_mode)
out = prim(cache, update, valid_seq_len, batch_index, seq_len_axis, new_max_seq_len, cur_max_seq_len)
is equivalent to
.. code-block::
ops.prompt_k_v_cache(cache, update, valid_seq_len, batch_index, seq_len_axis, new_max_seq_len, cur_max_seq_len, align_mode)
Refer to :func:`mindspore.ops.prompt_k_v_cache` for more details.
"""
@prim_arg_register
def __init__(self, align_mode='LEFT'):
self._set_prim_arg_with_handler("align_mode", align_mode, str_to_enum)
self.add_prim_attr("side_effect_mem", True)
def __call__(self, cache, update, valid_seq_len, batch_index, seq_len_axis, new_max_seq_len, cur_max_seq_len):
return super().__call__(cache, update, valid_seq_len, batch_index, seq_len_axis, new_max_seq_len, cur_max_seq_len, self.align_mode)
class Qr(Primitive):
r"""
Returns the QR decomposition of one or more matrices.
If `mode` is 'reduced' (the default), compute the P columns of Q where P is the minimum of the 2 innermost dimensions of
input. If `mode` is 'complete', compute full-sized Q and R.
Args:
full_matrices (bool, optional): Whether compute full-sized QR decomposition. Default: ``False`` .
Inputs:
- **x** (Tensor) - A matrix to be calculated. The matrix must be at least two dimensions, the supported dtype are
float16, float32, float64, complex64 and complex128.
Define the shape of input as :math:`(..., m, n)`, p as the
minimum values of m and n.
Outputs:
- **Q** (Tensor) - The orthonormal matrices of input. If `mode` is 'complete', the shape is :math:`(m, m)`,
else the shape is :math:`(m, p)`. The dtype of `Q` is same as `input`.
- **R** (Tensor) - The upper triangular matrices of input. If `mode` is 'complete', the shape is :math:`(m, n)`,
else the shape is :math:`(p, n)`. The dtype of `R` is same as `input`.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If `mode` is neither 'reduced' nor 'complete'.
ValueError: If the dimension of `input` is less than 2.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore as ms
>>> from mindspore import Tensor, ops
>>> import numpy as np
>>> x = Tensor(np.array([[20., -31, 7], [4, 270, -90], [-8, 17, -32]]), ms.float32)
>>> Q, R = ops.Qr()(x)
>>> print(Q)
[[-0.912871 0.16366126 0.37400758]
[-0.18257418 -0.9830709 -0.01544376]
[ 0.36514837 -0.08238228 0.92729706]]
>>> print(R)
[[ -21.908903 -14.788506 -1.6431675]
[ 0. -271.9031 92.25824 ]
[ 0. 0. -25.665514 ]]
"""
@prim_arg_register
def __init__(self, full_matrices=False):
self._set_prim_arg("full_matrices", full_matrices)
def __call__(self, x):
return super().__call__(x, self.full_matrices)
class QuantBatchMatmul(Primitive):
r"""
.. code-block::
prim = ops.QuantBatchMatmul(transpose_x1, transpose_x2, dtype)
out = prim(x1, x2, scale, offset, bias)
is equivalent to
.. code-block::
ops.quant_batch_matmul(x1, x2, scale, offset, bias, transpose_x1, transpose_x2, dtype)
Refer to :func:`mindspore.ops.quant_batch_matmul` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('x1', dtype=sig.sig_dtype.T),
sig.make_sig('x2', dtype=sig.sig_dtype.T),
sig.make_sig('scale', dtype=sig.sig_dtype.T1),
sig.make_sig('offset', dtype=sig.sig_dtype.T2, default=None),
sig.make_sig('bias', dtype=sig.sig_dtype.T3, default=None),
)
@prim_arg_register
def __init__(self, transpose_x1=False, transpose_x2=False, dtype=mstype.float16):
self._set_prim_arg("transpose_x1", transpose_x1)
self._set_prim_arg("transpose_x2", transpose_x2)
self._set_prim_arg_with_handler("dtype", dtype, dtype_to_type_id)
def __call__(self, x1, x2, scale, offset=None, bias=None):
return _convert_stub(pyboost_quant_batch_matmul(self, [x1, x2, scale, offset, bias, self.transpose_x1, self.transpose_x2, self.dtype]))
class RandpermV2(Primitive):
r"""
.. code-block::
prim = ops.RandpermV2(seed, offset, dtype)
out = prim(n)
is equivalent to
.. code-block::
ops.randperm(n, seed, offset, dtype)
Refer to :func:`mindspore.ops.randperm` for more details.
"""
@prim_arg_register
def __init__(self, seed=0, offset=0, dtype=mstype.int64):
self._set_prim_arg("seed", type_it('RandpermV2', 'seed', seed, OpDtype.DT_TENSOR, OpDtype.DT_INT))
self._set_prim_arg("offset", type_it('RandpermV2', 'offset', offset, OpDtype.DT_TENSOR, OpDtype.DT_INT))
self._set_prim_arg_with_handler("dtype", dtype, dtype_to_type_id)
def __call__(self, n):
return super().__call__(n, self.seed, self.offset, self.dtype)
class Range(Primitive):
r"""
.. code-block::
prim = ops.Range(maxlen)
out = prim(start, end, step)
is equivalent to
.. code-block::
ops.range(start, end, step, maxlen)
Refer to :func:`mindspore.ops.range` for more details.
"""
@prim_arg_register
def __init__(self, maxlen=1000000):
self._set_prim_arg("maxlen", maxlen)
def __call__(self, start, end, step):
return super().__call__(start, end, step, self.maxlen)
class RealDiv(Primitive):
r"""
Divides the first input tensor by the second input tensor in floating-point type element-wise.
Refer to :func:`mindspore.ops.div` for more details.
Inputs:
- **x** (Union[Tensor, Number, bool]) - The first input is a number, a bool,
or a tensor whose data type is number or bool.
- **y** (Union[Tensor, Number, bool]) - The second input is a number, a bool when the first input
is a tensor, or a tensor whose data type is number or bool.
Outputs:
Tensor, the shape is the same as the one after broadcasting,
and the data type is the one with higher precision or higher digits among the two inputs.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
>>> y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
>>> realdiv = ops.RealDiv()
>>> output = realdiv(x, y)
>>> print(output)
[0.25 0.4 0.5 ]
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, y):
return super().__call__(x, y)
real_div_op=RealDiv()
class Real(Primitive):
r"""
.. code-block::
prim = ops.Real()
out = prim(input)
is equivalent to
.. code-block::
ops.real(input)
Refer to :func:`mindspore.ops.real` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
real_op=Real()
class ReciprocalGrad(Primitive):
r"""
Performs grad of Reciprocal operation.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, y, dy):
return super().__call__(y, dy)
reciprocal_grad_op=ReciprocalGrad()
class Reciprocal(Primitive):
r"""
Returns reciprocal of a tensor element-wise.
.. math::
out_{i} = \frac{1}{x_{i}}
Inputs:
- **x** (Tensor) - The input tensor.
Outputs:
Tensor, has the same shape as the `x`.
Raises:
TypeError: If `x` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
>>> reciprocal = ops.Reciprocal()
>>> output = reciprocal(x)
>>> print(output)
[1. 0.5 0.25]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x):
return _convert_stub(pyboost_reciprocal(self, [x]))
reciprocal_op=Reciprocal()
class ReduceAll(Primitive):
r"""
.. code-block::
prim = ops.ReduceAll(keep_dims)
out = prim(input, axis)
is equivalent to
.. code-block::
ops.all(input, axis, keep_dims)
Refer to :func:`mindspore.ops.all` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('axis', default=None),
)
@prim_arg_register
def __init__(self, keep_dims=False):
self._set_prim_arg("keep_dims", keep_dims)
def __call__(self, input, axis=None):
return _convert_stub(pyboost_reduce_all(self, [input, axis, self.keep_dims]))
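# Illustrative usage sketch for ReduceAll (not part of the generated operators): it computes
# the logical AND of the elements along the given axis, mirroring the ReduceAny example below.
def _example_reduce_all():
    import numpy as np
    from mindspore import Tensor, ops
    x = Tensor(np.array([[True, False], [True, True]]))
    op = ops.ReduceAll(keep_dims=True)
    all_elems = op(x)       # AND over every element -> [[False]], shape (1, 1)
    per_column = op(x, 0)   # AND along axis 0 -> [[True, False]]
    return all_elems, per_column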
class ReduceAny(Primitive):
r"""
Reduces a dimension of a tensor by the "logical OR" of all elements in the dimension, by default. And also can
reduce a dimension of `x` along the `axis`. Determine whether the dimensions of the output and input are the
same by controlling `keep_dims`.
Note:
The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
Args:
keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
If ``False`` , don't keep these dimensions. Default: ``False`` .
Inputs:
- **x** (Tensor[bool]) - The input tensor. The dtype of the tensor to be reduced is bool.
- **axis** (Union[int, tuple(int), list(int), Tensor]) - The dimensions to reduce. Default: ``()`` ,
reduce all dimensions. Only constant value is allowed. Must be in the range [-rank(x), rank(x)).
Outputs:
Tensor, the dtype is bool.
- If `axis` is ``()`` , and `keep_dims` is ``False`` ,
the output is a 0-D tensor representing the "logical or" of all elements in the input tensor.
- If `axis` is int, set as 2, and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_1, x_3, ..., x_R)`.
- If `axis` is tuple(int), set as (2, 3), and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_1, x_4, ..., x_R)`.
- If `axis` is 1-D Tensor, set as [2, 3], and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_1, x_4, ..., x_R)`.
Raises:
TypeError: If `keep_dims` is not a bool.
TypeError: If `x` is not a Tensor.
TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([[True, False], [True, True]]))
>>> op = ops.ReduceAny(keep_dims=True)
>>> # case 1: Reduces a dimension by the "logical OR" of all elements in the dimension.
>>> output = op(x)
>>> print(output)
[[ True]]
>>> print(output.shape)
(1, 1)
>>> # case 2: Reduces a dimension along axis 0.
>>> output = op(x, 0)
>>> print(output)
[[ True True]]
>>> # case 3: Reduces a dimension along axis 1.
>>> output = op(x, 1)
>>> print(output)
[[True]
[ True]]
>>> # case 4: input is a scalar.
>>> x = Tensor(True)
>>> op = ops.ReduceAny()
>>> output = op(x)
>>> print(output)
True
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('axis', default=()),
)
@prim_arg_register
def __init__(self, keep_dims=False):
self._set_prim_arg("keep_dims", keep_dims)
def __call__(self, x, axis=()):
return _convert_stub(pyboost_reduce_any(self, [x, axis, self.keep_dims]))
class ReduceMax(Primitive):
r"""
By default, reduces all dimensions of a tensor by taking the maximum value of their elements. It can
also reduce a dimension of `x` along the specified `axis`. Whether the output keeps the reduced
dimensions is controlled by `keep_dims`.
Note:
The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
Args:
keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
If ``False`` , don't keep these dimensions. Default: ``False`` .
Inputs:
- **x** (Tensor[Number]) - The input tensor.
- **axis** (Union[int, tuple(int), list(int), tensor]) - The dimensions to reduce. Default: ``()`` ,
reduce all dimensions. Must be in the range [-r, r).
Outputs:
Tensor, has the same dtype as the `x`.
- If `axis` is ``()`` , and `keep_dims` is ``False`` ,
the output is a 0-D tensor representing the maximum of all elements in the input tensor.
- If `axis` is int, set as 1, and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_0, x_2, ..., x_R)`.
- If `axis` is tuple(int) or list(int), set as (1, 2), and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_0, x_3, ..., x_R)`.
- If `axis` is 1-D Tensor, set as [1, 2], and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_0, x_3, ..., x_R)`.
Raises:
TypeError: If `keep_dims` is not a bool.
TypeError: If `x` is not a Tensor.
TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
ValueError: If `axis` is out of range.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> output = ops.ReduceMax(keep_dims=True)(x, 1)
>>> result = output.shape
>>> print(result)
(3, 1, 5, 6)
>>> # case 1: Reduces a dimension by the maximum value of all elements in the dimension.
>>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
>>> output = ops.ReduceMax(keep_dims=True)(x, ())
>>> print(output)
[[[9.]]]
>>> print(output.shape)
(1, 1, 1)
>>> # case 2: Reduces a dimension along axis 0.
>>> output = ops.ReduceMax(keep_dims=True)(x, 0)
>>> print(output)
[[[7. 7. 7. 7. 7. 7.]
[8. 8. 8. 8. 8. 8.]
[9. 9. 9. 9. 9. 9.]]]
>>> # case 3: Reduces a dimension along axis 1.
>>> output = ops.ReduceMax(keep_dims=True)(x, 1)
>>> print(output)
[[[3. 3. 3. 3. 3. 3.]]
[[6. 6. 6. 6. 6. 6.]]
[[9. 9. 9. 9. 9. 9.]]]
>>> # case 4: Reduces a dimension along axis 2.
>>> output = ops.ReduceMax(keep_dims=True)(x, 2)
>>> print(output)
[[[1.]
[2.]
[3.]]
[[4.]
[5.]
[6.]]
[[7.]
[8.]
[9.]]]
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('axis', default=()),
)
@prim_arg_register
def __init__(self, keep_dims=False):
self._set_prim_arg("keep_dims", keep_dims)
def __call__(self, x, axis=()):
return super().__call__(x, axis, self.keep_dims)
class ReduceMean(Primitive):
r"""
By default, reduces all dimensions of a tensor by averaging their elements. It can also reduce
a dimension of `x` along the specified `axis`. Whether the output keeps the reduced dimensions
is controlled by `keep_dims`.
Note:
The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
Args:
keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
If ``False`` , don't keep these dimensions. Default: ``False`` .
Inputs:
- **x** (Tensor[Number]) - The input tensor.
- **axis** (Union[int, tuple(int), list(int), Tensor]) - The dimensions to reduce. Default: ``()`` ,
reduce all dimensions. Only constant value is allowed. Must be in the range [-r, r).
Outputs:
Tensor, has the same dtype as the `x`.
- If `axis` is ``()`` , and `keep_dims` is ``False`` ,
the output is a 0-D tensor representing the mean of all elements in the input tensor.
- If `axis` is int, set as 1, and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_0, x_2, ..., x_R)`.
- If `axis` is tuple(int) or list(int), set as (1, 2), and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_0, x_3, ..., x_R)`.
- If `axis` is 1-D Tensor, set as [1, 2], and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_0, x_3, ..., x_R)`.
Raises:
TypeError: If `keep_dims` is not a bool.
TypeError: If `x` is not a Tensor.
TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
ValueError: If `axis` is out of range.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> op = ops.ReduceMean(keep_dims=True)
>>> output = op(x, 1)
>>> result = output.shape
>>> print(result)
(3, 1, 5, 6)
>>> # case 1: Reduces a dimension by averaging all elements in the dimension.
>>> x = Tensor(np.array([[[2, 2, 2, 2, 2, 2], [2, 2, 2, 2, 2, 2], [2, 2, 2, 2, 2, 2]],
... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
... [[6, 6, 6, 6, 6, 6], [8, 8, 8, 8, 8, 8], [10, 10, 10, 10, 10, 10]]]),
... mindspore.float32)
>>> output = op(x)
>>> print(output)
[[[5.]]]
>>> print(output.shape)
(1, 1, 1)
>>> # case 2: Reduces a dimension along the axis 0
>>> output = op(x, 0)
>>> print(output)
[[[4. 4. 4. 4. 4. 4.]
[5. 5. 5. 5. 5. 5.]
[6. 6. 6. 6. 6. 6.]]]
>>> # case 3: Reduces a dimension along the axis 1
>>> output = op(x, 1)
>>> print(output)
[[[2. 2. 2. 2. 2. 2.]]
[[5. 5. 5. 5. 5. 5.]]
[[8. 8. 8. 8. 8. 8.]]]
>>> # case 4: Reduces a dimension along the axis 2
>>> output = op(x, 2)
>>> print(output)
[[[ 2.]
[ 2.]
[ 2.]]
[[ 4.]
[ 5.]
[ 6.]]
[[ 6.]
[ 8.]
[10.]]]
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('axis', default=()),
)
@prim_arg_register
def __init__(self, keep_dims=False):
self._set_prim_arg("keep_dims", keep_dims)
def __call__(self, x, axis=()):
return super().__call__(x, axis, self.keep_dims)
class ReduceMin(Primitive):
r"""
By default, reduces all dimensions of a tensor by taking the minimum value of their elements. It can
also reduce a dimension of `x` along the specified `axis`. Whether the output keeps the reduced
dimensions is controlled by `keep_dims`.
Note:
The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
Args:
keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
If ``False`` , don't keep these dimensions. Default: ``False`` .
Inputs:
- **x** (Tensor[Number]) - The input tensor.
- **axis** (Union[int, tuple(int), list(int), Tensor]) - The dimensions to reduce. Default: ``()`` ,
reduce all dimensions. Only constant value is allowed. Must be in the range [-r, r).
Outputs:
Tensor, has the same dtype as the `x`.
- If `axis` is ``()`` , and `keep_dims` is ``False`` ,
the output is a 0-D tensor representing the minimum of all elements in the input tensor.
- If `axis` is int, set as 1, and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_0, x_2, ..., x_R)`.
- If `axis` is tuple(int), set as (1, 2), and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_0, x_3, ..., x_R)`.
- If `axis` is 1-D Tensor, set as [1, 2], and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_0, x_3, ..., x_R)`.
Raises:
TypeError: If `keep_dims` is not a bool.
TypeError: If `x` is not a Tensor.
TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
ValueError: If `axis` is out of range.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> op = ops.ReduceMin(keep_dims=True)
>>> output = op(x, 1)
>>> result = output.shape
>>> print(result)
(3, 1, 5, 6)
>>> # case 1: Reduces a dimension by the minimum value of all elements in the dimension.
>>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
>>> output = op(x)
>>> print(output)
[[[1.]]]
>>> print(output.shape)
(1, 1, 1)
>>> # case 2: Reduces a dimension along axis 0.
>>> output = op(x, 0)
>>> print(output)
[[[1. 1. 1. 1. 1. 1.]
[2. 2. 2. 2. 2. 2.]
[3. 3. 3. 3. 3. 3.]]]
>>> # case 3: Reduces a dimension along axis 1.
>>> output = op(x, 1)
>>> print(output)
[[[1. 1. 1. 1. 1. 1.]]
[[4. 4. 4. 4. 4. 4.]]
[[7. 7. 7. 7. 7. 7.]]]
>>> # case 4: Reduces a dimension along axis 2.
>>> output = op(x, 2)
>>> print(output)
[[[1.]
[2.]
[3.]]
[[4.]
[5.]
[6.]]
[[7.]
[8.]
[9.]]]
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('axis', default=()),
)
@prim_arg_register
def __init__(self, keep_dims=False):
self._set_prim_arg("keep_dims", keep_dims)
def __call__(self, x, axis=()):
return super().__call__(x, axis, self.keep_dims)
class ReduceProd(Primitive):
r"""
By default, reduces all dimensions of a tensor by multiplying their elements. It can also reduce
a dimension of `x` along the specified `axis`. Whether the output keeps the reduced dimensions
is controlled by `keep_dims`.
Note:
The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
Args:
keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
If ``False`` , don't keep these dimensions. Default: ``False`` .
Inputs:
- **x** (Tensor[Number]) - The input tensor.
- **axis** (Union[int, tuple(int), list(int), Tensor]) - The dimensions to reduce. Default: ``()`` ,
reduce all dimensions. Only constant value is allowed. Must be in the range [-r, r).
Outputs:
Tensor, has the same dtype as the `x`.
- If `axis` is ``()`` , and `keep_dims` is ``False`` ,
the output is a 0-D tensor representing the product of all elements in the input tensor.
- If `axis` is int, set as 1, and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_0, x_2, ..., x_R)`.
- If `axis` is tuple(int), set as (1, 2), and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_0, x_3, ..., x_R)`.
- If `axis` is 1-D Tensor, set as [1, 2], and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_0, x_3, ..., x_R)`.
Raises:
TypeError: If `keep_dims` is not a bool.
TypeError: If `x` is not a Tensor.
TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
ValueError: If `axis` is out of range.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> op = ops.ReduceProd(keep_dims=True)
>>> output = op(x, 1)
>>> result = output.shape
>>> print(result)
(3, 1, 5, 6)
>>> # case 1: Reduces a dimension by multiplying all elements in the dimension.
>>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
>>> output = op(x)
>>> print(output)
[[[2.2833798e+33]]]
>>> print(output.shape)
(1, 1, 1)
>>> # case 2: Reduces a dimension along axis 0.
>>> output = op(x, 0)
>>> print(output)
[[[ 28. 28. 28. 28. 28. 28.]
[ 80. 80. 80. 80. 80. 80.]
[162. 162. 162. 162. 162. 162.]]]
>>> # case 3: Reduces a dimension along axis 1.
>>> output = op(x, 1)
>>> print(output)
[[[ 6. 6. 6. 6. 6. 6.]]
[[120. 120. 120. 120. 120. 120.]]
[[504. 504. 504. 504. 504. 504.]]]
>>> # case 4: Reduces a dimension along axis 2.
>>> output = op(x, 2)
>>> print(output)
[[[1.00000e+00]
[6.40000e+01]
[7.29000e+02]]
[[4.09600e+03]
[1.56250e+04]
[4.66560e+04]]
[[1.17649e+05]
[2.62144e+05]
[5.31441e+05]]]
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('axis', default=()),
)
@prim_arg_register
def __init__(self, keep_dims=False):
self._set_prim_arg("keep_dims", keep_dims)
def __call__(self, x, axis=()):
return super().__call__(x, axis, self.keep_dims)
class ReduceStd(Primitive):
r"""
Returns the standard-deviation and mean of the input Tensor along
dimension(s) specified by `axis`.
Note:
The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
Args:
axis (Union[int, tuple(int), list(int), Tensor], optional): The dimensions to reduce.
Default: ``()`` , reduce all dimensions. Only constant value is allowed.
Let `r` be rank of `input_x`, it should be in the range :math:`[-r,r)`.
unbiased (bool, optional): Whether to use Bessel's correction.
If ``True`` , will use the Bessel correction unbiased estimation.
If ``False`` , the biased estimation will be used to calculate the standard deviation.
Default: ``True`` .
keep_dims (bool, optional): Whether the output Tensor has dim retained or not.
If ``True`` , keep these reduced dimensions specified by `axis` and the length is 1.
If ``False`` , don't keep these dimensions.
Default: ``False`` .
Inputs:
- **input_x** (Tensor[Number]) - The input Tensor with shape
:math:`(N, *)` where :math:`*` means any number of additional dimensions.
Supported dtypes: float16, float32.
Outputs:
Tuple(output_std, output_mean) containing the standard deviation and mean.
Raises:
TypeError: If `keep_dims` is not a bool.
TypeError: If `input_x` is not a Tensor.
ValueError: If `axis` is not one of the following: int, tuple, list or Tensor.
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import ops, Tensor
>>> input_x = Tensor(np.array([[1, 2, 3], [-1, 1, 4]]).astype(np.float32))
>>> op = ops.ReduceStd(axis=1, unbiased=True, keep_dims=False)
>>> output = op(input_x)
>>> output_std, output_mean = output[0], output[1]
>>> print(output_std)
[1. 2.5166113]
>>> print(output_mean)
[2. 1.3333334]
"""
@prim_arg_register
def __init__(self, axis=[], unbiased=True, keep_dims=False):
self._set_prim_arg("axis", type_it('ReduceStd', 'axis', axis, (OpDtype.DT_INT, OpDtype.DT_LIST_INT, OpDtype.DT_TENSOR), OpDtype.DT_TUPLE_INT))
self._set_prim_arg("unbiased", unbiased)
self._set_prim_arg("keep_dims", keep_dims)
def __call__(self, x):
return super().__call__(x, self.axis, self.unbiased, self.keep_dims)
class ReduceSum(Primitive):
r"""
By default, reduces all dimensions of a tensor by summing their elements. It can also reduce
a dimension of `x` along the specified `axis`. Whether the output keeps the reduced dimensions
is controlled by `keep_dims`.
Note:
The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
Args:
keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
If ``False`` , don't keep these dimensions. Default: ``False`` .
skip_mode (bool): If ``True`` and `axis` is an empty tuple or empty list,
the ReduceSum operation is skipped.
If ``True`` and `axis` is any other value, the ReduceSum calculation is performed normally.
If ``False`` , the reduction is always performed. Default: ``False`` .
Inputs:
- **x** (Tensor[Number]) - The input tensor.
- **axis** (Union[int, tuple(int), list(int), Tensor]) - The dimensions to reduce. Default: ``()`` ,
reduce all dimensions when `skip_mode` is ``False`` . Only constant value is allowed. Must be in the range
[-rank(`x`), rank(`x`)).
Outputs:
Tensor, has the same dtype as the `x`.
- If `axis` is ``()`` , `keep_dims` is ``False`` , and `skip_mode` is ``False`` ,
the output is a 0-D tensor representing the sum of all elements in the input tensor.
- If `axis` is ``()`` , and `skip_mode` is ``True`` ,
the ReduceSum operation is not performed, output tensor is equal to the input tensor.
- If `axis` is int, set as 2, and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_1, x_3, ..., x_R)`.
- If `axis` is tuple(int) or list(int), set as (2, 3), and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_1, x_4, ..., x_R)`.
- If `axis` is 1-D Tensor, set as [2, 3], and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_1, x_4, ..., x_R)`.
Raises:
TypeError: If `keep_dims` is not a bool.
TypeError: If `skip_mode` is not a bool.
TypeError: If `x` is not a Tensor.
ValueError: If `axis` is None.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> op = ops.ReduceSum(keep_dims=True)
>>> output = op(x, 1)
>>> output.shape
(3, 1, 5, 6)
>>> # case 1: Reduces a dimension by summing all elements in the dimension.
>>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
>>> output = op(x)
>>> print(output)
[[[270.]]]
>>> print(output.shape)
(1, 1, 1)
>>> # case 2: Reduces a dimension along axis 0.
>>> output = op(x, 0)
>>> print(output)
[[[12. 12. 12. 12. 12. 12.]
[15. 15. 15. 15. 15. 15.]
[18. 18. 18. 18. 18. 18.]]]
>>> # case 3: Reduces a dimension along axis 1.
>>> output = op(x, 1)
>>> print(output)
[[[ 6. 6. 6. 6. 6. 6.]]
[[15. 15. 15. 15. 15. 15.]]
[[24. 24. 24. 24. 24. 24.]]]
>>> # case 4: Reduces a dimension along axis 2.
>>> output = op(x, 2)
>>> print(output)
[[[ 6.]
[12.]
[18.]]
[[24.]
[30.]
[36.]]
[[42.]
[48.]
[54.]]]
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('axis', default=()),
)
@prim_arg_register
def __init__(self, keep_dims=False, skip_mode=False):
self._set_prim_arg("keep_dims", keep_dims)
self._set_prim_arg("skip_mode", skip_mode)
def __call__(self, x, axis=()):
return super().__call__(x, axis, self.keep_dims, self.skip_mode)
class ReflectionPad1DGrad(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, grad_output, input, padding):
return _convert_stub(pyboost_reflection_pad_1d_grad(self, [grad_output, input, padding]))
reflection_pad_1d_grad_op=ReflectionPad1DGrad()
class ReflectionPad1D(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, padding):
return _convert_stub(pyboost_reflection_pad_1d(self, [input, padding]))
reflection_pad_1d_op=ReflectionPad1D()
class ReflectionPad2DGrad(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, grad_output, input, padding):
return _convert_stub(pyboost_reflection_pad_2d_grad(self, [grad_output, input, padding]))
reflection_pad_2d_grad_op=ReflectionPad2DGrad()
class ReflectionPad2D(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, padding):
return _convert_stub(pyboost_reflection_pad_2d(self, [input, padding]))
reflection_pad_2d_op=ReflectionPad2D()
class ReflectionPad3DGrad(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, grad_output, input, padding):
return _convert_stub(pyboost_reflection_pad_3d_grad(self, [grad_output, input, padding]))
reflection_pad_3d_grad_op=ReflectionPad3DGrad()
class ReflectionPad3D(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, padding):
return _convert_stub(pyboost_reflection_pad_3d(self, [input, padding]))
reflection_pad_3d_op=ReflectionPad3D()
class ReLU6Grad(Primitive):
r"""
Computes gradient for the ReLU6 activation.
Args:
y_backprop (Tensor): Input gradients tensor, has the same dtype and shape as `x`.
x (Tensor): Origin input tensor.
Returns:
Tensor, has the same dtype and shape as `x`.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, y_backprop, x):
return super().__call__(y_backprop, x)
relu6_grad_op=ReLU6Grad()
class ReLU6(Primitive):
r"""
.. code-block::
prim = ops.ReLU6()
out = prim(x)
is equivalent to
.. code-block::
ops.relu6(x)
Refer to :func:`mindspore.ops.relu6` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x):
return super().__call__(x)
relu6_op=ReLU6()
class ReluGrad(Primitive):
r"""
Computes gradient for the ReLU activation.
Args:
y_backprop (Tensor): Input gradients tensor, has the same dtype and shape as `x`.
x (Tensor): Origin input tensor.
Returns:
Tensor, has the same dtype and shape as `x`.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, y_backprop, x):
return _convert_stub(pyboost_relu_grad(self, [y_backprop, x]))
relu_grad_op=ReluGrad()
class ReLU(Primitive):
r"""
.. code-block::
prim = ops.ReLU()
out = prim(input)
is equivalent to
.. code-block::
ops.relu(input)
Refer to :func:`mindspore.ops.relu` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return _convert_stub(pyboost_relu(self, [input]))
relu_op=ReLU()
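# Illustrative usage sketch for ReLU (not part of the generated operators): ReLU clamps
# negative values to zero, so [-1, 2, -3] becomes [0, 2, 0].
def _example_relu():
    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops
    x = Tensor(np.array([-1.0, 2.0, -3.0]), ms.float32)
    return ops.ReLU()(x)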
class RepeatInterleave(Primitive):
r"""
.. code-block::
prim = ops.RepeatInterleave()
out = prim(input, repeats, axis, output_size)
is equivalent to
.. code-block::
ops.repeat_interleave(input, repeats, axis, output_size)
Refer to :func:`mindspore.ops.repeat_interleave` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('repeats'),
sig.make_sig('axis', default=None),
sig.make_sig('output_size', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, repeats, axis=None, output_size=None):
return _convert_stub(pyboost_repeat_interleave(self, [input, repeats, axis, output_size]))
repeat_interleave_op=RepeatInterleave()
class ReplicationPad1DGrad(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, grad_output, input, padding):
return _convert_stub(pyboost_replication_pad_1d_grad(self, [grad_output, input, padding]))
replication_pad_1d_grad_op=ReplicationPad1DGrad()
class ReplicationPad1D(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, padding):
return _convert_stub(pyboost_replication_pad_1d(self, [input, padding]))
replication_pad_1d_op=ReplicationPad1D()
class ReplicationPad2DGrad(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, grad_output, input, padding):
return _convert_stub(pyboost_replication_pad_2d_grad(self, [grad_output, input, padding]))
replication_pad_2d_grad_op=ReplicationPad2DGrad()
class ReplicationPad2D(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, padding):
return _convert_stub(pyboost_replication_pad_2d(self, [input, padding]))
replication_pad_2d_op=ReplicationPad2D()
class ReplicationPad3DGrad(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, grad_output, input, padding):
return _convert_stub(pyboost_replication_pad_3d_grad(self, [grad_output, input, padding]))
replication_pad_3d_grad_op=ReplicationPad3DGrad()
class ReplicationPad3D(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, padding):
return _convert_stub(pyboost_replication_pad_3d(self, [input, padding]))
replication_pad_3d_op=ReplicationPad3D()
class ReshapeAndCache(Primitive):
r"""
.. code-block::
prim = ops.ReshapeAndCache()
out = prim(key, value, key_cache, value_cache, slot_mapping)
is equivalent to
.. code-block::
ops.reshape_and_cache(key, value, key_cache, value_cache, slot_mapping)
Refer to :func:`mindspore.ops.reshape_and_cache` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('key', dtype=sig.sig_dtype.T),
sig.make_sig('value', dtype=sig.sig_dtype.T),
sig.make_sig('key_cache', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('value_cache', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('slot_mapping', dtype=sig.sig_dtype.T1),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, key, value, key_cache, value_cache, slot_mapping):
return super().__call__(key, value, key_cache, value_cache, slot_mapping)
reshape_and_cache_op=ReshapeAndCache()
class Reshape(Primitive):
r"""
.. code-block::
prim = ops.Reshape()
out = prim(input, shape)
is equivalent to
.. code-block::
ops.reshape(input, shape)
Refer to :func:`mindspore.ops.reshape` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, shape):
return super().__call__(input, shape)
reshape_op=Reshape()
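# Illustrative usage sketch for Reshape (not part of the generated operators): reshaping a
# 1-D tensor of 6 elements into a (2, 3) tensor; the element count must stay the same.
def _example_reshape():
    import numpy as np
    from mindspore import Tensor, ops
    x = Tensor(np.arange(6).astype(np.float32))
    # The target shape is passed as a tuple of ints.
    return ops.Reshape()(x, (2, 3))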
class ResizeBicubicGrad(Primitive):
r"""
Computes gradients for the ResizeBicubic operation.
Args:
grads (Tensor): A Tensor of type float. 4-D with shape [batch, height, width, channels]. The format must be NHWC.
image (Tensor): A Tensor. Must be one of the following types: float, double.
4-D with shape [batch, orig_height, orig_width, channels], The origin image tensor that was resized.
The format must be NHWC.
align_corners (bool): If true, the centers of the 4 corner pixels of the input and output tensors are
aligned, preserving the values at the corner pixels. Default: ``False``.
half_pixel_centers (bool): An optional bool. Default: ``False``.
Outputs:
A 4-D Tensor , with the same shape and data type as `image`.
Raises:
TypeError: If the dtype of `grads` is not allowed.
TypeError: If the dtype of `image` is not allowed.
ValueError: If `image` dim is not 4.
ValueError: If `size` dim is not 4.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
"""
@prim_arg_register
def __init__(self, align_corners=False, half_pixel_centers=False):
self._set_prim_arg("align_corners", align_corners)
self._set_prim_arg("half_pixel_centers", half_pixel_centers)
def __call__(self, grads, image):
return super().__call__(grads, image, self.align_corners, self.half_pixel_centers)
class ResizeBicubic(Primitive):
r"""
Resize images to size using bicubic interpolation.
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
align_corners (bool, optional): If ``True`` , the centers of the 4 corner pixels of the input
and output tensors are aligned, preserving the values at the corner pixels. Default: ``False`` .
half_pixel_centers (bool, optional): Whether to use half-pixel center alignment. If set to ``True`` ,
`align_corners` should be ``False`` . Default: ``False`` .
Inputs:
- **images** (Tensor) - The input image must be a 4-D tensor of shape :math:`(batch, channels, height, width)`.
The format must be NCHW. Types allowed: float16, float32, float64.
- **size** (Union[tuple[int], Tensor[int]]) - A 1-D tensor or tuple with 2 elements: new_height, new_width. The tuple[int] form is recommended.
Outputs:
A 4-D tensor with shape :math:`(batch, channels, new\_height, new\_width)` whose dtype is the same as `images` .
Raises:
TypeError: If the type of `images` is not allowed.
TypeError: If the type of `align_corners` is not bool.
TypeError: If the type of `half_pixel_centers` is not bool.
ValueError: If the dim of `images` is not 4.
ValueError: If the dim of `size` is not 1 when `size` is a tensor.
ValueError: If the number of elements in `size` is not 2.
ValueError: If any value of `size` is not positive.
ValueError: If the values of `align_corners` and `half_pixel_centers` are both ``True`` .
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops, nn
>>> class NetResizeBicubic(nn.Cell):
... def __init__(self):
... super(NetResizeBicubic, self).__init__()
... align_corners = False
... half_pixel_centers = False
... self.resize = ops.ResizeBicubic(align_corners, half_pixel_centers)
...
... def construct(self, images, size):
... return self.resize(images, size)
...
>>> images = Tensor(np.array([1, 2, 3, 4]).reshape(1, 1, 2, 2).astype(np.float32))
>>> size = Tensor([1, 4], mindspore.int32)
>>> resizebicubic = NetResizeBicubic()
>>> output = resizebicubic(images, size)
>>> print(output)
[[[[1. 1.5 2. 2.09375]]]]
"""
@prim_arg_register
def __init__(self, align_corners=False, half_pixel_centers=False):
self._set_prim_arg("align_corners", align_corners)
self._set_prim_arg("half_pixel_centers", half_pixel_centers)
def __call__(self, image, size):
return super().__call__(image, size, self.align_corners, self.half_pixel_centers)
class ResizeBilinearGrad(Primitive):
r"""
Performs grad of ResizeBilinear operation.
Args:
grads (Tensor): A 4-D Tensor with shape [batch, channel, height, width].
image (Tensor): A 4-D Tensor with shape [batch, channel, height, width], The origin image tensor that was resized.
align_corners (bool): If true, the centers of the 4 corner pixels of the input and output tensors are
aligned, preserving the values at the corner pixels. Default: ``False``.
half_pixel_centers (bool): An optional bool. Default: ``False``.
Outputs:
A 4-D Tensor , with the same shape and data type as `image`.
"""
@prim_arg_register
def __init__(self, align_corners=False, half_pixel_centers=False):
self._set_prim_arg("align_corners", align_corners)
self._set_prim_arg("half_pixel_centers", half_pixel_centers)
def __call__(self, grads, image):
return super().__call__(grads, image, self.align_corners, self.half_pixel_centers)
class ResizeBilinearV2(Primitive):
r"""
Resizes an image to a certain size using the bilinear interpolation.
The resizing only affects the lower two dimensions which represent the height and width.
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
align_corners (bool, optional): If ``True`` , rescale input by :math:`(new\_height - 1) / (height - 1)`,
which exactly aligns the 4 corners of images and resized images. If ``False`` ,
rescale by :math:`new\_height / height`. Default: ``False`` .
half_pixel_centers (bool, optional): Whether half pixel center. If set to ``True`` , `align_corners` should be
``False`` . Default: ``False`` .
Inputs:
- **x** (Tensor) - Image to be resized. Input images must be a 4-D tensor with shape
:math:`(batch, channels, height, width)`, with data type of float32 or float16.
- **size** (Union[tuple[int], list[int], Tensor]) - The new size of the images.
A tuple or list or Tensor of 2 int elements :math:`(new\_height, new\_width)`.
Outputs:
Tensor, resized image. 4-D with shape :math:`(batch, channels, new\_height, new\_width)`,
with the same data type as input `x`.
Raises:
TypeError: If `align_corners` is not a bool.
TypeError: If `half_pixel_centers` is not a bool.
TypeError: If `align_corners` and `half_pixel_centers` are both ``True`` .
ValueError: If `half_pixel_centers` is ``True`` and device_target is CPU.
ValueError: If dim of `x` is not 4.
ValueError: If `size` is Tensor and its dim is not 1.
ValueError: If `size` contains other than 2 elements.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> x = Tensor([[[[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]]]], mindspore.float32)
>>> output = ops.ResizeBilinearV2()(x, (5, 5))
>>> print(output)
[[[[1. 2. 3. 4. 5.]
[1. 2. 3. 4. 5.]
[1. 2. 3. 4. 5.]
[1. 2. 3. 4. 5.]
[1. 2. 3. 4. 5.]]]]
"""
@prim_arg_register
def __init__(self, align_corners=False, half_pixel_centers=False):
self._set_prim_arg("align_corners", align_corners)
self._set_prim_arg("half_pixel_centers", half_pixel_centers)
def __call__(self, image, size):
return super().__call__(image, size, self.align_corners, self.half_pixel_centers)
class ResizeLinear1DGrad(Primitive):
r"""
Compute gradient of `ResizeLinear1D` operator.
.. warning::
This is an experimental API that is subject to change.
Args:
grads (Tensor): A Tensor of type float. 3-D with shape [batch, channel, width].
x (Tensor): A origin input Tensor. 3-D with shape [batch, channel, orig_width], The origin tensor that was resized.
coordinate_transformation_mode (string): Default is 'align_corners'. Describes how to transform the coordinate
in the resized tensor to the coordinate in the original tensor. Other optional: 'half_pixel'.
"""
@prim_arg_register
def __init__(self, coordinate_transformation_mode='align_corners'):
self._set_prim_arg_with_handler("coordinate_transformation_mode", coordinate_transformation_mode, str_to_enum)
def __call__(self, grads, x):
return super().__call__(grads, x, self.coordinate_transformation_mode)
class ResizeLinear1D(Primitive):
r"""
Resizes the input tensor `x` using the linear interpolation method.
For general resize, refer to :func:`mindspore.ops.interpolate` for more details.
.. warning::
- This is an experimental API that is subject to change.
- Currently, the Ascend platform only supports scenarios where the input `size` is Tuple or List.
Args:
coordinate_transformation_mode (str): Default is ``'align_corners'`` . Describes how to transform the
coordinate in the resized tensor to the coordinate in the original tensor. Other optional: 'half_pixel'.
Inputs:
- **x** (Tensor) - A 3-D tensor which to resize, with shape [batch, channel, width]. Must be one of the
following types: float16, float32, float64.
- **size** (Union[Tuple[int], List[int], Tensor[int]]) - describes the new width of `x` .
A tuple or list or 1-D tensor with only one int element :math:`(new\_width)`.
Outputs:
A 3-D tensor which shape is [batch, channel, new_width] with the same type as `x`.
Raises:
TypeError: If dtype of `x` is not in the support list.
TypeError: If `size` is not in Union[Tuple[int], List[int], Tensor[int]].
TypeError: If `coordinate_transformation_mode` is not a string.
TypeError: If `coordinate_transformation_mode` is not in the support list.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> x = Tensor([[[1, 2, 3], [4, 5, 6]]], mindspore.float32)
>>> size = (6,)
>>> resize_linear_1d = ops.ResizeLinear1D(coordinate_transformation_mode="align_corners")
>>> output = resize_linear_1d(x, size)
>>> print(output)
[[[1. 1.4 1.8 2.2 2.6 3.]
[4. 4.4 4.8 5.2 5.6 6.]]]
"""
@prim_arg_register
def __init__(self, coordinate_transformation_mode='align_corners'):
self._set_prim_arg_with_handler("coordinate_transformation_mode", coordinate_transformation_mode, str_to_enum)
def __call__(self, x, size):
return super().__call__(x, size, self.coordinate_transformation_mode)
class ResizeNearestNeighborGrad(Primitive):
r"""
Compute gradient of `ResizeNearestNeighbor` operator.
Note:
The shape of input parameter `size` must be (height, width).
Inputs:
- **align_corners** (bool) - Whether the centers of the 4 corner pixels of the input
and output tensors are aligned. Default: ``False``.
- **half_pixel_centers** (bool, optional) - Whether half pixel center. If set to ``True``,
`align_corners` should be False. Default: ``False``.
"""
@prim_arg_register
def __init__(self, align_corners=False, half_pixel_centers=False):
self._set_prim_arg("align_corners", align_corners)
self._set_prim_arg("half_pixel_centers", half_pixel_centers)
def __call__(self, grads, size):
return super().__call__(grads, size, self.align_corners, self.half_pixel_centers)
class ResizeNearestNeighbor(Primitive):
r"""
Resizes the input tensor to a given size by using the nearest neighbor algorithm. The nearest
neighbor algorithm selects the value of the nearest point and does not consider the
values of neighboring points at all, yielding a piecewise-constant interpolant.
Args:
size (Union[tuple, list]): The target size. The dimension of size must be 2.
align_corners (bool): Whether the centers of the 4 corner pixels of the input and output tensors are aligned.
Default: ``False`` .
half_pixel_centers (bool, optional): Whether half pixel center. If set to ``True`` ,
`align_corners` should be False. Default: ``False`` .
Inputs:
- **input_x** (Tensor) - The input tensor. The shape of the tensor is :math:`(N, C, H, W)`.
Outputs:
Tensor, the shape of the output tensor is :math:`(N, C, NEW\_H, NEW\_W)`.
The data type is the same as the `input_x`.
Raises:
TypeError: If `size` is neither tuple nor list.
TypeError: If `align_corners` is not a bool.
ValueError: If length of `size` is not equal to 2.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input_tensor = Tensor(np.array([[[[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]]]), mindspore.float32)
>>> size = (2, 2)
>>> output = ops.ResizeNearestNeighbor(size=size)(input_tensor)
>>> print(output)
[[[[-0.1 0.3]
[ 0.4 0.5]]]]
"""
@prim_arg_register
def __init__(self, size, align_corners=False, half_pixel_centers=False):
self._set_prim_arg("size", type_it('ResizeNearestNeighbor', 'size', size, OpDtype.DT_LIST_INT, OpDtype.DT_TUPLE_INT))
self._set_prim_arg("align_corners", align_corners)
self._set_prim_arg("half_pixel_centers", half_pixel_centers)
def __call__(self, input_x):
return super().__call__(input_x, self.size, self.align_corners, self.half_pixel_centers)
class ResizeNearestNeighborV2Grad(Primitive):
r"""
Compute gradient of `ResizeNearestNeighborV2` operator.
Args:
grads (Tensor): A 4-D Tensor with shape [batch, channel, height, width].
size (Union[tuple[int], Tensor]): The size for the input image. 2 elements: [`height, width`].
align_corners (bool): Whether the centers of the 4 corner pixels of the input
and output tensors are aligned. Default: ``False``.
half_pixel_centers (bool): Default: ``False``.
Outputs:
A 4-D Tensor , with the same shape and data type as `image`.
"""
@prim_arg_register
def __init__(self, align_corners=False, half_pixel_centers=False):
self._set_prim_arg("align_corners", align_corners)
self._set_prim_arg("half_pixel_centers", half_pixel_centers)
def __call__(self, grads, size):
return super().__call__(grads, size, self.align_corners, self.half_pixel_centers)
class ResizeNearestNeighborV2(Primitive):
r"""
Resizes the input tensor to specific size by using the nearest neighbor algorithm.
The nearest neighbor algorithm selects the value of the nearest point and does not consider the
values of neighboring points at all, yielding a piecewise-constant interpolant.
Args:
align_corners (bool, optional): If ``True`` , the centers of the 4 corner pixels of the input and output
tensors are aligned, preserving the values at the corner pixels. Default: ``False`` .
half_pixel_centers (bool, optional): Whether half pixel center. If set to ``True`` ,
`align_corners` should be False. Default: ``False`` .
Inputs:
- **x** (Tensor) - 4-D with shape :math:`(batch, channels, height, width)` .
- **size** (Tensor) - The new size for the images. A 1-D int32 Tensor
of 2 elements: [`new_height, new_width`].
Outputs:
- **y** (Tensor) - The resized images. A 4-D with shape
:math:`(batch, channels, new\_height, new\_width)`. It has the same dtype as `x`.
Raises:
TypeError: If `x` or `size` is not a Tensor.
TypeError: If the data type of `size` is not int32.
TypeError: If `align_corners` or `half_pixel_centers` is not bool.
ValueError: If any value of `size` is non-positive.
ValueError: If the dimension of `x` is not 4.
ValueError: If the dimension of `size` is not 1.
ValueError: If the elements number of `size` is not 2.
ValueError: If `half_pixel_centers` and `align_corners` are both ``True`` .
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> from mindspore import dtype as mstype
>>> input_tensor = Tensor(np.ones((1, 1, 4, 4)), mstype.float32)
>>> size = Tensor([2, 2], mstype.int32)
>>> resize = ops.ResizeNearestNeighborV2()
>>> output = resize(input_tensor, size)
>>> print(output)
[[[[1. 1.]
[1. 1.]]]]
>>> print(output.shape)
(1, 1, 2, 2)
"""
@prim_arg_register
def __init__(self, align_corners=False, half_pixel_centers=False):
self._set_prim_arg("align_corners", align_corners)
self._set_prim_arg("half_pixel_centers", half_pixel_centers)
def __call__(self, image, size):
return super().__call__(image, size, self.align_corners, self.half_pixel_centers)
class ReverseV2(Primitive):
r"""
.. code-block::
prim = ops.ReverseV2(axis)
out = prim(input)
is equivalent to
.. code-block::
ops.flip(input, axis)
Refer to :func:`mindspore.ops.flip` for more details.
"""
@prim_arg_register
def __init__(self, axis):
self._set_prim_arg("axis", type_it('ReverseV2', 'axis', axis, OpDtype.DT_LIST_INT, OpDtype.DT_TUPLE_INT))
def __call__(self, input):
return _convert_stub(pyboost_reverse_v2(self, [input, self.axis]))
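# Illustrative usage sketch for ReverseV2 (not part of the generated operators): flipping a
# 2-D tensor along its last axis; `axis` is given as a list as accepted by the constructor above.
def _example_reverse_v2():
    import numpy as np
    from mindspore import Tensor, ops
    x = Tensor(np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32))
    # Expected result: [[3. 2. 1.], [6. 5. 4.]]
    return ops.ReverseV2(axis=[1])(x)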
class RFFTGrad(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input1'),
sig.make_sig('input2'),
sig.make_sig('n', default=None),
sig.make_sig('dim', default=-1),
sig.make_sig('norm', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input1, input2, n=None, dim=-1, norm=None):
return super().__call__(input1, input2, n, dim, norm if norm is None else str_to_enum('RFFTGrad', 'norm', norm))
rfft_grad_op=RFFTGrad()
class RFFT(Primitive):
r"""
.. code-block::
prim = ops.RFFT()
out = prim(input, n, dim, norm)
is equivalent to
.. code-block::
ops.rfft(input, n, dim, norm)
Refer to :func:`mindspore.ops.rfft` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('n', default=None),
sig.make_sig('dim', default=-1),
sig.make_sig('norm', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, n=None, dim=-1, norm=None):
return super().__call__(input, n, dim, norm if norm is None else str_to_enum('RFFT', 'norm', norm))
rfft_op=RFFT()
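# Illustrative usage sketch for RFFT (not part of the generated operators, and the exact output
# dtype is an assumption): the one-dimensional real FFT of [1, 2, 3, 4] has n//2 + 1 = 3 complex
# terms, approximately [10+0j, -2+2j, -2+0j].
def _example_rfft():
    import numpy as np
    from mindspore import Tensor, ops
    x = Tensor(np.array([1.0, 2.0, 3.0, 4.0]).astype(np.float32))
    # Defaults: n=None (use the full length), dim=-1, norm=None.
    return ops.RFFT()(x)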
class RightShift(Primitive):
r"""
Shift the value of each position of Tensor `input_x` to the right by corresponding bits in Tensor `input_y`.
The inputs are two tensors whose dtypes must be consistent and whose
shapes must be broadcastable.
.. math::
\begin{aligned}
&out_{i} =x_{i} >> y_{i}
\end{aligned}
.. warning::
This is an experimental API that is subject to change or deletion.
Inputs:
- **input_x** (Tensor) - The target tensor, will be shifted to the right
by `input_y` bits element-wise. Support all int and uint types.
- **input_y** (Tensor) - Number of bits shifted, the tensor must have the same type as `input_x`.
Outputs:
- **output** (Tensor) - The output tensor, has the same type as `input_x`.
Raises:
TypeError: If `input_x` or `input_y` is not a tensor.
TypeError: If `input_x` and `input_y` cannot be broadcast.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.array([1, 2, 3]).astype(np.uint8))
>>> input_y = Tensor(np.array([1, 1, 1]).astype(np.uint8))
>>> output = ops.RightShift()(input_x, input_y)
>>> print(output)
[0 1 1]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input_x, input_y):
return super().__call__(input_x, input_y)
right_shift_op=RightShift()
class Roll(Primitive):
r"""
Rolls the elements of a tensor along an axis.
Refer to :func:`mindspore.ops.roll` for more details.
Args:
shift (Union[list(int), tuple(int), int]): Specifies the number of places by which elements are shifted
positively (towards larger indices) along the specified dimension. Negative shifts will roll the elements
in the opposite direction.
axis (Union[list(int), tuple(int), int]): Specifies the dimension indexes of shape to be rolled.
Inputs:
- **input_x** (Tensor) - Input tensor.
Outputs:
Tensor, has the same shape and type as `input_x`.
Supported Platforms:
``GPU``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.array([0, 1, 2, 3, 4]).astype(np.float32))
>>> op = ops.Roll(shift=2, axis=0)
>>> output = op(input_x)
>>> print(output)
[3. 4. 0. 1. 2.]
>>> input_x = Tensor(np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]).astype(np.float32))
>>> op = ops.Roll(shift=-1, axis=0)
>>> output = op(input_x)
>>> print(output)
[[5. 6. 7. 8. 9.]
[0. 1. 2. 3. 4.]]
"""
@prim_arg_register
def __init__(self, shift, axis):
self._set_prim_arg("shift", type_it('Roll', 'shift', shift, (OpDtype.DT_INT, OpDtype.DT_LIST_INT), OpDtype.DT_TUPLE_INT))
self._set_prim_arg("axis", type_it('Roll', 'axis', axis, (OpDtype.DT_INT, OpDtype.DT_LIST_INT), OpDtype.DT_TUPLE_INT))
def __call__(self, input):
return super().__call__(input, self.shift, self.axis)
class Round(Primitive):
r"""
.. code-block::
prim = ops.Round()
out = prim(input)
is equivalent to
.. code-block::
ops.round(input)
Refer to :func:`mindspore.ops.round` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
round_op=Round()
class RsqrtGrad(Primitive):
r"""
Computes gradients for the Rsqrt.
Args:
y_backprop (Tensor): Input gradients tensor, has the same dtype and shape as `x`.
x (Tensor): Origin input tensor.
Returns:
Tensor, has the same dtype and shape as `x`.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, y_backprop, x):
return super().__call__(y_backprop, x)
rsqrt_grad_op=RsqrtGrad()
class Rsqrt(Primitive):
r"""
.. code-block::
prim = ops.Rsqrt()
out = prim(input)
is equivalent to
.. code-block::
ops.rsqrt(input)
Refer to :func:`mindspore.ops.rsqrt` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return _convert_stub(pyboost_rsqrt(self, [input]))
rsqrt_op=Rsqrt()
class ScalarCast(Primitive):
r"""
.. code-block::
prim = ops.ScalarCast()
out = prim(input_x, input_y)
is equivalent to
.. code-block::
ops.scalar_cast(input_x, input_y)
Refer to :func:`mindspore.ops.scalar_cast` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input_x, input_y):
return super().__call__(input_x, dtype_to_type_id('ScalarCast', 'input_y', input_y))
scalar_cast_op=ScalarCast()
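# Illustrative usage sketch for ScalarCast (not part of the generated operators; passing a
# mindspore dtype as the second argument is an assumption consistent with ops.scalar_cast).
def _example_scalar_cast():
    import mindspore as ms
    from mindspore import ops
    # Expected to return the Python integer 2.
    return ops.ScalarCast()(2.5, ms.int64)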
class ScatterAddExt(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim, index, src):
return _convert_stub(pyboost_scatter_add_ext(self, [input, dim, index, src]))
scatter_add_ext_op=ScatterAddExt()
class ScatterNd(Primitive):
r"""
.. code-block::
prim = ops.ScatterNd()
out = prim(indices, updates, shape)
is equivalent to
.. code-block::
ops.scatter_nd(indices, updates, shape)
Refer to :func:`mindspore.ops.scatter_nd` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, indices, updates, shape):
return super().__call__(indices, updates, shape)
scatter_nd_op=ScatterNd()
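# Illustrative usage sketch for ScatterNd (not part of the generated operators): two updates are
# scattered into a zero tensor of shape (4,), giving [1. 0. 2.2 0.].
def _example_scatter_nd():
    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops
    indices = Tensor(np.array([[0], [2]]), ms.int32)
    updates = Tensor(np.array([1.0, 2.2]), ms.float32)
    return ops.ScatterNd()(indices, updates, (4,))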
class Scatter(Primitive):
r"""
Reverse operation of gather: scatters `src` into `input` along the given `dim` according to `index`.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim, index, src, reduce):
return _convert_stub(pyboost_scatter(self, [input, dim, index, src, reduce]))
scatter_op=Scatter()
class Select(Primitive):
r"""
.. code-block::
prim = ops.Select()
out = prim(condition, input, other)
is equivalent to
.. code-block::
ops.select(condition, input, other)
Refer to :func:`mindspore.ops.select` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, condition, input, other):
return _convert_stub(pyboost_select(self, [condition, input, other]))
select_op=Select()
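# Illustrative usage sketch for Select (not part of the generated operators): Select picks from
# `input` where `condition` is True and from `other` where it is False, so the result here is [1 4].
def _example_select():
    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops
    condition = Tensor(np.array([True, False]))
    x = Tensor(np.array([1, 2]), ms.int32)
    y = Tensor(np.array([3, 4]), ms.int32)
    return ops.Select()(condition, x, y)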
class SequenceConcat(Primitive):
r"""
.. code-block::
prim = ops.SequenceConcat(axis)
out = prim(x)
is equivalent to
.. code-block::
ops.sequence_concat(x, axis)
Refer to :func:`mindspore.ops.sequence_concat` for more details.
"""
@prim_arg_register
def __init__(self, axis=0):
self._set_prim_arg("axis", axis)
def __call__(self, x):
return super().__call__(x, self.axis)
class SigmoidGrad(Primitive):
r"""
Gets the gradient of Sigmoid operation.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, y, dy):
return _convert_stub(pyboost_sigmoid_grad(self, [y, dy]))
sigmoid_grad_op=SigmoidGrad()
class Sigmoid(Primitive):
r"""
.. code-block::
prim = ops.Sigmoid()
out = prim(input)
is equivalent to
.. code-block::
ops.sigmoid(input)
Refer to :func:`mindspore.ops.sigmoid` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return _convert_stub(pyboost_sigmoid(self, [input]))
sigmoid_op=Sigmoid()
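# Illustrative usage sketch for Sigmoid (not part of the generated operators):
# sigmoid(x) = 1 / (1 + exp(-x)), so [-1, 0, 1] maps to approximately [0.269, 0.5, 0.731].
def _example_sigmoid():
    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops
    x = Tensor(np.array([-1.0, 0.0, 1.0]), ms.float32)
    return ops.Sigmoid()(x)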
class SiLUGrad(Primitive):
r"""
Performs grad of SiLU operation.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, dout, x):
return _convert_stub(pyboost_silu_grad(self, [dout, x]))
silu_grad_op=SiLUGrad()
class SiLU(Primitive):
r"""
.. code-block::
prim = ops.SiLU()
out = prim(input)
is equivalent to
.. code-block::
ops.silu(input)
Refer to :func:`mindspore.ops.silu` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return _convert_stub(pyboost_silu(self, [input]))
silu_op=SiLU()
class Sin(Primitive):
r"""
.. code-block::
prim = ops.Sin()
out = prim(input)
is equivalent to
.. code-block::
ops.sin(input)
Refer to :func:`mindspore.ops.sin` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return _convert_stub(pyboost_sin(self, [input]))
sin_op=Sin()
class Sinc(Primitive):
r"""
.. code-block::
prim = ops.Sinc()
out = prim(input)
is equivalent to
.. code-block::
ops.sinc(input)
Refer to :func:`mindspore.ops.sinc` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
sinc_op=Sinc()
class Sinh(Primitive):
r"""
.. code-block::
prim = ops.Sinh()
out = prim(input)
is equivalent to
.. code-block::
ops.sinh(input)
Refer to :func:`mindspore.ops.sinh` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
sinh_op=Sinh()
class SliceExt(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim, start, end, step):
return _convert_stub(pyboost_slice_ext(self, [input, dim, start, end, step]))
slice_ext_op=SliceExt()
class SoftmaxBackward(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('dout'),
sig.make_sig('out'),
sig.make_sig('dim', default=-1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, dout, out, dim=-1):
return _convert_stub(pyboost_softmax_backward(self, [dout, out, dim]))
softmax_backward_op=SoftmaxBackward()
class Softmax(Primitive):
r"""
Applies the Softmax operation to the input tensor on the specified axis.
Refer to :func:`mindspore.ops.softmax` for more details.
Args:
axis (Union[int, tuple], optional): The axis to perform the Softmax operation. Default: ``-1`` .
Inputs:
- **input** (Tensor) - Tensor of shape :math:`(N, *)`, where :math:`*` means, any number of
additional dimensions.
Outputs:
Tensor, with the same type and shape as the input.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
>>> softmax = ops.Softmax()
>>> output = softmax(input)
>>> print(output)
[0.01165623 0.03168492 0.08612854 0.23412167 0.6364086 ]
"""
@prim_arg_register
def __init__(self, axis=-1):
self._set_prim_arg("axis", type_it('Softmax', 'axis', axis, OpDtype.DT_INT, OpDtype.DT_TUPLE_INT))
def __call__(self, input):
return _convert_stub(pyboost_softmax(self, [input, self.axis]))
class SoftplusExt(Primitive):
r"""
.. code-block::
prim = ops.SoftplusExt()
out = prim(input, beta, threshold)
is equivalent to
.. code-block::
ops.softplus_ext(input, beta, threshold)
Refer to :func:`mindspore.ops.softplus_ext` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('beta', default=1),
sig.make_sig('threshold', default=20),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, beta=1, threshold=20):
return _convert_stub(pyboost_softplus_ext(self, [input, beta, threshold]))
softplus_ext_op=SoftplusExt()
class SoftplusGradExt(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('dout'),
sig.make_sig('x'),
sig.make_sig('beta', default=1),
sig.make_sig('threshold', default=20),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, dout, x, beta=1, threshold=20):
return _convert_stub(pyboost_softplus_grad_ext(self, [dout, x, beta, threshold]))
softplus_grad_ext_op=SoftplusGradExt()
class SolveTriangular(Primitive):
r"""
.. code-block::
prim = ops.SolveTriangular()
out = prim(a, b, trans, lower, unit_diagonal)
is equivalent to
.. code-block::
ops.solve_triangular(a, b, trans, lower, unit_diagonal)
Refer to :func:`mindspore.ops.solve_triangular` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('a'),
sig.make_sig('b'),
sig.make_sig('trans', default=0),
sig.make_sig('lower', default=False),
sig.make_sig('unit_diagonal', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, a, b, trans=0, lower=False, unit_diagonal=False):
return super().__call__(a, b, trans, lower, unit_diagonal)
solve_triangular_op=SolveTriangular()
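# A minimal usage sketch for SolveTriangular (hedged: argument semantics are assumed to
# mirror :func:`mindspore.ops.solve_triangular`, i.e. scipy-style triangular solves):
#
#     import numpy as np
#     from mindspore import Tensor
#     a = Tensor(np.array([[2.0, 0.0], [1.0, 3.0]], np.float32))   # lower-triangular matrix
#     b = Tensor(np.array([4.0, 7.0], np.float32))                 # right-hand side
#     x = solve_triangular_op(a, b, 0, True, False)                # trans=0, lower=True
#     # Forward substitution on a @ x = b gives x ≈ [2.0, 5/3].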
class Split(Primitive):
r"""
Splits the input tensor into `output_num` tensors along the given axis.
Refer to :func:`mindspore.ops.split` for more details.
Args:
axis (int): Index of the split position. Default: ``0`` .
output_num (int): The number of output tensors. Must be positive int. Default: ``1`` .
Inputs:
- **input_x** (Tensor) - The shape of tensor is :math:`(x_0, x_1, ..., x_{R-1})`, R >= 1.
Outputs:
tuple[Tensor], the shape of each output tensor is the same, which is
:math:`(x_0, x_1, ..., x_{axis}/{output\_num}, ..., x_{R-1})`.
And the data type is the same as `input_x`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> split = ops.Split(1, 2)
>>> x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]), mindspore.int32)
>>> print(x)
[[1 1 1 1]
[2 2 2 2]]
>>> output = split(x)
>>> print(output)
(Tensor(shape=[2, 2], dtype=Int32, value=
[[1, 1],
[2, 2]]), Tensor(shape=[2, 2], dtype=Int32, value=
[[1, 1],
[2, 2]]))
>>> split = ops.Split(1, 4)
>>> output = split(x)
>>> print(output)
(Tensor(shape=[2, 1], dtype=Int32, value=
[[1],
[2]]), Tensor(shape=[2, 1], dtype=Int32, value=
[[1],
[2]]), Tensor(shape=[2, 1], dtype=Int32, value=
[[1],
[2]]), Tensor(shape=[2, 1], dtype=Int32, value=
[[1],
[2]]))
"""
@prim_arg_register
def __init__(self, axis=0, output_num=1):
self._set_prim_arg("axis", axis)
self._set_prim_arg("output_num", output_num)
def __call__(self, input_x):
return super().__call__(input_x, self.axis, self.output_num)
class SplitTensor(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input_x'),
sig.make_sig('split_int'),
sig.make_sig('axis', default=0),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input_x, split_int, axis=0):
return _convert_stub(pyboost_split_tensor(self, [input_x, split_int, axis]))
split_tensor_op=SplitTensor()
class SplitWithSize(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input_x'),
sig.make_sig('split_sections'),
sig.make_sig('axis', default=0),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input_x, split_sections, axis=0):
return _convert_stub(pyboost_split_with_size(self, [input_x, split_sections, axis]))
split_with_size_op=SplitWithSize()
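# A minimal usage sketch for SplitWithSize (hedged: the op is assumed to split the input
# into chunks whose sizes along `axis` are given by `split_sections`):
#
#     import numpy as np
#     from mindspore import Tensor
#     x = Tensor(np.arange(6).reshape(1, 6).astype(np.float32))
#     parts = split_with_size_op(x, (2, 4), 1)
#     # parts is expected to be a tuple of tensors with shapes (1, 2) and (1, 4).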
class SqrtGrad(Primitive):
r"""
Performs grad of Sqrt operation.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, dy, y):
return super().__call__(dy, y)
sqrt_grad_op=SqrtGrad()
class Sqrt(Primitive):
r"""
.. code-block::
prim = ops.Sqrt()
out = prim(x)
is equivalent to
.. code-block::
ops.sqrt(x)
Refer to :func:`mindspore.ops.sqrt` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x):
return _convert_stub(pyboost_sqrt(self, [x]))
sqrt_op=Sqrt()
class Square(Primitive):
r"""
.. code-block::
prim = ops.Square()
out = prim(input)
is equivalent to
.. code-block::
ops.square(input)
Refer to :func:`mindspore.ops.square` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return _convert_stub(pyboost_square(self, [input]))
square_op=Square()
class StackExt(Primitive):
r"""
.. code-block::
prim = ops.StackExt(dim)
out = prim(tensors)
is equivalent to
.. code-block::
ops.stack_ext(tensors, dim)
Refer to :func:`mindspore.ops.stack_ext` for more details.
"""
@prim_arg_register
def __init__(self, dim=0):
self._set_prim_arg("dim", dim)
def __call__(self, tensors):
return _convert_stub(pyboost_stack_ext(self, [tensors, self.dim]))
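# A minimal usage sketch for StackExt (hedged: assumed to stack same-shaped tensors along
# a new dimension `dim`, analogous to :func:`mindspore.ops.stack`):
#
#     import numpy as np
#     from mindspore import Tensor
#     stack = StackExt(dim=0)
#     a = Tensor(np.array([1.0, 2.0], np.float32))
#     b = Tensor(np.array([3.0, 4.0], np.float32))
#     out = stack([a, b])
#     # out is expected to have shape (2, 2): [[1., 2.], [3., 4.]].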
class StridedSlice(Primitive):
r"""
.. code-block::
prim = ops.StridedSlice(begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask)
out = prim(input_x, begin, end, strides)
is equivalent to
.. code-block::
ops.strided_slice(input_x, begin, end, strides, begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask)
Refer to :func:`mindspore.ops.strided_slice` for more details.
"""
@prim_arg_register
def __init__(self, begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0):
self._set_prim_arg("begin_mask", begin_mask)
self._set_prim_arg("end_mask", end_mask)
self._set_prim_arg("ellipsis_mask", ellipsis_mask)
self._set_prim_arg("new_axis_mask", new_axis_mask)
self._set_prim_arg("shrink_axis_mask", shrink_axis_mask)
def __call__(self, input_x, begin, end, strides):
return super().__call__(input_x, begin, end, strides, self.begin_mask, self.end_mask, self.ellipsis_mask, self.new_axis_mask, self.shrink_axis_mask)
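# A minimal usage sketch for StridedSlice (hedged: mask arguments are left at their
# defaults; semantics follow :func:`mindspore.ops.strided_slice`):
#
#     import numpy as np
#     from mindspore import Tensor
#     x = Tensor(np.arange(9).reshape(3, 3).astype(np.float32))
#     out = StridedSlice()(x, (1, 0), (3, 3), (1, 2))
#     # Rows 1..2 and every second column: expected [[3., 5.], [6., 8.]].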
class SubExt(Primitive):
r"""
.. code-block::
prim = ops.SubExt()
out = prim(input, other, alpha)
is equivalent to
.. code-block::
ops.sub_ext(input, other, alpha)
Refer to :func:`mindspore.ops.sub_ext` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input', dtype=sig.sig_dtype.T),
sig.make_sig('other', dtype=sig.sig_dtype.T),
sig.make_sig('alpha', dtype=sig.sig_dtype.T1, default=1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other, alpha=1):
return _convert_stub(pyboost_sub_ext(self, [input, other, alpha]))
sub_ext_op=SubExt()
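# A minimal usage sketch for SubExt (hedged: `alpha` is assumed to scale `other` before
# the subtraction, i.e. out = input - alpha * other, in the ext-style API):
#
#     import numpy as np
#     from mindspore import Tensor
#     x = Tensor(np.array([10.0, 20.0], np.float32))
#     y = Tensor(np.array([1.0, 2.0], np.float32))
#     out = sub_ext_op(x, y, 2)
#     # Expected [8., 16.], since out = x - 2 * y.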
class Sub(Primitive):
r"""
.. code-block::
prim = ops.Sub()
out = prim(input, other)
is equivalent to
.. code-block::
ops.sub(input, other)
Refer to :func:`mindspore.ops.sub` for more details.
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
return super().__call__(input, other)
sub_op=Sub()
class SumExt(Primitive):
r"""
Calculates the sum of Tensor elements over a given dim.
Note:
The `dim` with tensor type is only used for compatibility with older versions and is not recommended.
Args:
input (Tensor): The input tensor.
dim (Union[None, int, tuple(int), list(int), Tensor]): Dimensions along which a sum is performed.
If ``None`` , sum all the elements of the input tensor.
If the `dim` is a tuple or list of ints, a sum is performed on all the dimensions specified in the tuple.
Must be in the range :math:`[-input.ndim, input.ndim)` . Default: ``None`` .
keepdim (bool): Whether the output tensor has `dim` retained or not.
If ``True`` , keep these reduced dimensions and the length is 1.
If ``False`` , don't keep these dimensions. Default: ``False`` .
dtype (:class:`mindspore.dtype`): The desired data type of returned Tensor. Default: ``None`` .
Returns:
A Tensor, sum of elements over a given `dim` in `input`.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If `dim` is not an int, tuple(int), list(int), Tensor or None.
ValueError: If `dim` is not in the range :math:`[-input.ndim, input.ndim)` .
TypeError: If `keepdim` is not a bool.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> from mindspore import dtype as mstype
>>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mstype.float32)
>>> out = ops.sum(x)
>>> print(out)
270.0
>>> out = ops.sum(x, dim=2)
>>> print(out)
[[ 6. 12. 18.]
[24. 30. 36.]
[42. 48. 54.]]
>>> out = ops.sum(x, dim=2, keepdim=True)
>>> print(out)
[[[ 6.]
[12.]
[18.]]
[[24.]
[30.]
[36.]]
[[42.]
[48.]
[54.]]]
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('dim', default=None),
sig.make_sig('keepdim', default=False),
sig.make_sig('dtype', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim=None, keepdim=False, dtype=None):
return _convert_stub(pyboost_sum_ext(self, [input, dim, keepdim, dtype if dtype is None else dtype_to_type_id('SumExt', 'dtype', dtype)]))
sum_ext_op=SumExt()
class TanhGrad(Primitive):
r"""
Computes TanhGrad of input element-wise.
Returns:
Tensor, has the same type as input.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, y, dy):
return _convert_stub(pyboost_tanh_grad(self, [y, dy]))
tanh_grad_op=TanhGrad()
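# A minimal numeric sketch for TanhGrad (hedged: the backward rule below is the standard
# tanh derivative; the op's exact contract is assumed to be dx = dy * (1 - y**2)):
#
#     import numpy as np
#     from mindspore import Tensor
#     y = Tensor(np.array([0.0, 0.5], np.float32))     # forward outputs y = tanh(x)
#     dy = Tensor(np.array([1.0, 1.0], np.float32))    # incoming gradients
#     dx = tanh_grad_op(y, dy)
#     # Expected [1.0, 0.75], since d tanh(x) / dx = 1 - tanh(x) ** 2.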
class Tanh(Primitive):
r"""
.. code-block::
prim = ops.Tanh()
out = prim(input)
is equivalent to
.. code-block::
ops.tanh(input)
Refer to :func:`mindspore.ops.tanh` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return _convert_stub(pyboost_tanh(self, [input]))
tanh_op=Tanh()
class TensorCopySlices(Primitive):
r"""
Copies continuous memory.
Inputs:
- **x** (Tensor) - The target Tensor.
- **value** (Tensor) - The tensor to update x.
- **begin** (tuple[int]) - A tuple which represents the location where to start. Only
constant value is allowed.
- **end** (tuple[int]) - A tuple which represents the maximum location where to end.
Only constant value is allowed.
- **strides** (tuple[int]) - A tuple which represents the stride is continuously added
before reaching the maximum location. Only constant value is allowed.
Outputs:
- **y** (Tensor) - Has the same shape and data type as `x`.
Examples:
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore.ops.operations import _inner_ops
>>> copy_slices = _inner_ops.TensorCopySlices()
>>> out = copy_slices(Tensor(np.zeros((5, 5))), Tensor(np.ones((2, 5))), (3, 0), (5, 5), (1, 1))
>>> print(out)
[[1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]]
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, value, begin, end, strides):
return super().__call__(x, value, begin, end, strides)
tensor_copy_slices_op=TensorCopySlices()
class TensorShape(Primitive):
r"""
Returns the shape of the input tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
>>> output = ops.TensorShape()(input_x)
>>> print(output)
[3 2 1]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input_x):
return super().__call__(input_x)
tensor_shape_op=TensorShape()
class TopkExt(Primitive):
r"""
.. code-block::
prim = ops.TopkExt()
out = prim(input, k, dim, largest, sorted)
is equivalent to
.. code-block::
ops.topk_ext(input, k, dim, largest, sorted)
Refer to :func:`mindspore.ops.topk_ext` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('k'),
sig.make_sig('dim', default=-1),
sig.make_sig('largest', default=True),
sig.make_sig('sorted', default=True),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, k, dim=-1, largest=True, sorted=True):
return _convert_stub(pyboost_topk_ext(self, [input, k, dim, largest, sorted]))
topk_ext_op=TopkExt()
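# A minimal usage sketch for TopkExt (hedged: the op is assumed to return a
# (values, indices) pair like :func:`mindspore.ops.topk`):
#
#     import numpy as np
#     from mindspore import Tensor
#     x = Tensor(np.array([1.0, 3.0, 2.0], np.float32))
#     values, indices = topk_ext_op(x, 2)
#     # With largest=True and sorted=True, expected values [3., 2.] and indices [1, 2].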
class Trace(Primitive):
r"""
.. code-block::
prim = ops.Trace()
out = prim(input)
is equivalent to
.. code-block::
ops.trace(input)
Refer to :func:`mindspore.ops.trace` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
trace_op=Trace()
class Transpose(Primitive):
r"""
.. code-block::
prim = ops.Transpose()
out = prim(input, input_perm)
is equivalent to
.. code-block::
ops.transpose(input, input_perm)
Refer to :func:`mindspore.ops.transpose` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, input_perm):
return _convert_stub(pyboost_transpose(self, [input, input_perm]))
transpose_op=Transpose()
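# A minimal usage sketch for Transpose (hedged only on backend availability; `input_perm`
# is the axis permutation, as documented for :func:`mindspore.ops.transpose`):
#
#     import numpy as np
#     from mindspore import Tensor
#     x = Tensor(np.zeros((2, 3, 4), np.float32))
#     out = transpose_op(x, (2, 0, 1))
#     # Expected output shape: (4, 2, 3).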
class Tril(Primitive):
r"""
.. code-block::
prim = ops.Tril(diagonal)
out = prim(input)
is equivalent to
.. code-block::
ops.tril(input, diagonal)
Refer to :func:`mindspore.ops.tril` for more details.
"""
@prim_arg_register
def __init__(self, diagonal=0):
self._set_prim_arg("diagonal", diagonal)
def __call__(self, input):
return _convert_stub(pyboost_tril(self, [input, self.diagonal]))
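# A minimal usage sketch for Tril (hedged only on backend availability; `diagonal`
# offsets the kept lower triangle, as in :func:`mindspore.ops.tril`):
#
#     import numpy as np
#     from mindspore import Tensor
#     x = Tensor(np.ones((3, 3), np.float32))
#     out = Tril(diagonal=0)(x)
#     # Elements above the main diagonal are zeroed:
#     # [[1., 0., 0.], [1., 1., 0.], [1., 1., 1.]].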
class Triu(Primitive):
r"""
.. code-block::
prim = ops.Triu(diagonal)
out = prim(input)
is equivalent to
.. code-block::
ops.triu(input, diagonal)
Refer to :func:`mindspore.ops.triu` for more details.
"""
@prim_arg_register
def __init__(self, diagonal=0):
self._set_prim_arg("diagonal", diagonal)
def __call__(self, input):
return _convert_stub(pyboost_triu(self, [input, self.diagonal]))
class TupleToTensor(Primitive):
r"""
.. code-block::
prim = ops.TupleToTensor()
out = prim(input_tuple, dtype)
is equivalent to
.. code-block::
ops.tuple_to_tensor(input_tuple, dtype)
Refer to :func:`mindspore.ops.tuple_to_tensor` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input_tuple'),
sig.make_sig('dtype', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input_tuple, dtype=None):
return super().__call__(input_tuple, dtype if dtype is None else dtype_to_type_id('TupleToTensor', 'dtype', dtype))
tuple_to_tensor_op=TupleToTensor()
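# A minimal usage sketch for TupleToTensor (hedged: when `dtype` is None the element
# type is assumed to be inferred from the tuple contents):
#
#     from mindspore import dtype as mstype
#     out = tuple_to_tensor_op((1, 2, 3), mstype.float32)
#     # Expected a 1-D tensor [1., 2., 3.] of dtype float32.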
class UniformExt(Primitive):
r"""
Generates random numbers according to the Uniform random number distribution.
Inputs:
- **tensor** (Tensor) - The tensor whose shape and dtype determine the generated random values.
- **a** (float) - Lower bound of the random numbers. Default: 0.0.
- **b** (float) - Upper bound of the random numbers. Default: 0.0.
- **seed** (int) - Seed for random number generation. Default: 0.
- **offset** (int) - Positional offset in the tensor to start filling with random numbers. Default: 0.
Raises:
TypeError: If `a` or `b` is not a float.
TypeError: If `tensor` is not a Tensor.
ValueError: If `a` is larger than `b`.
Outputs:
- **output** (Tensor) - With the same type and shape as `tensor`.
Supported Platforms:
``Ascend``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore import dtype as mstype
>>> from mindspore.ops.operations.random_ops import UniformExt
>>> x = Tensor(np.random.randn(3,4), mstype.float64)
>>> uniform = UniformExt()
>>> y = uniform(x, a=1.0, b=2.0, seed=10, offset=5)
>>> print(y.shape)
(3, 4)
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, tensor, a, b, seed, offset):
return _convert_stub(pyboost_uniform_ext(self, [tensor, a, b, seed, offset]))
uniform_ext_op=UniformExt()
class UnsortedSegmentSum(Primitive):
r"""
.. code-block::
prim = ops.UnsortedSegmentSum()
out = prim(input_x, segment_ids, num_segments)
is equivalent to
.. code-block::
ops.unsorted_segment_sum(input_x, segment_ids, num_segments)
Refer to :func:`mindspore.ops.unsorted_segment_sum` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input_x, segment_ids, num_segments):
return super().__call__(input_x, segment_ids, num_segments)
unsorted_segment_sum_op=UnsortedSegmentSum()
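# A minimal worked example for UnsortedSegmentSum (hedged only on backend availability;
# values follow the documented semantics of :func:`mindspore.ops.unsorted_segment_sum`):
#
#     import numpy as np
#     from mindspore import Tensor
#     x = Tensor(np.array([1.0, 2.0, 3.0, 4.0], np.float32))
#     segment_ids = Tensor(np.array([0, 0, 1, 2], np.int32))
#     out = unsorted_segment_sum_op(x, segment_ids, 3)
#     # Expected [3., 3., 4.]: elements 0 and 1 fall into segment 0, element 2 into
#     # segment 1, element 3 into segment 2.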
class UpsampleBilinear2DGrad(Primitive):
r"""
Upsample the 2-D gradient data with bilinear interpolation algorithm.
Note:
One of 'scales' and 'output_size' must be specified. And it is an error if both are specified.
Inputs:
- **dy** (Tensor) - Tensor of shape [N, C, H, W]. Must be one of the following types:
float16, float32, float64.
- **input_size** (Union[tuple[int], list[int]]): A required tuple[int] which contains 4 elements:
[batch, channels, height, width]. Must:
input_size[0] == dy.shape[0]
input_size[1] == dy.shape[1].
- **output_size** (Union[tuple[int], list[int]]): An optional tuple[int]. Default: ``None``.
It contains 2 elements: height, width, whose elements should be the same as `dy`. Must:
dy.shape[2] == output_size[0],
dy.shape[3] == output_size[1].
- **scales** (Union[tuple[float], list[float]]): An optional tuple[float]. Default: ``None``.
The scale array along each dimension, contains 2 elements: scale_height, scale_width. Must:
dy.shape[2] == floor(input_size[2] * scales[0]),
dy.shape[3] == floor(input_size[3] * scales[1]).
- **align_corners** (bool): An optional bool. Default: ``False``.
Outputs:
- **dx** (Tensor) - A Tensor with shape depending on input_size, and its dtype is the same as `dy`.
"""
__mindspore_signature__ = (
sig.make_sig('dy'),
sig.make_sig('input_size'),
sig.make_sig('output_size', default=None),
sig.make_sig('scales', default=None),
sig.make_sig('align_corners', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, dy, input_size, output_size=None, scales=None, align_corners=False):
return _convert_stub(pyboost_upsample_bilinear2d_grad(self, [dy, input_size, output_size, scales, align_corners]))
upsample_bilinear2d_grad_op=UpsampleBilinear2DGrad()
class UpsampleBilinear2D(Primitive):
r"""
Performs upsampling with bilinear interpolation across 2 dims for 4-dim input Tensor.
This operator scales up the input with specified `output_size` or `scales` factors,
using the bilinear upscaling algorithm.
Note:
One of `scales` and `output_size` must be specified. And it is an error if both are specified.
Inputs:
- **x** (Tensor) - 4D tensor of shape :math:`(N, C, H_{in}, W_{in})`. Supporting types:
[float16, float32, float64].
- **output_size** (Union[tuple[int], list[int]]): A tuple or list of 2 int elements
:math:`(output\_height, output\_width)`. Default: ``None``.
- **scales** (Union[tuple[float], list[float]]): A tuple or list of 2 float
elements :math:`(scale\_height, scale\_width)`. Default: ``None``.
- **align_corners** (bool, optional): An optional bool. Default: ``False``.
If ``True``, the input and output tensors are aligned by the center points of their corner pixels,
preserving the values at the corner pixels.
If ``False`` , the input and output tensors are aligned by the corner points of their corner pixels,
and the interpolation uses edge value padding for out-of-boundary values.
Outputs:
- **y** (Tensor) - Upsampled output with the same data type as `x`, whose shape is
:math:`(N, C, H_{out}, W_{out})`.
Raises:
TypeError: When `output_size` is not ``None`` and `output_size` is not list[int] or tuple[int].
TypeError: When `scales` is not ``None`` and `scales` is not list[float] or tuple[float].
TypeError: If dtype of `x` is not in [float16, float32, float64].
TypeError: If type of `align_corners` is not bool.
ValueError: If any value of `output_size` is negative or zero when `output_size` is not ``None``.
ValueError: If any value of `scales` is negative or zero when `scales` is not ``None``.
ValueError: If shape of `x` is not 4D.
ValueError: If neither `scales` nor `output_size` is specified, or both are specified.
ValueError: If size of `scales` is not equal to 2 when `scales` is specified.
ValueError: If size of `output_size` is not equal to 2 when `output_size` is specified.
Supported Platforms:
``Ascend``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> net = ops.auto_generate.UpsampleBilinear2D()
>>> in_x = Tensor(input_data=np.random.randn(2, 3, 512, 256))
>>> output_size=[64, 48]
>>> out = net(in_x, output_size, None)
>>> print(out.shape)
(2, 3, 64, 48)
>>>
>>> net = ops.auto_generate.UpsampleBilinear2D()
>>> in_x = Tensor(np.array([[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], [[0.7, 0.8, 0.9], [1.0, 1.1, 1.2]]]]).astype(np.float32))
>>> output_size=[4, 5]
>>> out = net(in_x, output_size, None, True)
>>> print(out)
[[[[0.1000, 0.1500, 0.2000, 0.2500, 0.3000],
[0.2000, 0.2500, 0.3000, 0.3500, 0.4000],
[0.3000, 0.3500, 0.4000, 0.4500, 0.5000],
[0.4000, 0.4500, 0.5000, 0.5500, 0.6000]],
[[0.7000, 0.7500, 0.8000, 0.8500, 0.9000],
[0.8000, 0.8500, 0.9000, 0.9500, 1.0000],
[0.9000, 0.9500, 1.0000, 1.0500, 1.1000],
[1.0000, 1.0500, 1.1000, 1.1500, 1.2000]]]]
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('output_size', default=None),
sig.make_sig('scales', default=None),
sig.make_sig('align_corners', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, output_size=None, scales=None, align_corners=False):
return _convert_stub(pyboost_upsample_bilinear2d(self, [x, output_size, scales, align_corners]))
upsample_bilinear2d_op=UpsampleBilinear2D()
class UpsampleLinear1DGrad(Primitive):
r"""
Upsample the 1-D gradient data with linear interpolation algorithm.
Note:
One of 'scales' and 'output_size' must be specified. And it is an error if both are specified.
Inputs:
- **dy** (Tensor) - Tensor of shape [N, C, L]. Must be one of the following types:
float16, float32, float64.
- **input_size** (Union[tuple[int], list[int]]): A required tuple[int] which contains 3 elements:
[batch, channels, length]. Must:
input_size[0] == dy.shape[0]
input_size[1] == dy.shape[1].
- **output_size** (Union[tuple[int], list[int]]): An optional tuple[int]. Default: ``None``.
It contains 1 element: length, whose value should match the corresponding dimension of `dy`. Must:
dy.shape[2] == output_size[0].
- **scales** (Union[tuple[float], list[float]]): An optional tuple[float]. Default: ``None``.
The scale array along each dimension, contains 1 element: scale_length. Must:
dy.shape[2] == floor(input_size[2] * scales[0]).
- **align_corners** (bool): An optional bool. Default: ``False``.
Outputs:
- **dx** (Tensor) - A Tensor with shape depending on input_size, and its dtype is the same as `dy`.
"""
__mindspore_signature__ = (
sig.make_sig('dy'),
sig.make_sig('input_size'),
sig.make_sig('output_size', default=None),
sig.make_sig('scales', default=None),
sig.make_sig('align_corners', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, dy, input_size, output_size=None, scales=None, align_corners=False):
return _convert_stub(pyboost_upsample_linear1d_grad(self, [dy, input_size, output_size, scales, align_corners]))
upsample_linear1d_grad_op=UpsampleLinear1DGrad()
class UpsampleLinear1D(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('output_size', default=None),
sig.make_sig('scales', default=None),
sig.make_sig('align_corners', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, output_size=None, scales=None, align_corners=False):
return _convert_stub(pyboost_upsample_linear1d(self, [x, output_size, scales, align_corners]))
upsample_linear1d_op=UpsampleLinear1D()
class UpsampleNearest1DGrad(Primitive):
r"""
Upsample the 1-D gradient data with the nearest neighbor interpolation algorithm.
Note:
Only one of 'scales' and 'output_size' can be specified, and it is an error if both are specified.
Inputs:
- **dy** (Tensor) - Tensor of shape [N, C, L], Must be one of the following types:
float16, float32, float64.
- **input_size** (tuple[int]): A required tuple[int], which contains 3 elements:
[min_batch, channels, length].
Must: input_size[0] == dy.shape[0], input_size[1] == dy.shape[1].
- **output_size** (tuple[int]): An optional tuple[int]. Default: ``None``.
It contains 1 element: length, whose value should match the corresponding dimension of `dy`. Must:
dy.shape[2] == output_size[0].
- **scales** (tuple[float]): An optional tuple[float]. Default: ``None``.
The scale array along each dimension, contains 1 element: scale_length. Must:
dy.shape[2] == floor(input_size[2] * scales[0]).
Outputs:
- **dx** (Tensor) - A 3-D tensor. Has the same type as `dy`, shape depends on `input_size`.
"""
__mindspore_signature__ = (
sig.make_sig('dy'),
sig.make_sig('input_size'),
sig.make_sig('output_size', default=None),
sig.make_sig('scales', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, dy, input_size, output_size=None, scales=None):
return _convert_stub(pyboost_upsample_nearest1d_grad(self, [dy, input_size, output_size, scales]))
upsample_nearest1d_grad_op=UpsampleNearest1DGrad()
class UpsampleNearest1D(Primitive):
r"""
Performs nearest neighbor upsampling operation.
This operator scales up the input with specified `output_size` or `scales` factors, using the nearest
neighbor algorithm.
One of `output_size` or `scales` must be given, and they cannot both be specified at the same time.
Inputs:
- **x** (Tensor) - 3D tensor of shape :math:`(N, C, L_{in})`.
Supporting types: [uint8, float16, float32, float64].
- **output_size** (Union[tuple[int], list[int]]): A tuple or list of int specifying the output volumetric size.
Default: ``None``.
- **scales** (Union[tuple[float], list[float]]): A tuple or list of float specifying the upsampling factors.
Default: ``None``.
Outputs:
- **y** (Tensor) - Upsampled output with the same type as `x` , whose shape is
:math:`(N, C, L_{out})`.
Raises:
TypeError: When `output_size` is not ``None`` and `output_size` is not list[int] or tuple[int].
TypeError: When `scales` is not ``None`` and `scales` is not list[float] or tuple[float].
TypeError: If dtype of `x` is not in [uint8, float16, float32, float64].
ValueError: If any value of `output_size` is negative or zero when `output_size` is not ``None``.
ValueError: If any value of `scales` is negative or zero when `scales` is not ``None``.
ValueError: If shape of `x` is not 3D.
ValueError: If neither `scales` nor `output_size` is specified, or both are specified.
ValueError: If size of `scales` is not equal to 1 when `scales` is specified.
ValueError: If size of `output_size` is not equal to 1 when `output_size` is specified.
Supported Platforms:
``Ascend``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> from mindspore import dtype as mstype
>>> x = Tensor(np.arange(10).reshape(1, 2, 5), mstype.float32)
>>> output_size = [8,]
>>> net = ops.auto_generate.UpsampleNearest1D()
>>> output = net(x, output_size, None)
>>> print(output)
[[[0., 0., 1., 1., 2., 3., 3., 4.],
[5., 5., 6., 6., 7., 8., 8., 9.]]]
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('output_size', default=None),
sig.make_sig('scales', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, output_size=None, scales=None):
return _convert_stub(pyboost_upsample_nearest1d(self, [x, output_size, scales]))
upsample_nearest1d_op=UpsampleNearest1D()
class UpsampleNearest2DGrad(Primitive):
r"""
Upsample the 2-D gradient data with the nearest neighbor interpolation algorithm.
Note:
Only one of 'scales' and 'output_size' can be specified, and it is an error if both are specified.
Inputs:
- **dy** (Tensor) - Tensor of shape [N, C, H, W], Must be one of the following types:
float16, float32, float64.
- **input_size** (tuple[int]): A required tuple[int], which contains 4 elements:
[min_batch, channels, height, width].
Must: input_size[0] == dy.shape[0], input_size[1] == dy.shape[1].
- **output_size** (tuple[int]): An optional tuple[int]. Default: ``None``.
It contains 2 elements: height, width, whose elements should be the same as `dy`. Must:
dy.shape[2] == output_size[0],
dy.shape[3] == output_size[1].
- **scales** (tuple[float]): An optional tuple[float]. Default: ``None``.
The scale array along each dimension, contains 2 elements: scale_height, scale_width. Must:
dy.shape[2] == floor(input_size[2] * scales[0]),
dy.shape[3] == floor(input_size[3] * scales[1]).
Outputs:
- **dx** (Tensor) - A 4-D tensor. Has the same type as `dy`, shape depends on `input_size`.
"""
__mindspore_signature__ = (
sig.make_sig('dy'),
sig.make_sig('input_size'),
sig.make_sig('output_size', default=None),
sig.make_sig('scales', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, dy, input_size, output_size=None, scales=None):
return _convert_stub(pyboost_upsample_nearest2d_grad(self, [dy, input_size, output_size, scales]))
upsample_nearest2d_grad_op=UpsampleNearest2DGrad()
class UpsampleNearest2D(Primitive):
r"""
Performs nearest neighbor upsampling operation.
This operator scales up the input with specified `output_size` or `scales` factors, using the nearest
neighbor algorithm.
One of `output_size` or `scales` must be given, and they cannot both be specified at the same time.
Inputs:
- **x** (Tensor) - 4D tensor of shape :math:`(N, C, H_{in}, W_{in})`.
Supporting types: [uint8, float16, float32, float64].
- **output_size** (Union[tuple[int], list[int]]): A tuple or list of int specifying the output volumetric size.
Default: ``None``.
- **scales** (Union[tuple[float], list[float]]): A tuple or list of float specifying the upsampling factors.
Default: ``None``.
Outputs:
- **y** (Tensor) - Upsampled output with the same type as `x` , whose shape is
:math:`(N, C, H_{out}, W_{out})`.
Raises:
TypeError: When `output_size` is not ``None`` and `output_size` is not list[int] or tuple[int].
TypeError: When `scales` is not ``None`` and `scales` is not list[float] or tuple[float].
TypeError: If dtype of `x` is not in [uint8, float16, float32, float64].
ValueError: If any value of `output_size` is negative or zero when `output_size` is not ``None``.
ValueError: If any value of `scales` is negative or zero when `scales` is not ``None``.
ValueError: If shape of `x` is not 4D.
ValueError: If neither `scales` nor `output_size` is specified, or both are specified.
ValueError: If size of `scales` is not equal to 2 when `scales` is specified.
ValueError: If size of `output_size` is not equal to 2 when `output_size` is specified.
Supported Platforms:
``Ascend``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> from mindspore import dtype as mstype
>>> x = Tensor(np.arange(12).astype(np.float32).reshape(1, 2, 2, 3))
>>> output_size = [4, 4]
>>> net = ops.auto_generate.UpsampleNearest2D()
>>> output = net(x, output_size, None)
>>> print(output)
[[[[0., 0., 1., 2.],
[0., 0., 1., 2.],
[3., 3., 4., 5.],
[3., 3., 4., 5.]],
[[6., 6., 7., 8.],
[6., 6., 7., 8.],
[9., 9., 10., 10.],
[9., 9., 10., 10.]]]]
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('output_size', default=None),
sig.make_sig('scales', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, output_size=None, scales=None):
return _convert_stub(pyboost_upsample_nearest2d(self, [x, output_size, scales]))
upsample_nearest2d_op=UpsampleNearest2D()
class UpsampleNearest3DGrad(Primitive):
r"""
Upsample the 3-D gradient data with the nearest neighbor interpolation algorithm.
Note:
Only one of 'scales' and 'output_size' can be specified, and it is an error if both are specified.
Inputs:
- **dy** (Tensor) - Tensor of shape [N, C, D, H, W], Must be one of the following types:
float16, float32, float64.
- **input_size** (tuple[int]): A required tuple[int], which contains 5 elements:
[min_batch, channels, depth, height, width].
Must: input_size[0] == dy.shape[0], input_size[1] == dy.shape[1].
- **output_size** (tuple[int]): An optional tuple[int]. Default: ``None``.
It contains 3 elements: depth, height, width, whose elements should be the same as `dy`. Must:
dy.shape[2] == output_size[0],
dy.shape[3] == output_size[1],
dy.shape[4] == output_size[2].
- **scales** (tuple[float]): An optional tuple[float]. Default: ``None``.
The scale array along each dimension, contains 3 elements: scale_depth, scale_height, scale_width. Must:
dy.shape[2] == floor(input_size[2] * scales[0]),
dy.shape[3] == floor(input_size[3] * scales[1]),
dy.shape[4] == floor(input_size[4] * scales[2]).
Outputs:
- **dx** (Tensor) - A 5-D tensor. Has the same type as `dy`, shape depends on `input_size`.
"""
__mindspore_signature__ = (
sig.make_sig('dy'),
sig.make_sig('input_size'),
sig.make_sig('output_size', default=None),
sig.make_sig('scales', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, dy, input_size, output_size=None, scales=None):
return _convert_stub(pyboost_upsample_nearest3d_grad(self, [dy, input_size, output_size, scales]))
upsample_nearest3d_grad_op=UpsampleNearest3DGrad()
class UpsampleNearest3D(Primitive):
r"""
Performs nearest neighbor upsampling operation.
This operator scales up the volumetric input with specified `output_size` or `scales` factors, using the nearest
neighbor algorithm.
One of `output_size` or `scales` must be given, and they cannot both be specified at the same time.
Inputs:
- **x** (Tensor) - 5D tensor of shape :math:`(N, C, D_{in}, H_{in}, W_{in})`.
Supporting types: [float16, float32, float64].
- **output_size** (Union[tuple[int], list[int]]): A tuple or list of int specifying the output volumetric size.
Default: ``None``.
- **scales** (Union[tuple[float], list[float]]): A tuple or list of float specifying the upsampling factors.
Default: ``None``.
Outputs:
- **y** (Tensor) - Upsampled output with the same type as `x` , whose shape is
:math:`(N, C, D_{out}, H_{out}, W_{out})`.
Raises:
TypeError: When `output_size` is not ``None`` and `output_size` is not list[int] or tuple[int].
TypeError: When `scales` is not ``None`` and `scales` is not list[float] or tuple[float].
TypeError: If dtype of `x` is not in [float16, float32, float64].
ValueError: If any value of `output_size` is negative or zero when `output_size` is not ``None``.
ValueError: If any value of `scales` is negative or zero when `scales` is not ``None``.
ValueError: If shape of `x` is not 5D.
ValueError: If neither `scales` nor `output_size` is specified, or both are specified.
ValueError: If size of `scales` is not equal to 3 when `scales` is specified.
ValueError: If size of `output_size` is not equal to 3 when `output_size` is specified.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> from mindspore import dtype as mstype
>>> x = Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16])
... .reshape([1, 1, 2, 2, 4]), mstype.float32)
>>> output_size = [3, 4, 5]
>>> net = ops.UpsampleNearest3D()
>>> output = net(x, output_size, None)
>>> print(output)
[[[[[ 1. 1. 2. 3. 4.]
[ 1. 1. 2. 3. 4.]
[ 5. 5. 6. 7. 8.]
[ 5. 5. 6. 7. 8.]]
[[ 1. 1. 2. 3. 4.]
[ 1. 1. 2. 3. 4.]
[ 5. 5. 6. 7. 8.]
[ 5. 5. 6. 7. 8.]]
[[ 9. 9. 10. 11. 12.]
[ 9. 9. 10. 11. 12.]
[13. 13. 14. 15. 16.]
[13. 13. 14. 15. 16.]]]]]
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('output_size', default=None),
sig.make_sig('scales', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, output_size=None, scales=None):
return _convert_stub(pyboost_upsample_nearest3d(self, [x, output_size, scales]))
upsample_nearest3d_op=UpsampleNearest3D()
class UpsampleTrilinear3DGrad(Primitive):
r"""
Upsample the 3-D gradient data with trilinear interpolation algorithm.
Note:
One of 'scales' and 'output_size' must be specified. And it is an error if both are specified.
Inputs:
- **dy** (Tensor) - Tensor of shape [N, C, D, H, W]. Must be one of the following types:
float16, float32, float64.
- **input_size** (Union[tuple[int], list[int]]): A required tuple[int] which contains 5 elements:
[batch, channels, depth, height, width]. Must:
input_size[0] == dy.shape[0]
input_size[1] == dy.shape[1].
- **output_size** (Union[tuple[int], list[int]]): An optional tuple[int]. Default: ``None``.
It contains 3 elements: depth, height, width, whose elements should be the same as `dy`. Must:
dy.shape[2] == output_size[0],
dy.shape[3] == output_size[1],
dy.shape[4] == output_size[2].
- **scales** (Union[tuple[float], list[float]]): An optional tuple[float]. Default: ``None``.
The scale array along each dimension, contains 3 elements: scale_depth, scale_height, scale_width. Must:
dy.shape[2] == floor(input_size[2] * scales[0]),
dy.shape[3] == floor(input_size[3] * scales[1]),
dy.shape[4] == floor(input_size[4] * scales[2]).
- **align_corners** (bool): An optional bool. Default: ``False``.
Outputs:
- **dx** (Tensor) - A Tensor with shape depending on input_size, and its dtype is the same as `dy`.
"""
__mindspore_signature__ = (
sig.make_sig('dy'),
sig.make_sig('input_size'),
sig.make_sig('output_size', default=None),
sig.make_sig('scales', default=None),
)
@prim_arg_register
def __init__(self, align_corners=False):
self._set_prim_arg("align_corners", align_corners)
def __call__(self, dy, input_size, output_size=None, scales=None):
return _convert_stub(pyboost_upsample_trilinear3d_grad(self, [dy, input_size, output_size, scales, self.align_corners]))
class UpsampleTrilinear3D(Primitive):
r"""
Performs upsampling with trilinear interpolation across 3 dims for 5-dim input Tensor.
This operator scales up the volumetric input with specified `output_size` or `scales` factors,
using trilinear upscaling algorithm.
Note:
One of `scales` and `output_size` must be specified. And it is an error if both are specified.
Args:
align_corners (bool, optional): An optional bool. Default: ``False``.
If ``True``, the input and output tensors are aligned by the center points of their corner pixels,
preserving the values at the corner pixels.
If ``False`` , the input and output tensors are aligned by the corner points of their corner pixels,
and the interpolation uses edge value padding for out-of-boundary values.
Inputs:
- **x** (Tensor) - 5D tensor of shape :math:`(N, C, D_{in}, H_{in}, W_{in})`. Supporting types:
[float16, float32, float64].
- **output_size** (Union[tuple[int], list[int]]): A tuple or list of 3 int elements
:math:`(output\_depth, output\_height, output\_width)`. Default: ``None``.
- **scales** (Union[tuple[float], list[float]]): A tuple or list of 3 float
elements :math:`(scale\_depth, scale\_height, scale\_width)`. Default: ``None``.
Outputs:
- **y** (Tensor) - Upsampled output with the same data type as `x`, whose shape is
:math:`(N, C, D_{out}, H_{out}, W_{out})`.
Raises:
TypeError: When `output_size` is not ``None`` and `output_size` is not list[int] or tuple[int].
TypeError: When `scales` is not ``None`` and `scales` is not list[float] or tuple[float].
TypeError: If dtype of `x` is not in [float16, float32, float64].
TypeError: If type of `align_corners` is not bool.
ValueError: If any value of `output_size` is negative or zero when `output_size` is not ``None``.
ValueError: If any value of `scales` is negative or zero when `scales` is not ``None``.
ValueError: If shape of `x` is not 5D.
ValueError: If neither `scales` nor `output_size` is specified, or both are specified.
ValueError: If size of `scales` is not equal to 3 when `scales` is specified.
ValueError: If size of `output_size` is not equal to 3 when `output_size` is specified.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> net = ops.UpsampleTrilinear3D()
>>> in_x = Tensor(input_data=np.random.randn(2, 3, 4, 512, 256))
>>> output_size=[4, 64, 48]
>>> out = net(in_x, output_size, None)
>>> print(out.shape)
(2, 3, 4, 64, 48)
>>>
>>> net = ops.UpsampleTrilinear3D()
>>> in_x = Tensor(np.arange(1, 5, dtype=np.float32).reshape((1, 1, 1, 2, 2)))
>>> output_size=[2, 4, 4]
>>> out = net(in_x, output_size, None)
>>> print(out)
[[[[[1. 1.25 1.75 2. ]
[1.5 1.75 2.25 2.5 ]
[2.5 2.75 3.25 3.5 ]
[3. 3.25 3.75 4. ]]
[[1. 1.25 1.75 2. ]
[1.5 1.75 2.25 2.5 ]
[2.5 2.75 3.25 3.5 ]
[3. 3.25 3.75 4. ]]]]]
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('output_size', default=None),
sig.make_sig('scales', default=None),
)
@prim_arg_register
def __init__(self, align_corners=False):
self._set_prim_arg("align_corners", align_corners)
def __call__(self, x, output_size=None, scales=None):
return _convert_stub(pyboost_upsample_trilinear3d(self, [x, output_size, scales, self.align_corners]))
class View(Primitive):
r"""
.. code-block::
prim = ops.View()
out = prim(input, shape)
is equivalent to
.. code-block::
ops.view(input, shape)
Refer to :func:`mindspore.ops.view` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, shape):
return super().__call__(input, shape)
view_op=View()
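# A minimal usage sketch for View (hedged: assumed to reinterpret the tensor with the
# requested shape without copying, analogous to Tensor.view):
#
#     import numpy as np
#     from mindspore import Tensor
#     x = Tensor(np.arange(6).astype(np.float32))
#     out = view_op(x, (2, 3))
#     # Expected output shape: (2, 3), over the original data layout.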
class WeightQuantBatchMatmul(Primitive):
r"""
.. code-block::
prim = ops.WeightQuantBatchMatmul(transpose_x, transpose_weight, antiquant_group_size)
out = prim(x, weight, antiquant_scale, antiquant_offset, quant_scale, quant_offset, bias)
is equivalent to
.. code-block::
ops.weight_quant_batch_matmul(x, weight, antiquant_scale, antiquant_offset, quant_scale, quant_offset, bias, transpose_x, transpose_weight, antiquant_group_size)
Refer to :func:`mindspore.ops.weight_quant_batch_matmul` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('weight'),
sig.make_sig('antiquant_scale'),
sig.make_sig('antiquant_offset', default=None),
sig.make_sig('quant_scale', default=None),
sig.make_sig('quant_offset', default=None),
sig.make_sig('bias', default=None),
)
@prim_arg_register
def __init__(self, transpose_x=False, transpose_weight=False, antiquant_group_size=0):
self._set_prim_arg("transpose_x", transpose_x)
self._set_prim_arg("transpose_weight", transpose_weight)
self._set_prim_arg("antiquant_group_size", antiquant_group_size)
def __call__(self, x, weight, antiquant_scale, antiquant_offset=None, quant_scale=None, quant_offset=None, bias=None):
return _convert_stub(pyboost_weight_quant_batch_matmul(self, [x, weight, antiquant_scale, antiquant_offset, quant_scale, quant_offset, bias, self.transpose_x, self.transpose_weight, self.antiquant_group_size]))
class ZerosLikeExt(Primitive):
r"""
Returns a Tensor with a value of 0 whose shape and data type are the same as the input.
Refer to :func:`mindspore.ops.zeros_like` for more details.
Args:
- **input** (Tensor) - Tensor of any dimension.
Returns:
Tensor, has the same shape and type as `input` but filled with zeros.
Supported Platforms:
``Ascend``
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('dtype', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dtype=None):
return _convert_stub(pyboost_zeros_like_ext(self, [input, dtype if dtype is None else dtype_to_type_id('ZerosLikeExt', 'dtype', dtype)]))
zeros_like_ext_op=ZerosLikeExt()
class ZerosLike(Primitive):
r"""
Returns a Tensor with a value of 0 whose shape and data type are the same as the input.
Inputs:
- **input_x** (Tensor) - Input Tensor of any dimension.
Outputs:
Tensor, has the same shape and data type as `input_x` but filled with zeros.
Raises:
TypeError: If `input_x` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> zeroslike = ops.ZerosLike()
>>> input_x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
>>> output = zeroslike(input_x)
>>> print(output)
[[0. 0.]
[0. 0.]]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x):
return super().__call__(x)
zeros_like_op=ZerosLike()