Source code for mindspore.ops.auto_generate.gen_ops_prim

# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""Operators definition generated by gen_ops.py, includes primitive classes."""

from mindspore.ops.primitive import Primitive, prim_arg_register
from mindspore.ops import signature as sig
from mindspore.common import dtype as mstype
from mindspore.common._decorator import deprecated
from mindspore.ops._primitive_cache import _get_cache_prim
from mindspore.ops.auto_generate.gen_arg_dtype_cast import type_it
from mindspore.ops.auto_generate.gen_arg_handler import *
from mindspore._c_expression import OpDtype
from mindspore.common._stub_tensor import _convert_stub
from mindspore._c_expression import pyboost_abs
from mindspore._c_expression import pyboost_acos_ext
from mindspore._c_expression import pyboost_acosh_ext
from mindspore._c_expression import pyboost_adamw
from mindspore._c_expression import pyboost_adaptive_avg_pool1d
from mindspore._c_expression import pyboost_adaptive_avg_pool2d_ext
from mindspore._c_expression import pyboost_adaptive_avg_pool2d_grad_ext
from mindspore._c_expression import pyboost_add_ext
from mindspore._c_expression import pyboost_add_layer_norm_grad
from mindspore._c_expression import pyboost_add_layernorm_v2
from mindspore._c_expression import pyboost_add
from mindspore._c_expression import pyboost_addmm
from mindspore._c_expression import pyboost_arange
from mindspore._c_expression import pyboost_argmax_ext
from mindspore._c_expression import pyboost_argmax_with_value
from mindspore._c_expression import pyboost_argmin_ext
from mindspore._c_expression import pyboost_argmin_with_value
from mindspore._c_expression import pyboost_argsort
from mindspore._c_expression import pyboost_asin_ext
from mindspore._c_expression import pyboost_asinh_ext
from mindspore._c_expression import pyboost_atan2_ext
from mindspore._c_expression import pyboost_atan_ext
from mindspore._c_expression import pyboost_atanh
from mindspore._c_expression import pyboost_avg_pool2d_grad
from mindspore._c_expression import pyboost_avg_pool2d
from mindspore._c_expression import pyboost_baddbmm
from mindspore._c_expression import pyboost_batch_mat_mul
from mindspore._c_expression import pyboost_batch_norm_ext
from mindspore._c_expression import pyboost_batch_norm_grad_ext
from mindspore._c_expression import pyboost_bernoulli_ext
from mindspore._c_expression import pyboost_binary_cross_entropy_grad
from mindspore._c_expression import pyboost_binary_cross_entropy
from mindspore._c_expression import pyboost_binary_cross_entropy_with_logits_backward
from mindspore._c_expression import pyboost_binary_cross_entropy_with_logits
from mindspore._c_expression import pyboost_bincount_ext
from mindspore._c_expression import pyboost_bitwise_and_scalar
from mindspore._c_expression import pyboost_bitwise_and_tensor
from mindspore._c_expression import pyboost_bitwise_or_scalar
from mindspore._c_expression import pyboost_bitwise_or_tensor
from mindspore._c_expression import pyboost_bitwise_xor_scalar
from mindspore._c_expression import pyboost_bitwise_xor_tensor
from mindspore._c_expression import pyboost_bmm_ext
from mindspore._c_expression import pyboost_broadcast_to
from mindspore._c_expression import pyboost_cast
from mindspore._c_expression import pyboost_ceil
from mindspore._c_expression import pyboost_chunk
from mindspore._c_expression import pyboost_clamp_scalar
from mindspore._c_expression import pyboost_clamp_tensor
from mindspore._c_expression import pyboost_clone
from mindspore._c_expression import pyboost_col2im_ext
from mindspore._c_expression import pyboost_col2im_grad
from mindspore._c_expression import pyboost_concat
from mindspore._c_expression import pyboost_constant_pad_nd
from mindspore._c_expression import pyboost_contiguous
from mindspore._c_expression import pyboost_convolution_grad
from mindspore._c_expression import pyboost_convolution
from mindspore._c_expression import pyboost_copy_ext
from mindspore._c_expression import pyboost_copy
from mindspore._c_expression import pyboost_cos
from mindspore._c_expression import pyboost_cosh
from mindspore._c_expression import pyboost_count_nonzero
from mindspore._c_expression import pyboost_cross
from mindspore._c_expression import pyboost_cummax
from mindspore._c_expression import pyboost_cummin_ext
from mindspore._c_expression import pyboost_cumsum_ext
from mindspore._c_expression import pyboost_custom_ext
from mindspore._c_expression import pyboost_dense
from mindspore._c_expression import pyboost_div
from mindspore._c_expression import pyboost_divmod
from mindspore._c_expression import pyboost_dot
from mindspore._c_expression import pyboost_dropout_do_mask_ext
from mindspore._c_expression import pyboost_dropout_ext
from mindspore._c_expression import pyboost_dropout_gen_mask_ext
from mindspore._c_expression import pyboost_dropout_grad_ext
from mindspore._c_expression import pyboost_elu_ext
from mindspore._c_expression import pyboost_elu_grad_ext
from mindspore._c_expression import pyboost_embedding_dense_backward
from mindspore._c_expression import pyboost_embedding
from mindspore._c_expression import pyboost_equal
from mindspore._c_expression import pyboost_erf
from mindspore._c_expression import pyboost_erfc
from mindspore._c_expression import pyboost_erfinv
from mindspore._c_expression import pyboost_exp
from mindspore._c_expression import pyboost_expm1
from mindspore._c_expression import pyboost_eye
from mindspore._c_expression import pyboost_ffn_ext
from mindspore._c_expression import pyboost_fill_scalar
from mindspore._c_expression import pyboost_fill_tensor
from mindspore._c_expression import pyboost_flash_attention_score_grad
from mindspore._c_expression import pyboost_flash_attention_score
from mindspore._c_expression import pyboost_flatten_ext
from mindspore._c_expression import pyboost_floor
from mindspore._c_expression import pyboost_gather_d_grad_v2
from mindspore._c_expression import pyboost_gather_d
from mindspore._c_expression import pyboost_gcd
from mindspore._c_expression import pyboost_gelu_grad
from mindspore._c_expression import pyboost_gelu
from mindspore._c_expression import pyboost_greater_equal
from mindspore._c_expression import pyboost_greater
from mindspore._c_expression import pyboost_grid_sampler_2d_grad
from mindspore._c_expression import pyboost_grid_sampler_2d
from mindspore._c_expression import pyboost_grid_sampler_3d_grad
from mindspore._c_expression import pyboost_grid_sampler_3d
from mindspore._c_expression import pyboost_group_norm_grad
from mindspore._c_expression import pyboost_group_norm
from mindspore._c_expression import pyboost_histc_ext
from mindspore._c_expression import pyboost_hshrink_grad
from mindspore._c_expression import pyboost_hshrink
from mindspore._c_expression import pyboost_hsigmoid_grad
from mindspore._c_expression import pyboost_hsigmoid
from mindspore._c_expression import pyboost_hswish_grad
from mindspore._c_expression import pyboost_hswish
from mindspore._c_expression import pyboost_im2col_ext
from mindspore._c_expression import pyboost_incre_flash_attention
from mindspore._c_expression import pyboost_index_add_ext
from mindspore._c_expression import pyboost_index_select
from mindspore._c_expression import pyboost_inner_comm_all_gather
from mindspore._c_expression import pyboost_inner_comm_all_reduce
from mindspore._c_expression import pyboost_inner_comm_all_to_all_v
from mindspore._c_expression import pyboost_inner_comm_irecv
from mindspore._c_expression import pyboost_inner_comm_isend
from mindspore._c_expression import pyboost_inner_comm_reduce_scatter
from mindspore._c_expression import pyboost_inplace_add_ext
from mindspore._c_expression import pyboost_inplace_addmm
from mindspore._c_expression import pyboost_inplace_adds_ext
from mindspore._c_expression import pyboost_inplace_clamp_scalar
from mindspore._c_expression import pyboost_inplace_clamp_tensor
from mindspore._c_expression import pyboost_inplace_fill_scalar
from mindspore._c_expression import pyboost_inplace_fill_tensor
from mindspore._c_expression import pyboost_inplace_floor
from mindspore._c_expression import pyboost_inplace_zero
from mindspore._c_expression import pyboost_isclose
from mindspore._c_expression import pyboost_isfinite
from mindspore._c_expression import pyboost_l1_loss_backward_ext
from mindspore._c_expression import pyboost_l1_loss_ext
from mindspore._c_expression import pyboost_layer_norm_ext
from mindspore._c_expression import pyboost_layer_norm_grad_ext
from mindspore._c_expression import pyboost_leaky_relu_ext
from mindspore._c_expression import pyboost_leaky_relu_grad_ext
from mindspore._c_expression import pyboost_less_equal
from mindspore._c_expression import pyboost_less
from mindspore._c_expression import pyboost_lin_space_ext
from mindspore._c_expression import pyboost_log1p
from mindspore._c_expression import pyboost_log
from mindspore._c_expression import pyboost_log_softmax_ext
from mindspore._c_expression import pyboost_log_softmax_grad
from mindspore._c_expression import pyboost_log_softmax
from mindspore._c_expression import pyboost_logaddexp
from mindspore._c_expression import pyboost_logical_and
from mindspore._c_expression import pyboost_logical_not
from mindspore._c_expression import pyboost_logical_or
from mindspore._c_expression import pyboost_logical_xor
from mindspore._c_expression import pyboost_logsigmoid_grad
from mindspore._c_expression import pyboost_logsigmoid
from mindspore._c_expression import pyboost_logsumexp
from mindspore._c_expression import pyboost_masked_fill
from mindspore._c_expression import pyboost_masked_select_grad
from mindspore._c_expression import pyboost_masked_select
from mindspore._c_expression import pyboost_matmul_ext
from mindspore._c_expression import pyboost_matmul
from mindspore._c_expression import pyboost_matrix_inverse_ext
from mindspore._c_expression import pyboost_max
from mindspore._c_expression import pyboost_max_pool_grad_with_indices
from mindspore._c_expression import pyboost_max_pool_grad_with_mask
from mindspore._c_expression import pyboost_max_pool_with_indices
from mindspore._c_expression import pyboost_max_pool_with_mask
from mindspore._c_expression import pyboost_maximum
from mindspore._c_expression import pyboost_mean_ext
from mindspore._c_expression import pyboost_median_dim
from mindspore._c_expression import pyboost_median_ext
from mindspore._c_expression import pyboost_min
from mindspore._c_expression import pyboost_minimum
from mindspore._c_expression import pyboost_mish_ext
from mindspore._c_expression import pyboost_mish_grad_ext
from mindspore._c_expression import pyboost_mm_ext
from mindspore._c_expression import pyboost_mse_loss_ext
from mindspore._c_expression import pyboost_mse_loss_grad_ext
from mindspore._c_expression import pyboost_mul
from mindspore._c_expression import pyboost_muls
from mindspore._c_expression import pyboost_multinomial_ext
from mindspore._c_expression import pyboost_mv
from mindspore._c_expression import pyboost_nan_to_num
from mindspore._c_expression import pyboost_ne_scalar
from mindspore._c_expression import pyboost_neg
from mindspore._c_expression import pyboost_nllloss_2d_grad
from mindspore._c_expression import pyboost_nllloss_2d
from mindspore._c_expression import pyboost_nllloss_grad
from mindspore._c_expression import pyboost_nllloss
from mindspore._c_expression import pyboost_non_zero_ext
from mindspore._c_expression import pyboost_non_zero
from mindspore._c_expression import pyboost_norm
from mindspore._c_expression import pyboost_normal_float_float
from mindspore._c_expression import pyboost_normal_float_tensor
from mindspore._c_expression import pyboost_normal_tensor_float
from mindspore._c_expression import pyboost_normal_tensor_tensor
from mindspore._c_expression import pyboost_not_equal
from mindspore._c_expression import pyboost_one_hot_ext
from mindspore._c_expression import pyboost_ones_like_ext
from mindspore._c_expression import pyboost_ones
from mindspore._c_expression import pyboost_outer
from mindspore._c_expression import pyboost_polar
from mindspore._c_expression import pyboost_pow
from mindspore._c_expression import pyboost_prelu_grad
from mindspore._c_expression import pyboost_prelu
from mindspore._c_expression import pyboost_prod_ext
from mindspore._c_expression import pyboost_prompt_flash_attention
from mindspore._c_expression import pyboost_rand_ext
from mindspore._c_expression import pyboost_rand_like_ext
from mindspore._c_expression import pyboost_randint_like
from mindspore._c_expression import pyboost_randint
from mindspore._c_expression import pyboost_randn_like
from mindspore._c_expression import pyboost_randn
from mindspore._c_expression import pyboost_randperm_ext
from mindspore._c_expression import pyboost_reciprocal
from mindspore._c_expression import pyboost_reduce_all
from mindspore._c_expression import pyboost_reduce_any
from mindspore._c_expression import pyboost_reflection_pad_1d_grad
from mindspore._c_expression import pyboost_reflection_pad_1d
from mindspore._c_expression import pyboost_reflection_pad_2d_grad
from mindspore._c_expression import pyboost_reflection_pad_2d
from mindspore._c_expression import pyboost_reflection_pad_3d_grad
from mindspore._c_expression import pyboost_reflection_pad_3d
from mindspore._c_expression import pyboost_relu_grad
from mindspore._c_expression import pyboost_relu
from mindspore._c_expression import pyboost_remainder_scalar_tensor
from mindspore._c_expression import pyboost_remainder_tensor_scalar
from mindspore._c_expression import pyboost_remainder_tensor_tensor
from mindspore._c_expression import pyboost_repeat_interleave_grad
from mindspore._c_expression import pyboost_repeat_interleave_int
from mindspore._c_expression import pyboost_repeat_interleave_tensor
from mindspore._c_expression import pyboost_replication_pad_1d_grad
from mindspore._c_expression import pyboost_replication_pad_1d
from mindspore._c_expression import pyboost_replication_pad_2d_grad
from mindspore._c_expression import pyboost_replication_pad_2d
from mindspore._c_expression import pyboost_replication_pad_3d_grad
from mindspore._c_expression import pyboost_replication_pad_3d
from mindspore._c_expression import pyboost_reshape
from mindspore._c_expression import pyboost_reverse_v2
from mindspore._c_expression import pyboost_rms_norm_grad
from mindspore._c_expression import pyboost_rms_norm
from mindspore._c_expression import pyboost_roll
from mindspore._c_expression import pyboost_rotary_position_embedding_grad
from mindspore._c_expression import pyboost_rotary_position_embedding
from mindspore._c_expression import pyboost_round
from mindspore._c_expression import pyboost_rsqrt
from mindspore._c_expression import pyboost_scatter_add_ext
from mindspore._c_expression import pyboost_scatter
from mindspore._c_expression import pyboost_scatter_value
from mindspore._c_expression import pyboost_searchsorted
from mindspore._c_expression import pyboost_select_ext
from mindspore._c_expression import pyboost_select
from mindspore._c_expression import pyboost_select_v2
from mindspore._c_expression import pyboost_selu_ext
from mindspore._c_expression import pyboost_selu_grad
from mindspore._c_expression import pyboost_sigmoid_grad
from mindspore._c_expression import pyboost_sigmoid
from mindspore._c_expression import pyboost_sign
from mindspore._c_expression import pyboost_silent_check_v2
from mindspore._c_expression import pyboost_silu_grad
from mindspore._c_expression import pyboost_silu
from mindspore._c_expression import pyboost_sin
from mindspore._c_expression import pyboost_sinc
from mindspore._c_expression import pyboost_sinh
from mindspore._c_expression import pyboost_slice_ext
from mindspore._c_expression import pyboost_smooth_l1_loss_grad
from mindspore._c_expression import pyboost_smooth_l1_loss
from mindspore._c_expression import pyboost_softmax_backward
from mindspore._c_expression import pyboost_softmax
from mindspore._c_expression import pyboost_softplus_ext
from mindspore._c_expression import pyboost_softplus_grad_ext
from mindspore._c_expression import pyboost_softshrink_grad
from mindspore._c_expression import pyboost_softshrink
from mindspore._c_expression import pyboost_sort_ext
from mindspore._c_expression import pyboost_split_tensor
from mindspore._c_expression import pyboost_split_with_size
from mindspore._c_expression import pyboost_sqrt
from mindspore._c_expression import pyboost_square
from mindspore._c_expression import pyboost_stack_ext
from mindspore._c_expression import pyboost_sub_ext
from mindspore._c_expression import pyboost_sub
from mindspore._c_expression import pyboost_sum_ext
from mindspore._c_expression import pyboost_swiglu_grad
from mindspore._c_expression import pyboost_swiglu
from mindspore._c_expression import pyboost_t_ext
from mindspore._c_expression import pyboost_tan
from mindspore._c_expression import pyboost_tanh_grad
from mindspore._c_expression import pyboost_tanh
from mindspore._c_expression import pyboost_tile
from mindspore._c_expression import pyboost_topk_ext
from mindspore._c_expression import pyboost_trace_ext
from mindspore._c_expression import pyboost_transpose_ext
from mindspore._c_expression import pyboost_transpose
from mindspore._c_expression import pyboost_tril_ext
from mindspore._c_expression import pyboost_triu
from mindspore._c_expression import pyboost_trunc
from mindspore._c_expression import pyboost_type_as
from mindspore._c_expression import pyboost_uniform_ext
from mindspore._c_expression import pyboost_unique2
from mindspore._c_expression import pyboost_unique_dim
from mindspore._c_expression import pyboost_upsample_bicubic2d_grad
from mindspore._c_expression import pyboost_upsample_bicubic2d
from mindspore._c_expression import pyboost_upsample_bilinear2d_grad
from mindspore._c_expression import pyboost_upsample_bilinear2d
from mindspore._c_expression import pyboost_upsample_linear1d_grad
from mindspore._c_expression import pyboost_upsample_linear1d
from mindspore._c_expression import pyboost_upsample_nearest1d_grad
from mindspore._c_expression import pyboost_upsample_nearest1d
from mindspore._c_expression import pyboost_upsample_nearest2d_grad
from mindspore._c_expression import pyboost_upsample_nearest2d
from mindspore._c_expression import pyboost_upsample_nearest3d_grad
from mindspore._c_expression import pyboost_upsample_nearest3d
from mindspore._c_expression import pyboost_upsample_trilinear3d_grad
from mindspore._c_expression import pyboost_upsample_trilinear3d
from mindspore._c_expression import pyboost_view_as
from mindspore._c_expression import pyboost_xlogy_scalar_other
from mindspore._c_expression import pyboost_xlogy_scalar_self
from mindspore._c_expression import pyboost_xlogy
from mindspore._c_expression import pyboost_zeros_like_ext
from mindspore._c_expression import pyboost_zeros
from mindspore._c_expression import pyboost_add_rmsnorm_quant_v2
from mindspore._c_expression import pyboost_dynamic_quant_ext
from mindspore._c_expression import pyboost_grouped_matmul
from mindspore._c_expression import pyboost_moe_finalize_routing
from mindspore._c_expression import pyboost_quant_batch_matmul
from mindspore._c_expression import pyboost_quant_v2
from mindspore._c_expression import pyboost_weight_quant_batch_matmul


class ACosGrad(Primitive):
    r"""
    Computes ACosGrad of input element-wise.

    Returns:
        Tensor, has the same type as input.
    """
    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, x, dout):
        return super().__call__(x, dout)


a_cos_grad_op=ACosGrad()


class AbsGrad(Primitive):
    r"""
    Computes gradients for abs operation.
    """
    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, x, dout):
        return super().__call__(x, dout)


abs_grad_op=AbsGrad()
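

# A minimal calling sketch for the gradient primitives above (illustrative only,
# not generated code; assumes PyNative mode): they take the forward input `x`
# and the incoming gradient `dout`, tensors of the same shape, and return a
# tensor with the same shape and type.
#
#   import numpy as np
#   import mindspore
#   from mindspore import Tensor
#
#   x = Tensor(np.array([0.1, 0.5]), mindspore.float32)
#   dout = Tensor(np.array([1.0, 1.0]), mindspore.float32)
#   dx = a_cos_grad_op(x, dout)   # same shape and dtype as x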


class Abs(Primitive):
    r"""
    .. code-block::

        prim = ops.Abs()
        out = prim(input)

    is equivalent to

    .. code-block::

        ops.abs(input)

    Refer to :func:`mindspore.ops.abs` for more details.
    """
    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, input):
        return _convert_stub(pyboost_abs(self, [input]))


abs_op=Abs()


class AcosExt(Primitive):
    r"""
    .. code-block::

        prim = ops.AcosExt()
        out = prim(input)

    is equivalent to

    .. code-block::

        ops.acos_ext(input)

    Refer to :func:`mindspore.ops.acos_ext` for more details.
    """
    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, input):
        return _convert_stub(pyboost_acos_ext(self, [input]))


acos_ext_op=AcosExt()


class ACos(Primitive):
    r"""
    .. code-block::

        prim = ops.ACos()
        out = prim(input)

    is equivalent to

    .. code-block::

        ops.acos(input)

    Refer to :func:`mindspore.ops.acos` for more details.
    """
    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, input):
        return super().__call__(input)


acos_op=ACos()


class AcoshExt(Primitive):
    r"""
    .. code-block::

        prim = ops.AcoshExt()
        out = prim(input)

    is equivalent to

    .. code-block::

        ops.acosh_ext(input)

    Refer to :func:`mindspore.ops.acosh_ext` for more details.
    """
    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, input):
        return _convert_stub(pyboost_acosh_ext(self, [input]))


acosh_ext_op=AcoshExt()


class AcoshGrad(Primitive):
    r"""
    Performs grad of Acosh operation.
    """
    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, out, dout):
        return super().__call__(out, dout)


acosh_grad_op=AcoshGrad()


class Acosh(Primitive):
    r"""
    .. code-block::

        prim = ops.Acosh()
        out = prim(input)

    is equivalent to

    .. code-block::

        ops.acosh(input)

    Refer to :func:`mindspore.ops.acosh` for more details.
    """
    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, input):
        return super().__call__(input)


acosh_op=Acosh()
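

# Usage sketch for the elementwise primitives above (illustrative only; assumes
# PyNative mode and a float32 input): the module-level instances such as
# `abs_op` and `acos_op` are called like plain functions.
#
#   import numpy as np
#   import mindspore
#   from mindspore import Tensor
#
#   x = Tensor(np.array([-1.0, 0.5]), mindspore.float32)
#   print(abs_op(x))    # [1.  0.5]
#   print(acos_op(x))   # arccos of each element, same shape and dtype as x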

class AdamWeightDecay(Primitive):
    r"""
    Updates gradients by the Adaptive Moment Estimation algorithm with weight decay (AdamWeightDecay).

    The Adam algorithm is proposed in `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_.
    The AdamWeightDecay variant was proposed in `Decoupled Weight Decay Regularization <https://arxiv.org/abs/1711.05101>`_.

    The updating formulas are as follows,

    .. math::
        \begin{array}{ll} \\
            m = \beta_1 * m + (1 - \beta_1) * g \\
            v = \beta_2 * v + (1 - \beta_2) * g * g \\
            update = \frac{m}{\sqrt{v} + \epsilon} \\
            update =
            \begin{cases}
                update + weight\_decay * w & \text{ if } weight\_decay > 0 \\
                update & \text{ otherwise }
            \end{cases} \\
            w = w - lr * update
        \end{array}

    :math:`m` represents the 1st moment vector, :math:`v` represents the 2nd moment vector, :math:`g` represents
    `gradient`, :math:`\beta_1, \beta_2` represent `beta1` and `beta2`, :math:`lr` represents `learning_rate`,
    :math:`w` represents `var`, :math:`decay` represents `weight_decay`, :math:`\epsilon` represents `epsilon`.

    Args:
        use_locking (bool): Whether to enable a lock to protect variable tensors from being updated.
            If ``True`` , updates of the var, m, and v tensors will be protected by a lock.
            If ``False`` , the result is unpredictable. Default: ``False`` .

    Inputs:
        - **var** (Parameter) - Weights to be updated. The shape is :math:`(N, *)` where :math:`*` means,
          any number of additional dimensions. The data type can be float16 or float32.
        - **m** (Parameter) - The 1st moment vector in the updating formula, it should have the same shape as `var`.
          The data type can be float16 or float32.
        - **v** (Parameter) - The 2nd moment vector in the updating formula, it should have the same shape as `m`.
        - **lr** (float) - :math:`lr` in the updating formula. The paper suggested value is :math:`10^{-8}`,
          the data type should be float32.
        - **beta1** (float) - The exponential decay rate for the 1st moment estimations,
          the data type should be float32. The paper suggested value is :math:`0.9`
        - **beta2** (float) - The exponential decay rate for the 2nd moment estimations,
          the data type should be float32. The paper suggested value is :math:`0.999`
        - **epsilon** (float) - Term added to the denominator to improve numerical stability,
          the data type should be float32.
        - **decay** (float) - The weight decay value, must be a scalar tensor with float32 data type.
          Default: ``0.0`` .
        - **gradient** (Tensor) - Gradient, has the same shape as `var`.

    Outputs:
        Tuple of 3 Tensor, the updated parameters.

        - **var** (Tensor) - The same shape and data type as `var`.
        - **m** (Tensor) - The same shape and data type as `m`.
        - **v** (Tensor) - The same shape and data type as `v`.

    Raises:
        TypeError: If `use_locking` is not a bool.
        TypeError: If `lr`, `beta1`, `beta2`, `epsilon` or `decay` is not a float32.
        TypeError: If `var`, `m` or `v` is not a Parameter with dtype float16 or float32.
        TypeError: If `gradient` is not a Tensor.
        ValueError: If `epsilon` <= 0.
        ValueError: If `beta1`, `beta2` is not in range (0.0,1.0).
        ValueError: If `decay` < 0.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore import Tensor, Parameter, ops
        >>> class Net(nn.Cell):
        ...     def __init__(self):
        ...         super(Net, self).__init__()
        ...         self.adam_weight_decay = ops.AdamWeightDecay()
        ...         self.var = Parameter(Tensor(np.ones([2, 2]).astype(np.float32)), name="var")
        ...         self.m = Parameter(Tensor(np.ones([2, 2]).astype(np.float32)), name="m")
        ...         self.v = Parameter(Tensor(np.ones([2, 2]).astype(np.float32)), name="v")
        ...     def construct(self, lr, beta1, beta2, epsilon, decay, grad):
        ...         out = self.adam_weight_decay(self.var, self.m, self.v, lr, beta1, beta2,
        ...                                      epsilon, decay, grad)
        ...         return out
        >>> net = Net()
        >>> gradient = Tensor(np.ones([2, 2]).astype(np.float32))
        >>> output = net(0.001, 0.9, 0.999, 1e-8, 0.0, gradient)
        >>> print(net.var.asnumpy())
        [[0.999 0.999]
         [0.999 0.999]]
    """
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T1),
        sig.make_sig('v', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T1),
        sig.make_sig('lr', dtype=sig.sig_dtype.T2),
        sig.make_sig('beta1', dtype=sig.sig_dtype.T2),
        sig.make_sig('beta2', dtype=sig.sig_dtype.T2),
        sig.make_sig('epsilon', dtype=sig.sig_dtype.T2),
        sig.make_sig('decay', dtype=sig.sig_dtype.T2),
        sig.make_sig('gradient', dtype=sig.sig_dtype.T),
    )

    @prim_arg_register
    def __init__(self, use_locking=False):
        self._set_prim_arg("use_locking", use_locking)
        self.add_prim_attr("side_effect_mem", True)

    def __call__(self, var, m, v, lr, beta1, beta2, epsilon, decay, gradient):
        return super().__call__(var, m, v, lr, beta1, beta2, epsilon, decay, gradient, self.use_locking)

class AdamW(Primitive):
    r"""
    Implements Adam Weight Decay algorithm.

    .. math::
        \begin{aligned}
            &\textbf{input} : \gamma \text{(lr)}, \: \beta_1, \beta_2 \text{(betas)}, \: \theta_0 \text{(params)},
                \: f(\theta) \text{(objective)}, \: \epsilon \text{ (epsilon)} \\
            &\hspace{13mm} \lambda \text{(weight decay)}, \: \textit{amsgrad}, \: \textit{maximize} \\
            &\textbf{initialize} : m_0 \leftarrow 0 \text{ (first moment)}, v_0 \leftarrow 0
                \text{ ( second moment)}, \: \widehat{v_0}^{max}\leftarrow 0 \\[-1.ex]
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\
            &\hspace{5mm}\textbf{if} \: \textit{maximize}: \\
            &\hspace{10mm}g_t \leftarrow -\nabla_{\theta} f_t (\theta_{t-1}) \\
            &\hspace{5mm}\textbf{else} \\
            &\hspace{10mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\
            &\hspace{5mm} \theta_t \leftarrow \theta_{t-1} - \gamma \lambda \theta_{t-1} \\
            &\hspace{5mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\
            &\hspace{5mm}v_t \leftarrow \beta_2 v_{t-1} + (1-\beta_2) g^2_t \\
            &\hspace{5mm}\widehat{m_t} \leftarrow m_t/\big(1-\beta_1^t \big) \\
            &\hspace{5mm}\widehat{v_t} \leftarrow v_t/\big(1-\beta_2^t \big) \\
            &\hspace{5mm}\textbf{if} \: amsgrad \\
            &\hspace{10mm}\widehat{v_t}^{max} \leftarrow \mathrm{max}(\widehat{v_t}^{max}, \widehat{v_t}) \\
            &\hspace{10mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/
                \big(\sqrt{\widehat{v_t}^{max}} + \epsilon \big) \\
            &\hspace{5mm}\textbf{else} \\
            &\hspace{10mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/
                \big(\sqrt{\widehat{v_t}} + \epsilon \big) \\
            &\bf{return} \: \theta_t \\[-1.ex]
        \end{aligned}

    .. warning::
        This is an experimental optimizer API that is subject to change.
        This module must be used with lr scheduler module in `LRScheduler Class
        <https://www.mindspore.cn/docs/en/master/api_python/mindspore.experimental.html#lrscheduler-class>`_ .

    Inputs:
        - **var** (Parameter) - Weights to be updated. The shape is :math:`(N, *)` where :math:`*` means,
          any number of additional dimensions. The data type can be float16 or float32.
        - **m** (Parameter) - The 1st moment vector in the updating formula, it should have the same shape as `var`.
          The data type can be float16 or float32.
        - **v** (Parameter) - The 2nd moment vector in the updating formula, it should have the same shape as `m`.
        - **max_v** (Parameter) - The 2nd moment vector in the updating formula, it should have the same shape as `m`.
        - **gradient** (Tensor) - Gradient, has the same shape as `var`
        - **step** (Tensor) - step
        - **lr** (float) - :math:`lr` in the updating formula. The paper suggested value is :math:`10^{-8}`,
          the data type should be float.
        - **beta1** (float) - The exponential decay rate for the 1st moment estimations,
          the data type should be float. The paper suggested value is :math:`0.9`
        - **beta2** (float) - The exponential decay rate for the 2nd moment estimations,
          the data type should be float. The paper suggested value is :math:`0.999`
        - **decay** (float) - weight decay (L2 penalty), must be a scalar tensor with float data type.
        - **eps** (float) - Term added to the denominator to improve numerical stability,
          the data type should be float.
        - **amsgrad** (bool) - whether to use the AMSGrad algorithm. Default: ``False``.
        - **maximize** (bool) - maximize the params based on the objective, instead of minimizing.
          Default: ``False``.

    Outputs:
        Tuple of 3 Tensor, the updated parameters.

        - **var** (Tensor) - The same shape and data type as `var`.
        - **m** (Tensor) - The same shape and data type as `m`.
        - **v** (Tensor) - The same shape and data type as `v`.

    Supported Platforms:
        ``Ascend``
    """
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T1),
        sig.make_sig('v', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T1),
        sig.make_sig('max_v', dtype=sig.sig_dtype.T1),
        sig.make_sig('gradient', dtype=sig.sig_dtype.T),
        sig.make_sig('step', dtype=sig.sig_dtype.T2),
        sig.make_sig('lr', dtype=sig.sig_dtype.T3),
        sig.make_sig('beta1', dtype=sig.sig_dtype.T3),
        sig.make_sig('beta2', dtype=sig.sig_dtype.T3),
        sig.make_sig('decay', dtype=sig.sig_dtype.T3),
        sig.make_sig('eps', dtype=sig.sig_dtype.T3),
        sig.make_sig('amsgrad', dtype=sig.sig_dtype.T4, default=False),
        sig.make_sig('maximize', dtype=sig.sig_dtype.T5, default=False),
    )

    @prim_arg_register
    def __init__(self):
        self.add_prim_attr("side_effect_mem", True)

    def __call__(self, var, m, v, max_v, gradient, step, lr, beta1, beta2, decay, eps, amsgrad=False, maximize=False):
        return _convert_stub(pyboost_adamw(self, [var, m, v, max_v, gradient, step, lr, beta1, beta2, decay, eps, amsgrad, maximize]))


adamw_op=AdamW()


class AdaptiveAvgPool1D(Primitive):
    r"""
    .. code-block::

        prim = ops.AdaptiveAvgPool1D()
        out = prim(input, output_size)

    is equivalent to

    .. code-block::

        ops.adaptive_avg_pool1d(input, output_size)

    Refer to :func:`mindspore.ops.adaptive_avg_pool1d` for more details.
    """
    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, input, output_size):
        return _convert_stub(pyboost_adaptive_avg_pool1d(self, [input, output_size]))


adaptive_avg_pool1d_op=AdaptiveAvgPool1D()


class AdaptiveAvgPool2DExt(Primitive):
    r"""
    Performs 2D adaptive average pooling on a multi-plane input signal.
    That is, for any input size, the size of the specified output is H x W.
    The number of output features is equal to the number of input features.

    The input and output data format can be "NCHW" and "CHW". N is the batch size, C is the number of channels,
    H is the feature height, and W is the feature width.

    For adaptive average pooling for 2D:

    .. math::
        \begin{align}
            h_{start} &= floor(i * H_{in} / H_{out})\\
            h_{end} &= ceil((i + 1) * H_{in} / H_{out})\\
            w_{start} &= floor(j * W_{in} / W_{out})\\
            w_{end} &= ceil((j + 1) * W_{in} / W_{out})\\
            Output(i,j) &= \frac{\sum Input[h_{start}:h_{end}, w_{start}:w_{end}]}{(h_{end}- h_{start}) * (w_{end}- w_{start})}
        \end{align}

    .. warning::
        This is an experimental API that is subject to change or deletion.

    Args:
        input (Tensor): The input of adaptive_avg_pool2d, which is a 3D or 4D tensor,
            with float16 or float32 data type.
        output_size (Union[int, tuple]): The target output size. `output_size` can be a tuple :math:`(H, W)`,
            or an int H for :math:`(H, H)`. :math:`H` and :math:`W` can be int or None.
            If it is None, it means the output size is the same as the input size.

    Returns:
        Tensor, with the same type as the `input`.

        Shape of the output is `input_shape[:len(input_shape) - len(out_shape)] + out_shape`.

        .. math::
            out\_shape = \begin{cases}
                input\_shape[-2] + output\_size[1], & \text{if } output\_size \text{ is (None, w);}\\
                output\_size[0] + input\_shape[-1], & \text{if } output\_size \text{ is (h, None);}\\
                input\_shape[-2:], & \text{if } output\_size \text{ is (None, None);}\\
                (h, h), & \text{if } output\_size \text{ is h;}\\
                (h, w), & \text{if } output\_size \text{ is (h, w)}
            \end{cases}

    Raises:
        ValueError: If `output_size` is a tuple and the length of `output_size` is not 2.
        TypeError: If `input` is not a Tensor.
        TypeError: If dtype of `input` is not float16, float32 or float64.
        ValueError: If the dimension of `input` is less than or equal to the dimension of `output_size`.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, mint
        >>> # case 1: output_size=(None, 2)
        >>> input = Tensor(np.array([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],
        ...                          [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],
        ...                          [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]), mindspore.float32)
        >>> output = mint.nn.functional.adaptive_avg_pool2d(input, (None, 2))
        >>> print(output)
        [[[1.5 2.5]
          [4.5 5.5]
          [7.5 8.5]]
         [[1.5 2.5]
          [4.5 5.5]
          [7.5 8.5]]
         [[1.5 2.5]
          [4.5 5.5]
          [7.5 8.5]]]
    """
    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, input, output_size):
        return _convert_stub(pyboost_adaptive_avg_pool2d_ext(self, [input, to_2d_paddings('AdaptiveAvgPool2DExt', 'output_size', output_size)]))


adaptive_avg_pool2d_ext_op=AdaptiveAvgPool2DExt()


class AdaptiveAvgPool2DGradExt(Primitive):
    r"""
    .. code-block::

        prim = ops.AdaptiveAvgPool2DGradExt()
        out = prim(grad_output, x)

    is equivalent to

    .. code-block::

        ops.adaptive_avg_pool2d_grad_ext(grad_output, x)

    Refer to :func:`mindspore.ops.adaptive_avg_pool2d_grad_ext` for more details.
    """
    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, grad_output, x):
        return _convert_stub(pyboost_adaptive_avg_pool2d_grad_ext(self, [grad_output, x]))


adaptive_avg_pool2d_grad_ext_op=AdaptiveAvgPool2DGradExt()


class AddExt(Primitive):
    r"""
    .. code-block::

        prim = ops.AddExt()
        out = prim(input, other, alpha)

    is equivalent to

    .. code-block::

        ops.add_ext(input, other, alpha)

    Refer to :func:`mindspore.ops.add_ext` for more details.
    """
    __mindspore_signature__ = (
        sig.make_sig('input', dtype=sig.sig_dtype.T),
        sig.make_sig('other', dtype=sig.sig_dtype.T),
        sig.make_sig('alpha', dtype=sig.sig_dtype.T1, default=1),
    )

    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, input, other, alpha=1):
        return _convert_stub(pyboost_add_ext(self, [input, other, alpha]))


add_ext_op=AddExt()


class AddLayerNormGrad(Primitive):
    r"""
    .. code-block::

        prim = ops.AddLayerNormGrad()
        out = prim(dy, x1, x2, rstd, mean, gamma, dsumOptional)

    is equivalent to

    .. code-block::

        ops.add_layer_norm_grad(dy, x1, x2, rstd, mean, gamma, dsumOptional)

    Refer to :func:`mindspore.ops.add_layer_norm_grad` for more details.
    """
    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, dy, x1, x2, rstd, mean, gamma, dsumOptional):
        return _convert_stub(pyboost_add_layer_norm_grad(self, [dy, x1, x2, rstd, mean, gamma, dsumOptional]))


add_layer_norm_grad_op=AddLayerNormGrad()


class AddLayerNormV2(Primitive):
    r"""
    .. code-block::

        prim = ops.AddLayerNormV2()
        out = prim(x1, x2, gamma, beta, epsilon, additionalOut)

    is equivalent to

    .. code-block::

        ops.add_layernorm_v2(x1, x2, gamma, beta, epsilon, additionalOut)

    Refer to :func:`mindspore.ops.add_layernorm_v2` for more details.
    """
    __mindspore_signature__ = (
        sig.make_sig('x1'),
        sig.make_sig('x2'),
        sig.make_sig('gamma'),
        sig.make_sig('beta'),
        sig.make_sig('epsilon', default=1e-5),
        sig.make_sig('additionalOut', default=False),
    )

    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, x1, x2, gamma, beta, epsilon=1e-5, additionalOut=False):
        return _convert_stub(pyboost_add_layernorm_v2(self, [x1, x2, gamma, beta, epsilon, additionalOut]))


add_layernorm_v2_op=AddLayerNormV2()
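

# Illustrative sketch of the scaled add provided by the AddExt primitive defined
# above (an assumption based on :func:`mindspore.ops.add_ext`, which computes
# input + alpha * other; assumes PyNative mode):
#
#   import numpy as np
#   import mindspore
#   from mindspore import Tensor
#
#   a = Tensor(np.array([1.0, 2.0]), mindspore.float32)
#   b = Tensor(np.array([10.0, 10.0]), mindspore.float32)
#   out = add_ext_op(a, b, 2)   # expected: [21. 22.], i.e. a + 2 * b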

class Add(Primitive):
    r"""
    .. code-block::

        prim = ops.Add()
        out = prim(input, other)

    is equivalent to

    .. code-block::

        ops.add(input, other)

    Refer to :func:`mindspore.ops.add` for more details.
    """
    __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)

    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, input, other):
        return _convert_stub(pyboost_add(self, [input, other]))


add_op=Add()
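

# Minimal usage sketch for the Add primitive above (illustrative only; assumes
# PyNative mode): `add_op` performs an elementwise, broadcasting addition.
#
#   import numpy as np
#   import mindspore
#   from mindspore import Tensor
#
#   a = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
#   b = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
#   print(add_op(a, b))   # [5. 7. 9.]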

class Addcdiv(Primitive):
    r"""
    Adds the element-wise division of `x1` by `x2`, multiplied by `value` to `input_data`.
    It computes the following operation:

    .. math::
        y[i] = input\_data[i] + value[i] * (x1[i] / x2[i])

    Inputs:
        - **input_data** (Tensor) - The tensor to be added.
        - **x1** (Tensor) - The numerator tensor.
        - **x2** (Tensor) - The denominator tensor.
        - **value** (Tensor) - The multiplier for tensor x1/x2.

    Outputs:
        Tensor, has the same shape and dtype as x1/x2.

    Raises:
        TypeError: If dtype of `x1`, `x2`, `value`, `input_data` is not tensor.
        TypeError: If dtype of `x1`, `x2`, `value`, `input_data` are not the same.
        ValueError: If `x1` could not be broadcast to `x2`.
        ValueError: If `value` could not be broadcast to `x1/x2`.
        ValueError: If `input_data` could not be broadcast to `value*(x1/x2)`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> input_data = Tensor(np.array([1, 1, 1, 1]), mindspore.float32)
        >>> x1 = Tensor(np.array([1, 2, 3, 4]), mindspore.float32)
        >>> x2 = Tensor(np.array([4, 3, 2, 1]), mindspore.float32)
        >>> value = Tensor([1], mindspore.float32)
        >>> addcdiv = ops.Addcdiv()
        >>> y = addcdiv(input_data, x1, x2, value)
        >>> print(y)
        [1.25 1.6666667 2.5 5. ]
    """
    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, input, tensor1, tensor2, value):
        return super().__call__(input, tensor1, tensor2, value)


addcdiv_op=Addcdiv()


class Addcmul(Primitive):
    r"""
    Adds the element-wise product of `x1` by `x2`, multiplied by `value` to `input_data`.
    It computes the following operation:

    .. math::
        output[i] = input\_data[i] + value[i] * (x1[i] * x2[i])

    Inputs:
        - **input_data** (Tensor) - The tensor to be added.
        - **x1** (Tensor) - The tensor to be multiplied.
        - **x2** (Tensor) - The tensor to be multiplied.
        - **value** (Tensor) - The multiplier for tensor x1*x2.

    Outputs:
        Tensor, has the same shape and dtype as x1*x2.

    Raises:
        TypeError: If dtype of `x1`, `x2`, `value`, `input_data` is not tensor.
        TypeError: If dtype of `x1`, `x2`, `value`, `input_data` are not the same.
        ValueError: If `x1` could not be broadcast to `x2`.
        ValueError: If `value` could not be broadcast to `x1` * `x2`.
        ValueError: If `input_data` could not be broadcast to `value*(x1*x2)`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> input_data = Tensor(np.array([1, 1, 1]), mindspore.float32)
        >>> x1 = Tensor(np.array([[1], [2], [3]]), mindspore.float32)
        >>> x2 = Tensor(np.array([[1, 2, 3]]), mindspore.float32)
        >>> value = Tensor([1], mindspore.float32)
        >>> addcmul = ops.Addcmul()
        >>> y = addcmul(input_data, x1, x2, value)
        >>> print(y)
        [[ 2.  3.  4.]
         [ 3.  5.  7.]
         [ 4.  7. 10.]]
    """
    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, input, tensor1, tensor2, value):
        return super().__call__(input, tensor1, tensor2, value)


addcmul_op=Addcmul()


class Addmm(Primitive):
    r"""
    .. code-block::

        prim = ops.Addmm()
        out = prim(input, mat1, mat2, beta, alpha)

    is equivalent to

    .. code-block::

        ops.addmm(input, mat1, mat2, beta, alpha)

    Refer to :func:`mindspore.ops.addmm` for more details.
    """
    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, input, mat1, mat2, beta, alpha):
        return _convert_stub(pyboost_addmm(self, [input, mat1, mat2, beta, alpha]))


addmm_op=Addmm()


class AddN(Primitive):
    r"""
    .. code-block::

        prim = ops.AddN()
        out = prim(x)

    is equivalent to

    .. code-block::

        ops.addn(x)

    Refer to :func:`mindspore.ops.addn` for more details.
    """
    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, x):
        return super().__call__(x)


addn_op=AddN()
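

# Illustrative sketch for the Addmm primitive above (an assumption following
# :func:`mindspore.ops.addmm`, i.e. out = beta * input + alpha * (mat1 @ mat2);
# assumes PyNative mode):
#
#   import numpy as np
#   import mindspore
#   from mindspore import Tensor
#
#   inp = Tensor(np.zeros((2, 2)), mindspore.float32)
#   mat1 = Tensor(np.ones((2, 3)), mindspore.float32)
#   mat2 = Tensor(np.ones((3, 2)), mindspore.float32)
#   out = addmm_op(inp, mat1, mat2, 1, 1)   # expected: a 2x2 tensor filled with 3.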

class Angle(Primitive):
    r"""
    .. code-block::

        prim = ops.Angle()
        out = prim(input)

    is equivalent to

    .. code-block::

        ops.angle(input)

    Refer to :func:`mindspore.ops.angle` for more details.
    """
    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, input):
        return super().__call__(input)


angle_op=Angle()


class ApplyAdamW(Primitive):
    r"""
    """
    __mindspore_signature__ = (
        sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
        sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T1),
        sig.make_sig('v', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T1),
        sig.make_sig('beta1_power', dtype=sig.sig_dtype.T2),
        sig.make_sig('beta2_power', dtype=sig.sig_dtype.T3),
        sig.make_sig('lr', dtype=sig.sig_dtype.T4),
        sig.make_sig('weight_decay', dtype=sig.sig_dtype.T4),
        sig.make_sig('beta1', dtype=sig.sig_dtype.T4),
        sig.make_sig('beta2', dtype=sig.sig_dtype.T4),
        sig.make_sig('epsilon', dtype=sig.sig_dtype.T4),
        sig.make_sig('grad', dtype=sig.sig_dtype.T),
        sig.make_sig('max_grad_norm', dtype=sig.sig_dtype.T5, default=None),
        sig.make_sig('amsgrad', dtype=sig.sig_dtype.T6, default=False),
        sig.make_sig('maximize', dtype=sig.sig_dtype.T7, default=False),
    )

    @prim_arg_register
    def __init__(self):
        self.add_prim_attr("side_effect_mem", True)

    def __call__(self, var, m, v, beta1_power, beta2_power, lr, weight_decay, beta1, beta2, epsilon, grad, max_grad_norm=None, amsgrad=False, maximize=False):
        return super().__call__(var, m, v, beta1_power, beta2_power, lr, weight_decay, beta1, beta2, epsilon, grad, max_grad_norm, amsgrad, maximize)


apply_adamw_op=ApplyAdamW()


class ApplyCamePart1(Primitive):
    r"""
    Computes Part 1 of the CAME Optimizer.

    Args:
        - **grad** (Tensor) - The shape = 2D :math:`(..., n, m)`. A Tensor of types: float16, float32, bfloat16.
        - **eps** (float) - data type must be float.

    Returns:
        - **sum_grad_r** (Tensor) - A Tensor of shape :math:`(..., n)`
        - **sum_grad_c** (Tensor) - A Tensor of shape :math:`(..., m)`
        - **sum_grad_rc** (Tensor) - A Tensor of shape :math:`(..., m)`

    Raises:
        TypeError: If `grad` is not a Tensor.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> import mindspore as ms
        >>> import numpy as np
        >>> from mindspore import Tensor
        >>> from mindspore.ops.operations import _inner_ops as P
        >>> grad = Tensor(np.ones([1024, 64]), dtype=ms.float32)
        >>> apply_came_part1 = P.ApplyCamePart1()
        >>> output = apply_came_part1(grad, 1.1)
        >>> print(output[0].asnumpy())
        (1024,)
    """
    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, grad, eps):
        return super().__call__(grad, eps)


apply_came_part1_op=ApplyCamePart1()


class ApplyCamePart2(Primitive):
    r"""
    Computes Part 2 of the CAME Optimizer.

    Args:
        - **grad** (Tensor) - The shape = 2D :math:`(..., n, m)`. A Tensor of types: float16, float32, bfloat16.
        - **sum_grad_r** (Tensor) - The shape = 1D :math:`(..., n)`. A Tensor of types: float32.
        - **sum_grad_c** (Tensor) - The shape = 1D :math:`(..., m)`. A Tensor of types: float32.
        - **sum_grad_rc** (Tensor) - The shape = 1D :math:`(...)`. A Tensor of types: float32.
        - **r** (Tensor) - The shape = 1D :math:`(..., n)`. The Tensor has the same data type as `grad`.
        - **c** (Tensor) - The shape = 1D :math:`(..., m)`. The Tensor has the same data type as `grad`.
        - **beta2** (float) - data type must be float.
        - **sum_r** (Tensor) - The shape = 1D :math:`(..., 1)`. 'None' is currently supported.
          A Tensor of types: float32.
        - **global_shape** (Tensor) - the shape = 1D :math:`(2)`. 'None' is currently supported.
          A Tensor of types: int64.

    Returns:
        - **r** (Tensor) - A Tensor of shape :math:`(..., n)`
        - **c** (Tensor) - A Tensor of shape :math:`(..., m)`
        - **u** (Tensor) - A Tensor of shape :math:`(..., n, m)`
        - **sum_square_u** (Tensor) - A Tensor of shape :math:`(1)`

    Raises:
        TypeError: If `grad` is not a Tensor.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> import mindspore as ms
        >>> import numpy as np
        >>> from mindspore import Tensor
        >>> from mindspore.ops.operations import _inner_ops as P
        >>> apply_came_part2 = P.ApplyCamePart2()
        >>> grad = Tensor(np.ones([1024, 64]), dtype=ms.float32)
        >>> sum_grad_r = Tensor(np.ones([1024]), dtype=ms.float32)
        >>> sum_grad_c = Tensor(np.ones([64]), dtype=ms.float32)
        >>> sum_grad_rc = Tensor(np.array([64]), dtype=ms.float32)
        >>> r = Tensor(np.ones([1024]), dtype=ms.float32)
        >>> c = Tensor(np.ones([64]), dtype=ms.float32)
        >>> beta2 = 0.5
        >>> sum_r = Tensor(np.array([128]), dtype=ms.float32)
        >>> global_shape = (1024, 64)
        >>> output = apply_came_part2(grad, sum_grad_r, sum_grad_c, sum_grad_rc, r, c, beta2, sum_r, global_shape)
        >>> print(output[0].shape)
        (1024,)
    """
    __mindspore_signature__ = (
        sig.make_sig('grad'),
        sig.make_sig('sum_grad_r'),
        sig.make_sig('sum_grad_c'),
        sig.make_sig('sum_grad_rc'),
        sig.make_sig('r'),
        sig.make_sig('c'),
        sig.make_sig('beta2'),
        sig.make_sig('sum_r', default=None),
        sig.make_sig('global_shape', default=None),
    )

    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, grad, sum_grad_r, sum_grad_c, sum_grad_rc, r, c, beta2, sum_r=None, global_shape=None):
        return super().__call__(grad, sum_grad_r, sum_grad_c, sum_grad_rc, r, c, beta2, sum_r, global_shape)


apply_came_part2_op=ApplyCamePart2()


class ApplyCamePart3(Primitive):
    r"""
    Computes Part 3 of the CAME Optimizer.

    Args:
        - **u** (Parameter) - The shape = 2D :math:`(..., n, m)`. A Tensor of types: float16, float32, bfloat16.
        - **m** (Parameter) - The shape = 2D :math:`(..., n, m)`. A Tensor of types: float16, float32, bfloat16.
        - **eps** (float) - data type must be float.
        - **beta1** (float) - data type must be float.
        - **clip_threshold** (float) - data type must be float.
        - **sum_square_u** (Tensor) - The shape = 1D :math:`(1)`. A Tensor of types: float32.
        - **global_shape** (Tensor) - the shape = 1D :math:`(2)`. 'None' is currently supported.
          A Tensor of types: int64.
        - **use_first_moment** (bool).

    Returns:
        - **m** (Tensor) - A Tensor of shape :math:`(..., n, m)`
        - **sum_u_r** (Tensor) - A Tensor of shape :math:`(..., n)`
        - **sum_u_c** (Tensor) - A Tensor of shape :math:`(..., m)`
        - **sum_u_rc** (Tensor) - A Tensor of shape :math:`(...)`

    Raises:
        TypeError: If `u` is not a Tensor.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> import mindspore as ms
        >>> import numpy as np
        >>> from mindspore import Tensor
        >>> from mindspore.ops.operations import _inner_ops as P
        >>> apply_came_part3 = P.ApplyCamePart3()
        >>> u = Tensor(np.ones([1024, 64]), dtype=ms.float32)
        >>> m = Tensor(np.ones([1024, 64]), dtype=ms.float32)
        >>> eps = 0.8
        >>> beta1 = 0.5
        >>> clip_threshold = 0.5
        >>> sum_square_u = Tensor(np.array([128]), dtype=ms.float32)
        >>> global_shape = (1024, 64)
        >>> use_first_moment = False
        >>> output = apply_came_part3(u, m, eps, beta1, clip_threshold, sum_square_u, global_shape, use_first_moment)
        >>> print(output[0].shape)
        (1024, 64)
    """
    __mindspore_signature__ = (
        sig.make_sig('u'),
        sig.make_sig('m'),
        sig.make_sig('eps'),
        sig.make_sig('beta1'),
        sig.make_sig('clip_threshold'),
        sig.make_sig('sum_square_u'),
        sig.make_sig('global_shape', default=None),
        sig.make_sig('use_first_moment', default=False),
    )

    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, u, m, eps, beta1, clip_threshold, sum_square_u, global_shape=None, use_first_moment=False):
        return super().__call__(u, m, eps, beta1, clip_threshold, sum_square_u, global_shape, use_first_moment)


apply_came_part3_op=ApplyCamePart3()


class ApplyCamePart4(Primitive):
    r"""
    Computes Part 4 of the CAME Optimizer.

    Args:
        - **param** (Parameter) - The shape = 2D :math:`(..., n, m)`. A Tensor of types: float16, float32, bfloat16.
        - **m** (Parameter) - The shape = 2D :math:`(..., n, m)`. The Tensor has the same data type as `param`.
        - **r** (Tensor) - The shape = 1D :math:`(..., n)`. The Tensor has the same data type as `param`.
        - **c** (Tensor) - The shape = 1D :math:`(..., m)`. The Tensor has the same data type as `param`.
        - **weight_decay** (Tensor) - The shape = 1D :math:`(1)`. A Tensor of types: float32.
        - **lr** (Tensor) - The shape = 1D :math:`(1)`. A Tensor of types: float32.
        - **beta3** (float) - data type must be float.
        - **sum_r** (Tensor) - The shape = 1D :math:`(..., 1)`. 'None' is currently supported.
          A Tensor of types: float32.
        - **sum_u_r** (Tensor) - The shape = 1D :math:`(..., n)`. A Tensor of types: float32.
        - **sum_u_c** (Tensor) - The shape = 1D :math:`(..., m)`. A Tensor of types: float32.
        - **sum_u_rc** (Tensor) - The shape = 1D :math:`(...)`. A Tensor of types: float32.
        - **global_shape** (Tensor) - the shape = 1D :math:`(2)`. 'None' is currently supported.
          A Tensor of types: int64.

    Returns:
        - **param** (Tensor) - A Tensor of shape :math:`(..., n, m)`
        - **r** (Tensor) - A Tensor of shape :math:`(..., n)`
        - **c** (Tensor) - A Tensor of shape :math:`(..., m)`

    Raises:
        TypeError: If `param` is not a Tensor.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> import mindspore as ms
        >>> import numpy as np
        >>> from mindspore import Tensor
        >>> from mindspore.ops.operations import _inner_ops as P
        >>> apply_came_part4 = P.ApplyCamePart4()
        >>> param = Tensor(np.ones([1024, 64]), dtype=ms.float32)
        >>> m = Tensor(np.ones([1024, 64]), dtype=ms.float32)
        >>> r = Tensor(np.ones([1024]), dtype=ms.float32)
        >>> c = Tensor(np.ones([64]), dtype=ms.float32)
        >>> weight_decay = Tensor([0.8])
        >>> lr = Tensor([0.5])
        >>> beta3 = 0.5
        >>> sum_r = Tensor(np.array([128.]), dtype=ms.float32)
        >>> sum_u_r = Tensor(np.ones([1024]), dtype=ms.float32)
        >>> sum_u_c = Tensor(np.ones([64]), dtype=ms.float32)
        >>> sum_u_rc = Tensor(np.array([128.]), dtype=ms.float32)
        >>> global_shape = (1024, 64)
        >>> output = apply_came_part4(param, m, r, c, weight_decay, lr, beta3, \
        ...                           sum_r, sum_u_r, sum_u_c, sum_u_rc, global_shape)
        >>> print(output[0].shape)
        (1024, 64)
    """
    __mindspore_signature__ = (
        sig.make_sig('param'),
        sig.make_sig('m'),
        sig.make_sig('r'),
        sig.make_sig('c'),
        sig.make_sig('weight_decay'),
        sig.make_sig('lr'),
        sig.make_sig('beta3'),
        sig.make_sig('sum_r'),
        sig.make_sig('sum_u_r'),
        sig.make_sig('sum_u_c'),
        sig.make_sig('sum_u_rc'),
        sig.make_sig('global_shape', default=None),
    )

    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, param, m, r, c, weight_decay, lr, beta3, sum_r, sum_u_r, sum_u_c, sum_u_rc, global_shape=None):
        return super().__call__(param, m, r, c, weight_decay, lr, beta3, sum_r, sum_u_r, sum_u_c, sum_u_rc, global_shape)


apply_came_part4_op=ApplyCamePart4()


class ApplyRotaryPosEmb(Primitive):
    r"""
    .. code-block::

        prim = ops.ApplyRotaryPosEmb(cos_format)
        out = prim(query, key, cos, sin, position_ids)

    is equivalent to

    .. code-block::

        ops.apply_rotary_pos_emb_(query, key, cos, sin, position_ids, cos_format)

    Refer to :func:`mindspore.ops.apply_rotary_pos_emb_` for more details.
    """
    @prim_arg_register
    def __init__(self, cos_format=0):
        self._set_prim_arg("cos_format", cos_format)

    def __call__(self, query, key, cos, sin, position_ids):
        return super().__call__(query, key, cos, sin, position_ids, self.cos_format)


class Arange(Primitive):
    r"""
    Creates a sequence of numbers that begins at `start` and extends by increments of `step` up to
    but not including `end`.

    Inputs:
        start (number): The first number in the sequence. Must have type: int32, int64, float32 or float64.
        end (number): Upper end of the sequence, exclusive. Must have type: int32, int64, float32 or float64.
        step (number): Number that increments `start`. Must have type: int32, int64, float32 or float64.
        dtype (mindspore.dtype, optional): Specified dtype of the result tensor. Default: ``None`` .
            Supported values are: int32, int64, float32, float64, and bfloat16.

    Outputs:
        A 1-D Tensor with the required dtype. When dtype is ``None``, then:
        If `start`, `end` and `step` are all integers, the Tensor dtype is int64.
        If at least one of `start`, `end` and `step` is floating-point numbers, the Tensor dtype is float32.

    Raises:
        TypeError: If the datatype of `start`, `end` or `step` is not supported.
        ValueError: If `step` = 0.
        ValueError: If `start` >= `end` when `step` > 0.
        ValueError: If `start` <= `end` when `step` < 0.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> from mindspore import ops
        >>> start = 0
        >>> end = 10
        >>> step = 4
        >>> net = ops.Arange()
        >>> output = net(start, end, step)
        >>> print(output)
        [0 4 8]
    """
    __mindspore_signature__ = (
        sig.make_sig('start', dtype=sig.sig_dtype.T),
        sig.make_sig('end', dtype=sig.sig_dtype.T),
        sig.make_sig('step', dtype=sig.sig_dtype.T),
        sig.make_sig('dtype', dtype=sig.sig_dtype.T1, default=None),
    )

    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, start, end, step, dtype=None):
        return _convert_stub(pyboost_arange(self, [start, end, step, dtype if dtype is None else dtype_to_type_id('Arange', 'dtype', dtype)]))


arange_op=Arange()


class ArgMaxExt(Primitive):
    r"""
    .. code-block::

        prim = ops.ArgMaxExt()
        out = prim(input, dim, keepdim)

    is equivalent to

    .. code-block::

        ops.argmax_ext(input, dim, keepdim)

    Refer to :func:`mindspore.ops.argmax_ext` for more details.
    """
    __mindspore_signature__ = (
        sig.make_sig('input'),
        sig.make_sig('dim', default=None),
        sig.make_sig('keepdim', default=False),
    )

    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, input, dim=None, keepdim=False):
        return _convert_stub(pyboost_argmax_ext(self, [input, dim, keepdim]))


argmax_ext_op=ArgMaxExt()
[docs]class Argmax(Primitive): r""" Returns the indices of the maximum value along a specified `axis` of a Tensor. Refer to :func:`mindspore.ops.argmax` for more details. Args: axis (int): Axis where the Argmax operation applies to. Default: ``-1`` . output_type (:class:`mindspore.dtype`): Output data type. Supported types: ``mstype.int32`` , ``mstype.int64`` . Default: ``mstype.int32`` . Inputs: - **input_x** (Tensor) - The input tensor. :math:`(N, *)` where :math:`*` means, any number of additional dimensions. Outputs: Tensor, indices of the max value of input tensor across the axis. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> input_x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32)) >>> output = ops.Argmax(output_type=mindspore.int32)(input_x) >>> print(output) [1 0 0] """ @prim_arg_register def __init__(self, axis=-1, output_type=mstype.int32): self._set_prim_arg("axis", axis) self._set_prim_arg_with_handler("output_type", output_type, dtype_to_type_id) def __call__(self, input_x): return super().__call__(input_x, self.axis, self.output_type)
[docs]class ArgMaxWithValue(Primitive): r""" Calculates the maximum value along with the given axis for the input tensor, and returns the maximum values and indices. Note: In auto_parallel and semi_auto_parallel mode, the first output index can not be used. .. warning:: - If there are multiple maximum values, the index of the first maximum value is used. - The value range of `axis` is [-dims, dims - 1]. "dims" is the dimension length of `input`. Also see :func:`mindspore.ops.max`. Args: axis (int): The dimension to reduce. Default: ``0`` . keep_dims (bool): Whether to reduce dimension, if ``True`` , the output will keep same dimension with the input, the output will reduce dimension if ``false`` . Default: ``False`` . Inputs: - **input** (Tensor) - The input tensor, can be any dimension. Set the shape of input tensor as :math:`(input_1, input_2, ..., input_N)`. Outputs: tuple (Tensor), tuple of 2 tensors, containing the corresponding index and the maximum value of the input tensor. - **index** (Tensor) - The index for the maximum value of the input tensor, with dtype int64. If `keep_dims` is ``True`` , the shape of output tensors is :math:`(input_1, input_2, ..., input_{axis-1}, 1, input_{axis+1}, ..., input_N)`. Otherwise, the shape is :math:`(input_1, input_2, ..., input_{axis-1}, input_{axis+1}, ..., input_N)` . - **values** (Tensor) - The maximum value of input tensor, with the same shape as `index`, and same dtype as `input`. Raises: TypeError: If `keep_dims` is not a bool. TypeError: If `axis` is not an int. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> input_x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32) >>> index, output = ops.ArgMaxWithValue()(input_x) >>> print(index, output) 3 0.7 >>> index, output = ops.ArgMaxWithValue(keep_dims=True)(input_x) >>> print(index, output) [3] [0.7] """ @prim_arg_register def __init__(self, axis=0, keep_dims=False): self._set_prim_arg("axis", axis) self._set_prim_arg("keep_dims", keep_dims) def __call__(self, input): return _convert_stub(pyboost_argmax_with_value(self, [input, self.axis, self.keep_dims]))
class ArgMinExt(Primitive): r""" .. code-block:: prim = ops.ArgMinExt() out = prim(input, dim, keepdim) is equivalent to .. code-block:: ops.argmin_ext(input, dim, keepdim) Refer to :func:`mindspore.ops.argmin_ext` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('dim', default=None), sig.make_sig('keepdim', default=False), ) @prim_arg_register def __init__(self): pass def __call__(self, input, dim=None, keepdim=False): return _convert_stub(pyboost_argmin_ext(self, [input, dim, keepdim])) argmin_ext_op=ArgMinExt()
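# A minimal usage sketch for argmin_ext_op (sample values are illustrative; it follows
# the ops.argmin_ext calling convention documented above):
#
#     import numpy as np
#     from mindspore import Tensor
#     x = Tensor(np.array([[2.0, 1.0, 4.0], [0.5, 3.0, 6.0]], np.float32))
#     argmin_ext_op(x, dim=1)                # row-wise minima indices -> [1, 0]
#     argmin_ext_op(x, dim=1, keepdim=True)  # keeps the reduced axis: shape (2, 1)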
[docs]class Argmin(Primitive): r""" Returns the indices of the minimum value along a specified `axis` of a Tensor. If the shape of input tensor is :math:`(x_1, ..., x_N)`, the shape of the output tensor is :math:`(x_1, ..., x_{axis-1}, x_{axis+1}, ..., x_N)`. Args: axis (int): Axis where the Argmin operation applies to. Default: ``-1`` . output_type (:class:`mindspore.dtype`): Output data type. Supported types: ``mstype.int32`` , ``mstype.int64`` . Default: ``mstype.int32`` . Inputs: - **input_x** (Tensor) - Input tensor. The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions. Outputs: Tensor, which is the minimum index in the specified axis of input Tensor. Raises: TypeError: If `axis` is not an int. TypeError: If `output_type` is neither int32 nor int64. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> input_x = Tensor(np.array([2.0, 3.1, 1.2]), mindspore.float32) >>> index = ops.Argmin()(input_x) >>> print(index) 2 """ @prim_arg_register def __init__(self, axis=-1, output_type=mstype.int32): self._set_prim_arg("axis", axis) self._set_prim_arg_with_handler("output_type", output_type, dtype_to_type_id) def __call__(self, x): return super().__call__(x, self.axis, self.output_type)
[docs]class ArgMinWithValue(Primitive): r""" Calculates the minimum value along with the given axis for the input tensor, and returns the minimum values and indices. Note: In auto_parallel and semi_auto_parallel mode, the first output index cannot be used. .. warning:: - If there are multiple minimum values, the index of the first minimum value is used. - The value range of `axis` is [-dims, dims - 1]. "dims" is the dimension length of `input`. Also see :func:`mindspore.ops.min`. Args: axis (int): The dimension to reduce. Default: ``0`` . keep_dims (bool): Whether to keep the reduced dimension. If ``True`` , the output keeps the same dimension as the input; if ``False`` , the dimension is reduced. Default: ``False`` . Inputs: - **input** (Tensor) - The input tensor, which can be of any dimension. Set the shape of input tensor as :math:`(input_1, input_2, ..., input_N)` . Complex tensors are not supported. Outputs: tuple (Tensor), tuple of 2 tensors, containing the corresponding index and the minimum value of the input tensor. - **index** (Tensor) - The index for the minimum value of the input tensor, with dtype int64. If `keep_dims` is ``True`` , the shape of output tensors is :math:`(input_1, input_2, ..., input_{axis-1}, 1, input_{axis+1}, ..., input_N)`. Otherwise, the shape is :math:`(input_1, input_2, ..., input_{axis-1}, input_{axis+1}, ..., input_N)` . - **values** (Tensor) - The minimum value of input tensor, with the same shape as `index`, and same dtype as `input`. Raises: TypeError: If `input` is not Tensor. TypeError: If `keep_dims` is not a bool. TypeError: If `axis` is not an int. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32) >>> index, output = ops.ArgMinWithValue()(x) >>> print(index, output) 0 0.0 >>> index, output = ops.ArgMinWithValue(keep_dims=True)(x) >>> print(index, output) [0] [0.0] """ @prim_arg_register def __init__(self, axis=0, keep_dims=False): self._set_prim_arg("axis", axis) self._set_prim_arg("keep_dims", keep_dims) def __call__(self, input): return _convert_stub(pyboost_argmin_with_value(self, [input, self.axis, self.keep_dims]))
class ArgSort(Primitive): r""" .. code-block:: prim = ops.ArgSort() out = prim(input, dim, descending) is equivalent to .. code-block:: ops.argsort_ext(input, dim, descending) Refer to :func:`mindspore.ops.argsort_ext` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('dim', default=-1), sig.make_sig('descending', default=False), ) @prim_arg_register def __init__(self): pass def __call__(self, input, dim=-1, descending=False): return _convert_stub(pyboost_argsort(self, [input, dim, descending])) argsort_op=ArgSort() class AsinExt(Primitive): r""" .. code-block:: prim = ops.AsinExt() out = prim(input) is equivalent to .. code-block:: ops.asin_ext(input) Refer to :func:`mindspore.ops.asin_ext` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_asin_ext(self, [input])) asin_ext_op=AsinExt() class AsinGrad(Primitive): r""" Computes AsinGrad of input element-wise. Returns: Tensor, has the same type as input. """ @prim_arg_register def __init__(self): pass def __call__(self, x, dout): return super().__call__(x, dout) asin_grad_op=AsinGrad()
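# A minimal usage sketch for argsort_op defined above (sample values are illustrative;
# it follows the ops.argsort_ext convention of sorting along `dim`, ascending by default):
#
#     import numpy as np
#     from mindspore import Tensor
#     x = Tensor(np.array([3.0, 1.0, 2.0], np.float32))
#     argsort_op(x)                    # ascending-order indices -> [1, 2, 0]
#     argsort_op(x, descending=True)   # descending-order indices -> [0, 2, 1]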
[docs]class Asin(Primitive): r""" .. code-block:: prim = ops.Asin() out = prim(input) is equivalent to .. code-block:: ops.asin(input) Refer to :func:`mindspore.ops.asin` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return super().__call__(input)
asin_op=Asin() class AsinhExt(Primitive): r""" .. code-block:: prim = ops.AsinhExt() out = prim(input) is equivalent to .. code-block:: ops.asinh_ext(input) Refer to :func:`mindspore.ops.asinh_ext` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_asinh_ext(self, [input])) asinh_ext_op=AsinhExt() class AsinhGrad(Primitive): r""" Performs grad of Asinh operation. """ @prim_arg_register def __init__(self): pass def __call__(self, out, dout): return super().__call__(out, dout) asinh_grad_op=AsinhGrad()
[docs]class Asinh(Primitive): r""" .. code-block:: prim = ops.Asinh() out = prim(input) is equivalent to .. code-block:: ops.asinh(input) Refer to :func:`mindspore.ops.asinh` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return super().__call__(input)
asinh_op=Asinh()
[docs]class AssignAdd(Primitive): r""" .. code-block:: prim = ops.AssignAdd() out = prim(variable, value) is equivalent to .. code-block:: ops.assign_add(variable, value) Refer to :func:`mindspore.ops.assign_add` for more details. """ __mindspore_signature__ = ( sig.make_sig('variable', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T), sig.make_sig('value', dtype=sig.sig_dtype.T), ) @prim_arg_register def __init__(self): self.add_prim_attr("side_effect_mem", True) def __call__(self, variable, value): return super().__call__(variable, value)
assign_add_op=AssignAdd()
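# A minimal usage sketch for assign_add_op (the first input must be writable, per the
# RW_WRITE signature above; sample values are illustrative):
#
#     import numpy as np
#     from mindspore import Parameter, Tensor
#     var = Parameter(Tensor(np.array([1.0, 2.0], np.float32)), name="var")
#     assign_add_op(var, Tensor(np.array([3.0, 3.0], np.float32)))
#     var.asnumpy()   # updated in place -> [4. 5.]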
[docs]class Assign(Primitive): r""" .. code-block:: prim = ops.Assign() out = prim(variable, value) is equivalent to .. code-block:: ops.assign(variable, value) Refer to :func:`mindspore.ops.assign` for more details. """ __mindspore_signature__ = ( sig.make_sig('variable', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T), sig.make_sig('value', dtype=sig.sig_dtype.T), ) @prim_arg_register def __init__(self): self.add_prim_attr("side_effect_mem", True) def __call__(self, variable, value): return super().__call__(variable, value)
assign_op=Assign() class Atan2Ext(Primitive): r""" .. code-block:: prim = ops.Atan2Ext() out = prim(input, other) is equivalent to .. code-block:: ops.atan2_ext(input, other) Refer to :func:`mindspore.ops.atan2_ext` for more details. """ __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T) @prim_arg_register def __init__(self): pass def __call__(self, input, other): return _convert_stub(pyboost_atan2_ext(self, [input, other])) atan2_ext_op=Atan2Ext()
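# A minimal usage sketch for atan2_ext_op (element-wise arctangent of input/other, as
# in ops.atan2_ext referenced above; sample values are illustrative):
#
#     import numpy as np
#     from mindspore import Tensor
#     y = Tensor(np.array([0.0, 1.0], np.float32))
#     x = Tensor(np.array([1.0, 1.0], np.float32))
#     atan2_ext_op(y, x)   # -> [0.0, 0.7853982], i.e. [0, pi/4]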
[docs]class Atan2(Primitive): r""" .. code-block:: prim = ops.Atan2() out = prim(input, other) is equivalent to .. code-block:: ops.atan2(input, other) Refer to :func:`mindspore.ops.atan2` for more details. """ __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T) @prim_arg_register def __init__(self): pass def __call__(self, input, other): return super().__call__(input, other)
atan2_op=Atan2() class AtanExt(Primitive): r""" .. code-block:: prim = ops.AtanExt() out = prim(input) is equivalent to .. code-block:: ops.atan_ext(input) Refer to :func:`mindspore.ops.atan_ext` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_atan_ext(self, [input])) atan_ext_op=AtanExt() class AtanGrad(Primitive): r""" Computes AtanGrad of input element-wise. Returns: Tensor, has the same type as input. """ @prim_arg_register def __init__(self): pass def __call__(self, x, dout): return super().__call__(x, dout) atan_grad_op=AtanGrad()
[docs]class Atan(Primitive): r""" .. code-block:: prim = ops.Atan() out = prim(input) is equivalent to .. code-block:: ops.atan(input) Refer to :func:`mindspore.ops.atan` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return super().__call__(input)
atan_op=Atan()
[docs]class Atanh(Primitive): r""" .. code-block:: prim = ops.Atanh() out = prim(input) is equivalent to .. code-block:: ops.atanh(input) Refer to :func:`mindspore.ops.atanh` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_atanh(self, [input]))
atanh_op=Atanh() class AvgPool2DGrad(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('grad'), sig.make_sig('image'), sig.make_sig('kernel_size'), sig.make_sig('stride'), sig.make_sig('padding', default=0), sig.make_sig('ceil_mode', default=False), sig.make_sig('count_include_pad', default=True), sig.make_sig('divisor_override', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, grad, image, kernel_size, stride, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None): return _convert_stub(pyboost_avg_pool2d_grad(self, [grad, image, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override])) avg_pool2d_grad_op=AvgPool2DGrad() class AvgPool2D(Primitive): r""" Applies a 2D average pooling over an input Tensor which can be regarded as a composition of 2D input planes. Typically the input is of shape :math:`(N, C, H_{in}, W_{in})` , outputs regional average in the :math:`(H_{in}, W_{in})` -dimension. Given kernel size :math:`(kH, kW)` and `stride` , the operation is as follows. .. math:: \text{output}(N_i, C_j, h, w) = \frac{1}{kH * kW} \sum_{m=0}^{kH-1} \sum_{n=0}^{kW-1} \text{input}(N_i, C_j, stride[0] \times h + m, stride[1] \times w + n) Inputs: input (Tensor): Tensor of shape :math:`(N, C, H_{in}, W_{in})` . kernel_size (Union[int, tuple[int], list[int]]): The size of kernel used to take the average value. Can be a single number or a tuple :math:`(kH, kW)` . stride (Union[int, tuple[int], list[int]]): The distance of kernel moving. Can be a single number or a tuple :math:`(sH, sW)` . padding (Union(int, tuple[int], list[int])): Implicit zero padding to be added on both sides. Can be a single number or a tuple :math:`(padH, padW)` . Default: 0. ceil_mode (bool): If True, apply ceil instead of floor to compute the output shape. Default: ``False``. count_include_pad (bool): If True, include the zero-padding in the averaging calculation. Default: ``True`` . divisor_override (int): If specified, it will be used as divisor in the averaging calculation, otherwise `kernel_size` will be used. Default: ``None``. Outputs: Tensor, with shape :math:`(N, C, H_{out}, W_{out})`. .. math:: H_{out} = \frac{H_{in} + 2 \times padding[0] - kernel_size[0]}{stride[0]} + 1 W_{out} = \frac{W_{in} + 2 \times padding[1] - kernel_size[1]}{stride[1]} + 1 Raises: TypeError: If `input` is not a Tensor. TypeError: If `kernel_size` or `stride` is neither int nor tuple. TypeError: If `ceil_mode` or `count_include_pad` is not a bool. TypeError: If `divisor_override` is not an int or None. ValueError: If the dimension of `input` is not equal to `3` or `4`. ValueError: If `kernel_size` or `stride` is less than 1. ValueError: If value of `padding` is less than `0`. ValueError: If `kernel_size`, `padding` or `stride` is a tuple whose length is not equal to `1` or `2`. 
Supported Platforms: ``Ascend`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> x = Tensor(np.arange(1 * 3 * 3 * 4).reshape(1, 3, 3, 4), mindspore.float32) >>> output = ops.auto_generate.AvgPool2D()(x, 2, 1) >>> print(output) [[[[ 2.5 3.5 4.5] [ 6.5 7.5 8.5]] [[14.5 15.5 16.5] [18.5 19.5 20.5]] [[26.5 27.5 28.5] [30.5 31.5 32.5]]]] """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('kernel_size'), sig.make_sig('stride'), sig.make_sig('padding', default=0), sig.make_sig('ceil_mode', default=False), sig.make_sig('count_include_pad', default=True), sig.make_sig('divisor_override', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, kernel_size, stride, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None): return _convert_stub(pyboost_avg_pool2d(self, [input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override])) avg_pool2d_op=AvgPool2D() class AvgPoolGrad(Primitive): r""" Gradients of the avg pool operation. """ @prim_arg_register def __init__(self, kernel_size=1, strides=1, pad_mode='VALID', data_format='NCHW'): self._set_prim_arg_with_handler("kernel_size", kernel_size, to_kernel_size) self._set_prim_arg_with_handler("strides", strides, to_strides) self._set_prim_arg_with_handler("pad_mode", pad_mode, str_to_enum) self._set_prim_arg_with_handler("data_format", data_format, str_to_enum) def __call__(self, x, out, dout): return super().__call__(x, out, dout, self.kernel_size, self.strides, self.pad_mode, self.data_format)
[docs]class AvgPool(Primitive): r""" Average pooling operation. Refer to :func:`mindspore.ops.avg_pool2d` for more details. Args: kernel_size (Union[int, tuple[int]]): The size of kernel used to take the average value, is an int number that represents height and width of the kernel, or a tuple of two int numbers that represent height and width respectively. Default: ``1`` . strides (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents the height and width of movement are both strides, or a tuple of two int numbers that represent height and width of movement respectively. Default: ``1`` . pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to: ``"SAME"`` or ``"VALID"`` . Default: ``"VALID"`` . - ``"SAME"``: Pad the input around its edges so that the shape of input and output are the same when `stride` is set to ``1``. The amount of padding is calculated by the operator internally. If the amount is even, it is uniformly distributed around the input; if it is odd, the excess amount goes to the right/bottom side. - ``"VALID"``: No padding is applied to the input, and the output returns the maximum possible height and width. Extra pixels that could not complete a full stride will be discarded. data_format (str, optional): The format of input and output data. It should be ``'NHWC'`` or ``'NCHW'`` . Default: ``'NCHW'`` . Inputs: - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`. Supported dtypes: float16, float32, float64. Outputs: Tensor, with shape :math:`(N, C_{out}, H_{out}, W_{out})`. Raises: TypeError: If `kernel_size` or `strides` is neither int nor tuple. TypeError: If dtype of `x` is not float16, float32 or float64. ValueError: If `kernel_size` or `strides` is less than 1. ValueError: If `pad_mode` is neither 'VALID' nor 'SAME' (case insensitive). ValueError: If `data_format` is neither 'NCHW' nor 'NHWC'. ValueError: If length of shape of `x` is not equal to 4. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops, nn >>> class Net(nn.Cell): ... def __init__(self): ... super(Net, self).__init__() ... self.avgpool_op = ops.AvgPool(pad_mode='VALID', kernel_size=2, strides=1) ... ... def construct(self, x): ... result = self.avgpool_op(x) ... return result ... >>> x = Tensor(np.arange(1 * 3 * 3 * 4).reshape(1, 3, 3, 4), mindspore.float32) >>> net = Net() >>> output = net(x) >>> print(output) [[[[ 2.5 3.5 4.5] [ 6.5 7.5 8.5]] [[14.5 15.5 16.5] [18.5 19.5 20.5]] [[26.5 27.5 28.5] [30.5 31.5 32.5]]]] """ @prim_arg_register def __init__(self, kernel_size=1, strides=1, pad_mode='VALID', data_format='NCHW'): self._set_prim_arg_with_handler("kernel_size", kernel_size, to_kernel_size) self._set_prim_arg_with_handler("strides", strides, to_strides) self._set_prim_arg_with_handler("pad_mode", pad_mode, str_to_enum) self._set_prim_arg_with_handler("data_format", data_format, str_to_enum) def __call__(self, x): return super().__call__(x, self.kernel_size, self.strides, self.pad_mode, self.data_format)
class Baddbmm(Primitive): r""" .. code-block:: prim = ops.Baddbmm() out = prim(input, batch1, batch2, beta, alpha) is equivalent to .. code-block:: ops.baddbmm(input, batch1, batch2, beta, alpha) Refer to :func:`mindspore.ops.baddbmm` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input, batch1, batch2, beta, alpha): return _convert_stub(pyboost_baddbmm(self, [input, batch1, batch2, beta, alpha])) baddbmm_op=Baddbmm()
[docs]class BatchMatMul(Primitive): r""" Computes matrix multiplication between two tensors by batch. .. math:: \text{output}[..., :, :] = \text{matrix}(x[..., :, :]) * \text{matrix}(y[..., :, :]) The ranks of the two input tensors must be the same and not less than `2`. Args: transpose_a (bool): If ``True`` , the last two dimensions of `x` are transposed before multiplication. Default: ``False`` . transpose_b (bool): If ``True`` , the last two dimensions of `y` are transposed before multiplication. Default: ``False`` . Inputs: - **x** (Tensor) - The first tensor to be multiplied. The shape of the tensor is :math:`(*B, N, C)`, where :math:`*B` represents the batch size which can be multidimensional, :math:`N` and :math:`C` are the size of the last two dimensions. If `transpose_a` is ``True`` , its shape must be :math:`(*B, C, N)`. - **y** (Tensor) - The second tensor to be multiplied. The shape of the tensor is :math:`(*B, C, M)`. If `transpose_b` is ``True`` , its shape must be :math:`(*B, M, C)`. Outputs: Tensor, the shape of the output tensor is :math:`(*B, N, M)`. Raises: TypeError: If `transpose_a` or `transpose_b` is not a bool. ValueError: If length of shape of `x` is not equal to length of shape of `y` or length of shape of inputs is less than 2. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> x = Tensor(np.ones(shape=[2, 4, 1, 3]), mindspore.float32) >>> y = Tensor(np.ones(shape=[2, 4, 3, 4]), mindspore.float32) >>> batmatmul = ops.BatchMatMul() >>> output = batmatmul(x, y) >>> print(output.shape) (2, 4, 1, 4) >>> x = Tensor(np.ones(shape=[2, 4, 3, 1]), mindspore.float32) >>> y = Tensor(np.ones(shape=[2, 4, 3, 4]), mindspore.float32) >>> batmatmul = ops.BatchMatMul(transpose_a=True) >>> output = batmatmul(x, y) >>> print(output.shape) (2, 4, 1, 4) """ @prim_arg_register def __init__(self, transpose_a=False, transpose_b=False): self._set_prim_arg("transpose_a", transpose_a) self._set_prim_arg("transpose_b", transpose_b) def __call__(self, x, y): return _convert_stub(pyboost_batch_mat_mul(self, [x, y, self.transpose_a, self.transpose_b]))
class BatchNormExt(Primitive): r""" Batch Normalization for input data and updated parameters. Batch Normalization is widely used in convolutional neural networks. This operation applies Batch Normalization over inputs to avoid internal covariate shift as described in the paper `Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`_. It rescales and recenters the features using a mini-batch of data and the learned parameters can be described in the following formula, .. math:: y = \frac{x - mean}{\sqrt{variance + \epsilon}} * \gamma + \beta where :math:`\gamma` is weight, :math:`\beta` is bias, :math:`\epsilon` is epsilon, :math:`mean` is the mean of :math:`x`, :math:`variance` is the variance of :math:`x`. Args: input (Tensor): Tensor of shape :math:`(N, C, *)`, where :math:`*` means, any number of additional dimensions. with bfloat16, float16 or float32 data type. For Atlas training products, the shape must be 2-4 dimensions currently. weight (Tensor): Tensor of shape :math:`(C,)`, with bfloat16, float16 or float32 data type. bias (Tensor): Tensor of shape :math:`(C,)`, with bfloat16, float16 or float32 data type. running_mean (Tensor): Tensor of shape :math:`(C,)`, with bfloat16, float16 or float32 data type. running_var (Tensor): Tensor of shape :math:`(C,)`, with bfloat16, float16 or float32 data type. training (bool, optional): If `training` is ``True`` , `mean` and `variance` are computed during training. If `training` is ``False`` , they're loaded from checkpoint during inference. Default: ``False`` . momentum (float, optional): The hyper parameter to compute moving average for running_mean and running_var (e.g. :math:`new\_running\_mean = (1 - momentum) * running\_mean + momentum * current\_mean`). Default: ``0.1`` epsilon (float, optional): A small value added for numerical stability. Default: ``1e-5``. returns: Tensor, the normalized inputs, has the same shape and dtype as `input`. Raises: TypeError: If `training` is not a bool. TypeError: If dtype of `epsilon` or `momentum` is not float. TypeError: If `input`, `weight`, `bias`, `running_mean` or `running_var` is not a Tensor. TypeError: If dtype of `input`, `weight` is not bfloat16, float16 or float32. Supported Platforms: ``Ascend`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> input_x = Tensor(np.ones([2, 2]), mindspore.float32) >>> weight = Tensor(np.ones([2]), mindspore.float32) >>> bias = Tensor(np.ones([2]), mindspore.float32) >>> running_mean = Tensor(np.ones([2]), mindspore.float32) >>> running_var = Tensor(np.ones([2]), mindspore.float32) >>> output = ops.batch_norm_ext(input_x, weight, bias, running_mean, running_var) >>> print(output) [[1. 1.] [1. 
1.]] """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('weight'), sig.make_sig('bias'), sig.make_sig('running_mean', default=None), sig.make_sig('runnning_var', default=None), sig.make_sig('training', default=False), sig.make_sig('momentum', default=0.1), sig.make_sig('epsilon', default=1e-5), ) @prim_arg_register def __init__(self): pass def __call__(self, input, weight, bias, running_mean=None, runnning_var=None, training=False, momentum=0.1, epsilon=1e-5): return _convert_stub(pyboost_batch_norm_ext(self, [input, weight, bias, running_mean, runnning_var, training, momentum, epsilon])) batch_norm_ext_op=BatchNormExt() class BatchNormGradExt(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('dout'), sig.make_sig('input'), sig.make_sig('weight'), sig.make_sig('running_mean', default=None), sig.make_sig('running_var', default=None), sig.make_sig('saved_mean', default=None), sig.make_sig('saved_rstd', default=None), ) @prim_arg_register def __init__(self, training=False, eps=1e-5): self._set_prim_arg("training", training) self._set_prim_arg("eps", eps) def __call__(self, dout, input, weight, running_mean=None, running_var=None, saved_mean=None, saved_rstd=None): return _convert_stub(pyboost_batch_norm_grad_ext(self, [dout, input, weight, running_mean, running_var, saved_mean, saved_rstd, self.training, self.eps])) class BatchNormGradGrad(Primitive): r""" Performs grad of BatchNormGrad operation. """ @prim_arg_register def __init__(self, is_training=False, epsilon=1e-5, data_format='NCHW'): self._set_prim_arg("is_training", is_training) self._set_prim_arg("epsilon", epsilon) self._set_prim_arg_with_handler("data_format", data_format, str_to_enum) def __call__(self, x, dy, scale, saved_mean, saved_variance, dout_dx, dout_dscale, dout_dbias): return super().__call__(x, dy, scale, saved_mean, saved_variance, dout_dx, dout_dscale, dout_dbias, self.is_training, self.epsilon, self.data_format) class BatchNormGrad(Primitive): r""" Performs grad of BatchNorm operation. """ @prim_arg_register def __init__(self, is_training=False, epsilon=1e-5, data_format='NCHW'): self._set_prim_arg("is_training", is_training) self._set_prim_arg("epsilon", epsilon) self._set_prim_arg_with_handler("data_format", data_format, str_to_enum) def __call__(self, dout, x, scale, saved_mean, saved_variance, reserve): return super().__call__(dout, x, scale, saved_mean, saved_variance, reserve, self.is_training, self.epsilon, self.data_format) class BernoulliExt(Primitive): r""" """ @prim_arg_register def __init__(self): pass def __call__(self, input, seed, offset): return _convert_stub(pyboost_bernoulli_ext(self, [input, seed, offset])) bernoulli_ext_op=BernoulliExt() class Betainc(Primitive): r""" Calculates the regularized incomplete beta function :math:`I_{x}(a, b)`. It is defined as the ratio of the incomplete beta function to the complete beta function: .. math:: I_{x}(a, b)=\frac{B(x ; a, b)}{B(a, b)} where .. math:: B(x ; a, b)=\int_{0}^{x} t^{a-1}(1-t)^{b-1} dt is the incomplete beta function and .. math:: B(a, b) = \int_0^1 t^{a-1} (1-t)^{b-1} dt is the complete beta function. Inputs: - **a** (Tensor) - Peak location of beta distribution. A Tensor of types: float32, float64. - **b** (Tensor) - Spread of the beta distribution. A Tensor, must have the same dtype and shape as `a` . - **x** (Tensor) - Upper limit of integration of the incomplete beta function. A Tensor, must have the same dtype and shape as `a` . Outputs: A Tensor, has the same dtype and shape as `a` . 
Raises: TypeError: If dtype of `a` is not float32 nor float64. TypeError: If either dtype of `b` and `x` is not the same as the `a`. ValueError: If either shape of `b` and `x` is not the same as the `a`. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> a = Tensor(np.array([0.3, 0.1, 0.4]), mindspore.float32) >>> b = Tensor(np.array([0.4, 0.5, 0.9]), mindspore.float32) >>> x = Tensor(np.array([0.2, 0.6, 0.5]), mindspore.float32) >>> betainc = ops.Betainc() >>> print(betainc(a, b, x)) [0.41462693 0.8706035 0.7298298 ] """ @prim_arg_register def __init__(self): pass def __call__(self, a, b, x): return super().__call__(a, b, x) betainc_op=Betainc() class BiasAddGrad(Primitive): r""" Computes gradients of BiasAdd. """ @prim_arg_register def __init__(self, data_format='NCHW'): self._set_prim_arg_with_handler("data_format", data_format, str_to_enum) def __call__(self, dout): return super().__call__(dout, self.data_format)
[docs]class BiasAdd(Primitive): r""" Returns the sum of the input Tensor and the bias Tensor. Before adding, the bias Tensor will be broadcasted to be consistent with the shape of the input Tensor. Args: data_format (str, optional): The format of input and output data. It should be ``"NHWC"`` , ``"NCHW"`` or ``"NCDHW"`` . Default is ``"NCHW"`` . Inputs: - **input_x** (Tensor) - The input tensor. The shape can be 2-5 dimensions. Supported dtypes: - Ascend/CPU: all Number type. - GPU: float16, float32, int8. - **bias** (Tensor) - The bias tensor, with shape :math:`(C)`. C must be the same as channel dimension C of `input_x`. It has the same type as `input_x`. Outputs: Tensor, with the same shape and data type as `input_x`. Raises: TypeError: If `data_format` is not a str. ValueError: If value of `data_format` is not in the range of ['NHWC','NCHW','NCDHW']. TypeError: If `input_x` or `bias` is not a Tensor. TypeError: If dtype of `input_x` or `bias` is inconsistent. TypeError: If dimension of `input_x` is not in the range [2, 5]. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> input_x = Tensor(np.arange(6).reshape((2, 3)), mindspore.float32) >>> bias = Tensor(np.random.random(3).reshape((3,)), mindspore.float32) >>> bias_add = ops.BiasAdd() >>> output = bias_add(input_x, bias) >>> print(output.shape) (2, 3) """ @prim_arg_register def __init__(self, data_format='NCHW'): self._set_prim_arg_with_handler("data_format", data_format, str_to_enum) def __call__(self, input_x, bias): return super().__call__(input_x, bias, self.data_format)
class BinaryCrossEntropyGrad(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('target'), sig.make_sig('grad_output'), sig.make_sig('weight', default=None), ) @prim_arg_register def __init__(self, reduction='mean'): self._set_prim_arg_with_handler("reduction", reduction, str_to_enum) def __call__(self, input, target, grad_output, weight=None): return _convert_stub(pyboost_binary_cross_entropy_grad(self, [input, target, grad_output, weight, self.reduction]))
[docs]class BinaryCrossEntropy(Primitive): r""" Computes the binary cross entropy between the logits and the labels. Sets logits as :math:`x`, labels as :math:`y`, output as :math:`\ell(x, y)`. Let, .. math:: L = \{l_1,\dots,l_N\}^\top, \quad l_n = - w_n \left[ y_n \cdot \log x_n + (1 - y_n) \cdot \log (1 - x_n) \right] In which, :math:`L` indicates the loss of all batch_sizes, :math:`l` indicates the loss of one batch_size, and n indicates one batch_size in the 1-N range, :math:`w_n` indicates the weight of :math:`n`-th batch of binary cross entropy. Then, .. math:: \ell(x, y) = \begin{cases} L, & \text{if reduction} = \text{'none';}\\ \operatorname{mean}(L), & \text{if reduction} = \text{'mean';}\\ \operatorname{sum}(L), & \text{if reduction} = \text{'sum'.} \end{cases} .. warning:: - The value of :math:`x` must range from 0 to 1. Args: reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` , ``'sum'`` . Default: ``'mean'`` . - ``'none'``: no reduction will be applied. - ``'mean'``: compute and return the weighted mean of elements in the output. - ``'sum'``: the output elements will be summed. Inputs: - **logits** (Tensor) - The predictive value whose data type must be float16 or float32, The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions. - **labels** (Tensor) - The target value which has the same shape and data type as `logits`. And the data type is float16 or float32. - **weight** (Tensor, optional) - A rescaling weight applied to the loss of each batch element. And it must have the same shape and data type as `logits`. Default: ``None`` . Outputs: Tensor or Scalar. Returns Tensor that has the same dtype and shape as `logits` if `reduction` is 'none'. Otherwise, returns a scalar Tensor. Raises: TypeError: If dtype of `logits`, `labels` or `weight` (if given) is neither float16 nor float32. ValueError: If `reduction` is not one of ``'none'``, ``'mean'`` or ``'sum'``. ValueError: If shape of `labels` is not the same as `logits` or `weight` (if given). TypeError: If `logits`, `labels` or `weight` is not a Tensor. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, nn, ops >>> class Net(nn.Cell): ... def __init__(self): ... super(Net, self).__init__() ... self.binary_cross_entropy = ops.BinaryCrossEntropy() ... def construct(self, logits, labels, weight): ... result = self.binary_cross_entropy(logits, labels, weight) ... return result ... >>> net = Net() >>> logits = Tensor(np.array([0.2, 0.7, 0.1]), mindspore.float32) >>> labels = Tensor(np.array([0., 1., 0.]), mindspore.float32) >>> weight = Tensor(np.array([1, 2, 2]), mindspore.float32) >>> output = net(logits, labels, weight) >>> print(output) 0.38240486 """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('target'), sig.make_sig('weight', default=None), ) @prim_arg_register def __init__(self, reduction='mean'): self._set_prim_arg_with_handler("reduction", reduction, str_to_enum) def __call__(self, input, target, weight=None): return _convert_stub(pyboost_binary_cross_entropy(self, [input, target, weight, self.reduction]))
class BinaryCrossEntropyWithLogitsBackward(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('grad_output'), sig.make_sig('input'), sig.make_sig('target'), sig.make_sig('weight', default=None), sig.make_sig('posWeight', default=None), sig.make_sig('reduction', default='mean'), ) @prim_arg_register def __init__(self): pass def __call__(self, grad_output, input, target, weight=None, posWeight=None, reduction='mean'): return _convert_stub(pyboost_binary_cross_entropy_with_logits_backward(self, [grad_output, input, target, weight, posWeight, str_to_enum('BinaryCrossEntropyWithLogitsBackward', 'reduction', reduction)])) binary_cross_entropy_with_logits_backward_op=BinaryCrossEntropyWithLogitsBackward()
[docs]class BCEWithLogitsLoss(Primitive): r""" Adds sigmoid activation function to `input` as logits, and uses the given logits to compute binary cross entropy between the logits and the target. Sets input `input` as :math:`X`, input `target` as :math:`Y`, input weight as :math:`W`, output as :math:`L`. Then, .. math:: \begin{array}{ll} \\ p_{ij} = sigmoid(X_{ij}) = \frac{1}{1 + e^{-X_{ij}}} \\ L_{ij} = -[Y_{ij}log(p_{ij}) + (1 - Y_{ij})log(1 - p_{ij})] \end{array} :math:`i` indicates the :math:`i^{th}` sample, :math:`j` indicates the category. Then, .. math:: \ell(x, y) = \begin{cases} L, & \text{if reduction} = \text{'none';}\\ \operatorname{mean}(L), & \text{if reduction} = \text{'mean';}\\ \operatorname{sum}(L), & \text{if reduction} = \text{'sum'.} \end{cases} :math:`\ell` indicates the method of calculating the loss. There are three methods: the first method is to provide the loss value directly, the second method is to calculate the average value of all losses, and the third method is to calculate the sum of all losses. This operator will multiply the output by the corresponding weight. The tensor `weight` assigns different weights to each piece of data in the batch, and the tensor `pos_weight` adds corresponding weights to the positive examples of each category. In addition, it can trade off recall and precision by adding weights to positive examples. In the case of multi-label classification the loss can be described as: .. math:: \begin{array}{ll} \\ p_{ij,c} = sigmoid(X_{ij,c}) = \frac{1}{1 + e^{-X_{ij,c}}} \\ L_{ij,c} = -[P_{c}Y_{ij,c} * log(p_{ij,c}) + (1 - Y_{ij,c})log(1 - p_{ij,c})] \end{array} where c is the class number (c>1 for multi-label binary classification, c=1 for single-label binary classification), n is the number of the sample in the batch and :math:`P_c` is the weight of the positive answer for the class c. :math:`P_c>1` increases the recall, :math:`P_c<1` increases the precision. Args: reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` , ``'sum'`` . Default: ``'mean'`` . - ``'none'``: no reduction will be applied. - ``'mean'``: compute and return the weighted mean of elements in the output. - ``'sum'``: the output elements will be summed. Inputs: - **input** (Tensor) - Input `input`. Tensor of shape :math:`(N, *)` where :math:`*` means, any number of additional dimensions. Data type must be float16, float32 or bfloat16(only Atlas A2 series products are supported). - **target** (Tensor) - Ground truth label, has the same shape as `input`. Data type must be float16, float32 or bfloat16(only Atlas A2 series products are supported). - **weight** (Tensor) - A rescaling weight applied to the loss of each batch element. It can be broadcast to a tensor with shape of `input`. Data type must be float16, float32 or bfloat16(only Atlas A2 series products are supported). - **pos_weight** (Tensor) - A weight of positive examples. Must be a vector with length equal to the number of classes. It can be broadcast to a tensor with shape of `input`. Data type must be float16, float32 or bfloat16(only Atlas A2 series products are supported). Outputs: Tensor or Scalar, if `reduction` is ``'none'``, it's a tensor with the same shape and type as input `input`. Otherwise, the output is a scalar. Raises: TypeError: If any input is not Tensor. TypeError: If data type of any input is not float16, float32 or bfloat16. TypeError: If data type of `reduction` is not string. 
ValueError: If `weight` or `pos_weight` can not be broadcast to a tensor with shape of `input`. ValueError: If `reduction` is not one of ``'none'``, ``'mean'`` or ``'sum'``. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> input = Tensor(np.array([[-0.8, 1.2, 0.7], [-0.1, -0.4, 0.7]]), mindspore.float32) >>> target = Tensor(np.array([[0.3, 0.8, 1.2], [-0.6, 0.1, 2.2]]), mindspore.float32) >>> weight = Tensor(np.array([1.0, 1.0, 1.0]), mindspore.float32) >>> pos_weight = Tensor(np.array([1.0, 1.0, 1.0]), mindspore.float32) >>> loss = ops.BCEWithLogitsLoss() >>> output = loss(input, target, weight, pos_weight) >>> print(output) 0.3463612 """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('target'), sig.make_sig('weight', default=None), sig.make_sig('posWeight', default=None), ) @prim_arg_register def __init__(self, reduction='mean'): self._set_prim_arg_with_handler("reduction", reduction, str_to_enum) def __call__(self, input, target, weight=None, posWeight=None): return _convert_stub(pyboost_binary_cross_entropy_with_logits(self, [input, target, weight, posWeight, self.reduction]))
class BincountExt(Primitive): r""" .. code-block:: prim = ops.BincountExt() out = prim(input, weights, minlength) is equivalent to .. code-block:: ops.bincount_ext(input, weights, minlength) Refer to :func:`mindspore.ops.bincount_ext` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('weights', default=None), sig.make_sig('minlength', default=0), ) @prim_arg_register def __init__(self): pass def __call__(self, input, weights=None, minlength=0): return _convert_stub(pyboost_bincount_ext(self, [input, weights, minlength])) bincount_ext_op=BincountExt() class BitwiseAndScalar(Primitive): r""" Returns bitwise `and` of tensor and scalar element-wise. Inputs: - **input** (Tensor) - The input tensor must be of integral or Boolean types. - **other** (number.Number) - The second input scalar with same type as the `input`. Outputs: Tensor, has the same type as the `input`. Supported Platforms: ``Ascend`` """ @prim_arg_register def __init__(self): pass def __call__(self, input, other): return _convert_stub(pyboost_bitwise_and_scalar(self, [input, other])) bitwise_and_scalar_op=BitwiseAndScalar() class BitwiseAndTensor(Primitive): r""" Returns bitwise `and` of two tensors element-wise. Inputs: - **input** (Tensor) - The input tensor must be of integral or Boolean types. - **other** (Tensor) - The second input tensor with same type as the `input`. Outputs: Tensor, has the same type as the `input`. Supported Platforms: ``Ascend`` """ __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T) @prim_arg_register def __init__(self): pass def __call__(self, input, other): return _convert_stub(pyboost_bitwise_and_tensor(self, [input, other])) bitwise_and_tensor_op=BitwiseAndTensor() class BitwiseOrScalar(Primitive): r""" Returns bitwise `or` of tensor and scalar element-wise. Inputs: - **input** (Tensor) - The input tensor must be of integral or Boolean types. - **other** (number.Number) - The second input scalar with same type as the `input`. Outputs: Tensor, has the same type as the `input`. Supported Platforms: ``Ascend`` """ @prim_arg_register def __init__(self): pass def __call__(self, input, other): return _convert_stub(pyboost_bitwise_or_scalar(self, [input, other])) bitwise_or_scalar_op=BitwiseOrScalar() class BitwiseOrTensor(Primitive): r""" Returns bitwise `or` of two tensors element-wise. Inputs: - **input** (Tensor) - The input tensor must be of integral or Boolean types. - **other** (Tensor) - The second input tensor with same type as the `input`. Outputs: Tensor, has the same type as the `input`. Supported Platforms: ``Ascend`` """ __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T) @prim_arg_register def __init__(self): pass def __call__(self, input, other): return _convert_stub(pyboost_bitwise_or_tensor(self, [input, other])) bitwise_or_tensor_op=BitwiseOrTensor() class BitwiseXorScalar(Primitive): r""" Returns bitwise `xor` of tensor and scalar element-wise. Inputs: - **input** (Tensor) - The input tensor must be of integral or Boolean types. - **other** (number.Number) - The second input scalar with same type as the `input`. Outputs: Tensor, has the same type as the `input`. Supported Platforms: ``Ascend`` """ @prim_arg_register def __init__(self): pass def __call__(self, input, other): return _convert_stub(pyboost_bitwise_xor_scalar(self, [input, other])) bitwise_xor_scalar_op=BitwiseXorScalar() class BitwiseXorTensor(Primitive): r""" Returns bitwise `xor` of two tensors element-wise. 
Inputs: - **input** (Tensor) - The input tensor must be of integral or Boolean types. - **other** (Tensor) - The second input tensor with same type as the `input`. Outputs: Tensor, has the same type as the `input`. Supported Platforms: ``Ascend`` """ __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T) @prim_arg_register def __init__(self): pass def __call__(self, input, other): return _convert_stub(pyboost_bitwise_xor_tensor(self, [input, other])) bitwise_xor_tensor_op=BitwiseXorTensor() class BatchMatMulExt(Primitive): r""" .. code-block:: prim = ops.BatchMatMulExt() out = prim(input, mat2) is equivalent to .. code-block:: ops.bmm_ext(input, mat2) Refer to :func:`mindspore.ops.bmm_ext` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input, mat2): return _convert_stub(pyboost_bmm_ext(self, [input, mat2])) bmm_ext_op=BatchMatMulExt() class BoolNot(Primitive): r""" Returns the logical `not` of a bool input. .. note:: The input can be a constant or variable value. Usage is the same as 'not' in Python. This primitive only has a 'CPU' implementation; for other platforms, it runs heterogeneously. Inputs: - **x** (Scalar) - A constant or variable scalar, the type can be bool. Outputs: Scalar, the type is bool. Raises: TypeError: If `x` is not a bool scalar. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` """ @prim_arg_register def __init__(self): pass def __call__(self, x): return super().__call__(x) bool_not_op=BoolNot()
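# A minimal usage sketch for the element-wise bitwise primitives above (integral or
# Boolean inputs; sample values are illustrative):
#
#     import numpy as np
#     from mindspore import Tensor
#     a = Tensor(np.array([5, 3], np.int32))
#     b = Tensor(np.array([3, 6], np.int32))
#     bitwise_and_tensor_op(a, b)   # 5 & 3, 3 & 6 -> [1, 2]
#     bitwise_or_scalar_op(a, 8)    # 5 | 8, 3 | 8 -> [13, 11]
#     bitwise_xor_tensor_op(a, b)   # 5 ^ 3, 3 ^ 6 -> [6, 5]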
[docs]class BroadcastTo(Primitive): r""" .. code-block:: prim = ops.BroadcastTo(shape) out = prim(input) is equivalent to .. code-block:: ops.broadcast_to(input, shape) Refer to :func:`mindspore.ops.broadcast_to` for more details. """ @prim_arg_register def __init__(self, shape): self._set_prim_arg("shape", type_it('BroadcastTo', 'shape', shape, (OpDtype.DT_LIST_INT, OpDtype.DT_TENSOR), OpDtype.DT_TUPLE_INT)) def __call__(self, input): return _convert_stub(pyboost_broadcast_to(self, [input, self.shape]))
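# A minimal usage sketch for BroadcastTo (the target shape is fixed at construction
# time, per __init__ above; sample values are illustrative):
#
#     import numpy as np
#     from mindspore import Tensor
#     x = Tensor(np.array([[1.0], [2.0]], np.float32))   # shape (2, 1)
#     BroadcastTo((2, 3))(x)                             # broadcast to shape (2, 3)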
[docs]class Ceil(Primitive): r""" .. code-block:: prim = ops.Ceil() out = prim(input) is equivalent to .. code-block:: ops.ceil(input) Refer to :func:`mindspore.ops.ceil` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_ceil(self, [input]))
ceil_op=Ceil()
[docs]class CeLU(Primitive): r""" .. code-block:: prim = ops.CeLU(alpha) out = prim(x) is equivalent to .. code-block:: ops.celu(x, alpha) Refer to :func:`mindspore.ops.celu` for more details. """ @prim_arg_register def __init__(self, alpha=1.0): self._set_prim_arg("alpha", alpha) def __call__(self, x): return super().__call__(x, self.alpha)
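# A minimal usage sketch for CeLU (alpha is a construction-time attribute, per
# __init__ above; sample values are illustrative):
#
#     import numpy as np
#     from mindspore import Tensor
#     x = Tensor(np.array([-2.0, 0.0, 2.0], np.float32))
#     # celu(x) = max(0, x) + min(0, alpha * (exp(x / alpha) - 1))
#     CeLU(alpha=1.0)(x)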
class CholeskyGrad(Primitive): r""" Computes the reverse mode backpropagated gradient of the Cholesky algorithm. Inputs: - **x** (Tensor) - A tensor with float32 or float64 data type. - **grad** (Tensor) - A tensor with float32 or float64 data type. `grad` should have the same dtype as `x`. Outputs: Tensor, has the same dtype as `x` and `grad`. Raises: TypeError: If x is not Tensor. TypeError: If grad is not Tensor. TypeError: If dtype of input x and grad is not float64 nor float32. TypeError: If x has a different dtype from grad. ValueError: If the input tensor's last two dims are not equal. ValueError: If the shapes of x and grad mismatch. Supported Platforms: ``Ascend`` """ @prim_arg_register def __init__(self): pass def __call__(self, x, grad): return super().__call__(x, grad) cholesky_grad_op=CholeskyGrad() class CholeskyInverse(Primitive): r""" Returns the inverse of the positive definite matrix, given its Cholesky factor, using Cholesky matrix factorization. Refer to :func:`mindspore.ops.cholesky_inverse` for more details. Args: upper(bool, optional): Whether to return a lower or upper triangular matrix. Default: ``False`` . Inputs: - **x** (Tensor) - The input tensor whose rank is 2. Supported dtypes: float32, float64. Outputs: Tensor, has the same shape and dtype as `x`. Supported Platforms: ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> x = Tensor(np.array([[1, 1], [1, 2]]), mindspore.float32) >>> y = ops.CholeskyInverse()(x) >>> print(y) [[ 5.0 -3.0 ] [-3.0 2.0 ]] """ @prim_arg_register def __init__(self, upper=False): self._set_prim_arg("upper", upper) def __call__(self, input_x): return super().__call__(input_x, self.upper)
[docs]class Cholesky(Primitive): r""" Performs the Cholesky decomposition on a single or a batch of symmetric positive-definite matrices. .. warning:: This is an experimental API that is subject to change or deletion. Refer to :func:`mindspore.ops.cholesky` for more details. Args: upper (bool, optional): Flag that indicates whether to return a upper or lower triangular matrix. Default: ``False`` . Inputs: - **input_x** (Tensor) - Tensor of shape :math:`(*, N, N)`, where :math:`*` is zero or more batch dimensions consisting of symmetric positive-definite matrices, with float32 or float64 data type. Outputs: Tensor, has the same shape and data type as `input_x`. Supported Platforms: ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> input_x = Tensor(np.array([[1.0, 1.0], [1.0, 2.0]]), mindspore.float32) >>> output = ops.Cholesky()(input_x) >>> print(output) [[1. 0.] [1. 1.]] """ @prim_arg_register def __init__(self, upper=False): self._set_prim_arg("upper", upper) def __call__(self, input_x): return super().__call__(input_x, self.upper)
class Chunk(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('chunks'), sig.make_sig('dim', default=0), ) @prim_arg_register def __init__(self): pass def __call__(self, input, chunks, dim=0): return _convert_stub(pyboost_chunk(self, [input, chunks, dim])) chunk_op=Chunk() class ClampScalar(Primitive): r""" .. code-block:: prim = ops.ClampScalar() out = prim(input, min, max) is equivalent to .. code-block:: ops.clamp_scalar(input, min, max) Refer to :func:`mindspore.ops.clamp_scalar` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('min', default=None), sig.make_sig('max', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, min=None, max=None): return _convert_stub(pyboost_clamp_scalar(self, [input, min, max])) clamp_scalar_op=ClampScalar() class ClampTensor(Primitive): r""" .. code-block:: prim = ops.ClampTensor() out = prim(input, min, max) is equivalent to .. code-block:: ops.clamp_tensor(input, min, max) Refer to :func:`mindspore.ops.clamp_tensor` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('min', default=None), sig.make_sig('max', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, min=None, max=None): return _convert_stub(pyboost_clamp_tensor(self, [input, min, max])) clamp_tensor_op=ClampTensor() class Clone(Primitive): r""" .. code-block:: prim = ops.Clone() out = prim(input) is equivalent to .. code-block:: ops.clone(input) Refer to :func:`mindspore.ops.clone` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_clone(self, [input])) clone_op=Clone() class Col2ImExt(Primitive): r""" .. code-block:: prim = ops.Col2ImExt() out = prim(input, output_size, kernel_size, dilation, padding, stride) is equivalent to .. code-block:: ops.fold_ext(input, output_size, kernel_size, dilation, padding, stride) Refer to :func:`mindspore.ops.fold_ext` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('output_size'), sig.make_sig('kernel_size'), sig.make_sig('dilation', default=1), sig.make_sig('padding', default=0), sig.make_sig('stride', default=1), ) @prim_arg_register def __init__(self): pass def __call__(self, input, output_size, kernel_size, dilation=1, padding=0, stride=1): return _convert_stub(pyboost_col2im_ext(self, [input, to_pair('Col2ImExt', 'output_size', output_size), to_pair('Col2ImExt', 'kernel_size', kernel_size), to_pair('Col2ImExt', 'dilation', dilation), to_pair('Col2ImExt', 'padding', padding), to_pair('Col2ImExt', 'stride', stride)])) col2im_ext_op=Col2ImExt() class Col2ImGrad(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('kernel_size'), sig.make_sig('dilation', default=1), sig.make_sig('padding', default=0), sig.make_sig('stride', default=1), ) @prim_arg_register def __init__(self): pass def __call__(self, input, kernel_size, dilation=1, padding=0, stride=1): return _convert_stub(pyboost_col2im_grad(self, [input, to_pair('Col2ImGrad', 'kernel_size', kernel_size), to_pair('Col2ImGrad', 'dilation', dilation), to_pair('Col2ImGrad', 'padding', padding), to_pair('Col2ImGrad', 'stride', stride)])) col2im_grad_op=Col2ImGrad()
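# A minimal usage sketch for the clamp/chunk primitives above (sample values are
# illustrative):
#
#     import numpy as np
#     from mindspore import Tensor
#     x = Tensor(np.array([-1.0, 0.5, 2.0], np.float32))
#     clamp_scalar_op(x, 0.0, 1.0)                          # -> [0.0, 0.5, 1.0]
#     chunk_op(Tensor(np.arange(6).astype(np.float32)), 3)  # three chunks of length 2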
[docs]class Complex(Primitive): r""" Returns a complex Tensor from the real part and the imag part. .. warning:: This is an experimental API that is subject to change or deletion. Inputs: - **real** (Tensor) - The real input tensor. types: float32, float64. - **imag** (Tensor) - The imag input tensor. types: float32, float64. Outputs: Tensor, has the complex type. Raises: TypeError: If the dtype of input is not one of: float32, float64. TypeError: If the dtypes of two inputs are not same. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> real = Tensor(np.array([1]), mindspore.float32) >>> imag = Tensor(np.array([2]), mindspore.float32) >>> complex = ops.Complex() >>> output = complex(real, imag) >>> print(output) [1.+2.j] """ @prim_arg_register def __init__(self): pass def __call__(self, real, imag): return super().__call__(real, imag)
complex_op=Complex()
[docs]class Concat(Primitive): r""" .. code-block:: prim = ops.Concat(axis) out = prim(tensors) is equivalent to .. code-block:: ops.cat(tensors, axis) Refer to :func:`mindspore.ops.cat` for more details. """ @prim_arg_register def __init__(self, axis=0): self._set_prim_arg("axis", axis) def __call__(self, tensors): return _convert_stub(pyboost_concat(self, [tensors, self.axis]))
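# A minimal usage sketch for Concat (axis is fixed at construction; the tensors are
# passed as a single tuple/list; sample values are illustrative):
#
#     import numpy as np
#     from mindspore import Tensor
#     t1 = Tensor(np.array([[1.0, 2.0]], np.float32))
#     t2 = Tensor(np.array([[3.0, 4.0]], np.float32))
#     Concat(axis=0)((t1, t2))   # stacked along axis 0, shape (2, 2)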
[docs]class Conj(Primitive): r""" .. code-block:: prim = ops.Conj() out = prim(input) is equivalent to .. code-block:: ops.conj(input) Refer to :func:`mindspore.ops.conj` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return super().__call__(input)
conj_op=Conj() class ConstantPadND(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('padding'), sig.make_sig('value', default=0.0), ) @prim_arg_register def __init__(self): pass def __call__(self, input, padding, value=0.0): return _convert_stub(pyboost_constant_pad_nd(self, [input, padding, value])) constant_pad_nd_op=ConstantPadND() class Contiguous(Primitive): r""" .. code-block:: prim = ops.Contiguous() out = prim(input) is equivalent to .. code-block:: ops.contiguous(input) Refer to :func:`mindspore.ops.contiguous` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_contiguous(self, [input])) contiguous_op=Contiguous() class ConvolutionGrad(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('dout'), sig.make_sig('input'), sig.make_sig('weight'), sig.make_sig('bias', default=None), ) @prim_arg_register def __init__(self, stride=1, padding=0, dilation=1, transposed=False, output_padding=0, groups=1, output_mask=()): self._set_prim_arg_with_handler("stride", stride, to_strides) self._set_prim_arg_with_handler("padding", padding, to_2d_paddings) self._set_prim_arg_with_handler("dilation", dilation, to_dilations) self._set_prim_arg("transposed", transposed) self._set_prim_arg_with_handler("output_padding", output_padding, to_output_padding) self._set_prim_arg("groups", groups) self._set_prim_arg("output_mask", output_mask) def __call__(self, dout, input, weight, bias=None): return _convert_stub(pyboost_convolution_grad(self, [dout, input, weight, bias, self.stride, self.padding, self.dilation, self.transposed, self.output_padding, self.groups, self.output_mask])) class Convolution(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('weight'), sig.make_sig('bias', default=None), ) @prim_arg_register def __init__(self, stride=1, padding=0, dilation=1, transposed=False, output_padding=0, groups=1): self._set_prim_arg_with_handler("stride", stride, to_strides) self._set_prim_arg_with_handler("padding", padding, to_2d_paddings) self._set_prim_arg_with_handler("dilation", dilation, to_dilations) self._set_prim_arg("transposed", transposed) self._set_prim_arg_with_handler("output_padding", output_padding, to_output_padding) self._set_prim_arg("groups", groups) def __call__(self, input, weight, bias=None): return _convert_stub(pyboost_convolution(self, [input, weight, bias, self.stride, self.padding, self.dilation, self.transposed, self.output_padding, self.groups])) class CopyExt(Primitive): r""" .. code-block:: prim = ops.CopyExt() out = prim(variable, value) is equivalent to .. code-block:: ops.copy_ext(variable, value) Refer to :func:`mindspore.ops.copy_ext` for more details. """ __mindspore_signature__ = ( sig.make_sig('variable', sig.sig_rw.RW_WRITE), sig.make_sig('value'), ) @prim_arg_register def __init__(self): self.add_prim_attr("side_effect_mem", True) def __call__(self, variable, value): return _convert_stub(pyboost_copy_ext(self, [variable, value])) copy_ext_op=CopyExt() class Copy(Primitive): r""" .. code-block:: prim = ops.Copy() out = prim(input) is equivalent to .. code-block:: ops.copy(input) Refer to :func:`mindspore.ops.copy` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_copy(self, [input])) copy_op=Copy() class Correlate(Primitive): r""" .. code-block:: prim = ops.Correlate(mode) out = prim(a, v) is equivalent to .. 
code-block:: ops.correlate(a, v, mode) Refer to :func:`mindspore.ops.correlate` for more details. """ @prim_arg_register def __init__(self, mode='valid'): self._set_prim_arg_with_handler("mode", mode, str_to_enum) def __call__(self, a, v): return super().__call__(a, v, self.mode)
[docs]class Cos(Primitive): r""" .. code-block:: prim = ops.Cos() out = prim(input) is equivalent to .. code-block:: ops.cos(input) Refer to :func:`mindspore.ops.cos` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_cos(self, [input]))
cos_op=Cos()
[docs]class Cosh(Primitive): r""" .. code-block:: prim = ops.Cosh() out = prim(input) is equivalent to .. code-block:: ops.cosh(input) Refer to :func:`mindspore.ops.cosh` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_cosh(self, [input]))
cosh_op=Cosh() class CountNonZero(Primitive): r""" .. code-block:: prim = ops.CountNonZero() out = prim(input, dim) is equivalent to .. code-block:: ops.count_nonzero(input, dim) Refer to :func:`mindspore.ops.count_nonzero` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('dim', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, dim=None): return _convert_stub(pyboost_count_nonzero(self, [input, dim])) count_nonzero_op=CountNonZero()
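# A minimal usage sketch for count_nonzero_op (sample values are illustrative; passing
# an int `dim` is assumed to be accepted, as in ops.count_nonzero):
#
#     import numpy as np
#     from mindspore import Tensor
#     x = Tensor(np.array([[0, 1, 2], [3, 0, 0]], np.int32))
#     count_nonzero_op(x)          # total number of non-zero elements -> 3
#     count_nonzero_op(x, dim=1)   # per-row counts -> [2, 1]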
[docs]class Cross(Primitive): r""" Returns the cross product of vectors in dimension `dim` of input and other. .. warning:: This is an experimental API that is subject to change or deletion. Refer to :func:`mindspore.ops.cross` for more details. Args: dim (int): Specified dim along which to compute cross product with. Default: ``-65530`` . Inputs: - **input** (Tensor) - Input Tensor. - **other** (Tensor) - Another input Tensor, must have the same shape and the same type as `input`, and the size of their `dim` dimension should be 3. Outputs: Tensor, has the same shape and type as inputs. Supported Platforms: ``Ascend`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor >>> from mindspore import dtype as mstype >>> from mindspore import ops >>> cross = ops.Cross(dim = 0) >>> x1 = Tensor([1, 2, 3], mstype.int8) >>> x2 = Tensor([1, 2, 3], mstype.int8) >>> output = cross(x1, x2) >>> print(output) [0 0 0] """ @prim_arg_register def __init__(self, dim=-65530): self._set_prim_arg("dim", dim) def __call__(self, input, other): return _convert_stub(pyboost_cross(self, [input, other, self.dim]))
[docs]class CumProd(Primitive): r""" Computes the cumulative product of the tensor x along axis. For example, if input is a vector of size N, the result will also be a vector of size N, with elements. .. math:: y_i = x_1 * x_2 * x_3 * ... * x_i Args: exclusive (bool): If ``True`` , perform exclusive cumulative product. Default: ``False`` . reverse (bool): If ``True`` , reverse the result along axis. Default: ``False`` . Inputs: - **x** (Tensor[Number]) - The input Tensor with shape :math:`(N, *)` where :math:`*` means any number of additional dimensions. - **axis** (int) - The dimensions to compute the cumulative product. Only constant value is allowed. Outputs: Tensor, has the same shape and dtype as the `x`. Raises: TypeError: If `exclusive` or `reverse` is not a bool. TypeError: If `axis` is not an int. ValueError: If `axis` is None. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import numpy as np >>> from mindspore import Tensor, ops >>> a, b, c, = 1, 2, 3 >>> x = Tensor(np.array([a, b, c]).astype(np.float32)) >>> op0 = ops.CumProd() >>> output0 = op0(x, 0) # output=[a, a * b, a * b * c] >>> op1 = ops.CumProd(exclusive=True) >>> output1 = op1(x, 0) # output=[1, a, a * b] >>> op2 = ops.CumProd(reverse=True) >>> output2 = op2(x, 0) # output=[a * b * c, b * c, c] >>> op3 = ops.CumProd(exclusive=True, reverse=True) >>> output3 = op3(x, 0) # output=[b * c, c, 1] >>> print(output0) [1. 2. 6.] >>> print(output1) [1. 1. 2.] >>> print(output2) [6. 6. 3.] >>> print(output3) [6. 3. 1.] >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [5, 3, 5]]).astype(np.float32)) >>> output4 = op0(x, 0) >>> output5 = op0(x, 1) >>> print(output4) [[ 1. 2. 3.] [ 4. 10. 18.] [20. 30. 90.]] >>> print(output5) [[ 1. 2. 6.] [ 4. 20. 120.] [ 5. 15. 75.]] """ @prim_arg_register def __init__(self, exclusive=False, reverse=False): self._set_prim_arg("exclusive", exclusive) self._set_prim_arg("reverse", reverse) def __call__(self, x, axis): return super().__call__(x, axis, self.exclusive, self.reverse)
[docs]class CumSum(Primitive): r""" Computes the cumulative sum of input tensor along axis. .. math:: y_i = x_1 + x_2 + x_3 + ... + x_i Args: exclusive (bool): By default, this op performs an inclusive cumsum, which means that the first element of the input is identical to the first element of the output. Default: ``False`` . reverse (bool): If ``True`` , perform inverse cumulative sum. Default: ``False`` . Inputs: - **input** (Tensor) - The input Tensor with shape :math:`(N, *)` where :math:`*` means any number of additional dimensions. - **axis** (int) - The axis to accumulate the tensor's value. Only constant value is allowed. Must be in the range [-rank(input), rank(input)). Outputs: Tensor, the shape of the output tensor is consistent with the input tensor's. Raises: TypeError: If `exclusive` or `reverse` is not a bool. TypeError: If `axis` is not an int. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import numpy as np >>> from mindspore import Tensor, ops >>> x = Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float32)) >>> cumsum = ops.CumSum() >>> # case 1: along the axis 0 >>> y = cumsum(x, 0) >>> print(y) [[ 3. 4. 6. 10.] [ 4. 10. 13. 19.] [ 8. 13. 21. 26.] [ 9. 16. 28. 35.]] >>> # case 2: along the axis 1 >>> y = cumsum(x, 1) >>> print(y) [[ 3. 7. 13. 23.] [ 1. 7. 14. 23.] [ 4. 7. 15. 22.] [ 1. 4. 11. 20.]] >>> # Next demonstrate exclusive and reverse, along axis 1 >>> # case 3: exclusive = True >>> cumsum = ops.CumSum(exclusive=True) >>> y = cumsum(x, 1) >>> print(y) [[ 0. 3. 7. 13.] [ 0. 1. 7. 14.] [ 0. 4. 7. 15.] [ 0. 1. 4. 11.]] >>> # case 4: reverse = True >>> cumsum = ops.CumSum(reverse=True) >>> y = cumsum(x, 1) >>> print(y) [[23. 20. 16. 10.] [23. 22. 16. 9.] [22. 18. 15. 7.] [20. 19. 16. 9.]] """ @prim_arg_register def __init__(self, exclusive=False, reverse=False): self._set_prim_arg("exclusive", exclusive) self._set_prim_arg("reverse", reverse) def __call__(self, input, axis): return super().__call__(input, axis, self.exclusive, self.reverse)
class Cummax(Primitive):
    r"""
    .. code-block::

        prim = ops.Cummax(axis)
        out = prim(input)

    is equivalent to

    .. code-block::

        ops.cummax(input, axis)

    Refer to :func:`mindspore.ops.cummax` for more details.
    """
    @prim_arg_register
    def __init__(self, axis):
        self._set_prim_arg("axis", axis)

    def __call__(self, input):
        return _convert_stub(pyboost_cummax(self, [input, self.axis]))
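# Illustrative usage sketch for Cummax (not part of the generated module; the input values are
# assumptions). Like ops.cummax, it is expected to return the running maxima and their indices:
#     >>> from mindspore import Tensor, dtype as mstype
#     >>> values, indices = Cummax(axis=0)(Tensor([1., 3., 2., 5.], mstype.float32))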
class CumminExt(Primitive):
    r"""
    .. code-block::

        prim = ops.CumminExt()
        out = prim(input, dim)

    is equivalent to

    .. code-block::

        ops.cummin_ext(input, dim)

    Refer to :func:`mindspore.ops.cummin_ext` for more details.
    """
    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, input, dim):
        return _convert_stub(pyboost_cummin_ext(self, [input, dim]))

cummin_ext_op=CumminExt()
class Cummin(Primitive):
    r"""
    Returns the cumulative minimum of elements and the index.

    .. warning::
        This is an experimental API that is subject to change or deletion.

    Refer to :func:`mindspore.ops.cummin` for more details.

    Args:
        axis (int): The axis to accumulate the tensor's value. Must be in the range [-rank(input), rank(input)).

    Inputs:
        - **input** (Tensor) - The input tensor.

    Outputs:
        A tuple of 2 Tensors(values, indices), containing the cumulative minimum of elements and the index,
        the shape of each output tensor is the same as input `input`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> from mindspore import Tensor, ops
        >>> import mindspore
        >>> a = Tensor([-0.2284, -0.6628, 0.0975, 0.2680, -1.3298, -0.4220], mindspore.float32)
        >>> axis = 0
        >>> output = ops.Cummin(axis)(a)
        >>> print(output[0])
        [-0.2284 -0.6628 -0.6628 -0.6628 -1.3298 -1.3298]
        >>> print(output[1])
        [0 1 1 1 4 4]
    """
    @prim_arg_register
    def __init__(self, axis):
        self._set_prim_arg("axis", axis)

    def __call__(self, input):
        return super().__call__(input, self.axis)
class CumsumExt(Primitive): r""" .. code-block:: prim = ops.CumsumExt() out = prim(input, dim, dtype) is equivalent to .. code-block:: ops.cumsum_ext(input, dim, dtype) Refer to :func:`mindspore.ops.cumsum_ext` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('dim'), sig.make_sig('dtype', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, dim, dtype=None): return _convert_stub(pyboost_cumsum_ext(self, [input, dim, dtype if dtype is None else dtype_to_type_id('CumsumExt', 'dtype', dtype)])) cumsum_ext_op=CumsumExt() class DCT(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('x'), sig.make_sig('type', default=2), sig.make_sig('n', default=None), sig.make_sig('axis', default=-1), sig.make_sig('norm', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, x, type=2, n=None, axis=-1, norm=None): return super().__call__(x, type, n, axis, norm if norm is None else str_to_enum('DCT', 'norm', norm)) dct_op=DCT() class DCTN(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('x'), sig.make_sig('type', default=2), sig.make_sig('s', default=None), sig.make_sig('axes', default=None), sig.make_sig('norm', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, x, type=2, s=None, axes=None, norm=None): return super().__call__(x, type, s, axes, norm if norm is None else str_to_enum('DCTN', 'norm', norm)) dctn_op=DCTN() class DecoderKVCache(Primitive): r""" .. code-block:: prim = ops.DecoderKVCache() out = prim(cache, update, valid_seq_len, batch_index, seq_len_axis, new_max_seq_len, cur_max_seq_len) is equivalent to .. code-block:: ops.decoder_k_v_cache(cache, update, valid_seq_len, batch_index, seq_len_axis, new_max_seq_len, cur_max_seq_len) Refer to :func:`mindspore.ops.decoder_k_v_cache` for more details. """ @prim_arg_register def __init__(self): self.add_prim_attr("side_effect_mem", True) def __call__(self, cache, update, valid_seq_len, batch_index, seq_len_axis, new_max_seq_len, cur_max_seq_len): return super().__call__(cache, update, valid_seq_len, batch_index, seq_len_axis, new_max_seq_len, cur_max_seq_len) decoder_k_v_cache_op=DecoderKVCache()
class Dense(Primitive):
    r"""
    .. code-block::

        prim = ops.Dense()
        out = prim(input, weight, bias)

    is equivalent to

    .. code-block::

        ops.dense(input, weight, bias)

    Refer to :func:`mindspore.ops.dense` for more details.
    """
    __mindspore_signature__ = (
        sig.make_sig('input'),
        sig.make_sig('weight'),
        sig.make_sig('bias', default=None),
    )

    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, input, weight, bias=None):
        return _convert_stub(pyboost_dense(self, [input, weight, bias]))
dense_op=Dense()
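# Illustrative usage sketch for Dense via its dense_op singleton (not part of the generated
# module; shapes are assumptions). ops.dense applies input @ weight.T + bias, so weight is laid
# out as (out_features, in_features):
#     >>> import numpy as np
#     >>> from mindspore import Tensor, dtype as mstype
#     >>> x = Tensor(np.ones((2, 3)), mstype.float32)   # (batch, in_features)
#     >>> w = Tensor(np.ones((4, 3)), mstype.float32)   # (out_features, in_features)
#     >>> b = Tensor(np.zeros(4), mstype.float32)
#     >>> y = dense_op(x, w, b)                          # expected shape (2, 4)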
class Diag(Primitive):
    r"""
    .. code-block::

        prim = ops.Diag()
        out = prim(input)

    is equivalent to

    .. code-block::

        ops.diag(input)

    Refer to :func:`mindspore.ops.diag` for more details.
    """
    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, input):
        return super().__call__(input)
diag_op=Diag()


class Diagonal(Primitive):
    r"""
    .. code-block::

        prim = ops.Diagonal(offset, dim1, dim2)
        out = prim(input)

    is equivalent to

    .. code-block::

        ops.diagonal(input, offset, dim1, dim2)

    Refer to :func:`mindspore.ops.diagonal` for more details.
    """
    @prim_arg_register
    def __init__(self, offset=0, dim1=0, dim2=1):
        self._set_prim_arg("offset", offset)
        self._set_prim_arg("dim1", dim1)
        self._set_prim_arg("dim2", dim2)

    def __call__(self, input):
        return super().__call__(input, self.offset, self.dim1, self.dim2)
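# Illustrative usage sketch for Diagonal (not part of the generated module; the input matrix and
# the offset are assumptions): offset selects which diagonal, dim1/dim2 pick the two dimensions.
#     >>> import numpy as np
#     >>> from mindspore import Tensor, dtype as mstype
#     >>> x = Tensor(np.arange(9).reshape(3, 3), mstype.float32)
#     >>> main_diag = Diagonal()(x)
#     >>> upper_diag = Diagonal(offset=1)(x)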
[docs]class Div(Primitive): r""" Computes the quotient of dividing the first input tensor by the second input tensor element-wise. Refer to :func:`mindspore.ops.div` for more details. Note: - One of the two inputs must be a Tensor, when the two inputs have different shapes, they must be able to broadcast to a common shape. - The two inputs can not be bool type at the same time, [True, Tensor(True, bool\_), Tensor(np.array([True]), bool\_)] are all considered bool type. - The two inputs comply with the implicit type conversion rules to make the data types consistent. Inputs: - **x** (Union[Tensor, number.Number, bool]) - The first input is a number.Number or a bool or a tensor whose data type is `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_. - **y** (Union[Tensor, number.Number, bool]) - The second input is a number.Number or a bool or a tensor whose data type is `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_. Outputs: Tensor, the shape is the same as the one of the input `x` , `y` after broadcasting, and the data type is the one with higher precision or higher digits among the two inputs. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> # case 1 :has same data type and shape of the two inputs >>> x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.float32) >>> y = Tensor(np.array([3.0, 2.0, 3.0]), mindspore.float32) >>> div = ops.Div() >>> output = div(x, y) >>> print(output) [-1.3333334 2.5 2. ] >>> # case 2 : different data type and shape of the two inputs >>> x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.float32) >>> y = Tensor(2, mindspore.int32) >>> output = div(x, y) >>> print(output) [-2. 2.5 3.] >>> print(output.dtype) Float32 """ __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T) @prim_arg_register def __init__(self): pass def __call__(self, x, y): return _convert_stub(pyboost_div(self, [x, y]))
div_op=Div() class DivMod(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('x', dtype=sig.sig_dtype.T), sig.make_sig('y', dtype=sig.sig_dtype.T), sig.make_sig('rounding_mode', dtype=sig.sig_dtype.T1, default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, x, y, rounding_mode=None): return _convert_stub(pyboost_divmod(self, [x, y, rounding_mode if rounding_mode is None else str_to_enum('DivMod', 'rounding_mode', rounding_mode)])) divmod_op=DivMod() class Dot(Primitive): r""" .. code-block:: prim = ops.Dot() out = prim(input, other) is equivalent to .. code-block:: ops.dot(input, other) Refer to :func:`mindspore.ops.dot` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input, other): return _convert_stub(pyboost_dot(self, [input, other])) dot_op=Dot() class DropoutDoMaskExt(Primitive): r""" """ @prim_arg_register def __init__(self): pass def __call__(self, input, mask, p): return _convert_stub(pyboost_dropout_do_mask_ext(self, [input, mask, p])) dropout_do_mask_ext_op=DropoutDoMaskExt() class DropoutExt(Primitive): r""" """ @prim_arg_register def __init__(self): self.add_prim_attr("side_effect_hidden", True) def __call__(self, input, p, seed, offset): return _convert_stub(pyboost_dropout_ext(self, [input, p, seed, offset])) dropout_ext_op=DropoutExt() class DropoutGenMaskExt(Primitive): r""" """ @prim_arg_register def __init__(self): self.add_prim_attr("side_effect_hidden", True) def __call__(self, shape, p, seed, offset, dtype): return _convert_stub(pyboost_dropout_gen_mask_ext(self, [shape, p, seed, offset, dtype_to_type_id('DropoutGenMaskExt', 'dtype', dtype)])) dropout_gen_mask_ext_op=DropoutGenMaskExt() class DropoutGradExt(Primitive): r""" """ @prim_arg_register def __init__(self): pass def __call__(self, input, mask, p): return _convert_stub(pyboost_dropout_grad_ext(self, [input, mask, p])) dropout_grad_ext_op=DropoutGradExt()
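# Illustrative usage sketch for the DivMod primitive defined above (not part of the generated
# module; the input values are assumptions, and 'floor'/'trunc' are assumed to be the accepted
# rounding modes, mirroring the rounding_mode argument of mindspore.ops.div):
#     >>> from mindspore import Tensor, dtype as mstype
#     >>> x = Tensor([7., -7.], mstype.float32)
#     >>> y = Tensor([2., 2.], mstype.float32)
#     >>> q_floor = divmod_op(x, y, rounding_mode='floor')
#     >>> q_trunc = divmod_op(x, y, rounding_mode='trunc')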
[docs]class Dropout(Primitive): r""" During training, randomly zeroes some of the elements of the input tensor with probability :math:`1 - keep\_prob` from a Bernoulli distribution. It plays the role of reducing neuron correlation and avoid overfitting. Refer to :func:`mindspore.ops.dropout` for more details. .. warning:: The Ascend backend does not support the reproducibility of random numbers, so the `Seed0` and `Seed1` parameter have no effect. Args: keep_prob (float, optional): The keep rate, between 0 and 1, e.g. keep_prob = 0.9, means dropping out 10% of input units. Default: ``0.5`` . Seed0 (int, optional): Seed0 value for random generating. Default: ``0`` . Seed1 (int, optional): Seed1 value for random generating. Default: ``0`` . Inputs: - **x** (Tensor) - The input Tensor of shape :math:`(*, N)`, with data type of float16, float32 or float64. Outputs: - **output** (Tensor) - With the same shape and data type as `x`. - **mask** (Tensor) - The mask applied to `x`. - On GPU and CPU, `mask` has the same shape and data type as `x`. - On Ascend, to achieve a better performance, it is denoted as a 1-D Tensor with Uint8 data type. It has shape :math:`(byte\_counts, )` where :math:`byte\_counts` is the number of bytes needed to mask the input `x`, :math:`byte\_counts` is calculated using the following formula: .. math:: byte\_counts = \text{ceil}(\text{cumprod}(x.shape) / 128) * 16 If shape of `x` is :math:`(2, 3, 4, 5, 6)`, the shape of `mask` will be :math:`(96, )`. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> dropout = ops.Dropout(keep_prob=0.5) >>> x = Tensor(np.ones([1, 2, 3, 4, 5]), mindspore.float32) >>> output, mask = dropout(x) >>> print(output.shape, mask.shape, mask.dtype) (1, 2, 3, 4, 5) (16,) UInt8 """ @prim_arg_register def __init__(self, keep_prob=0.5, Seed0=0, Seed1=0): self._set_prim_arg("keep_prob", keep_prob) self._set_prim_arg("Seed0", Seed0) self._set_prim_arg("Seed1", Seed1) self.add_prim_attr("side_effect_hidden", True) def __call__(self, x): return super().__call__(x, self.keep_prob, self.Seed0, self.Seed1)
class Eig(Primitive): r""" Computes the eigenvalues and eigenvectors of a square matrix(batch square matrices). Args: compute_v (bool, optional): If ``True`` , compute both eigenvalues and eigenvectors; If `False`, just eigenvalues will be computed. Default: ``False`` . Inputs: - **x** (Tensor) - Square matrices of shape :math:`(*, N, N)`, with float32, float64, complex64 or complex128 data type. Outputs: - **eigen_values** (Tensor) - Shape :math:`(*, N)`. Each inner most vector represents eigenvalues of the corresponding matrix. The eigenvalues may not have an order. - **eigen_vectors** (Tensor) - If `compute_v` is `False`, it's an empty tensor. Otherwise, this tensor has shape :math:`(*, N, N)`, whose columns represent normalized (unit length) eigenvectors of corresponding eigenvalues. Raises: TypeError: If `compute_v` is not a bool. TypeError: If dtype of `x` is not one of: float64, float32, complex64 or complex128. TypeError: If `x` is not a Tensor. ValueError: If `x` is not a square(batch squares). Supported Platforms: ``Ascend`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> x = Tensor(np.array([[1.0, 0.0], [0.0, 2.0]]), mindspore.float32) >>> u, v = ops.Eig(True)(x) >>> print(u) [1.+0.j 2.+0.j] >>> print(v) [[1.+0.j 0.+0.j] [0.+0.j 1.+0.j]] """ @prim_arg_register def __init__(self, compute_v=False): self._set_prim_arg("compute_v", compute_v) def __call__(self, x): return super().__call__(x, self.compute_v) class EluExt(Primitive): r""" .. code-block:: prim = ops.EluExt(alpha) out = prim(input) is equivalent to .. code-block:: ops.elu_ext(input, alpha) Refer to :func:`mindspore.ops.elu_ext` for more details. """ @prim_arg_register def __init__(self, alpha=1.0): self._set_prim_arg("alpha", alpha) def __call__(self, input): return _convert_stub(pyboost_elu_ext(self, [input, self.alpha])) class EluGradExt(Primitive): r""" Gradients of EluExt operation. """ __mindspore_signature__ = ( sig.make_sig('dout'), sig.make_sig('x'), sig.make_sig('alpha', default=1.0), ) @prim_arg_register def __init__(self): pass def __call__(self, dout, x, alpha=1.0): return _convert_stub(pyboost_elu_grad_ext(self, [dout, x, alpha])) elu_grad_ext_op=EluGradExt() class EluGrad(Primitive): r""" Gradients of Elu operation. """ @prim_arg_register def __init__(self): pass def __call__(self, dout, out): return super().__call__(dout, out) elu_grad_op=EluGrad()
class Elu(Primitive):
    r"""
    .. code-block::

        prim = ops.Elu(alpha)
        out = prim(input_x)

    is equivalent to

    .. code-block::

        ops.elu(input_x, alpha)

    Refer to :func:`mindspore.ops.elu` for more details.
    """
    @prim_arg_register
    def __init__(self, alpha=1.0):
        self._set_prim_arg("alpha", alpha)

    def __call__(self, input_x):
        return super().__call__(input_x, self.alpha)
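# Illustrative usage sketch for Elu (not part of the generated module; the input values are
# assumptions, and only the default alpha=1.0 is shown):
#     >>> from mindspore import Tensor, dtype as mstype
#     >>> out = Elu()(Tensor([-1.0, 0.0, 2.0], mstype.float32))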
class EmbeddingApplyAdaGrad(Primitive): r""" .. code-block:: prim = ops.EmbeddingApplyAdaGrad() out = prim(var_handle, lr, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num) is equivalent to .. code-block:: ops.embedding_apply_ada_grad(var_handle, lr, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num) Refer to :func:`mindspore.ops.embedding_apply_ada_grad` for more details. """ __mindspore_signature__ = ( sig.make_sig('var_handle'), sig.make_sig('lr'), sig.make_sig('grad'), sig.make_sig('keys'), sig.make_sig('global_step'), sig.make_sig('embedding_dim'), sig.make_sig('mask_zero', default=(0,)), sig.make_sig('padding_key', default=(0,)), sig.make_sig('padding_key_mask', default=(1,)), sig.make_sig('completion_key', default=(0,)), sig.make_sig('completion_key_mask', default=(1,)), sig.make_sig('_embedding_dim', default=1), sig.make_sig('_max_key_num', default=1), ) @prim_arg_register def __init__(self): self.add_prim_attr("_process_node_engine_id", 'PS') def __call__(self, var_handle, lr, grad, keys, global_step, embedding_dim, mask_zero=(0,), padding_key=(0,), padding_key_mask=(1,), completion_key=(0,), completion_key_mask=(1,), _embedding_dim=1, _max_key_num=1): return super().__call__(var_handle, lr, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num) embedding_apply_ada_grad_op=EmbeddingApplyAdaGrad() class EmbeddingApplyAdam(Primitive): r""" .. code-block:: prim = ops.EmbeddingApplyAdam() out = prim(var_handle, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num) is equivalent to .. code-block:: ops.embedding_apply_adam(var_handle, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num) Refer to :func:`mindspore.ops.embedding_apply_adam` for more details. 
""" __mindspore_signature__ = ( sig.make_sig('var_handle'), sig.make_sig('beta1_power'), sig.make_sig('beta2_power'), sig.make_sig('lr'), sig.make_sig('beta1'), sig.make_sig('beta2'), sig.make_sig('epsilon'), sig.make_sig('grad'), sig.make_sig('keys'), sig.make_sig('global_step'), sig.make_sig('embedding_dim'), sig.make_sig('mask_zero', default=(0,)), sig.make_sig('padding_key', default=(0,)), sig.make_sig('padding_key_mask', default=(1,)), sig.make_sig('completion_key', default=(0,)), sig.make_sig('completion_key_mask', default=(1,)), sig.make_sig('_embedding_dim', default=1), sig.make_sig('_max_key_num', default=1), ) @prim_arg_register def __init__(self): self.add_prim_attr("_process_node_engine_id", 'PS') def __call__(self, var_handle, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, keys, global_step, embedding_dim, mask_zero=(0,), padding_key=(0,), padding_key_mask=(1,), completion_key=(0,), completion_key_mask=(1,), _embedding_dim=1, _max_key_num=1): return super().__call__(var_handle, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num) embedding_apply_adam_op=EmbeddingApplyAdam() class EmbeddingApplyAdamW(Primitive): r""" .. code-block:: prim = ops.EmbeddingApplyAdamW() out = prim(var_handle, beta1_power, beta2_power, lr, weight_decay, beta1, beta2, epsilon, grad, keys, max_grad_norm, global_step, embedding_dim, ams_grad, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num) is equivalent to .. code-block:: ops.embedding_apply_adam_w(var_handle, beta1_power, beta2_power, lr, weight_decay, beta1, beta2, epsilon, grad, keys, max_grad_norm, global_step, embedding_dim, ams_grad, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num) Refer to :func:`mindspore.ops.embedding_apply_adam_w` for more details. """ __mindspore_signature__ = ( sig.make_sig('var_handle'), sig.make_sig('beta1_power'), sig.make_sig('beta2_power'), sig.make_sig('lr'), sig.make_sig('weight_decay'), sig.make_sig('beta1'), sig.make_sig('beta2'), sig.make_sig('epsilon'), sig.make_sig('grad'), sig.make_sig('keys'), sig.make_sig('max_grad_norm'), sig.make_sig('global_step'), sig.make_sig('embedding_dim'), sig.make_sig('ams_grad', default=(0,)), sig.make_sig('mask_zero', default=(0,)), sig.make_sig('padding_key', default=(0,)), sig.make_sig('padding_key_mask', default=(1,)), sig.make_sig('completion_key', default=(0,)), sig.make_sig('completion_key_mask', default=(1,)), sig.make_sig('_embedding_dim', default=1), sig.make_sig('_max_key_num', default=1), ) @prim_arg_register def __init__(self): self.add_prim_attr("_process_node_engine_id", 'PS') def __call__(self, var_handle, beta1_power, beta2_power, lr, weight_decay, beta1, beta2, epsilon, grad, keys, max_grad_norm, global_step, embedding_dim, ams_grad=(0,), mask_zero=(0,), padding_key=(0,), padding_key_mask=(1,), completion_key=(0,), completion_key_mask=(1,), _embedding_dim=1, _max_key_num=1): return super().__call__(var_handle, beta1_power, beta2_power, lr, weight_decay, beta1, beta2, epsilon, grad, keys, max_grad_norm, global_step, embedding_dim, ams_grad, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num) embedding_apply_adam_w_op=EmbeddingApplyAdamW() class EmbeddingApplyFtrl(Primitive): r""" .. 
code-block:: prim = ops.EmbeddingApplyFtrl() out = prim(var_handle, lr, lr_power, lambda1, lambda2, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num) is equivalent to .. code-block:: ops.embedding_apply_ftrl(var_handle, lr, lr_power, lambda1, lambda2, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num) Refer to :func:`mindspore.ops.embedding_apply_ftrl` for more details. """ __mindspore_signature__ = ( sig.make_sig('var_handle'), sig.make_sig('lr'), sig.make_sig('lr_power'), sig.make_sig('lambda1'), sig.make_sig('lambda2'), sig.make_sig('grad'), sig.make_sig('keys'), sig.make_sig('global_step'), sig.make_sig('embedding_dim'), sig.make_sig('mask_zero', default=(0,)), sig.make_sig('padding_key', default=(0,)), sig.make_sig('padding_key_mask', default=(1,)), sig.make_sig('completion_key', default=(0,)), sig.make_sig('completion_key_mask', default=(1,)), sig.make_sig('_embedding_dim', default=1), sig.make_sig('_max_key_num', default=1), ) @prim_arg_register def __init__(self): self.add_prim_attr("_process_node_engine_id", 'PS') def __call__(self, var_handle, lr, lr_power, lambda1, lambda2, grad, keys, global_step, embedding_dim, mask_zero=(0,), padding_key=(0,), padding_key_mask=(1,), completion_key=(0,), completion_key_mask=(1,), _embedding_dim=1, _max_key_num=1): return super().__call__(var_handle, lr, lr_power, lambda1, lambda2, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num) embedding_apply_ftrl_op=EmbeddingApplyFtrl() class EmbeddingApplyRmsprop(Primitive): r""" .. code-block:: prim = ops.EmbeddingApplyRmsprop() out = prim(var_handle, lr, rho, momentum, epsilon, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num) is equivalent to .. code-block:: ops.embedding_apply_rmsprop(var_handle, lr, rho, momentum, epsilon, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num) Refer to :func:`mindspore.ops.embedding_apply_rmsprop` for more details. 
""" __mindspore_signature__ = ( sig.make_sig('var_handle'), sig.make_sig('lr'), sig.make_sig('rho'), sig.make_sig('momentum'), sig.make_sig('epsilon'), sig.make_sig('grad'), sig.make_sig('keys'), sig.make_sig('global_step'), sig.make_sig('embedding_dim'), sig.make_sig('mask_zero', default=(0,)), sig.make_sig('padding_key', default=(0,)), sig.make_sig('padding_key_mask', default=(1,)), sig.make_sig('completion_key', default=(0,)), sig.make_sig('completion_key_mask', default=(1,)), sig.make_sig('_embedding_dim', default=1), sig.make_sig('_max_key_num', default=1), ) @prim_arg_register def __init__(self): self.add_prim_attr("_process_node_engine_id", 'PS') def __call__(self, var_handle, lr, rho, momentum, epsilon, grad, keys, global_step, embedding_dim, mask_zero=(0,), padding_key=(0,), padding_key_mask=(1,), completion_key=(0,), completion_key_mask=(1,), _embedding_dim=1, _max_key_num=1): return super().__call__(var_handle, lr, rho, momentum, epsilon, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num) embedding_apply_rmsprop_op=EmbeddingApplyRmsprop() class EmbeddingApplySgd(Primitive): r""" .. code-block:: prim = ops.EmbeddingApplySgd() out = prim(var_handle, lr, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num) is equivalent to .. code-block:: ops.embedding_apply_sgd(var_handle, lr, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num) Refer to :func:`mindspore.ops.embedding_apply_sgd` for more details. """ __mindspore_signature__ = ( sig.make_sig('var_handle'), sig.make_sig('lr'), sig.make_sig('grad'), sig.make_sig('keys'), sig.make_sig('global_step'), sig.make_sig('embedding_dim'), sig.make_sig('mask_zero', default=(0,)), sig.make_sig('padding_key', default=(0,)), sig.make_sig('padding_key_mask', default=(1,)), sig.make_sig('completion_key', default=(0,)), sig.make_sig('completion_key_mask', default=(1,)), sig.make_sig('_embedding_dim', default=1), sig.make_sig('_max_key_num', default=1), ) @prim_arg_register def __init__(self): self.add_prim_attr("_process_node_engine_id", 'PS') def __call__(self, var_handle, lr, grad, keys, global_step, embedding_dim, mask_zero=(0,), padding_key=(0,), padding_key_mask=(1,), completion_key=(0,), completion_key_mask=(1,), _embedding_dim=1, _max_key_num=1): return super().__call__(var_handle, lr, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num) embedding_apply_sgd_op=EmbeddingApplySgd() class EmbeddingDenseBackward(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('grad'), sig.make_sig('indices'), sig.make_sig('num_weights'), sig.make_sig('padding_idx', default=None), sig.make_sig('scale_grad_by_freq', default=False), ) @prim_arg_register def __init__(self): pass def __call__(self, grad, indices, num_weights, padding_idx=None, scale_grad_by_freq=False): return _convert_stub(pyboost_embedding_dense_backward(self, [grad, indices, num_weights, padding_idx, scale_grad_by_freq])) embedding_dense_backward_op=EmbeddingDenseBackward() class EmbeddingFeatureMappingExport(Primitive): r""" .. code-block:: prim = ops.EmbeddingFeatureMappingExport() out = prim(file_path, table_name, global_step, values, embedding_dim, feature_id, offset_id) is equivalent to .. 
code-block:: ops.embedding_feature_mapping_export(file_path, table_name, global_step, values, embedding_dim, feature_id, offset_id) Refer to :func:`mindspore.ops.embedding_feature_mapping_export` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, file_path, table_name, global_step, values, embedding_dim, feature_id, offset_id): return super().__call__(file_path, table_name, global_step, values, embedding_dim, feature_id, offset_id) embedding_feature_mapping_export_op=EmbeddingFeatureMappingExport() class EmbeddingFeatureMappingFileSize(Primitive): r""" .. code-block:: prim = ops.EmbeddingFeatureMappingFileSize() out = prim(file_path, table_name, global_step, embedding_dim, only_offset_flag) is equivalent to .. code-block:: ops.embedding_feature_mapping_file_size(file_path, table_name, global_step, embedding_dim, only_offset_flag) Refer to :func:`mindspore.ops.embedding_feature_mapping_file_size` for more details. """ __mindspore_signature__ = ( sig.make_sig('file_path'), sig.make_sig('table_name'), sig.make_sig('global_step'), sig.make_sig('embedding_dim'), sig.make_sig('only_offset_flag', default=True), ) @prim_arg_register def __init__(self): pass def __call__(self, file_path, table_name, global_step, embedding_dim, only_offset_flag=True): return super().__call__(file_path, table_name, global_step, embedding_dim, only_offset_flag) embedding_feature_mapping_file_size_op=EmbeddingFeatureMappingFileSize() class EmbeddingFeatureMappingFind(Primitive): r""" .. code-block:: prim = ops.EmbeddingFeatureMappingFind() out = prim(table_name, feature_size, num) is equivalent to .. code-block:: ops.embedding_feature_mapping_find(table_name, feature_size, num) Refer to :func:`mindspore.ops.embedding_feature_mapping_find` for more details. """ __mindspore_signature__ = ( sig.make_sig('table_name'), sig.make_sig('feature_size'), sig.make_sig('num', default=1), ) @prim_arg_register def __init__(self): pass def __call__(self, table_name, feature_size, num=1): return super().__call__(table_name, feature_size, num) embedding_feature_mapping_find_op=EmbeddingFeatureMappingFind() class EmbeddingFeatureMappingImport(Primitive): r""" .. code-block:: prim = ops.EmbeddingFeatureMappingImport() out = prim(file_path, teble_name, feature_size, global_step, embedding_dim, only_offset_flag, num) is equivalent to .. code-block:: ops.embedding_feature_mapping_import(file_path, teble_name, feature_size, global_step, embedding_dim, only_offset_flag, num) Refer to :func:`mindspore.ops.embedding_feature_mapping_import` for more details. """ __mindspore_signature__ = ( sig.make_sig('file_path'), sig.make_sig('teble_name'), sig.make_sig('feature_size'), sig.make_sig('global_step'), sig.make_sig('embedding_dim'), sig.make_sig('only_offset_flag', default=True), sig.make_sig('num', default=1), ) @prim_arg_register def __init__(self): pass def __call__(self, file_path, teble_name, feature_size, global_step, embedding_dim, only_offset_flag=True, num=1): return super().__call__(file_path, teble_name, feature_size, global_step, embedding_dim, only_offset_flag, num) embedding_feature_mapping_import_op=EmbeddingFeatureMappingImport() class EmbeddingFeatureMappingInsert(Primitive): r""" .. code-block:: prim = ops.EmbeddingFeatureMappingInsert() out = prim(table_name, num, feature_id, offset_id) is equivalent to .. code-block:: ops.embedding_feature_mapping_insert(table_name, num, feature_id, offset_id) Refer to :func:`mindspore.ops.embedding_feature_mapping_insert` for more details. 
""" @prim_arg_register def __init__(self): pass def __call__(self, table_name, num, feature_id, offset_id): return super().__call__(table_name, num, feature_id, offset_id) embedding_feature_mapping_insert_op=EmbeddingFeatureMappingInsert() class EmbeddingFeatureMappingTableSize(Primitive): r""" .. code-block:: prim = ops.EmbeddingFeatureMappingTableSize() out = prim(table_name) is equivalent to .. code-block:: ops.embedding_feature_mapping_table_size(table_name) Refer to :func:`mindspore.ops.embedding_feature_mapping_table_size` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, table_name): return super().__call__(table_name) embedding_feature_mapping_table_size_op=EmbeddingFeatureMappingTableSize() class EmbeddingFeatureMappingV2(Primitive): r""" .. code-block:: prim = ops.EmbeddingFeatureMappingV2() out = prim(table_name, feature_id, table_total_size, table_actual_size) is equivalent to .. code-block:: ops.embedding_feature_mapping_v2(table_name, feature_id, table_total_size, table_actual_size) Refer to :func:`mindspore.ops.embedding_feature_mapping_v2` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, table_name, feature_id, table_total_size, table_actual_size): return super().__call__(table_name, feature_id, table_total_size, table_actual_size) embedding_feature_mapping_v2_op=EmbeddingFeatureMappingV2() class Embedding(Primitive): r""" Retrieve the word embeddings in `weight` using indices specified in `input`. .. warning:: On Ascend, the behavior is unpredictable when the value of `input` is invalid. Args: input (Tensor): The indices used to lookup in the `weight`. The data type must be mindspore.int32 or mindspore.int64, and the value should be in range `[0, weight.shape[0])`. weight (Parameter): The matrix where to lookup from. The shape must be 2D. padding_idx (int, optional): If the value is not None, the corresponding row of `weight` will not be updated in training. The value should be in range `[-weight.shape[0], weight.shape[0])` if it's not ``None``. Default ``None``. max_norm (float, optional): If not None, firstly get the p-norm result of the `weight` specified by `input` where p is specified by `norm_type`; if the result is larger then `max_norm`, update the `weight` with :math:`\frac{max\_norm}{result+1e^{-7}}` in-place. Default ``None``. norm_type (float, optional): Indicates the value of p in p-norm. Default ``2.0``. scale_grad_by_freq (bool, optional): If ``True`` the gradients will be scaled by the inverse of frequency of the index in `input`. Default ``False``. Returns: Tensor, has the same data type as `weight`, the shape is :math:`(*input.shape, weight.shape[1])`. Raises: ValueError: If `padding_idx` is out of valid range. ValueError: If the shape of `weight` is invalid. TypeError: `weight` is not a :class:`mindspore.Parameter`. 
Supported Platforms: ``Ascend`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, Parameter, ops >>> input = Tensor([[1, 0, 1, 1], [0, 0, 1, 0]]) >>> weight = Parameter(np.random.randn(3, 3).astype(np.float32)) >>> output = ops.auto_generate.Embedding()(input, weight, max_norm=0.4) >>> print(output) [[[ 5.49015924e-02, 3.47811311e-01, -1.89771220e-01], [ 2.09307984e-01, -2.24846993e-02, 3.40124398e-01], [ 5.49015924e-02, 3.47811311e-01, -1.89771220e-01], [ 5.49015924e-02, 3.47811311e-01, -1.89771220e-01]], [[ 2.09307984e-01, -2.24846993e-02, 3.40124398e-01], [ 2.09307984e-01, -2.24846993e-02, 3.40124398e-01], [ 5.49015924e-02, 3.47811311e-01, -1.89771220e-01], [ 2.09307984e-01, -2.24846993e-02, 3.40124398e-01]]] """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('weight', sig.sig_rw.RW_WRITE), sig.make_sig('padding_idx', default=None), sig.make_sig('max_norm', default=None), sig.make_sig('norm_type', default=2.0), sig.make_sig('scale_grad_by_freq', default=False), ) @prim_arg_register def __init__(self): self.add_prim_attr("side_effect_mem", True) def __call__(self, input, weight, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False): return _convert_stub(pyboost_embedding(self, [input, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq])) embedding_op=Embedding() class EmbeddingTableEvict(Primitive): r""" .. code-block:: prim = ops.EmbeddingTableEvict() out = prim(var_handle, global_step, steps_to_live) is equivalent to .. code-block:: ops.embedding_table_evict(var_handle, global_step, steps_to_live) Refer to :func:`mindspore.ops.embedding_table_evict` for more details. """ __mindspore_signature__ = ( sig.make_sig('var_handle'), sig.make_sig('global_step'), sig.make_sig('steps_to_live', default=0), ) @prim_arg_register def __init__(self): self.add_prim_attr("_process_node_engine_id", 'PS') def __call__(self, var_handle, global_step, steps_to_live=0): return super().__call__(var_handle, global_step, steps_to_live) embedding_table_evict_op=EmbeddingTableEvict()
class Equal(Primitive):
    r"""
    .. code-block::

        prim = ops.Equal()
        out = prim(input, other)

    is equivalent to

    .. code-block::

        ops.equal(input, other)

    Refer to :func:`mindspore.ops.equal` for more details.
    """
    __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)

    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, input, other):
        return _convert_stub(pyboost_equal(self, [input, other]))
equal_op=Equal()
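# Illustrative usage sketch for Equal via its equal_op singleton (not part of the generated
# module; the input values are assumptions). The element-wise comparison returns a boolean tensor:
#     >>> from mindspore import Tensor, dtype as mstype
#     >>> mask = equal_op(Tensor([1, 2, 3], mstype.int32), Tensor([1, 0, 3], mstype.int32))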
class Erf(Primitive):
    r"""
    .. code-block::

        prim = ops.Erf()
        out = prim(input)

    is equivalent to

    .. code-block::

        ops.erf(input)

    Refer to :func:`mindspore.ops.erf` for more details.
    """
    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, input):
        return _convert_stub(pyboost_erf(self, [input]))
erf_op=Erf()
class Erfc(Primitive):
    r"""
    .. code-block::

        prim = ops.Erfc()
        out = prim(input)

    is equivalent to

    .. code-block::

        ops.erfc(input)

    Refer to :func:`mindspore.ops.erfc` for more details.
    """
    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, input):
        return _convert_stub(pyboost_erfc(self, [input]))
erfc_op=Erfc()
class Erfinv(Primitive):
    r"""
    .. code-block::

        prim = ops.Erfinv()
        out = prim(input)

    is equivalent to

    .. code-block::

        ops.erfinv(input)

    Refer to :func:`mindspore.ops.erfinv` for more details.
    """
    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, input):
        return _convert_stub(pyboost_erfinv(self, [input]))
erfinv_op=Erfinv()
class Exp(Primitive):
    r"""
    .. code-block::

        prim = ops.Exp()
        out = prim(input)

    is equivalent to

    .. code-block::

        ops.exp(input)

    Refer to :func:`mindspore.ops.exp` for more details.
    """
    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, input):
        return _convert_stub(pyboost_exp(self, [input]))
exp_op=Exp()
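# Illustrative usage sketch for Exp via its exp_op singleton (not part of the generated module;
# the input values are assumptions):
#     >>> import numpy as np
#     >>> from mindspore import Tensor, dtype as mstype
#     >>> out = exp_op(Tensor(np.array([0.0, 1.0, 2.0]), mstype.float32))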
class ExpandDims(Primitive):
    r"""
    .. code-block::

        prim = ops.ExpandDims()
        out = prim(input_x, axis)

    is equivalent to

    .. code-block::

        ops.expand_dims(input_x, axis)

    Refer to :func:`mindspore.ops.expand_dims` for more details.
    """
    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, input_x, axis):
        return super().__call__(input_x, axis)
expand_dims_op=ExpandDims()
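# Illustrative usage sketch for ExpandDims via its expand_dims_op singleton (not part of the
# generated module; shapes are assumptions): inserting axis 0 turns a (3,) tensor into a (1, 3) tensor.
#     >>> from mindspore import Tensor, dtype as mstype
#     >>> x = Tensor([1., 2., 3.], mstype.float32)
#     >>> y = expand_dims_op(x, 0)   # expected shape (1, 3)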
class Expm1(Primitive):
    r"""
    .. code-block::

        prim = ops.Expm1()
        out = prim(input)

    is equivalent to

    .. code-block::

        ops.expm1(input)

    Refer to :func:`mindspore.ops.expm1` for more details.
    """
    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, input):
        return _convert_stub(pyboost_expm1(self, [input]))
expm1_op=Expm1()


class ExtractImagePatches(Primitive):
    r"""
    .. code-block::

        prim = ops.ExtractImagePatches(ksizes, strides, rates, padding)
        out = prim(input_x)

    is equivalent to

    .. code-block::

        ops.extract_image_patches(input_x, ksizes, strides, rates, padding)

    Refer to :func:`mindspore.ops.extract_image_patches` for more details.
    """
    @prim_arg_register
    def __init__(self, ksizes, strides, rates, padding='VALID'):
        self._set_prim_arg_with_handler("ksizes", type_it('ExtractImagePatches', 'ksizes', ksizes, OpDtype.DT_LIST_INT, OpDtype.DT_TUPLE_INT), to_kernel_size)
        self._set_prim_arg_with_handler("strides", type_it('ExtractImagePatches', 'strides', strides, OpDtype.DT_LIST_INT, OpDtype.DT_TUPLE_INT), to_strides)
        self._set_prim_arg_with_handler("rates", type_it('ExtractImagePatches', 'rates', rates, OpDtype.DT_LIST_INT, OpDtype.DT_TUPLE_INT), to_rates)
        self._set_prim_arg_with_handler("padding", padding, str_to_enum)

    def __call__(self, input_x):
        return super().__call__(input_x, self.ksizes, self.strides, self.rates, self.padding)
class Eye(Primitive):
    r"""
    Creates a tensor with ones on the diagonal and zeros in the rest.

    Refer to :func:`mindspore.ops.eye` for more details.

    Note:
        The data type of returned tensor can be float16, float32, int8, int16, int32, int64, uint8 or bool
        on Ascend platforms.

    Inputs:
        - **n** (int) - The number of rows of returned tensor. Constant value only.
        - **m** (int) - The number of columns of returned tensor. Constant value only.
        - **t** (mindspore.dtype) - MindSpore's dtype, the data type of the returned tensor.
          Default: ``None`` , the data type of the returned tensor is mindspore.float32.

    Outputs:
        Tensor, a tensor with ones on the diagonal and the rest of elements are zero. The shape of `output`
        depends on the user's Inputs `n` and `m`. And the data type depends on Inputs `t`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore
        >>> from mindspore import ops
        >>> eye = ops.Eye()
        >>> output = eye(2, 2, mindspore.int32)
        >>> print(output)
        [[1 0]
         [0 1]]
        >>> print(output.dtype)
        Int32
        >>> output = eye(1, 2, mindspore.float32)
        >>> print(output)
        [[1. 0.]]
        >>> print(output.dtype)
        Float32
    """
    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, n, m, dtype):
        return _convert_stub(pyboost_eye(self, [n, m, dtype_to_type_id('Eye', 'dtype', dtype)]))
eye_op=Eye()


class FastGeLUGrad(Primitive):
    r"""
    Gradients of FastGeLU operation.
    """
    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, dy, x):
        return super().__call__(dy, x)

fast_gelu_grad_op=FastGeLUGrad()
class FastGeLU(Primitive):
    r"""
    .. code-block::

        prim = ops.FastGeLU()
        out = prim(x)

    is equivalent to

    .. code-block::

        ops.fast_gelu(x)

    Refer to :func:`mindspore.ops.fast_gelu` for more details.
    """
    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, x):
        return super().__call__(x)
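# Illustrative usage sketch for FastGeLU (not part of the generated module; the input values are
# assumptions):
#     >>> from mindspore import Tensor, dtype as mstype
#     >>> out = FastGeLU()(Tensor([-1.5, 0.0, 1.5], mstype.float32))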
fast_gelu_op=FastGeLU() class FFNExt(Primitive): r""" .. code-block:: prim = ops.FFNExt(activation, inner_precise) out = prim(x, weight1, weight2, expertTokens, bias1, bias2, scale, offset, deqScale1, deqScale2, antiquant_scale1, antiquant_scale2, antiquant_offset1, antiquant_offset2) is equivalent to .. code-block:: ops.ffn_ext(x, weight1, weight2, expertTokens, bias1, bias2, scale, offset, deqScale1, deqScale2, antiquant_scale1, antiquant_scale2, antiquant_offset1, antiquant_offset2, activation, inner_precise) Refer to :func:`mindspore.ops.ffn_ext` for more details. """ __mindspore_signature__ = ( sig.make_sig('x'), sig.make_sig('weight1'), sig.make_sig('weight2'), sig.make_sig('expertTokens', default=None), sig.make_sig('bias1', default=None), sig.make_sig('bias2', default=None), sig.make_sig('scale', default=None), sig.make_sig('offset', default=None), sig.make_sig('deqScale1', default=None), sig.make_sig('deqScale2', default=None), sig.make_sig('antiquant_scale1', default=None), sig.make_sig('antiquant_scale2', default=None), sig.make_sig('antiquant_offset1', default=None), sig.make_sig('antiquant_offset2', default=None), ) @prim_arg_register def __init__(self, activation='fastgelu', inner_precise=0): self._set_prim_arg_with_handler("activation", activation, str_to_enum) self._set_prim_arg("inner_precise", inner_precise) def __call__(self, x, weight1, weight2, expertTokens=None, bias1=None, bias2=None, scale=None, offset=None, deqScale1=None, deqScale2=None, antiquant_scale1=None, antiquant_scale2=None, antiquant_offset1=None, antiquant_offset2=None): return _convert_stub(pyboost_ffn_ext(self, [x, weight1, weight2, expertTokens, bias1, bias2, scale, offset, deqScale1, deqScale2, antiquant_scale1, antiquant_scale2, antiquant_offset1, antiquant_offset2, self.activation, self.inner_precise])) class FFT2(Primitive): r""" .. code-block:: prim = ops.FFT2() out = prim(input, s, dim, norm) is equivalent to .. code-block:: ops.fft2(input, s, dim, norm) Refer to :func:`mindspore.ops.fft2` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('s', default=None), sig.make_sig('dim', default=(-2, -1)), sig.make_sig('norm', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, s=None, dim=(-2, -1), norm=None): return super().__call__(input, s, dim, norm if norm is None else str_to_enum('FFT2', 'norm', norm)) fft2_op=FFT2() class FFT(Primitive): r""" .. code-block:: prim = ops.FFT() out = prim(input, n, dim, norm) is equivalent to .. code-block:: ops.fft(input, n, dim, norm) Refer to :func:`mindspore.ops.fft` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('n', default=None), sig.make_sig('dim', default=-1), sig.make_sig('norm', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, n=None, dim=-1, norm=None): return super().__call__(input, n, dim, norm if norm is None else str_to_enum('FFT', 'norm', norm)) fft_op=FFT() class FFTOrtho(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('axes', default=None), sig.make_sig('forward', default=True), ) @prim_arg_register def __init__(self): pass def __call__(self, input, axes=None, forward=True): return super().__call__(input, axes, forward) fft_ortho_op=FFTOrtho() class FFTShapeCopy(Primitive): r""" Truncate or zero-fill the gradient of an fft operation. 
""" @prim_arg_register def __init__(self): pass def __call__(self, dout, shape): return super().__call__(dout, shape) fft_shapecopy_op=FFTShapeCopy()
[docs]class FFTWithSize(Primitive): r""" Fourier transform, can be adjusted by parameters to achieve FFT/IFFT/RFFT/IRFFT. For fft, it computes the following expression: .. math:: X[\omega_1, \dots, \omega_d] = \sum_{n_1=0}^{N_1-1} \dots \sum_{n_d=0}^{N_d-1} x[n_1, \dots, n_d] e^{-j\ 2 \pi \sum_{i=0}^d \frac{\omega_i n_i}{N_i}}, where :math:`d` = `signal_ndim` is number of dimensions for the signal, and :math:`N_i` is the size of signal dimension :math:`i`. For ifft, it computes the following expression: .. math:: X[\omega_1, \dots, \omega_d] = \frac{1}{\prod_{i=1}^d N_i} \sum_{n_1=0}^{N_1-1} \dots \sum_{n_d=0}^{N_d-1} x[n_1, \dots, n_d] e^{\ j\ 2 \pi \sum_{i=0}^d \frac{\omega_i n_i}{N_i}}, where :math:`d` = `signal_ndim` is number of dimensions for the signal, and :math:`N_i` is the size of signal dimension :math:`i`. Note: - FFT/IFFT requires complex64 or complex128 inputs, return complex64 or complex128 outputs. - RFFT requires bool, uint8, int8, int16, int32, int64, float32 and float64 inputs, return complex64 or complex128 outputs. - IRFFT requires complex64 or complex128 inputs, return float32 or float64 outputs. .. warning:: This is an experimental API that is subject to change or deletion. Args: signal_ndim (int): The number of dimensions in each signal, this controls how many dimensions of the fourier transform are realized, can only be 1, 2 or 3. inverse (bool): Whether it is the inverse transformation, used to select from FFT and RFFT or IFFT and IRFFT. - when set to ``True``: IFFT and IRFFT. - when set to ``False``: FFT and RFFT. real (bool): Whether it is the real transformation, combines with `inverse` to select a specific transformation mode: - `inverse` is ``False`` , `real` is ``False`` : corresponds to FFT. - `inverse` is ``True`` , `real` is ``False`` : corresponds to IFFT. - `inverse` is ``False`` , `real` is ``True`` : corresponds to RFFT. - `inverse` is ``True`` , `real` is ``True`` : corresponds to IRFFT. norm (str, optional): The normalization, optional values: [ ``"backward"`` , ``"forward"`` , ``"ortho"`` ]. Default value: ``"backward"`` . - ``"backward"`` has the direct transforms unscaled and the inverse transforms scaled by :math:`1/n`, where n is the input x's element numbers. - ``"ortho"`` has both direct and inverse transforms are scaled by :math:`1/\sqrt n`. - ``"forward"`` has the direct transforms scaled by :math:`1/n` and the inverse transforms unscaled. onesided (bool, optional): Controls whether the input is halved to avoid redundancy. Default: ``True`` . signal_sizes (tuple, optional): Size of the original signal (the signal before rfft, no batch dimension), only in IRFFT mode and set `onesided` to ``True`` requires the parameter, the following conditions must be satisfied. Default: ``()`` . - The length of `signal_sizes` is equal to the signal_ndim of the IRFFT: :math:`len(signal\_sizes)=signal\_ndim`. - The last dimension of `signal_sizes` divided by 2 is equal to the last dimension of the IRFFT input: :math:`signal\_size[-1]/2+1=x.shape[-1]`. - `signal_sizes` has exactly the same dimensions as the input shape except for the last dimension: :math:`signal\_sizes[:-1]=x.shape[:-1]`. Inputs: - **x** (Tensor) - The dimension of the input tensor must be greater than or equal to signal_ndim. Outputs: A tensor containing the complex-to-complex, real-to-complex or complex-to-real Fourier transform result. Raises: TypeError: If the input type of FFT/IFFT/IRFFT is not one of: complex64, complex128. TypeError: If the input type is not Tensor. 
ValueError: If `x` dimension is less than signal_ndim. ValueError: If signal_ndim is greater than 3 or less than 1. ValueError: If norm is none of "backward", "forward" or "ortho". Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> # case FFT: signal_ndim: 1, inverse: False, real: False. >>> fft_in = Tensor(np.array([2, 1, 2]), mindspore.complex64) >>> fft_net = ops.FFTWithSize(signal_ndim=1, inverse=False, real=False) >>> fft_output = fft_net(fft_in) >>> print(fft_output) [5. +0.j 0.5 +0.86602545j 0.50000006-0.8660255j ] >>> # case IFFT: signal_ndim: 1, inverse: True, real: False. >>> ifft_in = fft_output >>> ifft_net = ops.FFTWithSize(signal_ndim=1, inverse=True, real=False) >>> ifft_output = ifft_net(ifft_in) >>> print(ifft_output) [2. -1.9868216e-08j 0.99999994+0.0000000e+00j 1.9999999 +7.9472862e-08j] >>> # case RFFT2D: signal_ndim: 2, inverse: False, real: True. >>> rfft_in = Tensor(np.array([[2, 1, 2], [3, 1, 6]]), mindspore.float32) >>> rfft_net = ops.FFTWithSize(signal_ndim=2, inverse=False, real=True) >>> rfft_output = rfft_net(rfft_in) >>> print(rfft_output) [[ 1.5000000e+01+1.1920929e-07j -2.3841858e-07+5.1961522e+00j] [-5.0000000e+00-2.9802322e-08j 9.9999988e-01-3.4641016e+00j]] >>> # case IRFFT2D: signal_ndim: 2, inverse: True, real: True. >>> irfft_in = rfft_output >>> irfft_net = ops.FFTWithSize(signal_ndim=2, inverse=True, real=True, signal_sizes=rfft_in.shape) >>> irfft_output = irfft_net(irfft_in) >>> print(irfft_output) [[2. 1. 2. ] [3. 0.99999994 5.9999995 ]] """ @prim_arg_register def __init__(self, signal_ndim, inverse, real, norm='backward', onesided=True, signal_sizes=()): self._set_prim_arg("signal_ndim", signal_ndim) self._set_prim_arg("inverse", inverse) self._set_prim_arg("real", real) self._set_prim_arg_with_handler("norm", norm, str_to_enum) self._set_prim_arg("onesided", onesided) self._set_prim_arg("signal_sizes", signal_sizes) def __call__(self, x): return super().__call__(x, self.signal_ndim, self.inverse, self.real, self.norm, self.onesided, self.signal_sizes)
class FFTFreq(Primitive): r""" .. code-block:: prim = ops.FFTFreq() out = prim(n, d, dtype) is equivalent to .. code-block:: ops.fftfreq(n, d, dtype) Refer to :func:`mindspore.ops.fftfreq` for more details. """ __mindspore_signature__ = ( sig.make_sig('n'), sig.make_sig('d', default=1.0), sig.make_sig('dtype', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, n, d=1.0, dtype=None): return super().__call__(n, d, dtype if dtype is None else dtype_to_type_id('FFTFreq', 'dtype', dtype)) fftfreq_op=FFTFreq() class FFTN(Primitive): r""" .. code-block:: prim = ops.FFTN() out = prim(input, s, dim, norm) is equivalent to .. code-block:: ops.fftn(input, s, dim, norm) Refer to :func:`mindspore.ops.fftn` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('s', default=None), sig.make_sig('dim', default=None), sig.make_sig('norm', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, s=None, dim=None, norm=None): return super().__call__(input, s, dim, norm if norm is None else str_to_enum('FFTN', 'norm', norm)) fftn_op=FFTN() class FFTShift(Primitive): r""" .. code-block:: prim = ops.FFTShift() out = prim(input, dim) is equivalent to .. code-block:: ops.fftshift(input, dim) Refer to :func:`mindspore.ops.fftshift` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('dim', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, dim=None): return super().__call__(input, dim) fftshift_op=FFTShift() class FillScalar(Primitive): r""" Create a Tensor of the specified shape and fill it with the specified scalar value. Args: size (Union(tuple[int], list[int])): The specified shape of output tensor. fill_value (number.Number): Value to fill the returned tensor. Complex numbers are not supported for now. Keyword Args: dtype (mindspore.dtype): The specified type of output tensor. `bool_` and `number` are supported, for details, please refer to :class:`mindspore.dtype` . Default: ``None`` . Returns: Tensor. Raises: TypeError: If `size` is not a tuple or list. ValueError: The element in `size` is less than 0. Supported Platforms: ``Ascend`` """ __mindspore_signature__ = ( sig.make_sig('size'), sig.make_sig('fill_value'), sig.make_sig('dtype', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, size, fill_value, dtype=None): return _convert_stub(pyboost_fill_scalar(self, [size, fill_value, dtype if dtype is None else dtype_to_type_id('FillScalar', 'dtype', dtype)])) fill_scalar_op=FillScalar() class FillTensor(Primitive): r""" Create a Tensor of the specified shape and fill it with the specified tensor value. Args: size (Union(tuple[int], list[int])): The specified shape of output tensor. fill_value (Tensor): Value to fill the returned tensor. Complex numbers are not supported for now. Must be scalar Tensor or 1-D Tensor with shape of [1]. Keyword Args: dtype (mindspore.dtype): The specified type of output tensor. `bool_` and `number` are supported, for details, please refer to :class:`mindspore.dtype` . Default: ``None`` . Returns: Tensor. Raises: TypeError: If `size` is not a tuple or list. ValueError: The element in `size` is less than 0. 
Supported Platforms: ``Ascend`` """ __mindspore_signature__ = ( sig.make_sig('size'), sig.make_sig('fill_value'), sig.make_sig('dtype', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, size, fill_value, dtype=None): return _convert_stub(pyboost_fill_tensor(self, [size, fill_value, dtype if dtype is None else dtype_to_type_id('FillTensor', 'dtype', dtype)])) fill_tensor_op=FillTensor() class FlashAttentionScoreGrad(Primitive): r""" Calculates the gradient of FlashAttentionScore operation. .. warning:: This is an experimental API that is subject to change or deletion. Supported Platforms: ``Ascend`` """ __mindspore_signature__ = ( sig.make_sig('query'), sig.make_sig('key'), sig.make_sig('value'), sig.make_sig('dy'), sig.make_sig('pse_shift', default=None), sig.make_sig('drop_mask', default=None), sig.make_sig('padding_mask', default=None), sig.make_sig('atten_mask', default=None), sig.make_sig('softmax_max', default=None), sig.make_sig('softmax_sum', default=None), sig.make_sig('softmax_in', default=None), sig.make_sig('attention_in', default=None), sig.make_sig('prefix', default=None), sig.make_sig('actual_seq_qlen', default=None), sig.make_sig('actual_seq_kvlen', default=None), ) @prim_arg_register def __init__(self, head_num, keep_prob=1.0, scale_value=1.0, pre_tokens=65536, next_tokens=65536, inner_precise=1, input_layout='BSH', sparse_mode=0): self._set_prim_arg("head_num", head_num) self._set_prim_arg("keep_prob", keep_prob) self._set_prim_arg("scale_value", scale_value) self._set_prim_arg("pre_tokens", pre_tokens) self._set_prim_arg("next_tokens", next_tokens) self._set_prim_arg("inner_precise", inner_precise) self._set_prim_arg_with_handler("input_layout", input_layout, str_to_enum) self._set_prim_arg("sparse_mode", sparse_mode) def __call__(self, query, key, value, dy, pse_shift=None, drop_mask=None, padding_mask=None, atten_mask=None, softmax_max=None, softmax_sum=None, softmax_in=None, attention_in=None, prefix=None, actual_seq_qlen=None, actual_seq_kvlen=None): return _convert_stub(pyboost_flash_attention_score_grad(self, [query, key, value, dy, pse_shift, drop_mask, padding_mask, atten_mask, softmax_max, softmax_sum, softmax_in, attention_in, prefix, actual_seq_qlen, actual_seq_kvlen, self.head_num, self.keep_prob, self.scale_value, self.pre_tokens, self.next_tokens, self.inner_precise, self.input_layout, self.sparse_mode])) class FlashAttentionScore(Primitive): r""" FlashAttentionScore. .. math:: \begin{array}{ll} \\ y = Dropout(Softmax(Mask(scale_value \mul (real_shift + query * key), attn_mask), -1), keep_prob) \\ \mul value \\ \end{array} B -- Batch size. Value range 1 to 2k. S1 -- Sequence length of query. Value range 1 to 512k. S2 -- Sequence length of key and value. Value range 1 to 512k. N1 -- Num heads of query. Value range 1 to 256. N2 -- Num heads of key and value, and N2 must be a factor of N1. D -- Head size. The value ranges is a multiple of 16, with the max value of 512. H1 -- Hidden size of query, which equals to N1 * D. H2 -- Hidden size of key and value, which equals to N2 * D. .. warning:: This is an experimental API that is subject to change or deletion. Only support on Atlas A2 training series. Args: head_num (int): The head num of query, equal to N1. keep_prob (float): The keep probability of dropout. Value range is (0.0, 1.0]. Default: 1.0. When keep_prob is 1.0, drop_mask should be none. scale_value (float): The scale factor of score. Generally, the value is 1.0 / (D ** 0.5). Default: 1.0. 
pre_tokens (int): Parameter for sparse computation, represents how many tokens are counted forward. When sparse_mode is set to 1, 2, 3, or 5, this parameter does not take effect. Default: 2147483647. next_tokens (int): Parameter for sparse computation, represents how many tokens are counted backward. When sparse_mode is set to 1, 2, 3, or 5, this parameter does not take effect. Default: 2147483647. The value of pre_tokens corresponds to S1, and the value of next_tokens corresponds to S2. They define the valid area on the attn_mask matrix. It must ensure that the band is not empty. The following values are not allowed: - pre_tokens < 0 and next_tokens < 0. - (pre_tokens < 0 and next_tokens >= 0) and (next_tokens < abs(pre_tokens) or abs(pre_tokens) >= S2). - (pre_tokens >= 0 and next_tokens < 0) and (abs(next_tokens) > pre_tokens or abs(next_tokens) >= S1). inner_precise (int): The parameter is reserved and not implemented yet. Default: 0. input_layout (str): Specifies the layout of input `query`, key and value. The value can be "BSH", "BNSD", "SBH", "BSND" or "TND". "TND" is an experimental format. Default: "BSH". When input_layout is "TND", the following restrictions must be met. There are two lists that represent the length of the input sequence: list_seq_q and list_seq_k. Each value in the list indicates the length of the sequence in the batch. For example, list_seq_q = [4, 2, 6], list_seq_k = [10, 3, 9]. The element of list indicate S. T1 is sum(list_seq_q) = 12, T2 is sum(list_seq_k) = 22. max_seqlen_q = max(list_seq_q), max_seqlen_k = max(list_seq_k). qk_pointer = sum(list_seq_q * list_seq_k), which is the sum of the element multiplication. - The lengths of two lists are the same, and size of list is batch. batch is less than or equal to 1024. - When input_layout is "TND", actual_seq_qlen and actual_seq_kvlen must be not none. Otherwise, they are none. - The actual_seq_qlen and actual_seq_kvlen are the cumulative sum of sequence of key/value, so they must be non-decreasing. - If real_shift is not none, list_seq_q and list_seq_k must be same. The maximum value of list_seq_q and list_seq_k is greater than 1024. Real_shift should be `(B, N1, 1024, S2)` and `(1, N1, 1024, S2)`, and S2 is equal to max_seqlen_k. - Attn mask must be a lower trianglar matrix, so sparse_mode should be 2 or 3. The shape of attn_mask should be `(2048, 2048)`. - The shape of drop_mask is (qk_pointer * N1 // 8,). - Prefix is none. - Next_tokens is 0, and pre_tokens is not less than max_seqlen_q. - When sparse_mode is 3, S1 of each batch should be less than or equal to S2. - 0 should not exist in list_seq_k. sparse_mode (int): Indicates sparse mode. Default 0. - 0: Indicates the defaultMask mode. If attn_mask is not passed, the mask operation is not performed, and preTokens and nextTokens(internally assigned as INT_MAX) are ignored. If passed in, the full attn_mask matrix (S1 * S2) needs to be passed in, indicating that the part between preTokens and nextTokens needs to be calculated. - 1: Represents allMask, that is, passing in the complete attn_mask matrix. - 2: Representing the leftUpCausal mode corresponds to the lower triangle scenario divided by the left vertex, and the optimized attn_mask matrix (2048*2048) is required. - 3: Representing the rightDownCausal model corresponds to the lower triangle scene divided by the lower right vertex, and the optimized attn_mask matrix (2048*2048) is required. 
- 4: Represents the band scenario, that is, the part between counting preTokens and nextTokens, and the optimized attn_mask matrix (2048*2048) is required.. - 5: Represents the prefix scenario, that is, on the basis of rightDownCasual, a matrix with length S1 and width N is added to the left side. The value of N is obtained by the new input prefix, and the N value of each Batch axis is different, not implemented yet. - 6: Represents the global scenario, not implemented yet. - 7: Represents the dilated scenario, not implemented yet. - 8: Represents the block_local scenario, not implemented yet. Inputs: - **query** (Tensor[float16, bfloat16]) - The query tensor. Input tensor of shape :math:`(B, S1, H1)`, `(B, N1, S1, D)`, `(S1, B, H1)`, `(B, S1, N1, D)` or `(T1, N1, D)`. - **key** (Tensor[float16, bfloat16]) - The key tensor. Input tensor of shape :math:`(B, S2, H2)`, `(B, N2, S2, D)`, `(S2, B, H2)`, `(B, S2, N2, D)` or `(T2, N2, D)`. - **value** (Tensor[float16, bfloat16]) - The value tensor. Input tensor of shape :math:`(B, S2, H2)`, `(B, N2, S2, D)`, `(S2, B, H2)`, `(B, S2, N2, D)` or `(T2, N2, D)`. The key and value have the same shape. - **real_shift** (Union[Tensor[float16, bfloat16], None]) - Also known as pse. The position embedding code. If S is greater than 1024 and the mask of the lower triangle is used, enter only the inverse 1024 lines of the lower triangle for memory optimization. Input tensor of shape :math:`(B, N1, S1, S2)`, `(1, N1, S1, S2)`, `(B, N1, 1024, S2)`, `(1, N1, 1024, S2)`. - ALiBi scenario: real_shift must meet the ALiBi rule, and sparse_mode is 2 or 3 for the lower triangle. In this scenario, real_shift is `(B, N1, 1024, S2)`, `(1, N1, 1024, S2)`. - Non-ALiBi scenario: real_shift is `(B, N1, S1, S2)`, `(1, N1, S1, S2)`. The shape of `real_shift` should be `(B, N1, 1024, S2)` and `(1, N1, 1024, S2)` when input_layout is `TND`. - **drop_mask** (Union[Tensor[uint8], None]) - The dropout mask tensor. Input tensor of shape :math:`(B, N1, S1, S2 // 8) or None`. S2 is a multiple of 8 when not None. - **padding_mask** (None) - Reserved parameter. Not implemented yet. - **attn_mask** (Union[Tensor[uint8], Tensor[bool], None]) - The attention mask tensor. For each element, 0 indicates retention and 1 indicates discard. Input tensor of shape :math:`(B, N1, S1, S2)`, `(B, 1, S1, S2)`, `(S1, S2)` or (2048, 2048). In compression scenario, sparse_mode is 2, 3, or 4, attn_mask must be `(2048, 2048)`. When sparse_mode is 5, attn_mask must be `(B, N1, S1, S2)`, `(B, 1, S1, S2)`. When sparse_mode is 0 and 1, attn_mask should be `(B, N1, S1, S2)`, `(B, 1, S1, S2)`, `(S1, S2)`. - **prefix** (Union[List[int64], Tuple[int64], None]) - N value of each Batch in the prefix sparse calculation scenario. Input tensor of shape :math:`(B,)`. B max value 32. Not none only when sparse_mode is 5. If S1 > S2, N ranges from 0 to S2. If S1 <= S2, N ranges from S2 - S1 to S2. - **actual_seq_qlen** (Union[List[int64], Tuple[int64], None]) - Size of query corresponding to each batch, array with increasing values and the last value equal to T1. - **actual_seq_kvlen** (Union[List[int64], Tuple[int64], None]) - Size of key and value corresponding to each batch, array with increasing values and the last value equal to T2. Outputs: - **softmax_max** (Tensor[float32]) - (B, N1, S1, 8) when input_layout is not `TND` else (T1, N1, D) - **softmax_sum** (Tensor[float32]) - (B, N1, S1, 8) when input_layout is not `TND` else (T1, N1, D) - **softmax_out** (Tensor[float16, bfloat16]) - Useless output, ignore it. 
Output tensor of shape : `()` - **attention_out** (Tensor[float16, bfloat16]) - The output of attention, its shape, and data type are the same as the query. Supported Platforms: ``Ascend`` """ __mindspore_signature__ = ( sig.make_sig('query'), sig.make_sig('key'), sig.make_sig('value'), sig.make_sig('real_shift', default=None), sig.make_sig('drop_mask', default=None), sig.make_sig('padding_mask', default=None), sig.make_sig('attn_mask', default=None), sig.make_sig('prefix', default=None), sig.make_sig('actual_seq_qlen', default=None), sig.make_sig('actual_seq_kvlen', default=None), ) @prim_arg_register def __init__(self, head_num, keep_prob=1.0, scale_value=1.0, pre_tokens=2147483647, next_tokens=2147483647, inner_precise=0, input_layout='BSH', sparse_mode=0): self._set_prim_arg("head_num", head_num) self._set_prim_arg("keep_prob", keep_prob) self._set_prim_arg("scale_value", scale_value) self._set_prim_arg("pre_tokens", pre_tokens) self._set_prim_arg("next_tokens", next_tokens) self._set_prim_arg("inner_precise", inner_precise) self._set_prim_arg_with_handler("input_layout", input_layout, str_to_enum) self._set_prim_arg("sparse_mode", sparse_mode) def __call__(self, query, key, value, real_shift=None, drop_mask=None, padding_mask=None, attn_mask=None, prefix=None, actual_seq_qlen=None, actual_seq_kvlen=None): return _convert_stub(pyboost_flash_attention_score(self, [query, key, value, real_shift, drop_mask, padding_mask, attn_mask, prefix, actual_seq_qlen, actual_seq_kvlen, self.head_num, self.keep_prob, self.scale_value, self.pre_tokens, self.next_tokens, self.inner_precise, self.input_layout, self.sparse_mode])) class FlattenExt(Primitive): r""" .. code-block:: prim = ops.FlattenExt() out = prim(input, start_dim, end_dim) is equivalent to .. code-block:: ops.flatten_ext(input, start_dim, end_dim) Refer to :func:`mindspore.ops.flatten_ext` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('start_dim', default=0), sig.make_sig('end_dim', default=-1), ) @prim_arg_register def __init__(self): pass def __call__(self, input, start_dim=0, end_dim=-1): return _convert_stub(pyboost_flatten_ext(self, [input, start_dim, end_dim])) flatten_ext_op=FlattenExt()
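The FlashAttentionScore primitive above exposes many layout and sparsity knobs, but the minimal wiring is just query/key/value in the chosen layout. The following is a small construction sketch, not taken from the source: it assumes an Ascend (Atlas A2) device, the public export ``ops.FlashAttentionScore``, and hypothetical sizes chosen only to make the BSH shapes concrete.

>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> B, S1, S2, N1, D = 1, 256, 256, 4, 128      # hypothetical sizes; H1 = N1 * D
>>> query = Tensor(np.random.randn(B, S1, N1 * D), mindspore.float16)
>>> key = Tensor(np.random.randn(B, S2, N1 * D), mindspore.float16)
>>> value = Tensor(np.random.randn(B, S2, N1 * D), mindspore.float16)
>>> fas = ops.FlashAttentionScore(head_num=N1, scale_value=1.0 / D ** 0.5, input_layout='BSH')
>>> # Optional inputs (real_shift, drop_mask, attn_mask, ...) default to None.
>>> softmax_max, softmax_sum, softmax_out, attention_out = fas(query, key, value)
>>> # attention_out is documented to share the query's shape and dtype: (B, S1, N1 * D)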
[docs]class Flatten(Primitive): r""" Flattens a tensor without changing its batch size on the 0-th axis. Refer to :func:`mindspore.ops.flatten` for more details. Inputs: - **input_x** (Tensor) - Tensor of shape :math:`(N, \ldots)` to be flattened, where :math:`N` is batch size. Outputs: Tensor, the shape of the output tensor is :math:`(N, X)`, where :math:`X` is the product of the remaining dimension. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> input_x = Tensor(np.ones(shape=[1, 2, 3, 4]), mindspore.float32) >>> flatten = ops.Flatten() >>> output = flatten(input_x) >>> print(output.shape) (1, 24) """ @prim_arg_register def __init__(self): pass def __call__(self, input_x): return super().__call__(input_x)
flatten_op=Flatten()
[docs]class FloorDiv(Primitive): r""" .. code-block:: prim = ops.FloorDiv() out = prim(input, other) is equivalent to .. code-block:: ops.floor_divide(input, other) Refer to :func:`mindspore.ops.floor_divide` for more details. """ __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T) @prim_arg_register def __init__(self): pass def __call__(self, input, other): return super().__call__(input, other)
floor_div_op=FloorDiv()
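A short sketch of FloorDiv semantics: floor division rounds toward negative infinity, so it differs from truncation when the signs of the operands differ. It assumes the public export ``ops.FloorDiv`` shown in the code-block above.

>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> floor_div = ops.FloorDiv()
>>> x = Tensor(np.array([7, -7, 9]), mindspore.int32)
>>> y = Tensor(np.array([2, 2, -4]), mindspore.int32)
>>> print(floor_div(x, y))   # -7 / 2 = -3.5 rounds down to -4
[ 3 -4 -3]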
[docs]class FloorMod(Primitive): r""" .. code-block:: prim = ops.FloorMod() out = prim(x, y) is equivalent to .. code-block:: ops.floor_mod(x, y) Refer to :func:`mindspore.ops.floor_mod` for more details. """ __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T) @prim_arg_register def __init__(self): pass def __call__(self, x, y): return super().__call__(x, y)
floor_mod_op=FloorMod()
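FloorMod pairs with FloorDiv: the remainder takes the sign of the divisor, matching Python's ``%``. A minimal sketch, again assuming the export ``ops.FloorMod``:

>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> floor_mod = ops.FloorMod()
>>> x = Tensor(np.array([7, -7, 9]), mindspore.int32)
>>> y = Tensor(np.array([2, 2, -4]), mindspore.int32)
>>> print(floor_mod(x, y))
[ 1  1 -3]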
[docs]class Floor(Primitive): r""" .. code-block:: prim = ops.Floor() out = prim(input) is equivalent to .. code-block:: ops.floor(input) Refer to :func:`mindspore.ops.floor` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_floor(self, [input]))
floor_op=Floor() class GatherDGradV2(Primitive): r""" Computes the gradient of the GatherD operation. Note that the older operator "GatherDGrad" is deprecated. """ @prim_arg_register def __init__(self): pass def __call__(self, x, dim, index, dout): return _convert_stub(pyboost_gather_d_grad_v2(self, [x, dim, index, dout])) gather_d_grad_v2_op=GatherDGradV2()
[docs]class GatherD(Primitive): r""" .. code-block:: prim = ops.GatherD() out = prim(x, dim, index) is equivalent to .. code-block:: ops.gather_d(x, dim, index) Refer to :func:`mindspore.ops.gather_d` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, x, dim, index): return _convert_stub(pyboost_gather_d(self, [x, dim, index]))
gather_d_op=GatherD()
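GatherD gathers along a single dimension: with ``dim=1``, ``out[i][j] = x[i][index[i][j]]``, and `index` must have the same rank as `x` with an int32/int64 dtype. A minimal sketch assuming the export ``ops.GatherD``:

>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.int32)
>>> index = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int32)
>>> print(ops.GatherD()(x, 1, index))
[[1 1]
 [4 3]]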
[docs]class GatherNd(Primitive): r""" .. code-block:: prim = ops.GatherNd() out = prim(input_x, indices) is equivalent to .. code-block:: ops.gather_nd(input_x, indices) Refer to :func:`mindspore.ops.gather_nd` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input_x, indices): return super().__call__(input_x, indices)
gather_nd_op=GatherNd()
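GatherNd indexes by coordinate tuples: each row of `indices` selects one element (or slice) of `input_x`. A minimal sketch assuming the export ``ops.GatherNd``:

>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.int32)
>>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
>>> print(ops.GatherNd()(input_x, indices))
[1 4]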
[docs]class Gather(Primitive): r""" .. code-block:: prim = ops.Gather(batch_dims) out = prim(input_params, input_indices, axis) is equivalent to .. code-block:: ops.gather(input_params, input_indices, axis, batch_dims) Refer to :func:`mindspore.ops.gather` for more details. """ @prim_arg_register def __init__(self, batch_dims=0): self._set_prim_arg("batch_dims", batch_dims) def __call__(self, input_params, input_indices, axis): return super().__call__(input_params, input_indices, axis, self.batch_dims)
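Gather selects whole slices of `input_params` along `axis` according to `input_indices`; `batch_dims` (fixed at construction and forwarded on each call above) leaves leading batch dimensions untouched. A minimal sketch with the default ``batch_dims=0``, assuming the export ``ops.Gather``:

>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> params = Tensor(np.array([1, 2, 3, 4, 5, 6, 7]), mindspore.int32)
>>> indices = Tensor(np.array([0, 2, 4, 2, 6]), mindspore.int32)
>>> print(ops.Gather()(params, indices, 0))
[1 3 5 3 7]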
[docs]class Gcd(Primitive): r""" .. code-block:: prim = ops.Gcd() out = prim(input, other) is equivalent to .. code-block:: ops.gcd(input, other) Refer to :func:`mindspore.ops.gcd` for more details. """ __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T) @prim_arg_register def __init__(self): pass def __call__(self, input, other): return _convert_stub(pyboost_gcd(self, [input, other]))
gcd_op=Gcd() class GeLUGrad(Primitive): r""" Gradients of GeLU operation. """ @prim_arg_register def __init__(self): pass def __call__(self, dy, x, y): return _convert_stub(pyboost_gelu_grad(self, [dy, x, y])) gelu_grad_op=GeLUGrad()
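Gcd computes the element-wise greatest common divisor of two integer tensors, with the usual broadcasting rules. A minimal sketch assuming the export ``ops.Gcd``:

>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([12, 18, 21]), mindspore.int32)
>>> y = Tensor(np.array([8, 27, 14]), mindspore.int32)
>>> print(ops.Gcd()(x, y))
[4 9 7]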
[docs]class GeLU(Primitive): r""" Gaussian Error Linear Units activation function. GeLU is described in the paper `Gaussian Error Linear Units (GELUs) <https://arxiv.org/abs/1606.08415>`_. And also please refer to `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding <https://arxiv.org/abs/1810.04805>`_. GeLU is defined as follows: .. math:: GELU(x_i) = x_i*P(X < x_i) where :math:`P` is the cumulative distribution function of the standard Gaussian distribution, :math:`x_i` is the input element. Note: When calculating the input gradient of GELU with an input value of infinity, there are differences in the output of the backward between 'Ascend' and 'GPU'. when x is -inf, the computation result of 'Ascend' is 0, and the computation result of 'GPU' is Nan. when x is inf, the computation result of 'Ascend' is dy, and the computation result of 'GPU' is Nan. In mathematical terms, Ascend's result has higher precision. Inputs: - **x** (Tensor) - The input of the activation function GeLU, the data type is float16, float32 or float64. Outputs: Tensor, with the same type and shape as `x`. Raises: TypeError: If `x` is not a Tensor. TypeError: If dtype of `x` is not float16, float32 or float64. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32) >>> result = ops.GeLU()(x) >>> print(result) [0.841192 1.9545976 2.9963627] """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_gelu(self, [input]))
gelu_op=GeLU() class GenerateEodMaskV2(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('ele_pos'), sig.make_sig('cur_step'), sig.make_sig('seed'), sig.make_sig('offset'), sig.make_sig('start', default=0), sig.make_sig('steps', default=1), sig.make_sig('error_mode', default='cycle'), sig.make_sig('flip_mode', default='bitflip'), sig.make_sig('multiply_factor', default=0.0), sig.make_sig('bit_pos', default=0), sig.make_sig('flip_probability', default=0.0), ) @prim_arg_register def __init__(self): pass def __call__(self, input, ele_pos, cur_step, seed, offset, start=0, steps=1, error_mode='cycle', flip_mode='bitflip', multiply_factor=0.0, bit_pos=0, flip_probability=0.0): return super().__call__(input, ele_pos, cur_step, seed, offset, start, steps, str_to_enum('GenerateEodMaskV2', 'error_mode', error_mode), str_to_enum('GenerateEodMaskV2', 'flip_mode', flip_mode), multiply_factor, bit_pos, flip_probability) generate_eod_mask_v2_op=GenerateEodMaskV2() class Generator(Primitive): r""" """ @prim_arg_register def __init__(self): self.add_prim_attr("side_effect_mem", True) def __call__(self, cmd, inputs): return super().__call__(cmd, inputs) generator_op=Generator()
[docs]class Geqrf(Primitive): r""" .. code-block:: prim = ops.Geqrf() out = prim(input) is equivalent to .. code-block:: ops.geqrf(input) Refer to :func:`mindspore.ops.geqrf` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return super().__call__(input)
geqrf_op=Geqrf()
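Geqrf is the LAPACK-style first half of a QR factorization: it returns a matrix `y` whose upper triangle holds R and whose strictly lower part stores the Householder reflectors, plus the scalar factors `tau` of length min(m, n). A shape-only sketch assuming the export ``ops.Geqrf``:

>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> a = Tensor(np.random.randn(3, 2), mindspore.float32)
>>> y, tau = ops.Geqrf()(a)
>>> print(y.shape, tau.shape)
(3, 2) (2,)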
[docs]class GreaterEqual(Primitive): r""" .. code-block:: prim = ops.GreaterEqual() out = prim(input, other) is equivalent to .. code-block:: ops.greater_equal(input, other) Refer to :func:`mindspore.ops.greater_equal` for more details. """ __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T) @prim_arg_register def __init__(self): pass def __call__(self, input, other): return _convert_stub(pyboost_greater_equal(self, [input, other]))
greater_equal_op=GreaterEqual()
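GreaterEqual (and Greater below) are element-wise comparisons that broadcast their inputs and return a bool tensor. A minimal sketch assuming the export ``ops.GreaterEqual``:

>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> y = Tensor(np.array([1, 1, 4]), mindspore.int32)
>>> print(ops.GreaterEqual()(x, y))
[ True  True False]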
[docs]class Greater(Primitive): r""" .. code-block:: prim = ops.Greater() out = prim(input, other) is equivalent to .. code-block:: ops.greater(input, other) Refer to :func:`mindspore.ops.greater` for more details. """ __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T) @prim_arg_register def __init__(self): pass def __call__(self, input, other): return _convert_stub(pyboost_greater(self, [input, other]))
greater_op=Greater() class GridSampler2DGrad(Primitive): r""" Computes gradients for GridSampler2D operation. Args: - **grad** (Tensor) - A 4-D tensor whose dtype is float16 or float32 and whose shape is :math:`(N, C, H_{out}, W_{out})`. The shape is inconsistent with the shape of the output result of forward calculation. - **input_x** (Tensor) - A 4-D tensor whose dtype is the same as `grad` and whose shape is :math:`(N, C, H_{in}, W_{in})`. - **grid** (Tensor) - A 4-D tensor whose dtype is the same as `grad` and whose shape is :math:`(N, H_{out}, W_{out}, 2)`. interpolation_mode (str): An optional string specifying the interpolation method. The optional values are "bilinear" or "nearest". Default: "bilinear". padding_mode (str): An optional string specifying the pad method. The optional values are "zeros", "border" or "reflection". Default: "zeros". align_corners (bool): An optional bool. If "true", the centers of the corner pixels of the input and output tensors are aligned. Defaults to "false". Returns: - **dx** (Tensor) - A 4-D tensor whose dtype and shape are the same as `input_x`. - **dgrid** (Tensor) - A 4-D tensor whose dtype and shape are the same as `grid`. Raises: TypeError: If `grad`, `input_x` or `grid` is not a Tensor. TypeError: If the dtypes of `grad`, `input_x` and `grid` are inconsistent. TypeError: If the dtype of `grad`, `input_x` or `grid` is not a valid type. TypeError: If `align_corners` is not a boolean value. ValueError: If the rank of `grad`, `input_x` or `grid` is not equal to 4. ValueError: If the first dimension of `grad`, `input_x` and `grid` are inconsistent. ValueError: If the last dimension of `grid` is not equal to 2. ValueError: If `interpolation_mode` is not "bilinear", "nearest" or a string value. ValueError: If `padding_mode` is not "zeros", "border", "reflection" or a string value. ValueError: If the shape of `grad` is inconsistent with the shape of the output result of forward calculation. Supported Platforms: ``GPU`` ``CPU`` """ @prim_arg_register def __init__(self, interpolation_mode='bilinear', padding_mode='zeros', align_corners=False): self._set_prim_arg_with_handler("interpolation_mode", interpolation_mode, str_to_enum) self._set_prim_arg_with_handler("padding_mode", padding_mode, str_to_enum) self._set_prim_arg("align_corners", align_corners) def __call__(self, grad, input_x, grid): return _convert_stub(pyboost_grid_sampler_2d_grad(self, [grad, input_x, grid, self.interpolation_mode, self.padding_mode, self.align_corners]))
[docs]class GridSampler2D(Primitive): r""" This operation samples 2d `input_x` by using interpolation based on flow field grid, which is usually gennerated by :func:`mindspore.ops.affine_grid`. .. warning:: This is an experimental API that is subject to change or deletion. Refer to :func:`mindspore.ops.grid_sample` for more details. Args: interpolation_mode (str, optional): An optional string specifying the interpolation method. The optional values are ``"bilinear"`` or ``"nearest"`` . Default: ``"bilinear"`` . - ``"nearest"``: Nearest neighbor interpolation. Each output pixel is assigned the value of the nearest input pixel. This method is simple and fast but can result in blocky or pixelated outputs. - ``"bilinear"``: Bilinear interpolation. Each output pixel is a weighted average of the four nearest input pixels, computed using bilinear interpolation. This method produces smoother results compared to nearest neighbor interpolation. padding_mode (str, optional): An optional string specifying the pad method. The optional values are ``"zeros"`` , ``"border"`` or ``"reflection"`` . Default: ``"zeros"`` . When the sampling grid is outside input's bounds, effects of various padding modes are as follows: - ``"zeros"``: Pads the input tensor with zeros. - ``"border"``: Pads the input tensor with the values of the pixels on the border of the tensor. - ``"reflection"``: Pads the input tensor by reflecting the values of the pixels at the boundary of the tensor. align_corners (bool, optional): An optional bool. When set to ``True`` , the centers of the corner pixels of the input and output tensors are aligned. When set to ``False`` , it is not aligned. Default: ``False`` . Inputs: - **input_x** (Tensor) - A 4-D tensor with shape :math:`(N, C, H_{in}, W_{in})`. Supported dtypes: - Ascend: float16, float32. - GPU/CPU: float16, float32, float64. - **grid** (Tensor) - A 4-D tensor whose dtype is the same as `input_x` and whose shape is :math:`(N, H_{out}, W_{out}, 2)`. Used to specify the sampling pixel locations normalized by the input spatial dimensions. Outputs: A 4-D Tensor whose dtype is the same as `input_x` and whose shape is :math:`(N, C, H_{out}, W_{out})`. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import numpy as np >>> from mindspore import Tensor, ops >>> gridsampler = ops.GridSampler2D(interpolation_mode='bilinear', padding_mode='zeros', align_corners=True) >>> input_x = Tensor(np.arange(16).reshape((2, 2, 2, 2)).astype(np.float32)) >>> grid = Tensor(np.arange(-9, 9, 0.5).reshape((2, 3, 3, 2)).astype(np.float32)) >>> output = gridsampler(input_x, grid) >>> print(output) [[[[ 0. 0. 0. ] [ 0. 0. 0. ] [ 0. 0. 0.5 ]] [[ 0. 0. 0. ] [ 0. 0. 0. ] [ 0. 1.5 4.5 ]]] [[[10. 8.25 1.375] [ 0. 0. 0. ] [ 0. 0. 0. ]] [[14. 11.25 1.875] [ 0. 0. 0. ] [ 0. 0. 0. ]]]] """ @prim_arg_register def __init__(self, interpolation_mode='bilinear', padding_mode='zeros', align_corners=False): self._set_prim_arg_with_handler("interpolation_mode", interpolation_mode, str_to_enum) self._set_prim_arg_with_handler("padding_mode", padding_mode, str_to_enum) self._set_prim_arg("align_corners", align_corners) def __call__(self, input_x, grid): return _convert_stub(pyboost_grid_sampler_2d(self, [input_x, grid, self.interpolation_mode, self.padding_mode, self.align_corners]))
class GridSampler3DGrad(Primitive): r""" Computes gradients for GridSampler3D operation. Args: - **grad** (Tensor) - A 5-D tensor whose dtype is float32 or float64 and whose shape is :math:`(N, C, D_{out}, H_{out}, W_{out})`. The shape is inconsistent with the shape of the output result of forward calculation. - **input_x** (Tensor) - A 5-D tensor whose dtype is the same as `grad` and whose shape is :math:`(N, C, D_{in}, H_{in}, W_{in})`. - **grid** (Tensor) - A 5-D tensor whose dtype is the same as `grad` and whose shape is :math:`(N, D_{out}, H_{out}, W_{out}, 3)`. interpolation_mode (str): An optional string specifying the interpolation method. The optional values are "bilinear" or "nearest". Default: "bilinear". padding_mode (str): An optional string specifying the pad method. The optional values are "zeros", "border" or "reflection". Default: "zeros". align_corners (bool): An optional bool. If "true", the centers of the corner pixels of the input and output tensors are aligned. Defaults to "false". Returns: - **dx** (Tensor) - A 5-D tensor whose dtype and shape are the same as `input_x`. - **dgrid** (Tensor) - A 5-D tensor whose dtype and shape are the same as `grid`. Raises: TypeError: If `grad`, `input_x` or `grid` is not a Tensor. TypeError: If the dtypes of `grad`, `input_x` and `grid` are inconsistent. TypeError: If the dtype of `grad`, `input_x` or `grid` is not a valid type. TypeError: If `align_corners` is not a boolean value. ValueError: If the rank of `grad`, `input_x` or `grid` is not equal to 5. ValueError: If the first dimension of `grad`, `input_x` and `grid` are inconsistent. ValueError: If the last dimension of `grid` is not equal to 3. ValueError: If `interpolation_mode` is not "bilinear", "nearest" or a string value. ValueError: If `padding_mode` is not "zeros", "border", "reflection" or a string value. ValueError: If the shape of `grad` is inconsistent with the shape of the output result of forward calculation. Supported Platforms: ``GPU`` ``CPU`` """ @prim_arg_register def __init__(self, interpolation_mode='bilinear', padding_mode='zeros', align_corners=False): self._set_prim_arg_with_handler("interpolation_mode", interpolation_mode, str_to_enum) self._set_prim_arg_with_handler("padding_mode", padding_mode, str_to_enum) self._set_prim_arg("align_corners", align_corners) def __call__(self, grad, input_x, grid): return _convert_stub(pyboost_grid_sampler_3d_grad(self, [grad, input_x, grid, self.interpolation_mode, self.padding_mode, self.align_corners]))
[docs]class GridSampler3D(Primitive): r""" Given an input and a grid, the output is calculated using the input values and pixel positions in the grid. Only volume (5-D) input is supported. .. warning:: This is an experimental API that is subject to change or deletion. Refer to :func:`mindspore.ops.grid_sample` for more details. Args: interpolation_mode (str, optional): An optional string specifying the interpolation method. The optional values are ``"bilinear"`` or ``"nearest"`` . Default: ``"bilinear"`` . - ``"nearest"``: Nearest neighbor interpolation. Each output pixel is assigned the value of the nearest input pixel. This method is simple and fast but can result in blocky or pixelated outputs. - ``"bilinear"``: Bilinear interpolation. Each output pixel is a weighted average of the four nearest input pixels, computed using bilinear interpolation. This method produces smoother results compared to nearest neighbor interpolation. padding_mode (str, optional): An optional string specifying the pad method. The optional values are ``"zeros"`` , ``"border"`` or ``"reflection"`` . Default: ``"zeros"`` . When the sampling grid is outside input's bounds, effects of various padding modes are as follows: - ``"zeros"``: Pads the input tensor with zeros. - ``"border"``: Pads the input tensor with the values of the pixels on the border of the tensor. - ``"reflection"``: Pads the input tensor by reflecting the values of the pixels at the boundary of the tensor. align_corners (bool, optional): An optional bool specifying alignment method. If set to ``True`` , the extrema (-1 and 1) are considered as referring to the center points of the input's corner pixels. If set to ``False`` , they are instead considered as referring to the corner points of the input's corner pixels, making the sampling more resolution agnostic. Default: ``False`` . Inputs: - **input_x** (Tensor) - A 5-D tensor with dtype of float16, float32 or float64 and shape of :math:`(N, C, D_{in}, H_{in}, W_{in})`. - **grid** (Tensor) - A 5-D tensor whose dtype is the same as `input_x` and whose shape is :math:`(N, D_{out}, H_{out}, W_{out}, 3)`. Outputs: A 5-D Tensor whose dtype is the same as `input_x` and whose shape is :math:`(N, C, D_{out}, H_{out}, W_{out})`. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import numpy as np >>> from mindspore import Tensor, ops >>> gridsampler = ops.GridSampler3D(interpolation_mode='bilinear', padding_mode='zeros', align_corners=True) >>> input_x = Tensor(np.arange(32).reshape((2, 2, 2, 2, 2)).astype(np.float32)) >>> grid = Tensor(np.arange(-0.2, 1, 0.1).reshape((2, 2, 1, 1, 3)).astype(np.float32)) >>> output = gridsampler(input_x, grid) >>> print(output) [[[[[ 3.3 ]] [[ 4.35 ]]] [[[11.300001]] [[12.349999]]]] [[[[21.4 ]] [[22.449999]]] [[[29.4 ]] [[30.449999]]]]] """ @prim_arg_register def __init__(self, interpolation_mode='bilinear', padding_mode='zeros', align_corners=False): self._set_prim_arg_with_handler("interpolation_mode", interpolation_mode, str_to_enum) self._set_prim_arg_with_handler("padding_mode", padding_mode, str_to_enum) self._set_prim_arg("align_corners", align_corners) def __call__(self, input_x, grid): return _convert_stub(pyboost_grid_sampler_3d(self, [input_x, grid, self.interpolation_mode, self.padding_mode, self.align_corners]))
class GroupNormGrad(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('dy'), sig.make_sig('x'), sig.make_sig('mean'), sig.make_sig('rstd'), sig.make_sig('gamma_opt'), sig.make_sig('num_groups'), sig.make_sig('dx_is_require', default=True), sig.make_sig('dgamma_is_require', default=True), sig.make_sig('dbeta_is_require', default=True), ) @prim_arg_register def __init__(self): pass def __call__(self, dy, x, mean, rstd, gamma_opt, num_groups, dx_is_require=True, dgamma_is_require=True, dbeta_is_require=True): return _convert_stub(pyboost_group_norm_grad(self, [dy, x, mean, rstd, gamma_opt, num_groups, dx_is_require, dgamma_is_require, dbeta_is_require])) group_norm_grad_op=GroupNormGrad() class GroupNorm(Primitive): r""" Group Normalization over a mini-batch of inputs. Group Normalization is widely used in recurrent neural networks. It applies normalization on a mini-batch of inputs for each single training case as described in the paper `Group Normalization <https://arxiv.org/pdf/1803.08494.pdf>`_. Group Normalization divides the channels into groups and computes within each group the mean and variance for normalization, and it performs very stable over a wide range of batch size. :math:`\gamma` and :math:`\beta` are trainable scale and shift. It can be described using the following formula: .. math:: y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta where :math:`\gamma` is `weight`, :math:`\beta` is `bias`, :math:`\epsilon` is `eps`. Args: input (Tensor): The input feature with shape :math:`(N, C, *)` where :math:`*` means, any number of additional dimensions. num_groups (int): The number of groups to be divided along the channel dimension. weight (Tensor, optional): The shape :math:`(C,)`, Default: ``None``, has the same data type with `input`. bias (Tensor, optional): The shape :math:`(C,)`, Default: ``None``, has the same data type with `input`. eps (float, optional): A value added to the denominator for numerical stability. Default: ``1e-5`` . Returns: Tensor, the normalized and scaled offset tensor, has the same shape and data type as the `input`. Raises: TypeError: If `num_groups` is not an int. TypeError: If `eps` is not a float. ValueError: If `num_groups` is less than 1. ValueError: If `C` (the second parameter of dimensions of `input`) is not divided by `num_groups`. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore as ms >>> import numpy as np >>> from mindspore import ops >>> x = ms.Tensor(np.ones([1, 2, 4, 4], np.float32)) >>> group_norm_op = ops.GroupNorm() >>> output = group_norm_op(x, 2)[0] >>> print(output) [[[[0. 0. 0. 0.] [0. 0. 0. 0.] [0. 0. 0. 0.] [0. 0. 0. 0.]] [[0. 0. 0. 0.] [0. 0. 0. 0.] [0. 0. 0. 0.] [0. 0. 0. 0.]]]] """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('num_groups'), sig.make_sig('weight', default=None), sig.make_sig('bias', default=None), sig.make_sig('eps', default=1e-5), ) @prim_arg_register def __init__(self): pass def __call__(self, input, num_groups, weight=None, bias=None, eps=1e-5): return _convert_stub(pyboost_group_norm(self, [input, num_groups, weight, bias, eps])) group_norm_op=GroupNorm() class HFFT2(Primitive): r""" .. code-block:: prim = ops.HFFT2() out = prim(input, s, dim, norm) is equivalent to .. code-block:: ops.hfft2(input, s, dim, norm) Refer to :func:`mindspore.ops.hfft2` for more details. 
""" __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('s', default=None), sig.make_sig('dim', default=(-2, -1)), sig.make_sig('norm', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, s=None, dim=(-2, -1), norm=None): return super().__call__(input, s, dim, norm if norm is None else str_to_enum('HFFT2', 'norm', norm)) hfft2_op=HFFT2() class HFFT(Primitive): r""" .. code-block:: prim = ops.HFFT() out = prim(input, n, dim, norm) is equivalent to .. code-block:: ops.hfft(input, n, dim, norm) Refer to :func:`mindspore.ops.hfft` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('n', default=None), sig.make_sig('dim', default=-1), sig.make_sig('norm', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, n=None, dim=-1, norm=None): return super().__call__(input, n, dim, norm if norm is None else str_to_enum('HFFT', 'norm', norm)) hfft_op=HFFT() class HFFTN(Primitive): r""" .. code-block:: prim = ops.HFFTN() out = prim(input, s, dim, norm) is equivalent to .. code-block:: ops.hfftn(input, s, dim, norm) Refer to :func:`mindspore.ops.hfftn` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('s', default=None), sig.make_sig('dim', default=None), sig.make_sig('norm', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, s=None, dim=None, norm=None): return super().__call__(input, s, dim, norm if norm is None else str_to_enum('HFFTN', 'norm', norm)) hfftn_op=HFFTN() class HistcExt(Primitive): r""" .. code-block:: prim = ops.HistcExt() out = prim(input, bins, min, max) is equivalent to .. code-block:: ops.histc_ext(input, bins, min, max) Refer to :func:`mindspore.ops.histc_ext` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('bins', default=100), sig.make_sig('min', default=0), sig.make_sig('max', default=0), ) @prim_arg_register def __init__(self): pass def __call__(self, input, bins=100, min=0, max=0): return _convert_stub(pyboost_histc_ext(self, [input, bins, min, max])) histc_ext_op=HistcExt() class HShrinkGrad(Primitive): r""" Computes gradients for HShrinkGrad operation. Args: Gradients (Tensor) - the gradients of loss to output of HShrink function. Currently gradients data type only support float16 and float32. Features (Tensor) - Must be the input `input_x` of the forward operator HSHrink. Currently features data type only support float16 and float32. lambd (float): the lambda value for the Hardshrink formulation. Default: 0.5 Returns: backprops - Tensor, with the same shape and data type as `features`. Rasise: ValueError: If `lambd` is not a float. ValueError: If shape of `gradients` is not the same as `features`. TypeError: If dtype of `gradients` is not the same as `features`. TypeError: If dtype of `gradients` or `features` is neither float16 nor float32. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` """ @prim_arg_register def __init__(self, lambd=0.5): self._set_prim_arg("lambd", type_it('HShrinkGrad', 'lambd', lambd, (OpDtype.DT_INT, OpDtype.DT_BOOL), OpDtype.DT_FLOAT)) def __call__(self, gradients, features): return _convert_stub(pyboost_hshrink_grad(self, [gradients, features, self.lambd]))
[docs]class HShrink(Primitive): r""" .. code-block:: prim = ops.HShrink(lambd) out = prim(input) is equivalent to .. code-block:: ops.hardshrink(input, lambd) Refer to :func:`mindspore.ops.hardshrink` for more details. """ @prim_arg_register def __init__(self, lambd=0.5): self._set_prim_arg("lambd", type_it('HShrink', 'lambd', lambd, (OpDtype.DT_INT, OpDtype.DT_BOOL), OpDtype.DT_FLOAT)) def __call__(self, input): return _convert_stub(pyboost_hshrink(self, [input, self.lambd]))
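HShrink applies hard shrinkage: elements with ``|x| <= lambd`` become 0, everything else passes through unchanged. A minimal sketch assuming the export ``ops.HShrink``:

>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([0.3, -0.6, 2.0]), mindspore.float32)
>>> out = ops.HShrink(lambd=0.5)(x)
>>> # Expected: [0.0, -0.6, 2.0] -- only |0.3| <= 0.5 is zeroed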
class HSigmoidGrad(Primitive): r""" Computes the gradient of the HSigmoid operation. """ @prim_arg_register def __init__(self): pass def __call__(self, grads, input_x): return _convert_stub(pyboost_hsigmoid_grad(self, [grads, input_x])) hsigmoid_grad_op=HSigmoidGrad()
[docs]class HSigmoid(Primitive): r""" .. code-block:: prim = ops.HSigmoid() out = prim(input) is equivalent to .. code-block:: ops.hardsigmoid(input) Refer to :func:`mindspore.ops.hardsigmoid` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_hsigmoid(self, [input]))
hsigmoid_op=HSigmoid() class HSwishGrad(Primitive): r""" Computes the gradient of the HSwish operation. """ @prim_arg_register def __init__(self): pass def __call__(self, y_grad, x): return _convert_stub(pyboost_hswish_grad(self, [y_grad, x])) hswish_grad_op=HSwishGrad()
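HSigmoid above is the piecewise-linear (hard) approximation of the sigmoid, ``relu6(x + 3) / 6``, clamped to [0, 1]. A minimal sketch assuming the export ``ops.HSigmoid``:

>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([-4.0, 0.0, 3.0]), mindspore.float32)
>>> out = ops.HSigmoid()(x)
>>> # Expected: [0.0, 0.5, 1.0]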
[docs]class HSwish(Primitive): r""" .. code-block:: prim = ops.HSwish() out = prim(input) is equivalent to .. code-block:: ops.hardswish(input) Refer to :func:`mindspore.ops.hardswish` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_hswish(self, [input]))
hswish_op=HSwish() class IDCT(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('x'), sig.make_sig('type', default=2), sig.make_sig('n', default=None), sig.make_sig('axis', default=-1), sig.make_sig('norm', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, x, type=2, n=None, axis=-1, norm=None): return super().__call__(x, type, n, axis, norm if norm is None else str_to_enum('IDCT', 'norm', norm)) idct_op=IDCT() class IDCTN(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('x'), sig.make_sig('type', default=2), sig.make_sig('s', default=None), sig.make_sig('axes', default=None), sig.make_sig('norm', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, x, type=2, s=None, axes=None, norm=None): return super().__call__(x, type, s, axes, norm if norm is None else str_to_enum('IDCTN', 'norm', norm)) idctn_op=IDCTN()
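HSwish above multiplies the input by its hard sigmoid, ``x * relu6(x + 3) / 6``, a cheap drop-in for SiLU/Swish. A minimal sketch assuming the export ``ops.HSwish``:

>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([-4.0, 1.0, 3.0]), mindspore.float32)
>>> out = ops.HSwish()(x)
>>> # Expected (approximately): [0.0, 0.6667, 3.0]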
[docs]class Identity(Primitive): r""" .. code-block:: prim = ops.Identity() out = prim(input_x) is equivalent to .. code-block:: ops.deepcopy(input_x) Refer to :func:`mindspore.ops.deepcopy` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input_x): return super().__call__(input_x)
identity_op=Identity() class IFFT2(Primitive): r""" .. code-block:: prim = ops.IFFT2() out = prim(input, s, dim, norm) is equivalent to .. code-block:: ops.ifft2(input, s, dim, norm) Refer to :func:`mindspore.ops.ifft2` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('s', default=None), sig.make_sig('dim', default=(-2, -1)), sig.make_sig('norm', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, s=None, dim=(-2, -1), norm=None): return super().__call__(input, s, dim, norm if norm is None else str_to_enum('IFFT2', 'norm', norm)) ifft2_op=IFFT2() class IFFT(Primitive): r""" .. code-block:: prim = ops.IFFT() out = prim(input, n, dim, norm) is equivalent to .. code-block:: ops.ifft(input, n, dim, norm) Refer to :func:`mindspore.ops.ifft` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('n', default=None), sig.make_sig('dim', default=-1), sig.make_sig('norm', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, n=None, dim=-1, norm=None): return super().__call__(input, n, dim, norm if norm is None else str_to_enum('IFFT', 'norm', norm)) ifft_op=IFFT() class IFFTN(Primitive): r""" .. code-block:: prim = ops.IFFTN() out = prim(input, s, dim, norm) is equivalent to .. code-block:: ops.ifftn(input, s, dim, norm) Refer to :func:`mindspore.ops.ifftn` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('s', default=None), sig.make_sig('dim', default=None), sig.make_sig('norm', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, s=None, dim=None, norm=None): return super().__call__(input, s, dim, norm if norm is None else str_to_enum('IFFTN', 'norm', norm)) ifftn_op=IFFTN() class IFFTShift(Primitive): r""" .. code-block:: prim = ops.IFFTShift() out = prim(input, dim) is equivalent to .. code-block:: ops.ifftshift(input, dim) Refer to :func:`mindspore.ops.ifftshift` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('dim', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, dim=None): return super().__call__(input, dim) ifftshift_op=IFFTShift() class IHFFT2(Primitive): r""" .. code-block:: prim = ops.IHFFT2() out = prim(input, s, dim, norm) is equivalent to .. code-block:: ops.ihfft2(input, s, dim, norm) Refer to :func:`mindspore.ops.ihfft2` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('s', default=None), sig.make_sig('dim', default=(-2, -1)), sig.make_sig('norm', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, s=None, dim=(-2, -1), norm=None): return super().__call__(input, s, dim, norm if norm is None else str_to_enum('IHFFT2', 'norm', norm)) ihfft2_op=IHFFT2() class IHFFT(Primitive): r""" .. code-block:: prim = ops.IHFFT() out = prim(input, n, dim, norm) is equivalent to .. code-block:: ops.ihfft(input, n, dim, norm) Refer to :func:`mindspore.ops.ihfft` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('n', default=None), sig.make_sig('dim', default=-1), sig.make_sig('norm', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, n=None, dim=-1, norm=None): return super().__call__(input, n, dim, norm if norm is None else str_to_enum('IHFFT', 'norm', norm)) ihfft_op=IHFFT() class IHFFTN(Primitive): r""" .. 
code-block:: prim = ops.IHFFTN() out = prim(input, s, dim, norm) is equivalent to .. code-block:: ops.ihfftn(input, s, dim, norm) Refer to :func:`mindspore.ops.ihfftn` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('s', default=None), sig.make_sig('dim', default=None), sig.make_sig('norm', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, s=None, dim=None, norm=None): return super().__call__(input, s, dim, norm if norm is None else str_to_enum('IHFFTN', 'norm', norm)) ihfftn_op=IHFFTN() class Im2ColExt(Primitive): r""" .. code-block:: prim = ops.Im2ColExt() out = prim(input, kernel_size, dilation, padding, stride) is equivalent to .. code-block:: ops.unfold_ext(input, kernel_size, dilation, padding, stride) Refer to :func:`mindspore.ops.unfold_ext` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('kernel_size'), sig.make_sig('dilation', default=1), sig.make_sig('padding', default=0), sig.make_sig('stride', default=1), ) @prim_arg_register def __init__(self): pass def __call__(self, input, kernel_size, dilation=1, padding=0, stride=1): return _convert_stub(pyboost_im2col_ext(self, [input, to_pair('Im2ColExt', 'kernel_size', kernel_size), to_pair('Im2ColExt', 'dilation', dilation), to_pair('Im2ColExt', 'padding', padding), to_pair('Im2ColExt', 'stride', stride)])) im2col_ext_op=Im2ColExt() class IncreFlashAttention(Primitive): r""" The interface for fully inference. B -- Batch size N -- Num heads kvN -- Num key value heads S -- Sequence length D -- Head dim H -- Hidden size kvH -- Hidden size of key value where :math:`H=N\times D`, :math:`kvH=kvN\times D` Self attention constructs an attention model based on the relationship between input samples themselves. The principle is to assume that there is a length of the input sample sequence :math:`x` of :math:`n`, and each element of :math:`x` is a :math:`d` dimensional vector, which can be viewed as a token embedding. This sequence can be transformed through 3 weight matrices to obtain 3 matrices with dimensions of :math:`n\times d`. The self attention calculation formula is defined as: .. math:: Attention(Q,K,V)=Softmax(\frac{QK^{T} }{\sqrt{d} } )V where the product of :math:`Q` and :math:`K^{T}` represents the attention of input :math:`x`. To avoid the value becoming too large, it is usually scaled by dividing it by the square root of :math:`d` and perform softmax normalization on each row, yields a matrix of :math:`n\times d` after multiplying :math:`V`. .. warning:: This is an experimental API that is subject to change or deletion. Note: - If there is no input parameter and no default value, None needs to be passed. - The shape of the tensor corresponding to the key and value parameters needs to be completely consistent. - :math:`N` of parameter query is equal with num_heads. :math:`N` of parameter key and parameter value is equal with num_key_value_heads. num_heads is a multiple of num_key_value_heads. - Quantization - When the data type of query, key, and value is float16 and the data type of output is int8, the input parameter quant_scale2 is required and quant_offset2 is optional. - When antiquant_scale exists, key and value need to be passed by int8. antiquant_offset is optional. - The data type of antiquant_scale and antiquant_offset should be consistency with that of query. 
- pse_shift - The pse_shift data type needs to be consistent with the query data type, and only supports D-axis alignment, which means that the D-axis can be divided by 16. - Page attention: - The necessary condition for enabling page attention is that the block_table exists, and the key and value are arranged in a contiguous memory according to the index in the block_table. The support for key and value dtypes is float16/bfloat16/int8. - In the enabling scenario of page attention, 16 alignment is required when input types of key and value are float16/bfloat16, and 32 alignment is required when input types of key and value are int8. It is recommended to use 128. - The maximum max_block_num_per_seq currently supported by block_table is 16k, and exceeding 16k will result in interception and error messages; If you encounter :math:`S` being too large and causing max_block_num_per_seq to exceed 16k, you can increase the block_size to solve the problem. - The multiplication of all dimensions of the shape of the parameters key and value in the page attention scenario cannot exceed the representation range of int32. - When performing per-channel post quantization, page attention cannot be enabled simultaneously. - kv_padding_size: - The calculation formula for the starting point of KV cache transfer is :math:`S-kv\_padding\_size-actual\_seq\_lengths`. The calculation formula for the transfer endpoint of KV cache is :math:`S-kv\_padding\_size`. When the starting or ending point of the KV cache transfer is less than 0, the returned data result is all 0. - When kv_padding_size is less than 0, it will be set to 0. - kv_padding_size needs to be enabled together with the actual_seq_lengths parameter, otherwise it is considered as the KV right padding scene. - It needs to be enabled together with the atten_mask parameter and ensure that the meaning of atten_mask is correct, that is, it can correctly hide invalid data. Otherwise, it will introduce accuracy issues. - kv_padding_size does not support page attention scenarios Args: num_heads (int): The number of heads. input_layout (str): the data layout of the input qkv, support 'BSH' and 'BNSD'. Default: ``'BSH'``. scale_value (double): The scale value indicating the scale coefficient, which is used as the scalar of Muls in the calculation. Default: ``1.0``. num_key_value_heads (int): Head numbers of key/value which are used in GQA algorithm. The value 0 indicates if the key and value have the same head nums, use num_heads. Default: ``0``. block_size (int): The maximum number of tokens stored in each block of KV in page attention. Default: ``0``. inner_precise (int): Default: ``1``. Inputs: - **query** (Tensor) - The query tensor with data type of float16 or bfloat16. The shape is :math:`(B, 1, H)` / :math:`(B, N, 1, D)`. - **key** (TensorList) - The key tensor with data type of float16 or bfloat16 or int8. The shape is :math:`(B, S, kvH)` / :math:`(B, kvN, S, D)`. - **value** (TensorList) - The value tensor with data type of float16 or bfloat16 or int8. The shape is :math:`(B, S, kvH)` / :math:`(B, kvN, S, D)`. - **attn_mask** (Tensor, optional) - The attention mask tensor with data type of bool or int8 or uint8. The shape is :math:`(B, S)` / :math:`(B, 1, S)` / :math:`(B, 1, 1, S)`. Default: ``None``. - **actual_seq_lengths** (Union[Tensor, tuple[int], list[int]], optional) - Describe actual sequence length of each input with data type of int32 or int64. The shape is :math:`(B, )`. Default: ``None``. 
- **pse_shift** (Tensor, optional) - The position encoding tensor with data type of float16 or bfloat16. Input tensor of shape :math:`(1, N, 1, S)` / :math:`(B, N, 1, S)`. Default: ``None``. - **dequant_scale1** (Tensor, optional) - Quantization parameter, the tensor with data type of uint64 or float32. It is disabled for now. Default: ``None``. - **quant_scale1** (Tensor, optional) - Quantization parameter, the tensor with data type of float32. It is disabled for now. Default: ``None``. - **dequant_scale2** (Tensor, optional) - Quantization parameter, the tensor with data type of uint64 or float32. It is disabled for now. Default: ``None``. - **quant_scale2** (Tensor, optional) - Post-quantization parameter, the tensor with data type of float32. The shape is :math:`(1,)`. Default: ``None``. - **quant_offset2** (Tensor, optional) - Post-quantization parameter, the tensor with data type of float32. The shape is :math:`(1,)`. Default: ``None``. - **antiquant_scale** (Tensor, optional) - Pseudo-quantization parameter, the tensor with data type of float16 or bfloat16. The shape is :math:`(2, kvN, 1, D)` when input_layout is 'BNSD' or :math:`(2, kvH)` when input_layout is 'BSH'. Default: ``None``. - **antiquant_offset** (Tensor, optional) - Pseudo-quantization parameter, the tensor with data type of float16 or bfloat16. The shape is :math:`(2, kvN, 1, D)` when input_layout is 'BNSD' or :math:`(2, kvH)` when input_layout is 'BSH'. Default: ``None``. - **block_table** (Tensor, optional) - The tensor with data type of int32. The shape is :math:`(B, max\_block\_num\_per\_seq)`, where :math:`max\_block\_num\_per\_seq = ceil(\frac{max(actual\_seq\_length)}{block\_size} )`. Default: ``None``. - **kv_padding_size** (Tensor, optional) - The tensor with data type of int64. The range of values is :math:`0\le kv\_padding\_size \le S-max(actual\_seq\_length)`. The shape is :math:`()` or :math:`(1,)`. Default: ``None``. Outputs: attention_out (Tensor), the shape is :math:`(B, 1, H)` / :math:`(B, N, 1, D)`. Supported Platforms: ``Ascend`` Examples: >>> from mindspore import ops >>> from mindspore.common import Tensor >>> from mindspore.common import dtype as mstype >>> import numpy as np >>> from mindspore.ops.auto_generate import IncreFlashAttention >>> B, N, S, D, kvN = 1, 4, 10, 128, 1 >>> query = Tensor(np.random.randn(B, 1, N * D), mstype.float16) >>> key = [Tensor(np.random.randn(B, S, kvN * D), mstype.float16)] >>> value = [Tensor(np.random.randn(B, S, kvN * D), mstype.float16)] >>> ifa_ms = IncreFlashAttention(num_heads=N, num_key_value_heads=kvN) >>> attn_out = ifa_ms(query, key, value) >>> attn_out Tensor(shape=[1, 1, 512], dtype=Float16, value= [[[-1.5161e-01, -2.1814e-01, -1.6284e-01 ...
1.0283e+00, -1.1143e+00, -1.7607e+00]]]) """ __mindspore_signature__ = ( sig.make_sig('query'), sig.make_sig('key'), sig.make_sig('value'), sig.make_sig('attn_mask', default=None), sig.make_sig('actual_seq_lengths', default=None), sig.make_sig('pse_shift', default=None), sig.make_sig('dequant_scale1', default=None), sig.make_sig('quant_scale1', default=None), sig.make_sig('dequant_scale2', default=None), sig.make_sig('quant_scale2', default=None), sig.make_sig('quant_offset2', default=None), sig.make_sig('antiquant_scale', default=None), sig.make_sig('antiquant_offset', default=None), sig.make_sig('block_table', default=None), sig.make_sig('kv_padding_size', default=None), ) @prim_arg_register def __init__(self, num_heads=1, input_layout='BSH', scale_value=1.0, num_key_value_heads=0, block_size=0, inner_precise=1): self._set_prim_arg("num_heads", num_heads) self._set_prim_arg_with_handler("input_layout", input_layout, str_to_enum) self._set_prim_arg("scale_value", scale_value) self._set_prim_arg("num_key_value_heads", num_key_value_heads) self._set_prim_arg("block_size", block_size) self._set_prim_arg("inner_precise", inner_precise) def __call__(self, query, key, value, attn_mask=None, actual_seq_lengths=None, pse_shift=None, dequant_scale1=None, quant_scale1=None, dequant_scale2=None, quant_scale2=None, quant_offset2=None, antiquant_scale=None, antiquant_offset=None, block_table=None, kv_padding_size=None): return _convert_stub(pyboost_incre_flash_attention(self, [query, key, value, attn_mask, actual_seq_lengths, pse_shift, dequant_scale1, quant_scale1, dequant_scale2, quant_scale2, quant_offset2, antiquant_scale, antiquant_offset, block_table, kv_padding_size, self.num_heads, self.input_layout, self.scale_value, self.num_key_value_heads, self.block_size, self.inner_precise])) class IndexAddExt(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('input', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T), sig.make_sig('index', dtype=sig.sig_dtype.T1), sig.make_sig('source', dtype=sig.sig_dtype.T), sig.make_sig('axis', dtype=sig.sig_dtype.T2), sig.make_sig('alpha', dtype=sig.sig_dtype.T3, default=1), ) @prim_arg_register def __init__(self): pass def __call__(self, input, index, source, axis, alpha=1): return _convert_stub(pyboost_index_add_ext(self, [input, index, source, axis, alpha])) index_add_ext_op=IndexAddExt() class IndexSelect(Primitive): r""" .. code-block:: prim = ops.IndexSelect() out = prim(input, dim, index) is equivalent to .. code-block:: ops.index_select_ext(input, dim, index) Refer to :func:`mindspore.ops.index_select_ext` for more details. 
""" @prim_arg_register def __init__(self): pass def __call__(self, input, dim, index): return _convert_stub(pyboost_index_select(self, [input, dim, index])) index_select_op=IndexSelect() class InnerCommAllGather(Primitive): r""" """ @prim_arg_register def __init__(self): pass def __call__(self, input, rank_size, group): return _convert_stub(pyboost_inner_comm_all_gather(self, [input, rank_size, group])) inner_comm_all_gather_op=InnerCommAllGather() class InnerCommAllReduce(Primitive): r""" """ @prim_arg_register def __init__(self): pass def __call__(self, input, op_type, group): return _convert_stub(pyboost_inner_comm_all_reduce(self, [input, op_type, group])) inner_comm_all_reduce_op=InnerCommAllReduce() class InnerCommAllToAllV(Primitive): r""" """ @prim_arg_register def __init__(self): pass def __call__(self, input, group, send_numel_list, recv_numel_list, rank_size, split_sizes_empty): return _convert_stub(pyboost_inner_comm_all_to_all_v(self, [input, group, send_numel_list, recv_numel_list, rank_size, split_sizes_empty])) inner_comm_all_to_all_v_op=InnerCommAllToAllV() class InnerCommIrecv(Primitive): r""" """ @prim_arg_register def __init__(self): pass def __call__(self, tag, src, shape, group, dtype): return _convert_stub(pyboost_inner_comm_irecv(self, [tag, src, shape, group, dtype_to_type_id('InnerCommIrecv', 'dtype', dtype)])) inner_comm_irecv_op=InnerCommIrecv() class InnerCommIsend(Primitive): r""" """ @prim_arg_register def __init__(self): pass def __call__(self, input, dst, group, tag): return _convert_stub(pyboost_inner_comm_isend(self, [input, dst, group, tag])) inner_comm_isend_op=InnerCommIsend() class InnerCommReduceScatter(Primitive): r""" """ @prim_arg_register def __init__(self): pass def __call__(self, input, rank_size, op_type, group): return _convert_stub(pyboost_inner_comm_reduce_scatter(self, [input, rank_size, op_type, group])) inner_comm_reduce_scatter_op=InnerCommReduceScatter() class InplaceAddExt(Primitive): r""" .. code-block:: prim = ops.InplaceAddExt() out = prim(input, other, alpha) is equivalent to .. code-block:: ops.inplace_add_ext(input, other, alpha) Refer to :func:`mindspore.ops.inplace_add_ext` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('other'), sig.make_sig('alpha', default=1), ) @prim_arg_register def __init__(self): self.add_prim_attr("side_effect_mem", True) def __call__(self, input, other, alpha=1): return _convert_stub(pyboost_inplace_add_ext(self, [input, other, alpha])) inplace_add_ext_op=InplaceAddExt() class InplaceAddmm(Primitive): r""" .. code-block:: prim = ops.InplaceAddmm() out = prim(input, mat1, mat2, beta, alpha) is equivalent to .. code-block:: ops.inplace_addmm(input, mat1, mat2, beta, alpha) Refer to :func:`mindspore.ops.inplace_addmm` for more details. """ __mindspore_signature__ = ( sig.make_sig('input', sig.sig_rw.RW_WRITE), sig.make_sig('mat1'), sig.make_sig('mat2'), sig.make_sig('beta', default=1), sig.make_sig('alpha', default=1), ) @prim_arg_register def __init__(self): self.add_prim_attr("side_effect_mem", True) def __call__(self, input, mat1, mat2, beta=1, alpha=1): return _convert_stub(pyboost_inplace_addmm(self, [input, mat1, mat2, beta, alpha])) inplace_addmm_op=InplaceAddmm() class InplaceAddsExt(Primitive): r""" .. code-block:: prim = ops.InplaceAddsExt() out = prim(input, other, alpha) is equivalent to .. code-block:: ops.inplace_adds_ext(input, other, alpha) Refer to :func:`mindspore.ops.inplace_adds_ext` for more details. 
""" __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('other'), sig.make_sig('alpha', default=1), ) @prim_arg_register def __init__(self): self.add_prim_attr("side_effect_mem", True) def __call__(self, input, other, alpha=1): return _convert_stub(pyboost_inplace_adds_ext(self, [input, other, alpha])) inplace_adds_ext_op=InplaceAddsExt() class InplaceClampScalar(Primitive): r""" .. code-block:: prim = ops.InplaceClampScalar() out = prim(input, min, max) is equivalent to .. code-block:: ops.inplace_clamp_scalar(input, min, max) Refer to :func:`mindspore.ops.inplace_clamp_scalar` for more details. """ __mindspore_signature__ = ( sig.make_sig('input', sig.sig_rw.RW_WRITE), sig.make_sig('min', default=None), sig.make_sig('max', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, min=None, max=None): return _convert_stub(pyboost_inplace_clamp_scalar(self, [input, min, max])) inplace_clamp_scalar_op=InplaceClampScalar() class InplaceClampTensor(Primitive): r""" .. code-block:: prim = ops.InplaceClampTensor() out = prim(input, min, max) is equivalent to .. code-block:: ops.inplace_clamp_tensor(input, min, max) Refer to :func:`mindspore.ops.inplace_clamp_tensor` for more details. """ __mindspore_signature__ = ( sig.make_sig('input', sig.sig_rw.RW_WRITE), sig.make_sig('min', default=None), sig.make_sig('max', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, min=None, max=None): return _convert_stub(pyboost_inplace_clamp_tensor(self, [input, min, max])) inplace_clamp_tensor_op=InplaceClampTensor() class InplaceFillScalar(Primitive): r""" .. code-block:: prim = ops.InplaceFillScalar() out = prim(input, value) is equivalent to .. code-block:: ops.inplace_fill_scalar(input, value) Refer to :func:`mindspore.ops.inplace_fill_scalar` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input, value): return _convert_stub(pyboost_inplace_fill_scalar(self, [input, value])) inplace_fill_scalar_op=InplaceFillScalar() class InplaceFillTensor(Primitive): r""" .. code-block:: prim = ops.InplaceFillTensor() out = prim(input, value) is equivalent to .. code-block:: ops.inplace_fill_tensor(input, value) Refer to :func:`mindspore.ops.inplace_fill_tensor` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input, value): return _convert_stub(pyboost_inplace_fill_tensor(self, [input, value])) inplace_fill_tensor_op=InplaceFillTensor() class InplaceFloor(Primitive): r""" .. code-block:: prim = ops.InplaceFloor() out = prim(input) is equivalent to .. code-block:: ops.floor_(input) Refer to :func:`mindspore.ops.floor_` for more details. """ __mindspore_signature__ = ( sig.make_sig('input', sig.sig_rw.RW_WRITE), ) @prim_arg_register def __init__(self): self.add_prim_attr("side_effect_mem", True) def __call__(self, input): return _convert_stub(pyboost_inplace_floor(self, [input])) inplace_floor_op=InplaceFloor() class InplaceZero(Primitive): r""" .. code-block:: prim = ops.InplaceZero() out = prim(input) is equivalent to .. code-block:: ops.zero_(input) Refer to :func:`mindspore.ops.zero_` for more details. 
""" __mindspore_signature__ = ( sig.make_sig('input', sig.sig_rw.RW_WRITE), ) @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_inplace_zero(self, [input])) inplace_zero_op=InplaceZero() class InsertGemV2InBackward(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('ele_pos'), sig.make_sig('cur_step'), sig.make_sig('seed'), sig.make_sig('offset'), sig.make_sig('start', default=0), sig.make_sig('steps', default=1), sig.make_sig('error_mode', default='cycle'), sig.make_sig('flip_mode', default='bitflip'), sig.make_sig('multiply_factor', default=0.0), sig.make_sig('bit_pos', default=0), sig.make_sig('flip_probability', default=0.0), ) @prim_arg_register def __init__(self): pass def __call__(self, input, ele_pos, cur_step, seed, offset, start=0, steps=1, error_mode='cycle', flip_mode='bitflip', multiply_factor=0.0, bit_pos=0, flip_probability=0.0): return super().__call__(input, ele_pos, cur_step, seed, offset, start, steps, str_to_enum('InsertGemV2InBackward', 'error_mode', error_mode), str_to_enum('InsertGemV2InBackward', 'flip_mode', flip_mode), multiply_factor, bit_pos, flip_probability) insert_gem_v2_in_backward_op=InsertGemV2InBackward() class IRFFT2(Primitive): r""" .. code-block:: prim = ops.IRFFT2() out = prim(input, s, dim, norm) is equivalent to .. code-block:: ops.irfft2(input, s, dim, norm) Refer to :func:`mindspore.ops.irfft2` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('s', default=None), sig.make_sig('dim', default=(-2, -1)), sig.make_sig('norm', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, s=None, dim=(-2, -1), norm=None): return super().__call__(input, s, dim, norm if norm is None else str_to_enum('IRFFT2', 'norm', norm)) irfft2_op=IRFFT2() class IRFFTDouble(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('n'), sig.make_sig('dim', default=-1), ) @prim_arg_register def __init__(self): pass def __call__(self, input, n, dim=-1): return super().__call__(input, n, dim) irfft_double_op=IRFFTDouble() class IRFFT(Primitive): r""" .. code-block:: prim = ops.IRFFT() out = prim(input, n, dim, norm) is equivalent to .. code-block:: ops.irfft(input, n, dim, norm) Refer to :func:`mindspore.ops.irfft` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('n', default=None), sig.make_sig('dim', default=-1), sig.make_sig('norm', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, n=None, dim=-1, norm=None): return super().__call__(input, n, dim, norm if norm is None else str_to_enum('IRFFT', 'norm', norm)) irfft_op=IRFFT() class IRFFTN(Primitive): r""" .. code-block:: prim = ops.IRFFTN() out = prim(input, s, dim, norm) is equivalent to .. code-block:: ops.irfftn(input, s, dim, norm) Refer to :func:`mindspore.ops.irfftn` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('s', default=None), sig.make_sig('dim', default=None), sig.make_sig('norm', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, s=None, dim=None, norm=None): return super().__call__(input, s, dim, norm if norm is None else str_to_enum('IRFFTN', 'norm', norm)) irfftn_op=IRFFTN()
[docs]class IsClose(Primitive): r""" Returns a tensor of Boolean values indicating whether each element of `input` is "close" to the corresponding element of `other`. Closeness is defined as: .. math:: |input-other| <= atol + rtol * |other| Refer to :func:`mindspore.ops.isclose` for more details. Args: rtol(float, optional): Relative tolerance. Default: ``1e-05`` . atol(float, optional): Absolute tolerance. Default: ``1e-08`` . equal_nan(bool, optional): If ``True`` , then two NaNs will be considered equal. Default: ``True`` . Inputs: - **input** (Tensor) - First tensor to compare. - **other** (Tensor) - Second tensor to compare. Outputs: Tensor, with the same shape as `input` and `other` after broadcasting, its dtype is bool. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor >>> from mindspore.ops import IsClose >>> input = Tensor(np.array([1.3, 2.1, 3.2, 4.1, 5.1]), mindspore.float16) >>> other = Tensor(np.array([1.3, 3.3, 2.3, 3.1, 5.1]), mindspore.float16) >>> isclose = IsClose() >>> output = isclose(input, other) >>> print(output) [ True False False False True] """ @prim_arg_register def __init__(self, rtol=1e-05, atol=1e-08, equal_nan=True): self._set_prim_arg("rtol", type_it('IsClose', 'rtol', rtol, (OpDtype.DT_BOOL, OpDtype.DT_INT), OpDtype.DT_FLOAT)) self._set_prim_arg("atol", type_it('IsClose', 'atol', atol, (OpDtype.DT_BOOL, OpDtype.DT_INT), OpDtype.DT_FLOAT)) self._set_prim_arg("equal_nan", equal_nan) def __call__(self, input, other): return _convert_stub(pyboost_isclose(self, [input, other, self.rtol, self.atol, self.equal_nan]))
[docs]class IsFinite(Primitive): r""" .. code-block:: prim = ops.IsFinite() out = prim(x) is equivalent to .. code-block:: ops.isfinite(x) Refer to :func:`mindspore.ops.isfinite` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, x): return _convert_stub(pyboost_isfinite(self, [x]))
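# Editorial usage sketch (illustrative addition, not part of the generated module): a
# minimal example of the IsFinite primitive above via its public alias ops.IsFinite,
# following the example style used elsewhere in this file. Expected values follow
# ordinary IEEE float semantics.
import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.array([np.inf, 1.0, np.nan, -np.inf]), mindspore.float32)
finite_mask = ops.IsFinite()(x)
# Only the ordinary float is finite: [False  True False False]
print(finite_mask)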
isfinite_op=IsFinite() class L1LossBackwardExt(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('grad_output'), sig.make_sig('input'), sig.make_sig('target'), sig.make_sig('reduction', default='mean'), ) @prim_arg_register def __init__(self): pass def __call__(self, grad_output, input, target, reduction='mean'): return _convert_stub(pyboost_l1_loss_backward_ext(self, [grad_output, input, target, str_to_enum('L1LossBackwardExt', 'reduction', reduction)])) l1_loss_backward_ext_op=L1LossBackwardExt() class L1LossExt(Primitive): r""" .. code-block:: prim = ops.L1LossExt() out = prim(input, target, reduction) is equivalent to .. code-block:: ops.l1_loss_ext(input, target, reduction) Refer to :func:`mindspore.ops.l1_loss_ext` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('target'), sig.make_sig('reduction', default='mean'), ) @prim_arg_register def __init__(self): pass def __call__(self, input, target, reduction='mean'): return _convert_stub(pyboost_l1_loss_ext(self, [input, target, str_to_enum('L1LossExt', 'reduction', reduction)])) l1_loss_ext_op=L1LossExt() class LayerNormExt(Primitive): r""" Applies the Layer Normalization to the input tensor. This operator will normalize the input tensor on given axis. LayerNorm is described in the paper `Layer Normalization <https://arxiv.org/abs/1607.06450>`_. .. math:: y = \frac{x - mean}{\sqrt{variance + \epsilon}} * \gamma + \beta where :math:`\gamma` is weight, :math:`\beta` is bias, :math:`\epsilon` is eps. Args: input (Tensor): Tensor of shape :math:`(N, \ldots)`. The input of LayerNorm. normalized_shape (Union(tuple[int], list[int])): The normalized shape of `input` for LayerNorm. weight (Tensor, optional): Learnable parameter :math:`\gamma` . Tensor of shape `normalized_shape`. Default: ``None`` . bias (Tensor, optional): Learnable parameter :math:`\beta` . Tensor of shape `normalized_shape`. Default: ``None`` . eps (float, optional): A value added to the denominator for numerical stability(:math:`\epsilon`). Default: ``1e-5`` . Returns: tuple[Tensor], tuple of 3 tensors, the normalized input and the updated parameters. - **output_x** (Tensor) - The normalized input, has the same type and shape as the `input_x`. - **mean** (Tensor) - The first `begin_norm_axis` (The begin axis of the `input_x` to apply LayerNorm) dimensions of `mean` shape is the same as `input_x`, and the remaining dimensions are 1. Suppose the shape of the `input_x` is :math:`(x_1, x_2, \ldots, x_R)`, the shape of the `mean` is :math:`(x_1, \ldots, x_{begin_params_axis}, 1, \ldots, 1)` (when `begin_params_axis=0`, the shape of `mean` is :math:`(1, \ldots, 1)` ). - **rstd** (Tensor) - Shape is the same as `mean` . Raises: TypeError: If `input` is not a Tensor. TypeError: If `normalized_shape` is not an integer, a list or a tuple. TypeError: If `eps` is not a float. Supported Platforms: ``Ascend`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> input_x = Tensor(np.array([[1, 2, 3], [1, 2, 3]]), mindspore.float32) >>> normalized_shape = (3,) >>> gamma = Tensor(np.ones(normalized_shape), mindspore.float32) >>> beta = Tensor(np.zeros(normalized_shape), mindspore.float32) >>> eps = 1e-7 >>> layer_norm = ops.LayerNormExt() >>> output, mean, rstd = layer_norm(input_x, normalized_shape, gamma, beta, eps) >>> print(output) [[-1.2247448 0. 1.2247448] [-1.2247448 0. 1.2247448]] >>> print(mean) [[2.] 
[2.]] >>> print(rstd) [[1.2247447] [1.2247447]] """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('normalized_shape'), sig.make_sig('weight', default=None), sig.make_sig('bias', default=None), sig.make_sig('eps', default=1e-5), ) @prim_arg_register def __init__(self): pass def __call__(self, input, normalized_shape, weight=None, bias=None, eps=1e-5): return _convert_stub(pyboost_layer_norm_ext(self, [input, normalized_shape, weight, bias, eps])) layer_norm_ext_op=LayerNormExt() class LayerNormGradExt(Primitive): r""" """ @prim_arg_register def __init__(self): pass def __call__(self, dy, x, normalized_shape, mean, variance, gamma, beta): return _convert_stub(pyboost_layer_norm_grad_ext(self, [dy, x, normalized_shape, mean, variance, gamma, beta])) layer_norm_grad_ext_op=LayerNormGradExt() class LayerNormGradGrad(Primitive): r""" Gets the gradient of LayerNormGrad operation. Inputs: - **x** (Tensor) - The input tensor to be normalized, float32 or float16. - **dy** (Tensor) - The gradient of LayerNorm's output y, float32 or float16. - **variance** (Tensor) - The variance of x, float32 or float16. - **mean** (Tensor) - The mean of x, float32 or float16. - **gamma** (Tensor) - The original value of weight gamma initialized in LayerNorm, float32 or float16. Default: 'ones'. - **d_dx** (Tensor) - The gradient of dx, where dx is the gradient of LayerNorm's input x, float32 or float16. - **d_dg** (Tensor) - The gradient of dg, where dg is the gradient of LayerNorm's weight gamma, float32 or float16. - **d_db** (Tensor) - The gradient of db, where db is the gradient of LayerNorm's weight beta, float32 or float16. - **begin_norm_axis** (int) - The begin axis for the input to apply layernorm. Default: 1. - **begin_params_axis** (int) - The begin axis for the parameter input to apply layernorm. Default: 1. Outputs: Tuple[Tensor], tuple of 3 Tensors (the gradients of layernormgrad x, dy, gamma). Raises: TypeError: If the 8 inputs don't have the same dtype. ValueError: If x, dy, d_dx don't have the same shape. ValueError: If variance, mean don't have the same shape. ValueError: If gamma, d_dg, d_db don't have the same shape. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` """ @prim_arg_register def __init__(self, begin_norm_axis=1, begin_params_axis=1): self._set_prim_arg("begin_norm_axis", begin_norm_axis) self._set_prim_arg("begin_params_axis", begin_params_axis) def __call__(self, x, dy, variance, mean, gamma, d_dx, d_dg, d_db): return super().__call__(x, dy, variance, mean, gamma, d_dx, d_dg, d_db, self.begin_norm_axis, self.begin_params_axis) class LayerNormGrad(Primitive): r""" Computes the gradients of the LayerNorm operation. This operator calculates the gradients with respect to LayerNorm's input x, gamma and beta. Inputs: x (Tensor): The inputs of layer norm op. dy (Tensor): The gradient of outputs of layer norm op. variance (Tensor): The variance of x. mean (Tensor): The mean of x. gamma (Tensor): The weights of normalized elements. begin_norm_axis (int): The begin axis for the input to apply layernorm. Default: 1. begin_params_axis (int): The begin axis for the parameter input to apply layernorm. Default: 1. Outputs: tuple[Tensor], tuple of 3 Tensors (the gradients of layernorm input, gamma, beta). pd_x (Tensor): the gradients of layernorm input x. pd_gamma (Tensor): the gradients of gamma. pd_beta (Tensor): the gradients of beta. 
""" @prim_arg_register def __init__(self, begin_norm_axis=1, begin_params_axis=1): self._set_prim_arg("begin_norm_axis", begin_norm_axis) self._set_prim_arg("begin_params_axis", begin_params_axis) def __call__(self, x, dy, variance, mean, gamma): return super().__call__(x, dy, variance, mean, gamma, self.begin_norm_axis, self.begin_params_axis) class LayerNormGradV3(Primitive): r""" Applies the layer Normalization to the input array. This operator will calculate the input gradients of LayerNormV3. Inputs: x (Tensor): The inputs of layer norm operator. dy (Tensor): The gradient of outputs of layer norm operator. rstd (Tensor): The rstd of x. mean (Tensor): The mean of x. gamma (Tensor): The weights of normalized elements. begin_norm_axis (int): The begin axis for the input to apply LayerNormV3. Default: 1. begin_params_axis (int): The begin axis for the parameter input to apply LayerNormV3. Default: 1. Outputs: tuple[int], tuple of 3 values (the gradients of LayerNormV3 input, gamma, beta). pd_x (Tensor): the gradients of LayerNormV3 input x. pd_gamma (Tensor): the gradients of gamma. pd_beta (Tensor): the gradients of beta. """ @prim_arg_register def __init__(self, begin_norm_axis=1, begin_params_axis=1): self._set_prim_arg("begin_norm_axis", begin_norm_axis) self._set_prim_arg("begin_params_axis", begin_params_axis) def __call__(self, x, dy, variance, mean, gamma): return super().__call__(x, dy, variance, mean, gamma, self.begin_norm_axis, self.begin_params_axis)
[docs]class LayerNorm(Primitive): r""" Applies the Layer Normalization to the input tensor. This operator will normalize the input tensor on given axis. LayerNorm is described in the paper `Layer Normalization <https://arxiv.org/abs/1607.06450>`_. .. math:: y = \frac{x - mean}{\sqrt{variance + \epsilon}} * \gamma + \beta where :math:`\gamma` is scale, :math:`\beta` is bias, :math:`\epsilon` is epsilon. Args: begin_norm_axis (int): The begin axis of the `input_x` to apply LayerNorm, the value must be in [-1, rank(input_x)). Default: ``1`` . begin_params_axis (int): The begin axis of the parameter input (`gamma`, `beta`) to apply LayerNorm, the value must be in [-1, rank(input_x)). Default: ``1`` . Note: On the Ascend platform, the value of `begin_params_axis` needs to be equal to the value of `begin_norm_axis` . epsilon (float): A value added to the denominator for numerical stability(:math:`\epsilon`). Default: ``1e-7`` . Inputs: - **input_x** (Tensor) - Tensor of shape :math:`(N, \ldots)`. The input of LayerNorm. Supported dtypes: float16, float32, float64. - **gamma** (Tensor) - Learnable parameter :math:`\gamma` . Tensor of shape `input_x_shape[begin_params_axis:]`. Supported dtypes: float16, float32, float64. - **beta** (Tensor) - Learnable parameter :math:`\beta` . Tensor of shape `input_x_shape[begin_params_axis:]`. Supported dtypes: float16, float32, float64. Outputs: tuple[Tensor], tuple of 3 tensors, the normalized input and the updated parameters. - **output_x** (Tensor) - The normalized input, has the same type and shape as the `input_x`. - **mean** (Tensor) - The first `begin_norm_axis` dimensions of `mean` shape is the same as `input_x`, and the remaining dimensions are 1. Suppose the shape of the `input_x` is :math:`(x_1, x_2, \ldots, x_R)`, the shape of the `mean` is :math:`(x_1, \ldots, x_{begin\_norm\_axis}, 1, \ldots, 1)` (when `begin_norm_axis=0`, the shape of `mean` is :math:`(1, \ldots, 1)` ). - **rstd** (Tensor) - The reciprocal of the input standard deviation. Shape is the same as `mean` . Raises: TypeError: If `begin_norm_axis` or `begin_params_axis` is not an int. TypeError: If `epsilon` is not a float. TypeError: If `input_x`, `gamma` or `beta` is not a Tensor. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> input_x = Tensor(np.array([[1, 2, 3], [1, 2, 3]]), mindspore.float32) >>> gamma = Tensor(np.ones([3]), mindspore.float32) >>> beta = Tensor(np.ones([3]), mindspore.float32) >>> layer_norm = ops.LayerNorm() >>> output, _, _ = layer_norm(input_x, gamma, beta) >>> print(output) [[-0.2247448 1. 2.2247448] [-0.2247448 1. 2.2247448]] """ @prim_arg_register def __init__(self, begin_norm_axis=1, begin_params_axis=1, epsilon=1e-7): self._set_prim_arg("begin_norm_axis", begin_norm_axis) self._set_prim_arg("begin_params_axis", begin_params_axis) self._set_prim_arg("epsilon", epsilon) def __call__(self, input_x, gamma, beta): return super().__call__(input_x, gamma, beta, self.begin_norm_axis, self.begin_params_axis, self.epsilon)
class LayerNormV3(Primitive): r""" Applies the Layer Normalization to the input tensor. This operator will normalize the input tensor on given axis. LayerNormV3 is described in the paper `Layer Normalization <https://arxiv.org/abs/1607.06450>`_. .. math:: y = \frac{x - mean}{\sqrt{variance + \epsilon}} * \gamma + \beta where :math:`\gamma` is scale, :math:`\beta` is bias, :math:`\epsilon` is epsilon. Args: begin_norm_axis (int): The begin axis of the `input_x` to apply LayerNormV3, the value must be in [-1, rank(input_x)). Default: ``1`` . begin_params_axis (int): The begin axis of the parameter input (`gamma`, `beta`) to apply LayerNormV3, the value must be in [-1, rank(input_x)). Default: ``1`` . epsilon (float): A value added to the denominator for numerical stability(:math:`\epsilon`). Default: ``1e-7`` . Inputs: - **input_x** (Tensor) - Tensor with shape :math:`(N, \ldots)`. The input of LayerNormV3. Supported dtypes: float16, float32, bfloat16. - **gamma** (Tensor) - Tensor with shape `input_x_shape[begin_params_axis:]`. - **beta** (Tensor) - Tensor with shape `input_x_shape[begin_params_axis:]`. Outputs: tuple[Tensor], tuple of 3 tensors, the normalized input and the updated parameters. - **output_x** (Tensor) - The normalized input, has the same type and shape as the `input_x`. - **mean** (Tensor) - The first `begin_norm_axis` dimensions of the `mean` shape are the same as `input_x`, and the remaining dimensions are 1. Suppose the shape of the `input_x` is :math:`(x_1, x_2, \ldots, x_R)`, the shape of the `mean` is :math:`(x_1, \ldots, x_{begin\_norm\_axis}, 1, \ldots, 1)` (when `begin_norm_axis=0`, the shape of `mean` is :math:`(1, \ldots, 1)` ). - **rstd** (Tensor) - The reciprocal of the input standard deviation. Shape is the same as `mean` . Raises: TypeError: If `begin_norm_axis` or `begin_params_axis` is not an int. TypeError: If `epsilon` is not a float. TypeError: If `input_x`, `gamma` or `beta` is not a Tensor. Supported Platforms: ``Ascend`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> input_x = Tensor(np.array([[1, 2, 3], [1, 2, 3]]), mindspore.float32) >>> gamma = Tensor(np.ones([3]), mindspore.float32) >>> beta = Tensor(np.ones([3]), mindspore.float32) >>> layer_norm = ops.LayerNormV3() >>> output, mean, rstd = layer_norm(input_x, gamma, beta) >>> print(output) [[-0.22474468 1. 2.22474468] [-0.22474468 1. 2.22474468]] >>> print(mean) [[2.] [2.]] >>> print(rstd) [[1.2247447] [1.2247447]] """ @prim_arg_register def __init__(self, begin_norm_axis=1, begin_params_axis=1, epsilon=1e-7): self._set_prim_arg("begin_norm_axis", begin_norm_axis) self._set_prim_arg("begin_params_axis", begin_params_axis) self._set_prim_arg("epsilon", epsilon) def __call__(self, input_x, gamma, beta): return super().__call__(input_x, gamma, beta, self.begin_norm_axis, self.begin_params_axis, self.epsilon) class LeakyReLUExt(Primitive): r""" .. code-block:: prim = ops.LeakyReLUExt() out = prim(input, negative_slope) is equivalent to .. code-block:: ops.leaky_relu_ext(input, negative_slope) Refer to :func:`mindspore.ops.leaky_relu_ext` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('negative_slope', default=0.01), ) @prim_arg_register def __init__(self): pass def __call__(self, input, negative_slope=0.01): return _convert_stub(pyboost_leaky_relu_ext(self, [input, negative_slope])) leaky_relu_ext_op=LeakyReLUExt() class LeakyReLUGradExt(Primitive): r""" Computes the gradient for the LeakyReLU activation. 
Args: dy (Tensor): Input gradients tensor, has the same dtype and shape as `input`. input (Tensor): Origin input tensor. negative_slope (Scalar): Origin negative_slope. is_result (bool): Output `input` if ``True``. Returns: Tensor, has the same dtype and shape as `input`. """ __mindspore_signature__ = ( sig.make_sig('dy'), sig.make_sig('input'), sig.make_sig('negative_slope', default=0.01), sig.make_sig('is_result', default=False), ) @prim_arg_register def __init__(self): pass def __call__(self, dy, input, negative_slope=0.01, is_result=False): return _convert_stub(pyboost_leaky_relu_grad_ext(self, [dy, input, negative_slope, is_result])) leaky_relu_grad_ext_op=LeakyReLUGradExt()
[docs]class LessEqual(Primitive): r""" .. code-block:: prim = ops.LessEqual() out = prim(input, other) is equivalent to .. code-block:: ops.less_equal(input, other) Refer to :func:`mindspore.ops.less_equal` for more details. """ __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T) @prim_arg_register def __init__(self): pass def __call__(self, input, other): return _convert_stub(pyboost_less_equal(self, [input, other]))
less_equal_op=LessEqual()
[docs]class Less(Primitive): r""" .. code-block:: prim = ops.Less() out = prim(input, other) is equivalent to .. code-block:: ops.less(input, other) Refer to :func:`mindspore.ops.less` for more details. """ __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T) @prim_arg_register def __init__(self): pass def __call__(self, input, other): return _convert_stub(pyboost_less(self, [input, other]))
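# Editorial usage sketch (illustrative addition, not part of the generated module): the
# element-wise comparison primitives LessEqual and Less defined above, called through
# their public aliases ops.LessEqual and ops.Less as in the docstring examples elsewhere
# in this file.
import numpy as np
import mindspore
from mindspore import Tensor, ops

a = Tensor(np.array([1, 2, 3]), mindspore.int32)
b = Tensor(np.array([1, 1, 4]), mindspore.int32)
print(ops.Less()(a, b))       # [False False  True]
print(ops.LessEqual()(a, b))  # [ True False  True]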
less_op=Less() class LinSpaceExt(Primitive): r""" Returns a Tensor whose value is `steps` evenly spaced in the interval `start` and `end` (including `start` and `end`), and the length of the output Tensor is `steps`. .. math:: \begin{aligned} &step = (end - start)/(steps - 1)\\ &output = [start, start+step, start+2*step, ... , end] \end{aligned} .. warning:: Atlas training series does not support int16 dtype currently. Inputs: - **start** (Union[float, int]) - Start value of interval. It can be a float or integer. - **end** (Union[float, int]) - Last value of interval. It can be a float or integer. - **steps** (int) - Number of ticks in the interval, inclusive of start and end. Must be positive integer. - **dtype** (mindspore.dtype, optional) - The output Tensor data type. Default: ``None`` , where the data type of output Tensor is float32. Outputs: Tensor, has the shape of :math:`(steps,)`, with dtype specified by `dtype`. Raises: TypeError: If type of `start` or dtype of `end` is not supported. ValueError: If `steps` is not positive integer. Supported Platforms: ``Ascend`` Examples: >>> import mindspore as ms >>> from mindspore import ops >>> start = 1 >>> end = 10 >>> steps = 5 >>> output = ops.auto_generate.LinSpaceExt()(start, end, steps, dtype=ms.float32) >>> print(output) [ 1. 3.25 5.5 7.75 10. ] """ __mindspore_signature__ = ( sig.make_sig('start'), sig.make_sig('end'), sig.make_sig('steps'), sig.make_sig('dtype', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, start, end, steps, dtype=None): return _convert_stub(pyboost_lin_space_ext(self, [start, end, steps, dtype if dtype is None else dtype_to_type_id('LinSpaceExt', 'dtype', dtype)])) lin_space_ext_op=LinSpaceExt()
[docs]class LinSpace(Primitive): r""" Returns a Tensor whose value is `num` evenly spaced in the interval `start` and `stop` (including `start` and `stop`), and the length of the output Tensor is `num`. Refer to :func:`mindspore.ops.linspace` for more details. Inputs: - **start** (Tensor) - Start value of interval, 0-D Tensor with dtype float32 or float64. - **stop** (Tensor) - Last value of interval, 0-D Tensor with dtype float32 or float64. - **num** (Union[int, Tensor]) - Number of ticks in the interval, inclusive of `start` and `stop`. Must be a positive integer. When the input is Tensor, it must be a 0-D Tensor with dtype int32 or int64. Outputs: Tensor, has the same shape and dtype as `start`. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> from mindspore import Tensor, ops >>> start = Tensor(1, mindspore.float32) >>> stop = Tensor(10, mindspore.float32) >>> num = 5 >>> output = ops.LinSpace()(start, stop, num) >>> print(output) [ 1. 3.25 5.5 7.75 10. ] """ @prim_arg_register def __init__(self): pass def __call__(self, start, stop, num): return super().__call__(start, stop, num)
lin_space_op=LinSpace()
[docs]class Log1p(Primitive): r""" .. code-block:: prim = ops.Log1p() out = prim(input) is equivalent to .. code-block:: ops.log1p(input) Refer to :func:`mindspore.ops.log1p` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_log1p(self, [input]))
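# Editorial usage sketch (illustrative addition, not part of the generated module): Log1p
# computes log(1 + x) element-wise; the values in the comment are approximate expected
# results.
import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.array([0.0, 1.0, 9.0]), mindspore.float32)
print(ops.Log1p()(x))  # approximately [0.        0.6931472 2.3025851]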
log1p_op=Log1p() class LogMatrixDeterminant(Primitive): r""" Computes the sign and the log of the absolute value of the determinant of one or more square matrices. Note: The type of output always be real-value, even `input` is complex. Args: input (Tensor): A matrix to be calculated, its shape is :math:`(..., M, M)`. The matrix must be at least two dimensions, and the last two dimensions must be the same size. Data type must be float32, float64, complex64 or complex128. Returns: Tensor. The signs of the log determinants. The shape is :math:`input.shape[:-2]`. Tensor. The absolute values of the log determinants. The shape is :math:`input.shape[:-2]`. Raises: TypeError: If `input` is not a Tensor. TypeError: If dtype of `input` not float32, float64, complex64 or complex128. ValueError: If the last two dimensions of `input` is not same size. ValueError: If the dimension of `input` is less than 2. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> input_x = Tensor(np.array([[[-4.5, -1.5], [7.0, 6.0]], [[2.5, 0.5], [3.0, 9.0]]]), mindspore.float32) >>> sign, output = ops.LogMatrixDeterminant()(input_x) >>> print(sign) [-1. 1.] >>> print(output) [2.80336046e+00 3.04452229e+00] """ @prim_arg_register def __init__(self): pass def __call__(self, input): return super().__call__(input) log_matrix_determinant_op=LogMatrixDeterminant()
[docs]class Log(Primitive): r""" .. code-block:: prim = ops.Log() out = prim(input) is equivalent to .. code-block:: ops.log(input) Refer to :func:`mindspore.ops.log` for more details. """ @prim_arg_register def __init__(self): self.add_prim_attr("cust_aicpu", 'Log') self.add_prim_attr("base", -1.0) self.add_prim_attr("scale", 1.0) self.add_prim_attr("shift", 0.0) def __call__(self, input): return _convert_stub(pyboost_log(self, [input]))
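# Editorial usage sketch (illustrative addition, not part of the generated module): the
# natural-logarithm primitive Log defined above, via its public alias ops.Log.
import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.array([1.0, np.e, np.e ** 2]), mindspore.float32)
print(ops.Log()(x))  # approximately [0. 1. 2.]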
log_op=Log() class LogSoftmaxExt(Primitive): r""" .. code-block:: prim = ops.LogSoftmaxExt() out = prim(input, dim, dtype) is equivalent to .. code-block:: ops.log_softmax_ext(input, dim, dtype) Refer to :func:`mindspore.ops.log_softmax_ext` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('dim', default=None), sig.make_sig('dtype', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, dim=None, dtype=None): return _convert_stub(pyboost_log_softmax_ext(self, [input, dim, dtype if dtype is None else dtype_to_type_id('LogSoftmaxExt', 'dtype', dtype)])) log_softmax_ext_op=LogSoftmaxExt() class LogSoftmaxGrad(Primitive): r""" Computes gradient for the Log Softmax activation. """ @prim_arg_register def __init__(self, axis=-1): self._set_prim_arg("axis", axis) def __call__(self, logits, grad): return _convert_stub(pyboost_log_softmax_grad(self, [logits, grad, self.axis]))
[docs]class LogSoftmax(Primitive): r""" .. code-block:: prim = ops.LogSoftmax(axis) out = prim(logits) is equivalent to .. code-block:: ops.log_softmax(logits, axis) Refer to :func:`mindspore.ops.log_softmax` for more details. """ @prim_arg_register def __init__(self, axis=-1): self._set_prim_arg("axis", axis) def __call__(self, logits): return _convert_stub(pyboost_log_softmax(self, [logits, self.axis]))
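# Editorial usage sketch (illustrative addition, not part of the generated module):
# LogSoftmax along the last axis, i.e. log_softmax(x) = x - logsumexp(x, axis); the
# values in the comment are approximate expected results.
import numpy as np
import mindspore
from mindspore import Tensor, ops

logits = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
print(ops.LogSoftmax(axis=-1)(logits))  # approximately [-2.407606 -1.407606 -0.407606]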
class LogAddExp(Primitive): r""" .. code-block:: prim = ops.LogAddExp() out = prim(input, other) is equivalent to .. code-block:: ops.logaddexp_ext(input, other) Refer to :func:`mindspore.ops.logaddexp_ext` for more details. """ __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T) @prim_arg_register def __init__(self): pass def __call__(self, input, other): return _convert_stub(pyboost_logaddexp(self, [input, other])) logaddexp_op=LogAddExp()
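# Editorial usage sketch (illustrative addition, not part of the generated module):
# LogAddExp computes log(exp(a) + exp(b)) element-wise in a numerically stable way. This
# sketch calls the module-level instance `logaddexp_op` defined just above rather than
# assuming a particular public alias.
import numpy as np
import mindspore
from mindspore import Tensor

a = Tensor(np.array([-1.0, 0.0, 1.0]), mindspore.float32)
b = Tensor(np.array([0.0, 0.0, 0.0]), mindspore.float32)
print(logaddexp_op(a, b))  # approximately [0.3132617 0.6931472 1.3132617]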
[docs]class LogicalAnd(Primitive): r""" Computes the "logical AND" of two tensors element-wise. Refer to :func:`mindspore.ops.logical_and` for more details. Inputs: - **x** (Union[Tensor, bool]) - The first input is a bool or a tensor whose data type can be implicitly converted to bool. - **y** (Union[Tensor, bool]) - The second input is a bool when the first input is a tensor or a tensor whose data type can be implicitly converted to bool. Outputs: Tensor, the shape is the same as the `x` and `y` after broadcasting, and the data type is bool. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> x = Tensor(np.array([True, False, True]), mindspore.bool_) >>> y = Tensor(np.array([True, True, False]), mindspore.bool_) >>> logical_and = ops.LogicalAnd() >>> output = logical_and(x, y) >>> print(output) [ True False False] >>> x = Tensor(1, mindspore.bool_) >>> y = Tensor(0, mindspore.bool_) >>> output = ops.LogicalAnd()(x, y) >>> print(output) False >>> x = True >>> y = Tensor(0, mindspore.bool_) >>> output = ops.LogicalAnd()(x, y) >>> print(output) False >>> x = True >>> y = Tensor(np.array([True, False]), mindspore.bool_) >>> output = ops.LogicalAnd()(x, y) >>> print(output) [True False] """ __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T) @prim_arg_register def __init__(self): pass def __call__(self, x, y): return _convert_stub(pyboost_logical_and(self, [x, y]))
logical_and_op=LogicalAnd()
[docs]class LogicalNot(Primitive): r""" Computes the "logical NOT" of a tensor element-wise. Refer to :func:`mindspore.ops.logical_not` for more details. Inputs: - **x** (Tensor) - The input tensor. Outputs: Tensor, the shape is the same as the `x`, and the dtype is bool. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> x = Tensor(np.array([True, False, True]), mindspore.bool_) >>> logical_not = ops.LogicalNot() >>> output = logical_not(x) >>> print(output) [False True False] """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_logical_not(self, [input]))
logical_not_op=LogicalNot()
[docs]class LogicalOr(Primitive): r""" Computes the "logical OR" of two tensors element-wise. Refer to :func:`mindspore.ops.logical_or` for more details. Inputs: - **x** (Union[Tensor, bool]) - The first input is a bool or a tensor whose data type can be implicitly converted to bool. - **y** (Union[Tensor, bool]) - The second input is a bool when the first input is a tensor or a tensor whose data type can be implicitly converted to bool. Outputs: Tensor, the shape is the same as the `x` and `y` after broadcasting, and the data type is bool. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> x = Tensor(np.array([True, False, True]), mindspore.bool_) >>> y = Tensor(np.array([True, True, False]), mindspore.bool_) >>> logical_or = ops.LogicalOr() >>> output = logical_or(x, y) >>> print(output) [ True True True] >>> x = Tensor(1, mindspore.bool_) >>> y = Tensor(0, mindspore.bool_) >>> output = ops.LogicalOr()(x, y) >>> print(output) True >>> x = True >>> y = Tensor(0, mindspore.bool_) >>> output = ops.LogicalOr()(x, y) >>> print(output) True >>> x = True >>> y = Tensor(np.array([True, False]), mindspore.bool_) >>> output = ops.LogicalOr()(x, y) >>> print(output) [True True] """ __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T) @prim_arg_register def __init__(self): pass def __call__(self, x, y): return _convert_stub(pyboost_logical_or(self, [x, y]))
logical_or_op=LogicalOr()
[docs]class LogicalXor(Primitive): r""" Computes the "logical XOR" of two tensors element-wise. .. warning:: This is an experimental API that is subject to change or deletion. Refer to :func:`mindspore.ops.logical_xor` for more details. Inputs: - **x** (Union[Tensor, bool]) - The first input is a bool or a tensor whose data type can be implicitly converted to bool. - **y** (Union[Tensor, bool]) - The second input is a bool when the first input is a tensor or a tensor whose data type can be implicitly converted to bool. Outputs: Tensor, the shape is the same as the `x` and `y` after broadcasting, and the data type is bool. Supported Platforms: ``Ascend`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> x = Tensor(np.array([True, False, True]), mindspore.bool_) >>> y = Tensor(np.array([True, True, False]), mindspore.bool_) >>> logical_xor = ops.LogicalXor() >>> output = logical_xor(x, y) >>> print(output) [ False True True] >>> x = Tensor(1, mindspore.bool_) >>> y = Tensor(0, mindspore.bool_) >>> output = ops.LogicalXor()(x, y) >>> print(output) True >>> x = True >>> y = Tensor(0, mindspore.bool_) >>> output = ops.LogicalXor()(x, y) >>> print(output) True >>> x = True >>> y = Tensor(np.array([True, False]), mindspore.bool_) >>> output = ops.LogicalXor()(x, y) >>> print(output) [False True] """ __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T) @prim_arg_register def __init__(self): pass def __call__(self, input, other): return _convert_stub(pyboost_logical_xor(self, [input, other]))
logical_xor_op=LogicalXor() class LogitGrad(Primitive): r""" Computes the gradient of the Logit operation element-wise. Returns: Tensor, has the same type as `input`. """ @prim_arg_register def __init__(self, eps=-1.0): self._set_prim_arg("eps", eps) def __call__(self, grad, input): return super().__call__(grad, input, self.eps)
[docs]class Logit(Primitive): r""" Calculate the logit of a tensor element-wise. Element in `x` is clamped to [eps, 1-eps]. .. warning:: This is an experimental API that is subject to change or deletion. Refer to :func:`mindspore.ops.logit` for more details. Args: eps (float, optional): The epsilon. The input clamp bound is defined as [eps, 1-eps]. Default: ``-1.0`` . Inputs: - **x** (Tensor) - The input tensor of type float16, float32 or float64. Outputs: Tensor, with the same shape and dtype as the `x`. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import numpy as np >>> from mindspore import Tensor, ops >>> x = Tensor(np.array([0.1, 0.2, 0.3]).astype(np.float32)) >>> op = ops.Logit(eps=1e-5) >>> output = op(x) >>> print(output) [-2.1972246 -1.3862944 -0.8472978] """ @prim_arg_register def __init__(self, eps=-1.0): self._set_prim_arg("eps", eps) def __call__(self, input): return super().__call__(input, self.eps)
class LogSigmoidGrad(Primitive): r""" .. code-block:: prim = ops.LogSigmoidGrad() out = prim(dy, input, buffer) is equivalent to .. code-block:: ops.logsigmoid_grad(dy, input, buffer) Refer to :func:`mindspore.ops.logsigmoid_grad` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, dy, input, buffer): return _convert_stub(pyboost_logsigmoid_grad(self, [dy, input, buffer])) logsigmoid_grad_op=LogSigmoidGrad() class LogSigmoid(Primitive): r""" Applies logsigmoid activation element-wise. The input is a Tensor with any valid shape. Logsigmoid is defined as: .. math:: \text{logsigmoid}(x_{i}) = \log(\frac{1}{1 + \exp(-x_i)}), where :math:`x_{i}` is the element of the input. LogSigmoid Activation Function Graph: .. image:: ../images/LogSigmoid.png :align: center .. warning:: This is an experimental API that is subject to change or deletion. Args: input (Tensor): The input of LogSigmoid with data type of bfloat16, float16 or float32. The shape is :math:`(*)` where :math:`*` means, any number of additional dimensions. Returns: Tensors, with the same type and shape as the `input`. Raises: TypeError: If dtype of `input` is not bfloat16, float16 and float32. TypeError: If `input` is not a Tensor. Supported Platforms: ``Ascend`` Examples: >>> import mindspore >>> from mindspore import Tensor, ops >>> input = Tensor([1.0, 2.0, 3.0], mindspore.float32) >>> output = ops.auto_generate.LogSigmoid()(input)[0] >>> print(output) [-0.31326166 -0.12692806 -0.04858734] """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_logsigmoid(self, [input])) logsigmoid_op=LogSigmoid() class LogSumExp(Primitive): r""" .. code-block:: prim = ops.LogSumExp() out = prim(input, dim, keepdim) is equivalent to .. code-block:: ops.logsumexp_ext(input, dim, keepdim) Refer to :func:`mindspore.ops.logsumexp_ext` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('dim'), sig.make_sig('keepdim', default=False), ) @prim_arg_register def __init__(self): pass def __call__(self, input, dim, keepdim=False): return _convert_stub(pyboost_logsumexp(self, [input, dim, keepdim])) logsumexp_op=LogSumExp() class LpNormV2(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('input_x'), sig.make_sig('p', default=2.0), sig.make_sig('dim', default=None), sig.make_sig('keepdim', default=False), sig.make_sig('epsilon', default=1e-12), ) @prim_arg_register def __init__(self): pass def __call__(self, input_x, p=2.0, dim=None, keepdim=False, epsilon=1e-12): return super().__call__(input_x, p, dim, keepdim, epsilon) lp_norm_v2_op=LpNormV2() class LstsqV2Grad(Primitive): r""" """ @prim_arg_register def __init__(self): pass def __call__(self, gX, A, B): return super().__call__(gX, A, B) lstsq_v2_grad_op=LstsqV2Grad() class LstsqV2(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('A'), sig.make_sig('B'), sig.make_sig('driver', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, A, B, driver=None): return super().__call__(A, B, driver if driver is None else str_to_enum('LstsqV2', 'driver', driver)) lstsq_v2_op=LstsqV2()
[docs]class MaskedFill(Primitive): r""" .. code-block:: prim = ops.MaskedFill() out = prim(input_x, mask, value) is equivalent to .. code-block:: ops.masked_fill(input_x, mask, value) Refer to :func:`mindspore.ops.masked_fill` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input_x, mask, value): return _convert_stub(pyboost_masked_fill(self, [input_x, mask, value]))
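# Editorial usage sketch (illustrative addition, not part of the generated module):
# MaskedFill writes `value` into the positions of `input_x` where `mask` is True.
import numpy as np
import mindspore
from mindspore import Tensor, ops

input_x = Tensor(np.array([1.0, 2.0, 3.0, 4.0]), mindspore.float32)
mask = Tensor(np.array([True, False, True, False]), mindspore.bool_)
value = Tensor(0.5, mindspore.float32)
print(ops.MaskedFill()(input_x, mask, value))  # [0.5 2.  0.5 4. ]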
masked_fill_op=MaskedFill() class MaskedSelectGrad(Primitive): r""" """ @prim_arg_register def __init__(self): pass def __call__(self, input, mask, grad): return _convert_stub(pyboost_masked_select_grad(self, [input, mask, grad])) masked_select_grad_op=MaskedSelectGrad()
[docs]class MaskedSelect(Primitive): r""" .. code-block:: prim = ops.MaskedSelect() out = prim(input, mask) is equivalent to .. code-block:: ops.masked_select(input, mask) Refer to :func:`mindspore.ops.masked_select` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input, mask): return _convert_stub(pyboost_masked_select(self, [input, mask]))
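# Editorial usage sketch (illustrative addition, not part of the generated module):
# MaskedSelect gathers the elements of `input` where `mask` is True into a 1-D tensor.
import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.array([1, 2, 3, 4]), mindspore.int32)
mask = Tensor(np.array([True, False, True, False]), mindspore.bool_)
print(ops.MaskedSelect()(x, mask))  # [1 3]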
masked_select_op=MaskedSelect() class MatMulExt(Primitive): r""" .. code-block:: prim = ops.MatMulExt() out = prim(input, mat2) is equivalent to .. code-block:: ops.matmul_ext(input, mat2) Refer to :func:`mindspore.ops.matmul_ext` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input, mat2): return _convert_stub(pyboost_matmul_ext(self, [input, mat2])) matmul_ext_op=MatMulExt()
[docs]class MatMul(Primitive): r""" Multiplies matrix `a` and matrix `b`. .. math:: Output_{i j}=\sum_{k=1}^{p} a_{i k} b_{k j}=a_{i 1} b_{1 j}+a_{i 2} b_{2 j}+\cdots+a_{i p} b_{p j}, p\in N where :math:`i,j` index the element in the i-th row and j-th column of the output. Note: - If :math:`N * M` is not divisible by 16, performance will be poor in the Ascend environment. - The dtypes of the inputs must be the same. - On Ascend, float64 is not supported. Args: transpose_a (bool): If ``True`` , `a` is transposed before multiplication. Default: ``False`` . transpose_b (bool): If ``True`` , `b` is transposed before multiplication. Default: ``False`` . Inputs: - **a** (Tensor) - The first tensor to be multiplied. The shape of the tensor is :math:`(N, C)`. If `transpose_a` is ``True`` , its shape must be :math:`(C, N)` after transpose. - **b** (Tensor) - The second tensor to be multiplied. The shape of the tensor is :math:`(C, M)`. If `transpose_b` is ``True`` , its shape must be :math:`(M, C)` after transpose. Outputs: Tensor, the shape of the output tensor is :math:`(N, M)`. Raises: TypeError: If `transpose_a` or `transpose_b` is not a bool. TypeError: If the dtype of `a` and the dtype of `b` are not the same. ValueError: If the column of matrix dimensions of `a` is not equal to the row of matrix dimensions of `b`. ValueError: If length of shape of `a` or `b` is not equal to 2. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> a = Tensor(np.ones(shape=[1, 3]), mindspore.float32) >>> b = Tensor(np.ones(shape=[3, 4]), mindspore.float32) >>> matmul = ops.MatMul() >>> output = matmul(a, b) >>> print(output) [[3. 3. 3. 3.]] """ @prim_arg_register def __init__(self, transpose_a=False, transpose_b=False): self._set_prim_arg("transpose_a", transpose_a) self._set_prim_arg("transpose_b", transpose_b) def __call__(self, input, mat2): return _convert_stub(pyboost_matmul(self, [input, mat2, self.transpose_a, self.transpose_b]))
class MatrixDeterminant(Primitive): r""" Calculates the value of the determinant for one or more square matrices. Refer to :func:`mindspore.ops.det` for more details. Inputs: - **x** (Tensor) - A matrix to be calculated. The matrix must be at least two dimensions, and the last two dimensions must be the same size. Outputs: Tensor, the shape is `x_shape[:-2]`, the dtype is same as `x`. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> input_x = Tensor(np.array([[[-4.5, -1.5], [7.0, 6.0]], [[2.5, 0.5], [3.0, 9.0]]]), mindspore.float32) >>> op = ops.MatrixDeterminant() >>> output = op(input_x) >>> print(output) [-16.5 21. ] """ @prim_arg_register def __init__(self): pass def __call__(self, input): return super().__call__(input) matrix_determinant_op=MatrixDeterminant() class MatrixExp(Primitive): r""" .. code-block:: prim = ops.MatrixExp() out = prim(input) is equivalent to .. code-block:: ops.matrix_exp(input) Refer to :func:`mindspore.ops.matrix_exp` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return super().__call__(input) matrix_exp_op=MatrixExp() class MatrixInverseExt(Primitive): r""" .. code-block:: prim = ops.MatrixInverseExt() out = prim(input) is equivalent to .. code-block:: ops.matrix_inverse_ext(input) Refer to :func:`mindspore.ops.matrix_inverse_ext` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_matrix_inverse_ext(self, [input])) matrix_inverse_ext_op=MatrixInverseExt() class Max(Primitive): r""" .. code-block:: prim = ops.Max() out = prim(input) is equivalent to .. code-block:: ops.max_(input) Refer to :func:`mindspore.ops.max_` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_max(self, [input])) max_op=Max() class MaxPoolGradWithIndices(Primitive): r""" Gradients of the MaxPoolWithIndices operation. """ @prim_arg_register def __init__(self, kernel_size, strides=None, pads=0, dilation=(1, 1), ceil_mode=False, argmax_type=mstype.int64): self._set_prim_arg_with_handler("kernel_size", kernel_size, to_kernel_size) self._set_prim_arg_with_handler("strides", strides, to_strides) self._set_prim_arg_with_handler("pads", pads, to_output_padding) self._set_prim_arg_with_handler("dilation", dilation, to_dilations) self._set_prim_arg("ceil_mode", ceil_mode) self._set_prim_arg_with_handler("argmax_type", argmax_type, dtype_to_type_id) def __call__(self, x, grad, argmax): return _convert_stub(pyboost_max_pool_grad_with_indices(self, [x, grad, argmax, self.kernel_size, self.strides, self.pads, self.dilation, self.ceil_mode, self.argmax_type])) class MaxPoolGradWithMask(Primitive): r""" Gradients of the MaxPoolWithMask operation. 
""" @prim_arg_register def __init__(self, kernel_size, strides=None, pads=0, dilation=(1, 1), ceil_mode=False, argmax_type=mstype.int64): self._set_prim_arg_with_handler("kernel_size", kernel_size, to_kernel_size) self._set_prim_arg_with_handler("strides", strides, to_strides) self._set_prim_arg_with_handler("pads", pads, to_output_padding) self._set_prim_arg_with_handler("dilation", dilation, to_dilations) self._set_prim_arg("ceil_mode", ceil_mode) self._set_prim_arg_with_handler("argmax_type", argmax_type, dtype_to_type_id) def __call__(self, x, grad, mask): return _convert_stub(pyboost_max_pool_grad_with_mask(self, [x, grad, mask, self.kernel_size, self.strides, self.pads, self.dilation, self.ceil_mode, self.argmax_type])) class MaxPoolWithIndices(Primitive): r""" Performs max pooling on the input Tensor and returns both max values and indices. Typically the input is of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})`, MaxPool outputs regional maximum in the :math:`(H_{in}, W_{in})`-dimension. Given kernel size :math:`(h_{ker}, w_{ker})` and stride :math:`(s_0, s_1)`, the operation is as follows: .. math:: \text{output}(N_i, C_j, h, w) = \max_{m=0, \ldots, h_{ker}-1} \max_{n=0, \ldots, w_{ker}-1} \text{input}(N_i, C_j, s_0 \times h + m, s_1 \times w + n) .. warning:: This is an experimental API that is subject to change or deletion. Only support on Atlas training series. Args: kernel_size (Union[int, tuple[int]]): The size of kernel used to take the maximum value and argmax value, is an int number that represents height and width of the kernel, or a tuple of two int numbers that represent height and width respectively. strides (Union[int, tuple[int]], optional): The distance of kernel moving, an int number that represents not only the height of movement but also the width of movement, or a tuple of two int numbers that represent height and width of movement respectively. Default: ``None`` , meaning that `strides = kernel_size`. pads (Union[int, tuple[int]], optional): An int number that represents the depth, height and width of movement are both strides, or a tuple of two int numbers that represent depth, height and width of movement respectively. Default: 0. dilation (Union[int, tuple[int]], optional): Control the stride of elements in the kernel. Default: ``(1, 1)`` . ceil_mode (bool, optional): Whether to use ceil instead of floor to calculate output shape. Default: ``False`` . argmax_type (mindspore.dtype, optional) : The dtype for argmax. Default: ``mstype.int64`` . [Disabled in Ascend.] Inputs: - **x** (Tensor) - Tensor of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})` with data type of float32 in Ascend. Outputs: Tuple of 2 Tensors, representing the maxpool result and where the max values are generated. - **output** (Tensor) - Maxpooling result, with shape :math:`(N_{out}, C_{out}, H_{out}, W_{out})`. It has the same data type as `x`. .. math:: H_{out} = \left\lfloor\frac{H_{in} + 2 * \text{pads[0]} - \text{dilation[0]} \times (\text{kernel_size[0]} - 1) - 1}{\text{strides[0]}} + 1\right\rfloor .. math:: W_{out} = \left\lfloor\frac{W_{in} + 2 * \text{pads[1]} - \text{dilation[1]} \times (\text{kernel_size[1]} - 1) - 1}{\text{strides[1]}} + 1\right\rfloor - **argmax** (Tensor) - Index corresponding to the maximum value. Data type is int32 in Ascend. Raises: TypeError: If `x` is not a Tensor. ValueError: If length of shape of `x` is not equal to 4. TypeError: If `kernel_size` , `strides` , `pads` or `dilation` is not int or tuple. 
ValueError: If `kernel_size`, `strides` or `dilation` is less than 1. ValueError: If `pads` is less than 0. ValueError: If `pads` is more than half of `kernel_size`. TypeError: If `ceil_mode` is not bool. Supported Platforms: ``Ascend`` """ @prim_arg_register def __init__(self, kernel_size, strides=None, pads=0, dilation=(1, 1), ceil_mode=False, argmax_type=mstype.int64): self._set_prim_arg_with_handler("kernel_size", kernel_size, to_kernel_size) self._set_prim_arg_with_handler("strides", strides, to_strides) self._set_prim_arg_with_handler("pads", pads, to_output_padding) self._set_prim_arg_with_handler("dilation", dilation, to_dilations) self._set_prim_arg("ceil_mode", ceil_mode) self._set_prim_arg_with_handler("argmax_type", argmax_type, dtype_to_type_id) def __call__(self, x): return _convert_stub(pyboost_max_pool_with_indices(self, [x, self.kernel_size, self.strides, self.pads, self.dilation, self.ceil_mode, self.argmax_type])) class MaxPoolWithMask(Primitive): r""" Performs max pooling on the input Tensor and returns both max values and mask. Typically the input is of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})`, MaxPool outputs regional maximum in the :math:`(H_{in}, W_{in})`-dimension. Given kernel size :math:`(h_{ker}, w_{ker})` and stride :math:`(s_0, s_1)`, the operation is as follows: .. math:: \text{output}(N_i, C_j, h, w) = \max_{m=0, \ldots, h_{ker}-1} \max_{n=0, \ldots, w_{ker}-1} \text{input}(N_i, C_j, s_0 \times h + m, s_1 \times w + n) .. warning:: This is an experimental API that is subject to change or deletion. Only support on Atlas training series. Args: kernel_size (Union[int, tuple[int]]): The size of kernel used to take the maximum value and argmax value, is an int number that represents height and width of the kernel, or a tuple of two int numbers that represent height and width respectively. strides (Union[int, tuple[int]], optional): The distance of kernel moving, an int number that represents not only the height of movement but also the width of movement, or a tuple of two int numbers that represent height and width of movement respectively. Default: ``1``. pads (Union[int, tuple[int]], optional): An int number that represents the depth, height and width of movement are both strides, or a tuple of two int numbers that represent depth, height and width of movement respectively. Default: 0. dilation (Union[int, tuple[int]], optional): Control the stride of elements in the kernel. Default: ``(1, 1)`` . ceil_mode (bool, optional): Whether to use ceil instead of floor to calculate output shape. Default: ``False`` . argmax_type (mindspore.dtype, optional) : The dtype for argmax. Default: ``mstype.int64`` . [Disabled in Ascend.] Inputs: - **x** (Tensor) - Tensor of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})` with data type of float16 and float32 in Ascend. Outputs: Tuple of 2 Tensors, representing the maxpool result and mask are generated. - **output** (Tensor) - Maxpooling result, with shape :math:`(N_{out}, C_{out}, H_{out}, W_{out})`. It has the same data type as `x`. .. math:: H_{out} = \left\lfloor\frac{H_{in} + 2 * \text{pads[0]} - \text{dilation[0]} \times (\text{kernel_size[0]} - 1) - 1}{\text{strides[0]}} + 1\right\rfloor .. math:: W_{out} = \left\lfloor\frac{W_{in} + 2 * \text{pads[1]} - \text{dilation[1]} \times (\text{kernel_size[1]} - 1) - 1}{\text{strides[1]}} + 1\right\rfloor - **mask** (Tensor) - Maxpooling mask. Data type is int8 in Ascend. Raises: TypeError: If `x` is not a Tensor. ValueError: If length of shape of `x` is not equal to 4. 
TypeError: If `kernel_size` , `strides` , `pads` or `dilation` is not int or tuple. ValueError: If `kernel_size`, `strides` or `dilation` is less than 1. ValueError: If `pads` is less than 0. ValueError: If `pads` is more than half of `kernel_size`. TypeError: If `ceil_mode` is not bool. Supported Platforms: ``Ascend`` """ @prim_arg_register def __init__(self, kernel_size, strides=None, pads=0, dilation=(1, 1), ceil_mode=False, argmax_type=mstype.int64): self._set_prim_arg_with_handler("kernel_size", kernel_size, to_kernel_size) self._set_prim_arg_with_handler("strides", strides, to_strides) self._set_prim_arg_with_handler("pads", pads, to_output_padding) self._set_prim_arg_with_handler("dilation", dilation, to_dilations) self._set_prim_arg("ceil_mode", ceil_mode) self._set_prim_arg_with_handler("argmax_type", argmax_type, dtype_to_type_id) def __call__(self, x): return _convert_stub(pyboost_max_pool_with_mask(self, [x, self.kernel_size, self.strides, self.pads, self.dilation, self.ceil_mode, self.argmax_type])) class MaximumGradGrad(Primitive): r""" Grad for maximum grad. """ @prim_arg_register def __init__(self, grad_x=True, grad_y=True): self._set_prim_arg("grad_x", grad_x) self._set_prim_arg("grad_y", grad_y) def __call__(self, x, y, dx, dy): return super().__call__(x, y, dx, dy, self.grad_x, self.grad_y) class MaximumGrad(Primitive): r""" Grad for maximum. """ @prim_arg_register def __init__(self, grad_x=True, grad_y=True): self._set_prim_arg("grad_x", grad_x) self._set_prim_arg("grad_y", grad_y) def __call__(self, x, y, grads): return super().__call__(x, y, grads, self.grad_x, self.grad_y)
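# Editorial worked example (illustrative addition, not part of the generated module): the
# output-shape formula shared by the MaxPoolWithIndices and MaxPoolWithMask docstrings
# above, evaluated in plain Python. The concrete kernel/stride/pad/dilation numbers are
# assumptions chosen for illustration.
import math

h_in, w_in = 32, 32
kernel_size = (3, 3)
strides = (2, 2)
pads = (1, 1)
dilation = (1, 1)

h_out = math.floor((h_in + 2 * pads[0] - dilation[0] * (kernel_size[0] - 1) - 1) / strides[0] + 1)
w_out = math.floor((w_in + 2 * pads[1] - dilation[1] * (kernel_size[1] - 1) - 1) / strides[1] + 1)
print(h_out, w_out)  # 16 16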
[docs]class Maximum(Primitive): r""" .. code-block:: prim = ops.Maximum() out = prim(input, other) is equivalent to .. code-block:: ops.maximum(input, other) Refer to :func:`mindspore.ops.maximum` for more details. """ __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T) @prim_arg_register def __init__(self): pass def __call__(self, input, other): return _convert_stub(pyboost_maximum(self, [input, other]))
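# Editorial usage sketch (illustrative addition, not part of the generated module): the
# element-wise Maximum primitive above, which keeps the larger of each pair.
import numpy as np
import mindspore
from mindspore import Tensor, ops

a = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
b = Tensor(np.array([4.0, 2.0, 3.0]), mindspore.float32)
print(ops.Maximum()(a, b))  # [4. 5. 3.]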
maximum_op=Maximum() class MeanExt(Primitive): r""" .. code-block:: prim = ops.MeanExt() out = prim(input, axis, keep_dims, dtype) is equivalent to .. code-block:: ops.mean_ext(input, axis, keep_dims, dtype) Refer to :func:`mindspore.ops.mean_ext` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('axis', default=None), sig.make_sig('keep_dims', default=False), sig.make_sig('dtype', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, axis=None, keep_dims=False, dtype=None): return _convert_stub(pyboost_mean_ext(self, [input, axis, keep_dims, dtype if dtype is None else dtype_to_type_id('MeanExt', 'dtype', dtype)])) mean_ext_op=MeanExt() class MedianDim(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('dim', default=-1), sig.make_sig('keepdim', default=False), ) @prim_arg_register def __init__(self): pass def __call__(self, input, dim=-1, keepdim=False): return _convert_stub(pyboost_median_dim(self, [input, dim, keepdim])) median_dim_op=MedianDim() class MedianExt(Primitive): r""" """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_median_ext(self, [input])) median_ext_op=MedianExt() class Min(Primitive): r""" .. code-block:: prim = ops.Min() out = prim(input) is equivalent to .. code-block:: ops.min_(input) Refer to :func:`mindspore.ops.min_` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_min(self, [input])) min_op=Min() class MinimumGrad(Primitive): r""" Grad for minimum. """ @prim_arg_register def __init__(self, grad_x=True, grad_y=True): self._set_prim_arg("grad_x", grad_x) self._set_prim_arg("grad_y", grad_y) def __call__(self, x1, x2, grads): return super().__call__(x1, x2, grads, self.grad_x, self.grad_y)
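# Minimal usage sketch (illustrative, not part of the generated file): MeanExt reduces along
# `axis`, and keep_dims=True keeps the reduced axis with length 1, mirroring ops.mean_ext.
def _example_mean_ext():
    import numpy as np
    from mindspore import Tensor
    x = Tensor(np.array([[1.0, 2.0], [3.0, 4.0]], np.float32))
    return MeanExt()(x, 1, True)  # expected shape (2, 1), values [[1.5], [3.5]]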
[docs]class Minimum(Primitive): r""" .. code-block:: prim = ops.Minimum() out = prim(input, other) is equivalent to .. code-block:: ops.minimum(input, other) Refer to :func:`mindspore.ops.minimum` for more details. """ __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T) @prim_arg_register def __init__(self): pass def __call__(self, input, other): return _convert_stub(pyboost_minimum(self, [input, other]))
minimum_op=Minimum() class MishExt(Primitive): r""" .. code-block:: prim = ops.MishExt() out = prim(input) is equivalent to .. code-block:: ops.mish_ext(input) Refer to :func:`mindspore.ops.mish_ext` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_mish_ext(self, [input])) mish_ext_op=MishExt() class MishGradExt(Primitive): r""" """ @prim_arg_register def __init__(self): pass def __call__(self, dout, x): return _convert_stub(pyboost_mish_grad_ext(self, [dout, x])) mish_grad_ext_op=MishGradExt() class Mm(Primitive): r""" .. code-block:: prim = ops.Mm() out = prim(input, mat2) is equivalent to .. code-block:: ops.mm_ext(input, mat2) Refer to :func:`mindspore.ops.mm_ext` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input, mat2): return _convert_stub(pyboost_mm_ext(self, [input, mat2])) mm_ext_op=Mm() class MSELossExt(Primitive): r""" .. code-block:: prim = ops.MSELossExt() out = prim(input, target, reduction) is equivalent to .. code-block:: ops.mse_loss_ext(input, target, reduction) Refer to :func:`mindspore.ops.mse_loss_ext` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('target'), sig.make_sig('reduction', default='mean'), ) @prim_arg_register def __init__(self): pass def __call__(self, input, target, reduction='mean'): return _convert_stub(pyboost_mse_loss_ext(self, [input, target, str_to_enum('MSELossExt', 'reduction', reduction)])) mse_loss_ext_op=MSELossExt() class MSELossGradExt(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('dout'), sig.make_sig('x'), sig.make_sig('target'), sig.make_sig('reduction', default='mean'), ) @prim_arg_register def __init__(self): pass def __call__(self, dout, x, target, reduction='mean'): return _convert_stub(pyboost_mse_loss_grad_ext(self, [dout, x, target, str_to_enum('MSELossGradExt', 'reduction', reduction)])) mse_loss_grad_ext_op=MSELossGradExt()
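# Minimal usage sketch (illustrative, not part of the generated file): with the default
# reduction 'mean', MSELossExt averages the element-wise squared error, as in ops.mse_loss_ext.
def _example_mse_loss_ext():
    import numpy as np
    from mindspore import Tensor
    pred = Tensor(np.array([1.0, 2.0, 3.0], np.float32))
    target = Tensor(np.array([1.0, 1.0, 5.0], np.float32))
    return MSELossExt()(pred, target, 'mean')  # (0 + 1 + 4) / 3 ~= 1.6667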
[docs]class Mul(Primitive): r""" .. code-block:: prim = ops.Mul() out = prim(input, other) is equivalent to .. code-block:: ops.mul(input, other) Refer to :func:`mindspore.ops.mul` for more details. """ __mindspore_signature__ = ( sig.make_sig('input', sig.sig_rw.RW_READ, dtype=sig.sig_dtype.T), sig.make_sig('other', sig.sig_rw.RW_READ, dtype=sig.sig_dtype.T), ) @prim_arg_register def __init__(self): pass def __call__(self, input, other): return _convert_stub(pyboost_mul(self, [input, other]))
mul_op=Mul() class Muls(Primitive): r""" .. code-block:: prim = ops.Muls() out = prim(input, other) is equivalent to .. code-block:: ops.muls(input, other) Refer to :func:`mindspore.ops.muls` for more details. """ __mindspore_signature__ = ( sig.make_sig('input', sig.sig_rw.RW_READ, dtype=sig.sig_dtype.T), sig.make_sig('other', sig.sig_rw.RW_READ, dtype=sig.sig_dtype.T), ) @prim_arg_register def __init__(self): pass def __call__(self, input, other): return _convert_stub(pyboost_muls(self, [input, other])) muls_op=Muls() class MultinomialExt(Primitive): r""" """ @prim_arg_register def __init__(self): pass def __call__(self, input, num_samples, replacement, seed, offset): return _convert_stub(pyboost_multinomial_ext(self, [input, num_samples, replacement, seed, offset])) multinomial_ext_op=MultinomialExt() class Mv(Primitive): r""" .. code-block:: prim = ops.Mv() out = prim(input, vec) is equivalent to .. code-block:: ops.mv(input, vec) Refer to :func:`mindspore.ops.mv` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input, vec): return _convert_stub(pyboost_mv(self, [input, vec])) mv_op=Mv()
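# Minimal usage sketch (illustrative, not part of the generated file): Mv multiplies a 2-D
# matrix by a 1-D vector, as ops.mv does.
def _example_mv():
    import numpy as np
    from mindspore import Tensor
    m = Tensor(np.array([[1.0, 2.0], [3.0, 4.0]], np.float32))
    v = Tensor(np.array([1.0, 1.0], np.float32))
    return Mv()(m, v)  # expected: [3. 7.]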
[docs]class NanToNum(Primitive): r""" .. code-block:: prim = ops.NanToNum(nan, posinf, neginf) out = prim(input) is equivalent to .. code-block:: ops.nan_to_num(input, nan, posinf, neginf) Refer to :func:`mindspore.ops.nan_to_num` for more details. """ @prim_arg_register def __init__(self, nan=None, posinf=None, neginf=None): self._set_prim_arg("nan", type_it('NanToNum', 'nan', nan, (OpDtype.DT_INT, OpDtype.DT_BOOL), OpDtype.DT_FLOAT)) self._set_prim_arg("posinf", type_it('NanToNum', 'posinf', posinf, (OpDtype.DT_INT, OpDtype.DT_BOOL), OpDtype.DT_FLOAT)) self._set_prim_arg("neginf", type_it('NanToNum', 'neginf', neginf, (OpDtype.DT_INT, OpDtype.DT_BOOL), OpDtype.DT_FLOAT)) def __call__(self, input): return _convert_stub(pyboost_nan_to_num(self, [input, self.nan, self.posinf, self.neginf]))
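# Minimal usage sketch (illustrative, not part of the generated file): the nan / posinf /
# neginf replacement values are fixed at construction time, and __call__ only takes the input.
def _example_nan_to_num():
    import numpy as np
    from mindspore import Tensor
    op = NanToNum(nan=0.0, posinf=1e4, neginf=-1e4)
    x = Tensor(np.array([float('nan'), float('inf'), -float('inf'), 1.5], np.float32))
    return op(x)  # expected: [0.e+00 1.e+04 -1.e+04 1.5e+00]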
[docs]class Neg(Primitive): r""" .. code-block:: prim = ops.Neg() out = prim(input) is equivalent to .. code-block:: ops.neg(input) Refer to :func:`mindspore.ops.neg` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_neg(self, [input]))
neg_op=Neg()
[docs]class NextAfter(Primitive): r""" .. code-block:: prim = ops.NextAfter() out = prim(input, other) is equivalent to .. code-block:: ops.nextafter(input, other) Refer to :func:`mindspore.ops.nextafter` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input, other): return super().__call__(input, other)
next_after_op=NextAfter() class NLLLoss2dGrad(Primitive): r""" """ @prim_arg_register def __init__(self): pass def __call__(self, loss_grad, input, target, weight, reduction, ignore_index, total_weight): return _convert_stub(pyboost_nllloss_2d_grad(self, [loss_grad, input, target, weight, str_to_enum('NLLLoss2dGrad', 'reduction', reduction), ignore_index, total_weight])) nllloss_2d_grad_op=NLLLoss2dGrad() class NLLLoss2d(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('target'), sig.make_sig('weight'), sig.make_sig('reduction', default='mean'), sig.make_sig('ignore_index', default=-100), ) @prim_arg_register def __init__(self): pass def __call__(self, input, target, weight, reduction='mean', ignore_index=-100): return _convert_stub(pyboost_nllloss_2d(self, [input, target, weight, str_to_enum('NLLLoss2d', 'reduction', reduction), ignore_index])) nllloss_2d_op=NLLLoss2d() class NLLLossGrad(Primitive): r""" Computes the gradients of `NLLLoss`. """ @prim_arg_register def __init__(self, reduction='mean', ignore_index=-100): self._set_prim_arg_with_handler("reduction", reduction, str_to_enum) self._set_prim_arg("ignore_index", ignore_index) def __call__(self, logits, loss_grad, labels, weight, total_weight): return _convert_stub(pyboost_nllloss_grad(self, [logits, loss_grad, labels, weight, total_weight, self.reduction, self.ignore_index]))
[docs]class NLLLoss(Primitive): r""" Gets the negative log likelihood loss between logits and labels. The nll loss with :math:`reduction = none` can be described as: .. math:: \ell(x, t)=L=\left\{l_{1}, \ldots, l_{N}\right\}^{\top}, \quad l_{n}=-w_{t_{n}} x_{n, t_{n}}, \quad w_{c}=\text { weight }[c] \cdot 1 where :math:`x` is the logits, :math:`t` is the labels, :math:`w` is the weight, N is the batch size, :math:`c` belonging to [0, C-1] is class index, where :math:`C` is the number of classes. If :math:`reduction \neq none` (default ``'mean'`` ), then .. math:: \ell(x, t)=\left\{\begin{array}{ll} \sum_{n=1}^{N} \frac{1}{\sum_{n=1}^{N} w_{t n}} l_{n}, & \text { if reduction }=\text { 'mean'; } \\ \sum_{n=1}^{N} l_{n}, & \text { if reduction }=\text { 'sum' } \end{array}\right. Args: reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` , ``'sum'`` . Default: ``'mean'`` . - ``'none'``: no reduction will be applied. - ``'mean'``: compute and return the weighted mean of elements in the output. - ``'sum'``: the output elements will be summed. ignore_index (int): Specifies a target value that is ignored and does not contribute to the input gradient. Default: ``-100`` . Inputs: - **logits** (Tensor) - Input logits, with shape :math:`(N, C)`. Data type only supports float32 or float16. - **labels** (Tensor) - Ground truth labels, with shape :math:`(N,)`, where each value belong to :math:`[0, C-1]`. Data type only supports int32 or int64. - **weight** (Tensor) - The rescaling weight to each class, with shape :math:`(C,)` and data type only supports float32 or float16. Outputs: Tuple of 2 tensors composed with `loss` and `total_weight`. - **loss** (Tensor) - When `reduction` is ``'none'`` and `logits` is a 2D tensor, the `loss` shape is :math:`(N,)`. Otherwise, the `loss` is a scalar. The data type is the same with `input's`. - **total_weight** (Tensor) - The `total_weight` is a scalar. The data type is the same with `weight's`. Raises: TypeError: If dtype of `logits` or `weight` is neither float16 nor float32. TypeError: If dtype of `labels` is neither int32 nor int64. ValueError: If `logits` is not a one or two dimension tensor, `labels` and `weight` are not one dimension tensors. When `logits` is a two dimension tensor, the first dimension of `logits` is not equal to `labels`, and second dimension of `logits` is not equal to `weight`. When `logits` is a one dimension tensor, the dimensions of `logits`, `labels` and `weight` should be equal to each other. ValueError: If the value of `labels` exceed :math:`[0, C-1]`, where :math:`C` is the number of classes. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import numpy as np >>> from mindspore import Tensor, ops >>> logits = Tensor(np.array([[0.5488135, 0.71518934], ... [0.60276335, 0.5448832], ... [0.4236548, 0.6458941]]).astype(np.float32)) >>> labels = Tensor(np.array([0, 0, 0]).astype(np.int32)) >>> weight = Tensor(np.array([0.3834415, 0.79172504]).astype(np.float32)) >>> nll_loss = ops.NLLLoss(reduction="mean") >>> loss, weight = nll_loss(logits, labels, weight) >>> print(loss) -0.52507716 >>> print(weight) 1.1503246 """ @prim_arg_register def __init__(self, reduction='mean', ignore_index=-100): self._set_prim_arg_with_handler("reduction", reduction, str_to_enum) self._set_prim_arg("ignore_index", ignore_index) def __call__(self, logits, labels, weight): return _convert_stub(pyboost_nllloss(self, [logits, labels, weight, self.reduction, self.ignore_index]))
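# Worked check of the reduction formula in the NLLLoss docstring above, using the same numbers
# as its doctest. Pure numpy, illustrative only: l_n = -w_{t_n} * x_{n, t_n}, and for
# reduction='mean' the loss is sum(l_n) / sum(w_{t_n}).
def _example_nllloss_by_hand():
    import numpy as np
    logits = np.array([[0.5488135, 0.71518934],
                       [0.60276335, 0.5448832],
                       [0.4236548, 0.6458941]], np.float32)
    labels = np.array([0, 0, 0])
    weight = np.array([0.3834415, 0.79172504], np.float32)
    w = weight[labels]
    losses = -w * logits[np.arange(labels.size), labels]
    return losses.sum() / w.sum()  # ~ -0.525077, matching the doctest output above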
class NonZeroExt(Primitive): r""" """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_non_zero_ext(self, [input])) non_zero_ext_op=NonZeroExt()
[docs]class NonZero(Primitive): r""" Returns a Tensor containing the positions of all non-zero values. Inputs: - **input** (Tensor) - The input Tensor, its rank should be greater than or equal to 1. Outputs: Tensor, a 2-D Tensor whose data type is int64, containing the positions of all non-zero values of the input. Raises: TypeError: If `input` is not a Tensor. ValueError: If dim of `input` equals 0. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> input = Tensor(np.array([1, 0, 2, 0, 3]), mindspore.int32) >>> output = ops.NonZero()(input) >>> print(output) [[0] [2] [4]] """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_non_zero(self, [input]))
non_zero_op=NonZero() class Norm(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('p', default=2.0), sig.make_sig('dim', default=None), sig.make_sig('keepdim', default=False), sig.make_sig('dtype', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, p=2.0, dim=None, keepdim=False, dtype=None): return _convert_stub(pyboost_norm(self, [input, p, dim, keepdim, dtype if dtype is None else dtype_to_type_id('Norm', 'dtype', dtype)])) norm_op=Norm() class NormalFloatFloat(Primitive): r""" """ @prim_arg_register def __init__(self): pass def __call__(self, mean, std, size, seed, offset): return _convert_stub(pyboost_normal_float_float(self, [mean, std, size, seed, offset])) normal_float_float_op=NormalFloatFloat() class NormalFloatTensor(Primitive): r""" """ @prim_arg_register def __init__(self): pass def __call__(self, mean, std, seed, offset): return _convert_stub(pyboost_normal_float_tensor(self, [mean, std, seed, offset])) normal_float_tensor_op=NormalFloatTensor() class NormalTensorFloat(Primitive): r""" """ @prim_arg_register def __init__(self): pass def __call__(self, mean, std, seed, offset): return _convert_stub(pyboost_normal_tensor_float(self, [mean, std, seed, offset])) normal_tensor_float_op=NormalTensorFloat() class NormalTensorTensor(Primitive): r""" """ @prim_arg_register def __init__(self): pass def __call__(self, mean, std, seed, offset): return _convert_stub(pyboost_normal_tensor_tensor(self, [mean, std, seed, offset])) normal_tensor_tensor_op=NormalTensorTensor()
[docs]class NotEqual(Primitive): r""" .. code-block:: prim = ops.NotEqual() out = prim(input, other) is equivalent to .. code-block:: ops.not_equal(input, other) Refer to :func:`mindspore.ops.not_equal` for more details. """ __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T) @prim_arg_register def __init__(self): pass def __call__(self, input, other): return _convert_stub(pyboost_not_equal(self, [input, other]))
not_equal_op=NotEqual() class NPUClearFloatStatusV2(Primitive): r""" Compare to NPUClearStatus Clear the flag for storage overflow status. This flag is located in a register at a fixed address on the `Ascend` device, and overflow information is automatically written to this register. The flag is a one-dimensional Tensor with shape :math:`(8,)` and data type `mindspore.dtype.int32`. If the value of flag is zero, no overflow has occurred, otherwise, overflow. When performing overflow detection on the network, you should first call `NPUClearFloatStatusV2` to reset the register before the detection, and then call `NPUGetFloatStatusV2` to get the register status after the network execution is completed. Note: - In order to avoid mis-optimization by the compiler, additional input and output are added to this operator. The input and output are defined as a shape of: math:`(8,)` and data type of `mindspore.dtype.int32` Tensor, meaningless. - Since this op lacks contextual dependencies with parameters in the network, :class:`mindspore.ops.Depend` needs to be used to ensure order of execution. Inputs: - **input** Tensor, an additional input created to avoid compiler optimization, is specified as shape :math:`(8,)`, data type is `mindspore.dtype.int32`, and has no actual meaning.. Outputs: - **output** Tensor, shape and data type are the same as input, meaningless. Supported Platforms: ``Ascend`` Examples: >>> import mindspore as ms >>> import numpy as np >>> from mindspore import ops, nn, Tensor >>> from mindspore.ops import NPUGetFloatStatusV2, NPUClearFloatStatusV2 >>> class Net(nn.Cell): ... def __init__(self): ... super().__init__() ... self.clear_status = NPUClearFloatStatusV2() ... self.get_status = NPUGetFloatStatusV2() ... self.sub = ops.Sub() ... self.neg = ops.Neg() ... self.equal = ops.Equal() ... self.reduce_all = ops.ReduceAll(keep_dims=False) ... self.base = Tensor([0], dtype=ms.int32) ... self.logic_not = ops.LogicalNot() ... ... def construct(self, x): ... init = Tensor([0]*8, dtype=ms.int32) ... clear_status = self.clear_status(init) ... x = ops.depend(x, clear_status) ... res = self.sub(x, self.neg(x)) ... init = ops.depend(init, res) ... get_status = self.get_status(init) ... flag = self.equal(self.base, get_status) ... overall_finite = self.reduce_all(flag) ... overflow = self.logic_not(overall_finite) ... return overflow ... >>> value = 65504 >>> data = np.full((2, 3), value, dtype=np.float16) >>> x = Tensor(data, dtype=ms.float16) >>> net = Net() >>> res = net(x) >>> print(res) True >>> value = 10 >>> data = np.full((2, 3), value, dtype=np.float16) >>> x = Tensor(data, dtype=ms.float16) >>> net = Net() >>> res = net(x) >>> print(res) False """ @prim_arg_register def __init__(self): pass def __call__(self, input): return super().__call__(input) npu_clear_float_status_v2_op=NPUClearFloatStatusV2() class NPUGetFloatStatusV2(Primitive): r""" Get the flag for storage overflow status. This flag is located in a register at a fixed address on the `Ascend` device, and overflow information is automatically written to this register. The flag is a one-dimensional Tensor with shape :math:`(8,)` and data type `mindspore.dtype.int32`. If the value of flag is zero, no overflow has occurred, otherwise, overflow. When performing overflow detection on the network, you should first call `NPUClearFloatStatusV2` to reset the register before the detection, and then call `NPUGetFloatStatusV2` to get the register status after the network execution is completed. 
Note: - In order to avoid mis-optimization by the compiler, additional input is added to this operator. The input is defined as a shape of: math:`(8,)` and data type of `mindspore.dtype.int32` Tensor, meaningless. - Since this op lacks contextual dependencies with parameters in the network, :class:`mindspore.ops.Depend` needs to be used to ensure order of execution. Inputs: - **input** Tensor, an additional input created to avoid compiler optimization, is specified as shape :math:`(8,)`, data type is `mindspore.dtype.int32`, and has no actual meaning. Usually use the output of `NPUClearFloatStatusV2`. Outputs: - **output** Tensor, shape and data type are the same as input. If all are zero, it means no overflow, otherwise, overflow. Supported Platforms: ``Ascend`` Examples: >>> import mindspore as ms >>> import numpy as np >>> from mindspore import ops, nn, Tensor >>> from mindspore.ops import NPUGetFloatStatusV2, NPUClearFloatStatusV2 >>> class Net(nn.Cell): ... def __init__(self): ... super().__init__() ... self.clear_status = NPUClearFloatStatusV2() ... self.get_status = NPUGetFloatStatusV2() ... self.sub = ops.Sub() ... self.neg = ops.Neg() ... self.equal = ops.Equal() ... self.reduce_all = ops.ReduceAll(keep_dims=False) ... self.base = Tensor([0], dtype=ms.int32) ... self.logic_not = ops.LogicalNot() ... ... def construct(self, x): ... init = Tensor([0]*8, dtype=ms.int32) ... clear_status = self.clear_status(init) ... x = ops.depend(x, clear_status) ... res = self.sub(x, self.neg(x)) ... init = ops.depend(init, res) ... get_status = self.get_status(init) ... flag = self.equal(self.base, get_status) ... overall_finite = self.reduce_all(flag) ... overflow = self.logic_not(overall_finite) ... return overflow ... >>> value = 65504 >>> data = np.full((2, 3), value, dtype=np.float16) >>> x = Tensor(data, dtype=ms.float16) >>> net = Net() >>> res = net(x) >>> print(res) True >>> value = 10 >>> data = np.full((2, 3), value, dtype=np.float16) >>> x = Tensor(data, dtype=ms.float16) >>> net = Net() >>> res = net(x) >>> print(res) False """ @prim_arg_register def __init__(self): pass def __call__(self, input): return super().__call__(input) npu_get_float_status_v2_op=NPUGetFloatStatusV2() class OneHotExt(Primitive): r""" Computes a one-hot tensor. The locations represented by tensor in `tensor` take value `1`, while all other locations take value `0`. Args: - **tensor** (Tensor) - A tensor of indices. Tensor of shape :math:`(X_0, \ldots, X_n)`. Data type must be int32 or int64. - **num_classes** (int) - A scalar defining the depth of the one-hot dimension. Returns: Tensor, one-hot tensor. Raises: TypeError: If `num_classes` is not an int. TypeError: If dtype of `tensor` is not int32 or int64. ValueError: If `num_classes` is less than 0. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> import mindspore.ops as ops >>> from mindspore import Tensor >>> tensor = Tensor(np.array([0, 1, 2]), mindspore.int32) >>> num_classes = 3 >>> output = ops.extend.one_hot(tensor, num_classes) >>> print(output) [[1 0 0] [0 1 0] [0 0 1]] """ @prim_arg_register def __init__(self, axis=-1): self._set_prim_arg("axis", axis) def __call__(self, tensor, num_classes, on_value, off_value): return _convert_stub(pyboost_one_hot_ext(self, [tensor, num_classes, on_value, off_value, self.axis]))
[docs]class OneHot(Primitive): r""" Computes a one-hot tensor. The locations represented by indices in `indices` take value `on_value`, while all other locations take value `off_value`. Note: If the input indices is rank `N`, the output will have rank `N+1`. The new axis is created at dimension `axis`. On Ascend, if `on_value` is Int64 dtype, `indices` must be Int64 dtype, and the value for `on_value` and `off_value` can only be 1 and 0. Args: axis (int): Position to insert the value. e.g. If shape of `indices` is :math:`(N, C)`, and `axis` is -1, the output shape will be :math:`(N, C, D)`, If `axis` is 0, the output shape will be :math:`(D, N, C)`. Default: ``-1`` . Inputs: - **indices** (Tensor) - A tensor of indices. Tensor of shape :math:`(X_0, \ldots, X_n)`. Data type must be int32 or int64. - **depth** (Union[int, Tensor]) - A scalar defining the depth of the one-hot dimension. - **on_value** (Tensor) - A value to fill in output when `indices[j] = i`. - **off_value** (Tensor) - A value to fill in output when `indices[j] != i`. It has the same data type as `on_value`. Outputs: Tensor, one-hot tensor. Tensor of shape :math:`(X_0, \ldots, X_{axis}, \text{depth} ,X_{axis+1}, \ldots, X_n)`. Raises: TypeError: If `axis` or `depth` is not an int. TypeError: If dtype of `on_value` is not int32, int64, float16 or float32. TypeError: If dtype of `indices` is not int32 or int64. TypeError: If `indices`, `on_value` or `off_value` is not a Tensor. ValueError: If `axis` is not in range [-1, len(indices_shape)]. ValueError: If `depth` is less than 0. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> indices = Tensor(np.array([0, 1, 2]), mindspore.int32) >>> depth, on_value, off_value = 3, Tensor(1.0, mindspore.float32), Tensor(0.0, mindspore.float32) >>> onehot = ops.OneHot() >>> output = onehot(indices, depth, on_value, off_value) >>> print(output) [[1. 0. 0.] [0. 1. 0.] [0. 0. 1.]] """ @prim_arg_register def __init__(self, axis=-1): self._set_prim_arg("axis", axis) def __call__(self, indices, depth, on_value, off_value): return super().__call__(indices, depth, on_value, off_value, self.axis)
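# Plain-numpy illustration (not the operator's implementation) of the one-hot semantics
# described above, restricted to the default axis=-1 and 1-D indices: positions named by
# `indices` receive `on_value`, everything else receives `off_value`.
def _example_one_hot_by_hand(indices, depth, on_value=1.0, off_value=0.0):
    import numpy as np
    indices = np.asarray(indices)
    out = np.full((indices.shape[0], depth), off_value, dtype=np.float32)
    out[np.arange(indices.shape[0]), indices] = on_value
    return out

# _example_one_hot_by_hand([0, 1, 2], 3) reproduces the identity-like matrix in the OneHot
# doctest above.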
class OnesLikeExt(Primitive): r""" Returns a Tensor filled with the value 1, whose shape is the same as the input. Refer to :func:`mindspore.ops.ones_like` for more details. Args: - **input** (Tensor) - Tensor of any dimension. - **dtype** (mindspore.dtype, optional) - The desired data type of the returned Tensor. Default: ``None``. Returns: Tensor, has the same shape as `input` but filled with ones. If `dtype` is given, the returned Tensor uses that data type; otherwise it keeps the data type of `input`. Supported Platforms: ``Ascend`` """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('dtype', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, dtype=None): return _convert_stub(pyboost_ones_like_ext(self, [input, dtype if dtype is None else dtype_to_type_id('OnesLikeExt', 'dtype', dtype)])) ones_like_ext_op=OnesLikeExt()
[docs]class OnesLike(Primitive): r""" Returns a Tensor filled with the value 1, whose shape and data type are the same as the input. Refer to :func:`mindspore.ops.ones_like` for more details. Inputs: - **input_x** (Tensor) - Tensor of any dimension. Outputs: Tensor, has the same shape and type as `input_x` but filled with ones. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import numpy as np >>> from mindspore import Tensor, ops >>> input_x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32)) >>> output = ops.OnesLike()(input_x) >>> print(output) [[1 1] [1 1]] """ @prim_arg_register def __init__(self): pass def __call__(self, x): return super().__call__(x)
ones_like_op=OnesLike() class Outer(Primitive): r""" .. code-block:: prim = ops.Outer() out = prim(input, vec2) is equivalent to .. code-block:: ops.outer_ext(input, vec2) Refer to :func:`mindspore.ops.outer_ext` for more details. """ __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T) @prim_arg_register def __init__(self): pass def __call__(self, input, vec2): return _convert_stub(pyboost_outer(self, [input, vec2])) outer_op=Outer() class PagedAttentionMask(Primitive): r""" The PagedAttentionMask is the fusion of block-wise KV Cache access and self-attention(with alibi-mask) computing. Args: query (Tensor): The query tensor with data type of float16. :math:`(num\_tokens, num\_head, head\_dim)`. key_cache (Tensor): The cache tensor with data type of float16. :math:`(num\_blocks, block\_size, num\_head, head\_dim)`. value_cache (Tensor): The cache tensor with data type of float16. :math:`(num\_blocks, block\_size, num\_head, head\_dim)`. block_tables (Tensor): The block mapping table with data type of int32. :math:`(num\_tokens, max_num_blocks_per_batch)`. context_lens (Tensor): The context length of each sequence with data type of int32. :math:`(num\_tokens,)`. antiquant_scale (Tensor): The antiquant scale of key_cache and value_cache with data type of float16. key_cache and value_cache will be the type of int8. :math:`(2, num\_head * head\_dim,)`. antiquant_offset (Tensor): The antiquant offset of key_cache and value_cache with data type of float16. key_cache and value_cache will be the type of int8. :math:`(2, num\_head * head\_dim,)`. alibi_mask (Tensor): The bias after q @ k_t / (head_dim) ** 0.5 with data type of query. :math:`(num\_tokens, num\_head, q\_len, kv\_len)`. attn_mask (Tensor): The mask after alibi_mask with data type of float16. :math:`(num\_tokens, q\_len, kv\_len)`. Outputs: attention output. Notes: No backend implementation in MindSpore, only use to export MindIr and run in MindSpore Lite. 
Examples: >>> from mindspore.ops.operations import _inner_ops >>> num_tokens = = 4 >>> num_head = 40 >>> num_kv_head = 40 >>> head_dim = 128 >>> block_size = 16 >>> num_blocks = 128 >>> max_seq = 1024 >>> max_num_blocks_per_batch = max_seq // block_size >>> scale_value = 1.0 / math.sqrt(head_dim) >>> query = Tensor(np.random.randn(num_tokens, num_head, head_dim).astype(np.float16)) >>> key_cache = Parameter(default_input=Tensor(np.random.randn(num_blocks, block_size, num_head, head_dim).astype(np.float16))) >>> value_cache = Parameter(default_input=Tensor(np.random.randn(num_blocks, block_size, num_head, head_dim).astype(np.float16))) >>> dummy_block_indice = np.random.shuffle(np.arange(num_tokens * max_num_blocks_per_batch, dtype=np.int32)) >>> block_tables = Tensor(np.reshape(dummy_block_indice, (num_tokens, max_num_blocks_per_batch))) >>> context_lens = Tensor(np.random.randint(max_seq, size=num_tokens).astype(np.int32))) >>> alibi_mask = Tensor(np.random.randn(num_tokens, num_head, 1, max_seq).astype(np.int32))) >>> paged_attention_mask = _inner_ops.PagedAttentionMask() >>> output = paged_attention_mask(query, key_cache, value_cache, block_tables, context_lens, alibi_mask) >>> print(output) """ __mindspore_signature__ = ( sig.make_sig('query'), sig.make_sig('key_cache'), sig.make_sig('value_cache'), sig.make_sig('block_tables'), sig.make_sig('context_lens'), sig.make_sig('antiquant_scale', default=None), sig.make_sig('antiquant_offset', default=None), sig.make_sig('alibi_mask', default=None), ) @prim_arg_register def __init__(self, head_num, scale_value, kv_head_num, kv_cache_quant_mode='DEFAULT'): self._set_prim_arg("head_num", head_num) self._set_prim_arg("scale_value", scale_value) self._set_prim_arg("kv_head_num", kv_head_num) self._set_prim_arg_with_handler("kv_cache_quant_mode", kv_cache_quant_mode, str_to_enum) def __call__(self, query, key_cache, value_cache, block_tables, context_lens, antiquant_scale=None, antiquant_offset=None, alibi_mask=None): return super().__call__(query, key_cache, value_cache, block_tables, context_lens, antiquant_scale, antiquant_offset, alibi_mask, self.head_num, self.scale_value, self.kv_head_num, self.kv_cache_quant_mode) class PagedAttention(Primitive): r""" The PagedAttention is the fusion of block-wise KV Cache access and self-attention computing. Args: query (Tensor): The query tensor with data type of float16. :math:`(num\_tokens, num\_head, head\_dim)`. key_cache (Tensor): The cache tensor with data type of float16. :math:`(num\_blocks, block\_size, num\_head, head\_dim)`. value_cache (Tensor): The cache tensor with data type of float16. :math:`(num\_blocks, block\_size, num\_head, head\_dim)`. block_tables (Tensor): The block mapping table with data type of int32. :math:`(num\_tokens, max_num_blocks_per_batch)`. context_lens (Tensor): The context length of each sequence with data type of int32. :math:`(num\_tokens,)`. antiquant_scale (Tensor): The antiquant scale of key_cache and value_cache with data type of float16. key_cache and value_cache will be the type of int8. :math:`(2, num\_head * head\_dim,)`. antiquant_offset (Tensor): The antiquant offset of key_cache and value_cache with data type of float16. key_cache and value_cache will be the type of int8. :math:`(2, num\_head * head\_dim,)`. attn_mask (Tensor): The lookahead mask with data type of float16. Default is None. :math:`(num\_tokens, max_context_lens)`. q_seq_lens (Tensor): The query length of each sequence with data type of int32. Default is None. :math:`(batch,)`. 
Outputs: attention output. Notes: No backend implementation in MindSpore, only used to export MindIR and run in MindSpore Lite. Examples: >>> import math >>> import numpy as np >>> from mindspore import Tensor, Parameter >>> from mindspore.ops.operations import _inner_ops >>> num_tokens = 4 >>> num_head = 40 >>> num_kv_head = 40 >>> head_dim = 128 >>> block_size = 16 >>> num_blocks = 128 >>> max_seq = 1024 >>> max_num_blocks_per_batch = max_seq // block_size >>> scale_value = 1.0 / math.sqrt(head_dim) >>> query = Tensor(np.random.randn(num_tokens, num_head, head_dim).astype(np.float16)) >>> key_cache = Parameter(default_input=Tensor(np.random.randn(num_blocks, block_size, num_head, head_dim).astype(np.float16))) >>> value_cache = Parameter(default_input=Tensor(np.random.randn(num_blocks, block_size, num_head, head_dim).astype(np.float16))) >>> dummy_block_indice = np.random.permutation(np.arange(num_tokens * max_num_blocks_per_batch, dtype=np.int32)) >>> block_tables = Tensor(np.reshape(dummy_block_indice, (num_tokens, max_num_blocks_per_batch))) >>> context_lens = Tensor(np.random.randint(max_seq, size=num_tokens).astype(np.int32)) >>> paged_attention = _inner_ops.PagedAttention() >>> output = paged_attention(query, key_cache, value_cache, block_tables, context_lens) >>> print(output) """ __mindspore_signature__ = ( sig.make_sig('query'), sig.make_sig('key_cache'), sig.make_sig('value_cache'), sig.make_sig('block_tables'), sig.make_sig('context_lens'), sig.make_sig('antiquant_scale', default=None), sig.make_sig('antiquant_offset', default=None), sig.make_sig('attn_mask', default=None), sig.make_sig('q_seq_lens', default=None), ) @prim_arg_register def __init__(self, head_num, scale_value, kv_head_num, kv_cache_quant_mode='DEFAULT'): self._set_prim_arg("head_num", head_num) self._set_prim_arg("scale_value", scale_value) self._set_prim_arg("kv_head_num", kv_head_num) self._set_prim_arg_with_handler("kv_cache_quant_mode", kv_cache_quant_mode, str_to_enum) def __call__(self, query, key_cache, value_cache, block_tables, context_lens, antiquant_scale=None, antiquant_offset=None, attn_mask=None, q_seq_lens=None): return super().__call__(query, key_cache, value_cache, block_tables, context_lens, antiquant_scale, antiquant_offset, attn_mask, q_seq_lens, self.head_num, self.scale_value, self.kv_head_num, self.kv_cache_quant_mode)
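# Illustrative numpy sketch of the block-wise KV-cache layout these paged-attention ops
# consume; it is an assumption for exposition, not the fused kernel's implementation.
# key_cache: (num_blocks, block_size, num_head, head_dim); the block_tables row for
# `batch_idx` lists that sequence's cache blocks; context_len is its number of valid tokens.
def _example_gather_paged_keys(key_cache, block_tables, context_len, batch_idx):
    import numpy as np
    blocks = block_tables[batch_idx]
    gathered = np.concatenate([key_cache[b] for b in blocks], axis=0)
    return gathered[:context_len]  # contiguous (context_len, num_head, head_dim) keys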
[docs]class Polar(Primitive): r""" .. code-block:: prim = ops.Polar() out = prim(abs, angle) is equivalent to .. code-block:: ops.polar_ext(abs, angle) Refer to :func:`mindspore.ops.polar_ext` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, abs, angle): return _convert_stub(pyboost_polar(self, [abs, angle]))
polar_op=Polar()
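# Numpy check of the usual polar relationship (our assumption for illustration; see
# mindspore.ops.polar_ext): a complex value is recovered as abs * exp(1j * angle).
def _example_polar_by_hand():
    import numpy as np
    a = np.array([1.0, 2.0])
    angle = np.array([np.pi / 2, np.pi])
    return a * np.exp(1j * angle)  # ~ [0+1j, -2+0j]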
[docs]class Pow(Primitive): r""" .. code-block:: prim = ops.Pow() out = prim(input, exponent) is equivalent to .. code-block:: ops.pow(input, exponent) Refer to :func:`mindspore.ops.pow` for more details. """ __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T) @prim_arg_register def __init__(self): pass def __call__(self, input, exponent): return _convert_stub(pyboost_pow(self, [input, exponent]))
pow_op=Pow() class PReLUGrad(Primitive): r""" Gradients of PReLU operation. Note: 1-dimensional input_x is not supported. Inputs: - **dy** (Tensor) - Representing the backprop of the next layer. - **x** (Tensor) - Must be the input `x` of forward operator PRelu. - **weight** (Tensor) - Float Tensor, w > 0, must be the input `weight` of forward operator PRelu. Outputs: - **dx** (Tensor), with the same type as `x`. - **dw** (Tensor), with the same type as `weight`. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` """ @prim_arg_register def __init__(self): pass def __call__(self, dy, x, weight): return _convert_stub(pyboost_prelu_grad(self, [dy, x, weight])) prelu_grad_op=PReLUGrad()
[docs]class PReLU(Primitive): r""" .. code-block:: prim = ops.PReLU() out = prim(input, weight) is equivalent to .. code-block:: ops.prelu(input, weight) Refer to :func:`mindspore.ops.prelu` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input, weight): return _convert_stub(pyboost_prelu(self, [input, weight]))
prelu_op=PReLU() class ProdExt(Primitive): r""" .. code-block:: prim = ops.ProdExt() out = prim(input, axis, keep_dims, dtype) is equivalent to .. code-block:: ops.prod_ext(input, axis, keep_dims, dtype) Refer to :func:`mindspore.ops.prod_ext` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('axis', default=None), sig.make_sig('keep_dims', default=False), sig.make_sig('dtype', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, axis=None, keep_dims=False, dtype=None): return _convert_stub(pyboost_prod_ext(self, [input, axis, keep_dims, dtype if dtype is None else dtype_to_type_id('ProdExt', 'dtype', dtype)])) prod_ext_op=ProdExt() class PromptFlashAttention(Primitive): r""" The interface for fully inference. B -- Batch size N -- Num heads S -- Sequence length D -- Head dim H -- Hidden size Self attention constructs an attention model based on the relationship between input samples themselves. The principle is to assume that there is a length of the input sample sequence :math:`x` of :math:`n`, and each element of :math:`x` is a :math:`d` dimensional vector, which can be viewed as a token embedding. This sequence can be transformed through 3 weight matrices to obtain 3 matrices with dimensions of :math:`n\times d`. The self attention calculation formula is defined as: .. math:: Attention(Q,K,V)=Softmax(\frac{QK^{T} }{\sqrt{d} } )V where the product of :math:`Q` and :math:`K^{T}` represents the attention of input :math:`x`. To avoid the value becoming too large, it is usually scaled by dividing it by the square root of :math:`d` and perform softmax normalization on each row, yields a matrix of :math:`n\times d` after multiplying :math:`V`. .. warning:: - This is an experimental API that is subject to change or deletion. - `attn_mask` of type float16 will be deprecated in the future. Note: - Maximum Support for each axis - Supports B-axis values less than or equal to 65536 (64k). When the input type includes int8 with D-axis not aligned to 32, or the input type is float16 or bfloat16 with D-axis not aligned to 16, the B-axis supports up to 128 only. - Supports N-axis values less than or equal to 256. - Supports S-axis values less than or equal to 20971520 (20M). - Supports D-axis values less than or equal to 512. - Quantization - int8 Input, int8 Output: Parameters `deq_scale1`, `quant_scale1`, `deq_scale2`, and `quant_scale2` must all be provided. `quant_offset2` is optional (default is 0 if not provided). - int8 Input, float16 Output: Parameters `deq_scale1`, `quant_scale1`, and `deq_scale2` must all be provided. If `quant_offset2` or `quant_scale2` is provided (i.e., not null), it will result in an error. - float16 or bfloat16 Input, int8 Output: Parameter `quant_scale2` must be provided. `quant_offset2` is optional (default is 0 if not provided). If `deq_scale1`, `quant_scale1`, or `deq_scale2` is provided (i.e., not null), it will result in an error. - int8 Output: `quant_scale2` and `quant_offset2` in per-channel format do not support scenarios with left padding, Ring Attention, or non-32-byte aligned D-axis. Does not support sparse as band and `pre_tokens`/`next_tokens` being negative. - Other Usage Caveats: - :math:`N` of parameter `query` must be equal to `num_heads`. :math:`N` of parameter `key` and parameter `value` must be equal to `num_key_value_heads`. - `num_heads` must be divisible by `num_key_value_heads`. - When `query` dtype is bfloat16, D axis should align with 16. 
Inputs: query (Tensor): The query tensor with data type of int8, float16 or bfloat16. The shape is :math:`(B, q_S, q_H)` / `(B, q_N, q_S, q_D)`. key (Tensor): The key tensor with the same dtype as `query`. The shape is :math:`(B, kv_S, kv_H)` / `(B, kv_N, kv_S, kv_D)`. value (Tensor): The value tensor with the same dtype as `query`. The shape is :math:`(B, kv_S, kv_H)` / `(B, kv_N, kv_S, kv_D)`. attn_mask (Tensor, optional) - The attention mask tensor with data type of bool, int8, uint8 or float16. For each element, 0/False indicates retention and 1/True indicates discard. The shape is :math:`(q_S, kv_S)` / :math:`(B, q_S, kv_S)` / :math:`(1, q_S, kv_S)` / :math:`(B, 1, q_S, kv_S)` / :math:`(1, 1, q_S, kv_S)`. Default: ``None``. actual_seq_lengths (Union[Tensor, tuple[int], list[int]], optional): Describe actual sequence length of each batch of `query` with data type of int64. The shape is :math:`(B, )`. Default: ``None``. actual_seq_lengths_kv (Union[Tensor, tuple[int], list[int]], optional): Describe actual sequence length of each batch of `key` or `value` with data type of int64. The shape is :math:`(B, )`. Default: ``None``. pse_shift (Tensor, optional): The position encoding tensor with data type of float16 or bfloat16. Input tensor of shape :math:`(B, N, q_S, kv_S)` / :math:`(1, N, q_S, kv_S)`. Default: ``None``. - q_S must be greater than or equal to the query's S length, and kv_S must be greater than or equal to the key's S length.' - If `pse_shift` has dtype float16, `query` should have dtype float16 or int8, in which case high precision mode is enabled automatically. - If `pse_shift` has dtype bfloat16, `query` should have dtype bfloat16. deq_scale1 (Tensor, optional): Quantitative parametor, the tensor with data type of uint64 or float32. Input Tensor of shape :math:`(1,)`. Default: ``None``. quant_scale1 (Tensor, optional): Quantitative parametor, the tensor with data type of float32. Input Tensor of shape :math:`(1,)`. Default: ``None``. deq_scale2 (Tensor, optional): Quantitative parametor, the tensor with data type of uint64 or float32. Input Tensor of shape :math:`(1,)`. Default: ``None``. quant_scale2 (Tensor, optional): Quantitative parametor, the tensor with data type of float32. The suggested shape is :math:`(1,)` / :math:`(1, 1, H)` / :math:`(H, )` when output layout is BSH, :math:`(1,)` / :math:`(1, N, 1, D)` / :math:`(N, D) when layout is BNSD. Default: ``None``. quant_offset2 (Tensor, optional): Quantitative parametor, the tensor with data type of float32. It has the same dtype and shape as `quant_scale2`. Default: ``None``. num_heads (int, optional): The number of heads. Default: ``1``. scale_value (double, optional): The scale value indicating the scale coefficient, which is used as the scalar of Muls in the calculation. Default: ``1.0``. pre_tokens (int, optional): For sparse cumputing, indicating the number of previous tokens Attention needs to associated with. Default: 2147483647. next_tokens (int, optional): For sparse cumputing, indicating the number of next tokens Attention needs to associated with. Default: 0. input_layout (str, optional): the data layout of the input qkv, support `(BSH)` and `(BNSD)`. Default `BSH`. num_key_value_heads (int, optional): An int indicates head numbers of ``key``/``value`` which are used in GQA algorithm. The value 0 indicates if the key and value have the same head nums, use `num_heads`. It it is specified(not 0), it must be a factor of `num_heads` and it must be equal to kv_n. Default: ``0``. 
sparse_mode (int, optional): An int specifies sparse mode, can be int from {0, 1, 2, 3, 4}. Default: ``0``. - sparseMode = 0: If `attn_mask` is a null pointer, `pre_tokens` and `next_tokens` inputs are ignored (internally set to INT_MAX). - sparseMode = 2, 3, 4: `attn_mask` shape must be :math:`(S, S)` or :math:`(1, S, S)` or :math:`(1, 1, S, S)`, with S fixed at 2048. User must ensure that `attn_mask` is lower triangular. If not provided or incorrect shape, it will result in an error. - sparseMode = 1, 2, 3: Ignores `pre_tokens`, `next_tokens` inputs and sets values according to specific rules. - sparseMode = 4: `pre_tokens` and `next_tokens` must be non-negative. inner_precise (int, optional): An int number from {0, 1} indicates computing mode. ``0`` for high precision mode for float16 dtype. ``1`` for high performance mode. Default: ``1``. Outputs: attention_out (Tensor) - Output tensor, has the same shape as` query` of :math:`(B, q_S, q_H)` / :math:`(B, q_N, q_S, q_D)`. Output dtype is determined by multiple factors, please refer to Note above for details. Supported Platforms: ``Ascend`` Raises: TypeError: Dtype of `query` is not int8, float16 or bfloat16. TypeError: `query`, `key` and `value` don't have the same dtype. TypeError: Dtype of `attn_mask` is not bool, int8 or uint8. TypeError: Dtype of `pse_shift` is not bfloat16 or float16. TypeError: `scale_value` is not a double number. TypeError: `input_layout` is not a string. TypeError: `num_key_value_heads` is not an int. TypeError: `sparse_mode` is not an int. TypeError: `sparse_inner_precisemode` is not an int. TypeError: `quant_scale1` is not Tensor of type float32. TypeError: `deq_scale1` is not Tensor of type uint64 or float32. TypeError: `quant_scale2` is not Tensor of type float32. TypeError: `deq_scale2` is not Tensor of type uint64 or float32. TypeError: `quant_offset2` is not Tensor of type float32. ValueError: `input_layout` is a string but of `(BSH)` or `(BNSD)`. RuntimeError: `num_heads` is not divisible by `num_key_value_heads`. RuntimeError: `num_heads` is not greater than 0. RuntimeError: `num_key_value_heads` is not greater than or equal to 0. RuntimeError: kv_n is not equal to `num_key_value_heads`. RuntimeError: `attn_mask` shape is not valid. RuntimeError: `sparse_mode` is specified but is not 0, 1, 2, 3 or 4. RuntimeError: `query` dtype is bfloat16 and D axis is not aligned with 16. RuntimeError: `input_layout` is BSH and kv_h is not divisible by `num_key_value_heads`. RuntimeError: D-axis of `query`, `key` and `value` is not the same. RuntimeError: In post quant per-channel scenario, D-axis is not 32 Byte aligned. 
Examples: >>> import mindspore >>> import mindspore.ops as ops >>> from mindspore import Tensor >>> import numpy as np >>> B = 1 >>> N = 16 >>> S = 256 >>> D = 16 >>> query = Tensor(np.ones((B, N, S, D), dtype=np.float16)) >>> key = Tensor(np.ones((B, N, S, D), dtype=np.float16)) >>> value = Tensor(np.ones((B, N, S, D), dtype=np.float16)) >>> out = ops.auto_generate.PromptFlashAttention()(query, key, value, None, None, None, None, None, None, None, None, None, N, input_layout='BNSD') >>> print(out.shape) (1, 16, 256, 16) """ __mindspore_signature__ = ( sig.make_sig('query'), sig.make_sig('key'), sig.make_sig('value'), sig.make_sig('attn_mask', default=None), sig.make_sig('actual_seq_lengths', default=None), sig.make_sig('actual_seq_lengths_kv', default=None), sig.make_sig('pse_shift', default=None), sig.make_sig('deq_scale1', default=None), sig.make_sig('quant_scale1', default=None), sig.make_sig('deq_scale2', default=None), sig.make_sig('quant_scale2', default=None), sig.make_sig('quant_offset2', default=None), ) @prim_arg_register def __init__(self, num_heads=1, scale_value=1.0, pre_tokens=2147483647, next_tokens=0, input_layout='BSH', num_key_value_heads=0, sparse_mode=0, inner_precise=1): self._set_prim_arg("num_heads", num_heads) self._set_prim_arg("scale_value", scale_value) self._set_prim_arg("pre_tokens", pre_tokens) self._set_prim_arg("next_tokens", next_tokens) self._set_prim_arg_with_handler("input_layout", input_layout, str_to_enum) self._set_prim_arg("num_key_value_heads", num_key_value_heads) self._set_prim_arg("sparse_mode", sparse_mode) self._set_prim_arg("inner_precise", inner_precise) def __call__(self, query, key, value, attn_mask=None, actual_seq_lengths=None, actual_seq_lengths_kv=None, pse_shift=None, deq_scale1=None, quant_scale1=None, deq_scale2=None, quant_scale2=None, quant_offset2=None): return _convert_stub(pyboost_prompt_flash_attention(self, [query, key, value, attn_mask, actual_seq_lengths, actual_seq_lengths_kv, pse_shift, deq_scale1, quant_scale1, deq_scale2, quant_scale2, quant_offset2, self.num_heads, self.scale_value, self.pre_tokens, self.next_tokens, self.input_layout, self.num_key_value_heads, self.sparse_mode, self.inner_precise])) class PromptKVCache(Primitive): r""" .. code-block:: prim = ops.PromptKVCache(align_mode) out = prim(cache, update, valid_seq_len, batch_index, seq_len_axis, new_max_seq_len, cur_max_seq_len) is equivalent to .. code-block:: ops.prompt_k_v_cache(cache, update, valid_seq_len, batch_index, seq_len_axis, new_max_seq_len, cur_max_seq_len, align_mode) Refer to :func:`mindspore.ops.prompt_k_v_cache` for more details. """ @prim_arg_register def __init__(self, align_mode='LEFT'): self._set_prim_arg_with_handler("align_mode", align_mode, str_to_enum) self.add_prim_attr("side_effect_mem", True) def __call__(self, cache, update, valid_seq_len, batch_index, seq_len_axis, new_max_seq_len, cur_max_seq_len): return super().__call__(cache, update, valid_seq_len, batch_index, seq_len_axis, new_max_seq_len, cur_max_seq_len, self.align_mode) class Qr(Primitive): r""" Returns the QR decomposition of one or more matrices. If `mode` is 'reduced'(the default), compute the P columns of Q where P is minimum of the 2 innermost dimensions of input. If `mode` is 'complete', compute full-sized Q and R. Args: full_matrices (bool, optional): Whether compute full-sized QR decomposition. Default: ``False`` . Inputs: - **x** (Tensor) - A matrix to be calculated. 
The matrix must be at least two dimensions, the supported dtype are float16, float32, float64, complex64 and complex128. Define the shape of input as :math:`(..., m, n)`, p as the minimum values of m and n. Outputs: - **Q** (Tensor) - The orthonormal matrices of input. If `mode` is 'complete', the shape is :math:`(m, m)`, else the shape is :math:`(m, p)`. The dtype of `Q` is same as `input`. - **R** (Tensor) - The upper triangular matrices of input. If `mode` is 'complete', the shape is :math:`(m, n)`, else the shape is :math:`(p, n)`. The dtype of `R` is same as `input`. Raises: TypeError: If `input` is not a Tensor. TypeError: If `mode` is neither 'reduced' nor 'complete'. ValueError: If the dimension of `input` is less than 2. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore as ms >>> from mindspore import Tensor, ops >>> import numpy as np >>> x = Tensor(np.array([[20., -31, 7], [4, 270, -90], [-8, 17, -32]]), ms.float32) >>> Q, R = ops.Qr()(x) >>> print(Q) [[-0.912871 0.16366126 0.37400758] [-0.18257418 -0.9830709 -0.01544376] [ 0.36514837 -0.08238228 0.92729706]] >>> print(R) [[ -21.908903 -14.788506 -1.6431675] [ 0. -271.9031 92.25824 ] [ 0. 0. -25.665514 ]] """ @prim_arg_register def __init__(self, full_matrices=False): self._set_prim_arg("full_matrices", full_matrices) def __call__(self, x): return super().__call__(x, self.full_matrices) class RandExt(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('shape'), sig.make_sig('seed'), sig.make_sig('offset'), sig.make_sig('dtype', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, shape, seed, offset, dtype=None): return _convert_stub(pyboost_rand_ext(self, [shape, seed, offset, dtype if dtype is None else dtype_to_type_id('RandExt', 'dtype', dtype)])) rand_ext_op=RandExt() class RandLikeExt(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('tensor'), sig.make_sig('seed'), sig.make_sig('offset'), sig.make_sig('dtype', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, tensor, seed, offset, dtype=None): return _convert_stub(pyboost_rand_like_ext(self, [tensor, seed, offset, dtype if dtype is None else dtype_to_type_id('RandLikeExt', 'dtype', dtype)])) rand_like_ext_op=RandLikeExt() class RandIntLike(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('low'), sig.make_sig('high'), sig.make_sig('seed'), sig.make_sig('offset'), sig.make_sig('dtype', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, low, high, seed, offset, dtype=None): return _convert_stub(pyboost_randint_like(self, [input, low, high, seed, offset, dtype if dtype is None else dtype_to_type_id('RandIntLike', 'dtype', dtype)])) randint_like_op=RandIntLike() class RandInt(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('low'), sig.make_sig('high'), sig.make_sig('shape'), sig.make_sig('seed'), sig.make_sig('offset'), sig.make_sig('dtype', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, low, high, shape, seed, offset, dtype=None): return _convert_stub(pyboost_randint(self, [low, high, shape, seed, offset, dtype if dtype is None else dtype_to_type_id('RandInt', 'dtype', dtype)])) randint_op=RandInt() class RandnLike(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('seed'), sig.make_sig('offset'), sig.make_sig('dtype', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, seed, offset, 
dtype=None): return _convert_stub(pyboost_randn_like(self, [input, seed, offset, dtype if dtype is None else dtype_to_type_id('RandnLike', 'dtype', dtype)])) randn_like_op=RandnLike() class Randn(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('shape'), sig.make_sig('seed'), sig.make_sig('offset'), sig.make_sig('dtype', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, shape, seed, offset, dtype=None): return _convert_stub(pyboost_randn(self, [shape, seed, offset, dtype if dtype is None else dtype_to_type_id('Randn', 'dtype', dtype)])) randn_op=Randn() class RandpermExt(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('n'), sig.make_sig('seed'), sig.make_sig('offset'), sig.make_sig('dtype', default=mstype.int64), ) @prim_arg_register def __init__(self): pass def __call__(self, n, seed, offset, dtype=mstype.int64): return _convert_stub(pyboost_randperm_ext(self, [n, seed, offset, dtype_to_type_id('RandpermExt', 'dtype', dtype)])) randperm_ext_op=RandpermExt()
[docs]class RandpermV2(Primitive): r""" .. code-block:: prim = ops.RandpermV2(seed, offset, dtype) out = prim(n) is equivalent to .. code-block:: ops.randperm(n, seed, offset, dtype) Refer to :func:`mindspore.ops.randperm` for more details. """ @prim_arg_register def __init__(self, seed=0, offset=0, dtype=mstype.int64): self._set_prim_arg("seed", type_it('RandpermV2', 'seed', seed, OpDtype.DT_TENSOR, OpDtype.DT_INT)) self._set_prim_arg("offset", type_it('RandpermV2', 'offset', offset, OpDtype.DT_TENSOR, OpDtype.DT_INT)) self._set_prim_arg_with_handler("dtype", dtype, dtype_to_type_id) def __call__(self, n): return super().__call__(n, self.seed, self.offset, self.dtype)
[docs]class Range(Primitive): r""" .. code-block:: prim = ops.Range(maxlen) out = prim(start, end, step) is equivalent to .. code-block:: ops.range(start, end, step, maxlen) Refer to :func:`mindspore.ops.range` for more details. """ @prim_arg_register def __init__(self, maxlen=1000000): self._set_prim_arg("maxlen", maxlen) def __call__(self, start, end, step): return super().__call__(start, end, step, self.maxlen)
[docs]class RealDiv(Primitive): r""" Divides the first input tensor by the second input tensor in floating-point type element-wise. Refer to :func:`mindspore.ops.div` for more details. Inputs: - **x** (Union[Tensor, Number, bool]) - The first input is a number or a bool or a tensor whose data type is number or bool. - **y** (Union[Tensor, Number, bool]) - The second input is a number or a bool when the first input is a tensor or a tensor whose data type is number or bool. Outputs: Tensor, the shape is the same as the one after broadcasting, and the data type is the one with higher precision or higher digits among the two inputs. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32) >>> y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32) >>> realdiv = ops.RealDiv() >>> output = realdiv(x, y) >>> print(output) [0.25 0.4 0.5 ] """ __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T) @prim_arg_register def __init__(self): pass def __call__(self, x, y): return super().__call__(x, y)
real_div_op=RealDiv()
[docs]class Real(Primitive): r""" .. code-block:: prim = ops.Real() out = prim(input) is equivalent to .. code-block:: ops.real(input) Refer to :func:`mindspore.ops.real` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return super().__call__(input)
real_op=Real() class ReciprocalGrad(Primitive): r""" Performs grad of Reciprocal operation. """ @prim_arg_register def __init__(self): pass def __call__(self, y, dy): return super().__call__(y, dy) reciprocal_grad_op=ReciprocalGrad()
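# Illustrative sketch of the backward rule ReciprocalGrad corresponds to (our assumption from
# d(1/x)/dx = -1/x**2 = -y**2 with y = 1/x, not a statement about the kernel itself):
def _example_reciprocal_grad_by_hand(y, dy):
    return -dy * y * y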
[docs]class Reciprocal(Primitive): r""" Returns reciprocal of a tensor element-wise. .. math:: out_{i} = \frac{1}{x_{i}} Inputs: - **x** (Tensor) - The input tensor. Outputs: Tensor, has the same shape as the `x`. Raises: TypeError: If `x` is not a Tensor. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32) >>> reciprocal = ops.Reciprocal() >>> output = reciprocal(x) >>> print(output) [1. 0.5 0.25] """ @prim_arg_register def __init__(self): pass def __call__(self, x): return _convert_stub(pyboost_reciprocal(self, [x]))
reciprocal_op=Reciprocal()
[docs]class ReduceAll(Primitive): r""" .. code-block:: prim = ops.ReduceAll(keep_dims) out = prim(input, axis) is equivalent to .. code-block:: ops.all(input, axis, keep_dims) Refer to :func:`mindspore.ops.all` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('axis', default=None), ) @prim_arg_register def __init__(self, keep_dims=False): self._set_prim_arg("keep_dims", keep_dims) def __call__(self, input, axis=None): return _convert_stub(pyboost_reduce_all(self, [input, axis, self.keep_dims]))
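# Minimal usage sketch (illustrative, not part of the generated file): ReduceAll performs a
# logical-AND reduction along `axis`, as ops.all does; keep_dims controls the output rank.
def _example_reduce_all():
    import numpy as np
    from mindspore import Tensor
    x = Tensor(np.array([[True, False], [True, True]]))
    return ReduceAll(keep_dims=False)(x, 1)  # expected: [False  True]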
[docs]class ReduceAny(Primitive): r""" Reduces a dimension of a tensor by the "logical OR" of all elements in the dimension, by default. And also can reduce a dimension of `x` along the `axis`. Determine whether the dimensions of the output and input are the same by controlling `keep_dims`. Note: The `axis` with tensor type is only used for compatibility with older versions and is not recommended. Args: keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1. If ``False`` , don't keep these dimensions. Default: ``False`` . Inputs: - **x** (Tensor[bool]) - The input tensor. The dtype of the tensor to be reduced is bool. - **axis** (Union[int, tuple(int), list(int), Tensor]) - The dimensions to reduce. Default: ``()`` , reduce all dimensions. Only constant value is allowed. Must be in the range [-rank(x), rank(x)). Outputs: Tensor, the dtype is bool. - If `axis` is ``()`` , and `keep_dims` is ``False`` , the output is a 0-D tensor representing the "logical or" of all elements in the input tensor. - If `axis` is int, set as 2, and `keep_dims` is ``False`` , the shape of output is :math:`(x_1, x_3, ..., x_R)`. - If `axis` is tuple(int), set as (2, 3), and `keep_dims` is ``False`` , the shape of output is :math:`(x_1, x_4, ..., x_R)`. - If `axis` is 1-D Tensor, set as [2, 3], and `keep_dims` is ``False`` , the shape of output is :math:`(x_1, x_4, ..., x_R)`. Raises: TypeError: If `keep_dims` is not a bool. TypeError: If `x` is not a Tensor. TypeError: If `axis` is not one of the following: int, tuple, list or Tensor. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import numpy as np >>> from mindspore import Tensor, ops >>> x = Tensor(np.array([[True, False], [True, True]])) >>> op = ops.ReduceAny(keep_dims=True) >>> # case 1: Reduces a dimension by the "logical OR" of all elements in the dimension. >>> output = op(x) >>> print(output) [[ True]] >>> print(output.shape) (1, 1) >>> # case 2: Reduces a dimension along axis 0. >>> output = op(x, 0) >>> print(output) [[ True True]] >>> # case 3: Reduces a dimension along axis 1. >>> output = op(x, 1) >>> print(output) [[True] [ True]] >>> # case 4: input is a scalar. >>> x = Tensor(True) >>> op = ops.ReduceAny() >>> output = op(x) >>> print(output) True """ __mindspore_signature__ = ( sig.make_sig('x'), sig.make_sig('axis', default=()), ) @prim_arg_register def __init__(self, keep_dims=False): self._set_prim_arg("keep_dims", keep_dims) def __call__(self, x, axis=()): return _convert_stub(pyboost_reduce_any(self, [x, axis, self.keep_dims]))
[docs]class ReduceMax(Primitive): r""" Reduces a dimension of a tensor by the maximum value in this dimension, by default. And also can reduce a dimension of `x` along the `axis`. Determine whether the dimensions of the output and input are the same by controlling `keep_dims`. Note: The `axis` with tensor type is only used for compatibility with older versions and is not recommended. Args: keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1. If ``False`` , don't keep these dimensions. Default: ``False`` . Inputs: - **x** (Tensor[Number]) - The input tensor. - **axis** (Union[int, tuple(int), list(int), tensor]) - The dimensions to reduce. Default: ``()`` , reduce all dimensions. Must be in the range [-r, r). Outputs: output(Tensor): has the same dtype as the `x`. - If `axis` is ``()`` , and `keep_dims` is ``False`` , the output is a 0-D tensor representing the maximum of all elements in the input tensor. - If `axis` is int, set as 1, and `keep_dims` is ``False`` , the shape of output is :math:`(x_0, x_2, ..., x_R)`. - If `axis` is tuple(int) or list(int), set as (1, 2), and `keep_dims` is ``False`` , the shape of output is :math:`(x_0, x_3, ..., x_R)`. - If `axis` is 1-D Tensor, set as [1, 2], and `keep_dims` is ``False`` , the shape of output is :math:`(x_0, x_3, ..., x_R)`. Raises: TypeError: If `keep_dims` is not a bool. TypeError: If `x` is not a Tensor. TypeError: If `axis` is not one of the following: int, tuple, list or Tensor. ValueError: If `axis` is out of range. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32)) >>> output = ops.ReduceMax(keep_dims=True)(x, 1) >>> result = output.shape >>> print(result) (3, 1, 5, 6) >>> # case 1: Reduces a dimension by the maximum value of all elements in the dimension. >>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]], ... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]], ... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32) >>> output = ops.ReduceMax(keep_dims=True)(x, ()) >>> print(output) [[[9.]]] >>> print(output.shape) (1, 1, 1) >>> # case 2: Reduces a dimension along axis 0. >>> output = ops.ReduceMax(keep_dims=True)(x, 0) >>> print(output) [[[7. 7. 7. 7. 7. 7.] [8. 8. 8. 8. 8. 8.] [9. 9. 9. 9. 9. 9.]]] >>> # case 3: Reduces a dimension along axis 1. >>> output = ops.ReduceMax(keep_dims=True)(x, 1) >>> print(output) [[[3. 3. 3. 3. 3. 3.]] [[6. 6. 6. 6. 6. 6.]] [[9. 9. 9. 9. 9. 9.]]] >>> # case 4: Reduces a dimension along axis 2. >>> output = ops.ReduceMax(keep_dims=True)(x, 2) >>> print(output) [[[1.] [2.] [3.]] [[4.] [5.] [6.]] [[7.] [8.] [9.]]] """ __mindspore_signature__ = ( sig.make_sig('x'), sig.make_sig('axis', default=()), ) @prim_arg_register def __init__(self, keep_dims=False): self._set_prim_arg("keep_dims", keep_dims) def __call__(self, x, axis=()): return super().__call__(x, axis, self.keep_dims)
[docs]class ReduceMean(Primitive): r""" Reduces a dimension of a tensor by averaging all elements in the dimension, by default. And also can reduce a dimension of `x` along the `axis`. Determine whether the dimensions of the output and input are the same by controlling `keep_dims`. Note: The `axis` with tensor type is only used for compatibility with older versions and is not recommended. Args: keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1. If ``False`` , don't keep these dimensions. Default: ``False`` . Inputs: - **x** (Tensor[Number]) - The input tensor. - **axis** (Union[int, tuple(int), list(int), Tensor]) - The dimensions to reduce. Default: ``()`` , reduce all dimensions. Only constant value is allowed. Must be in the range [-r, r). Outputs: Tensor, has the same dtype as the `x`. - If `axis` is ``()`` , and `keep_dims` is ``False`` , the output is a 0-D tensor representing the mean of all elements in the input tensor. - If `axis` is int, set as 1, and `keep_dims` is ``False`` , the shape of output is :math:`(x_0, x_2, ..., x_R)`. - If `axis` is tuple(int) or list(int), set as (1, 2), and `keep_dims` is ``False`` , the shape of output is :math:`(x_0, x_3, ..., x_R)`. - If `axis` is 1-D Tensor, set as [1, 2], and `keep_dims` is ``False`` , the shape of output is :math:`(x_0, x_3, ..., x_R)`. Raises: TypeError: If `keep_dims` is not a bool. TypeError: If `x` is not a Tensor. TypeError: If `axis` is not one of the following: int, tuple, list or Tensor. ValueError: If `axis` is out of range. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32)) >>> op = ops.ReduceMean(keep_dims=True) >>> output = op(x, 1) >>> result = output.shape >>> print(result) (3, 1, 5, 6) >>> # case 1: Reduces a dimension by averaging all elements in the dimension. >>> x = Tensor(np.array([[[2, 2, 2, 2, 2, 2], [2, 2, 2, 2, 2, 2], [2, 2, 2, 2, 2, 2]], ... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]], ... [[6, 6, 6, 6, 6, 6], [8, 8, 8, 8, 8, 8], [10, 10, 10, 10, 10, 10]]]), ... mindspore.float32) >>> output = op(x) >>> print(output) [[[5.]]] >>> print(output.shape) (1, 1, 1) >>> # case 2: Reduces a dimension along the axis 0 >>> output = op(x, 0) >>> print(output) [[[4. 4. 4. 4. 4. 4.] [5. 5. 5. 5. 5. 5.] [6. 6. 6. 6. 6. 6.]]] >>> # case 3: Reduces a dimension along the axis 1 >>> output = op(x, 1) >>> print(output) [[[2. 2. 2. 2. 2. 2.]] [[5. 5. 5. 5. 5. 5.]] [[8. 8. 8. 8. 8. 8.]]] >>> # case 4: Reduces a dimension along the axis 2 >>> output = op(x, 2) >>> print(output) [[[ 2.] [ 2.] [ 2.]] [[ 4.] [ 5.] [ 6.]] [[ 6.] [ 8.] [10.]]] """ __mindspore_signature__ = ( sig.make_sig('x'), sig.make_sig('axis', default=()), ) @prim_arg_register def __init__(self, keep_dims=False): self._set_prim_arg("keep_dims", keep_dims) def __call__(self, x, axis=()): return super().__call__(x, axis, self.keep_dims)
[docs]class ReduceMin(Primitive): r""" Reduces a dimension of a tensor by the minimum value in the dimension, by default. And also can reduce a dimension of `x` along the `axis`. Determine whether the dimensions of the output and input are the same by controlling `keep_dims`. Note: The `axis` with tensor type is only used for compatibility with older versions and is not recommended. Args: keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1. If ``False`` , don't keep these dimensions. Default: ``False`` . Inputs: - **x** (Tensor[Number]) - The input tensor. - **axis** (Union[int, tuple(int), list(int), Tensor]) - The dimensions to reduce. Default: ``()`` , reduce all dimensions. Only constant value is allowed. Must be in the range [-r, r). Outputs: Tensor, has the same dtype as the `x`. - If `axis` is ``()`` , and `keep_dims` is ``False`` , the output is a 0-D tensor representing the minimum of all elements in the input tensor. - If `axis` is int, set as 1, and `keep_dims` is ``False`` , the shape of output is :math:`(x_0, x_2, ..., x_R)`. - If `axis` is tuple(int), set as (1, 2), and `keep_dims` is ``False`` , the shape of output is :math:`(x_0, x_3, ..., x_R)`. - If `axis` is 1-D Tensor, set as [1, 2], and `keep_dims` is ``False`` , the shape of output is :math:`(x_0, x_3, ..., x_R)`. Raises: TypeError: If `keep_dims` is not a bool. TypeError: If `x` is not a Tensor. TypeError: If `axis` is not one of the following: int, tuple, list or Tensor. ValueError: If `axis` is out of range. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32)) >>> op = ops.ReduceMin(keep_dims=True) >>> output = op(x, 1) >>> result = output.shape >>> print(result) (3, 1, 5, 6) >>> # case 1: Reduces a dimension by the minimum value of all elements in the dimension. >>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]], ... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]], ... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32) >>> output = op(x) >>> print(output) [[[1.]]] >>> print(output.shape) (1, 1, 1) >>> # case 2: Reduces a dimension along axis 0. >>> output = op(x, 0) >>> print(output) [[[1. 1. 1. 1. 1. 1.] [2. 2. 2. 2. 2. 2.] [3. 3. 3. 3. 3. 3.]]] >>> # case 3: Reduces a dimension along axis 1. >>> output = op(x, 1) >>> print(output) [[[1. 1. 1. 1. 1. 1.]] [[4. 4. 4. 4. 4. 4.]] [[7. 7. 7. 7. 7. 7.]]] >>> # case 4: Reduces a dimension along axis 2. >>> output = op(x, 2) >>> print(output) [[[1.] [2.] [3.]] [[4.] [5.] [6.]] [[7.] [8.] [9.]]] """ __mindspore_signature__ = ( sig.make_sig('x'), sig.make_sig('axis', default=()), ) @prim_arg_register def __init__(self, keep_dims=False): self._set_prim_arg("keep_dims", keep_dims) def __call__(self, x, axis=()): return super().__call__(x, axis, self.keep_dims)
[docs]class ReduceProd(Primitive): r""" Reduces a dimension of a tensor by multiplying all elements in the dimension, by default. And also can reduce a dimension of `x` along the `axis`. Determine whether the dimensions of the output and input are the same by controlling `keep_dims`. Note: The `axis` with tensor type is only used for compatibility with older versions and is not recommended. Args: keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1. If ``False`` , don't keep these dimensions. Default: ``False`` . Inputs: - **x** (Tensor[Number]) - The input tensor. - **axis** (Union[int, tuple(int), list(int), Tensor]) - The dimensions to reduce. Default: ``()`` , reduce all dimensions. Only constant value is allowed. Must be in the range [-r, r). Outputs: Tensor, has the same dtype as the `x`. - If `axis` is ``()`` , and `keep_dims` is ``False`` , the output is a 0-D tensor representing the product of all elements in the input tensor. - If `axis` is int, set as 1, and `keep_dims` is ``False`` , the shape of output is :math:`(x_0, x_2, ..., x_R)`. - If `axis` is tuple(int), set as (1, 2), and `keep_dims` is ``False`` , the shape of output is :math:`(x_0, x_3, ..., x_R)`. - If `axis` is 1-D Tensor, set as [1, 2], and `keep_dims` is ``False`` , the shape of output is :math:`(x_0, x_3, ..., x_R)`. Raises: TypeError: If `keep_dims` is not a bool. TypeError: If `x` is not a Tensor. TypeError: If `axis` is not one of the following: int, tuple, list or Tensor. ValueError: If `axis` is out of range. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32)) >>> op = ops.ReduceProd(keep_dims=True) >>> output = op(x, 1) >>> result = output.shape >>> print(result) (3, 1, 5, 6) >>> # case 1: Reduces a dimension by multiplying all elements in the dimension. >>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]], ... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]], ... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32) >>> output = op(x) >>> print(output) [[[2.2833798e+33]]] >>> print(output.shape) (1, 1, 1) >>> # case 2: Reduces a dimension along axis 0. >>> output = op(x, 0) >>> print(output) [[[ 28. 28. 28. 28. 28. 28.] [ 80. 80. 80. 80. 80. 80.] [162. 162. 162. 162. 162. 162.]]] >>> # case 3: Reduces a dimension along axis 1. >>> output = op(x, 1) >>> print(output) [[[ 6. 6. 6. 6. 6. 6.]] [[120. 120. 120. 120. 120. 120.]] [[504. 504. 504. 504. 504. 504.]]] >>> # case 4: Reduces a dimension along axis 2. >>> output = op(x, 2) >>> print(output) [[[1.00000e+00] [6.40000e+01] [7.29000e+02]] [[4.09600e+03] [1.56250e+04] [4.66560e+04]] [[1.17649e+05] [2.62144e+05] [5.31441e+05]]] """ __mindspore_signature__ = ( sig.make_sig('x'), sig.make_sig('axis', default=()), ) @prim_arg_register def __init__(self, keep_dims=False): self._set_prim_arg("keep_dims", keep_dims) def __call__(self, x, axis=()): return super().__call__(x, axis, self.keep_dims)
class ReduceStd(Primitive):
    r"""
    Returns the standard-deviation and mean of the input Tensor along
    dimension(s) specified by `axis`.

    Note:
        The `axis` with tensor type is only used for compatibility with older versions and is not recommended.

    Args:
        axis (Union[int, tuple(int), list(int), Tensor], optional): The dimensions to reduce.
            Default: ``()`` , reduce all dimensions. Only constant value is allowed.
            Let `r` be rank of `input_x`, it should be in the range :math:`[-r,r)`.
        unbiased (bool, optional): Whether to use Bessel's correction.
            If ``True`` , will use the Bessel correction unbiased estimation.
            If ``False`` , will use the biased estimation to calculate the standard deviation.
            Default: ``True`` .
        keep_dims (bool, optional): Whether the output Tensor has dim retained or not.
            If ``True`` , keep these reduced dimensions specified by `axis` and the length is 1.
            If ``False`` , don't keep these dimensions.
            Default: ``False`` .

    Inputs:
        - **input_x** (Tensor[Number]) - The input Tensor with shape :math:`(N, *)` where :math:`*`
          means any number of additional dimensions. Supported dtypes: float16, float32.

    Outputs:
        Tuple(output_std, output_mean) containing the standard deviation and mean.

    Raises:
        TypeError: If `keep_dims` is not a bool.
        TypeError: If `input_x` is not a Tensor.
        ValueError: If `axis` is not one of the following: int, tuple, list or Tensor.

    Supported Platforms:
        ``Ascend`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import ops, Tensor
        >>> input_x = Tensor(np.array([[1, 2, 3], [-1, 1, 4]]).astype(np.float32))
        >>> op = ops.ReduceStd(axis=1, unbiased=True, keep_dims=False)
        >>> output = op(input_x)
        >>> output_std, output_mean = output[0], output[1]
        >>> print(output_std)
        [1. 2.5166113]
        >>> print(output_mean)
        [2. 1.3333334]
    """

    @prim_arg_register
    def __init__(self, axis=[], unbiased=True, keep_dims=False):
        self._set_prim_arg("axis", type_it('ReduceStd', 'axis', axis, (OpDtype.DT_INT, OpDtype.DT_LIST_INT, OpDtype.DT_TENSOR), OpDtype.DT_TUPLE_INT))
        self._set_prim_arg("unbiased", unbiased)
        self._set_prim_arg("keep_dims", keep_dims)

    def __call__(self, x):
        return super().__call__(x, self.axis, self.unbiased, self.keep_dims)
[docs]class ReduceSum(Primitive): r""" Reduces a dimension of a tensor by summing all elements in the dimension, by default. And also can reduce a dimension of `x` along the `axis`. Determine whether the dimensions of the output and input are the same by controlling `keep_dims`. Note: The `axis` with tensor type is only used for compatibility with older versions and is not recommended. Args: keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1. If ``False`` , don't keep these dimensions. Default: ``False`` . skip_mode (bool): If ``True`` and `axis` is empty tuple or empty list, the ReduceSum operation isn't performed, skip it. If ``True`` and `axis` is other values, the ReduceSum calculation is performed normally. If ``False`` , do reduce. Default: ``False`` . Inputs: - **x** (Tensor[Number]) - The input tensor. - **axis** (Union[int, tuple(int), list(int), Tensor]) - The dimensions to reduce. Default: ``()`` , reduce all dimensions when `skip_mode` is ``False`` . Only constant value is allowed. Must be in the range [-rank(`x`), rank(`x`)). Outputs: Tensor, has the same dtype as the `x`. - If `axis` is ``()`` , `keep_dims` is ``False`` , and `skip_mode` is ``False`` , the output is a 0-D tensor representing the sum of all elements in the input tensor. - If `axis` is ``()`` , and `skip_mode` is ``True`` , the ReduceSum operation is not performed, output tensor is equal to the input tensor. - If `axis` is int, set as 2, and `keep_dims` is ``False`` , the shape of output is :math:`(x_1, x_3, ..., x_R)`. - If `axis` is tuple(int) or list(int), set as (2, 3), and `keep_dims` is ``False`` , the shape of output is :math:`(x_1, x_4, ..., x_R)`. - If `axis` is 1-D Tensor, set as [2, 3], and `keep_dims` is ``False`` , the shape of output is :math:`(x_1, x_4, ..., x_R)`. Raises: TypeError: If `keep_dims` is not a bool. TypeError: If `skip_mode` is not a bool. TypeError: If `x` is not a Tensor. ValueError: If `axis` is None. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32)) >>> op = ops.ReduceSum(keep_dims=True) >>> output = op(x, 1) >>> output.shape (3, 1, 5, 6) >>> # case 1: Reduces a dimension by summing all elements in the dimension. >>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]], ... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]], ... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32) >>> output = op(x) >>> print(output) [[[270.]]] >>> print(output.shape) (1, 1, 1) >>> # case 2: Reduces a dimension along axis 0. >>> output = op(x, 0) >>> print(output) [[[12. 12. 12. 12. 12. 12.] [15. 15. 15. 15. 15. 15.] [18. 18. 18. 18. 18. 18.]]] >>> # case 3: Reduces a dimension along axis 1. >>> output = op(x, 1) >>> print(output) [[[ 6. 6. 6. 6. 6. 6.]] [[15. 15. 15. 15. 15. 15.]] [[24. 24. 24. 24. 24. 24.]]] >>> # case 4: Reduces a dimension along axis 2. >>> output = op(x, 2) >>> print(output) [[[ 6.] [12.] [18.]] [[24.] [30.] [36.]] [[42.] [48.] [54.]]] """ __mindspore_signature__ = ( sig.make_sig('x'), sig.make_sig('axis', default=()), ) @prim_arg_register def __init__(self, keep_dims=False, skip_mode=False): self._set_prim_arg("keep_dims", keep_dims) self._set_prim_arg("skip_mode", skip_mode) def __call__(self, x, axis=()): return super().__call__(x, axis, self.keep_dims, self.skip_mode)
class ReflectionPad1DGrad(Primitive): r""" """ @prim_arg_register def __init__(self): pass def __call__(self, grad_output, input, padding): return _convert_stub(pyboost_reflection_pad_1d_grad(self, [grad_output, input, padding])) reflection_pad_1d_grad_op=ReflectionPad1DGrad() class ReflectionPad1D(Primitive): r""" """ @prim_arg_register def __init__(self): pass def __call__(self, input, padding): return _convert_stub(pyboost_reflection_pad_1d(self, [input, padding])) reflection_pad_1d_op=ReflectionPad1D() class ReflectionPad2DGrad(Primitive): r""" """ @prim_arg_register def __init__(self): pass def __call__(self, grad_output, input, padding): return _convert_stub(pyboost_reflection_pad_2d_grad(self, [grad_output, input, padding])) reflection_pad_2d_grad_op=ReflectionPad2DGrad() class ReflectionPad2D(Primitive): r""" """ @prim_arg_register def __init__(self): pass def __call__(self, input, padding): return _convert_stub(pyboost_reflection_pad_2d(self, [input, padding])) reflection_pad_2d_op=ReflectionPad2D() class ReflectionPad3DGrad(Primitive): r""" """ @prim_arg_register def __init__(self): pass def __call__(self, grad_output, input, padding): return _convert_stub(pyboost_reflection_pad_3d_grad(self, [grad_output, input, padding])) reflection_pad_3d_grad_op=ReflectionPad3DGrad() class ReflectionPad3D(Primitive): r""" """ @prim_arg_register def __init__(self): pass def __call__(self, input, padding): return _convert_stub(pyboost_reflection_pad_3d(self, [input, padding])) reflection_pad_3d_op=ReflectionPad3D() class ReLU6Grad(Primitive): r""" Computes gradient for the ReLU6 activation. Args: y_backprop (Tensor): Input gradients tensor, has the same dtype and shape as `x`. x (Tensor): Origin input tensor. Returns: Tensor, has the same dtype and shape as `x`. """ @prim_arg_register def __init__(self): pass def __call__(self, y_backprop, x): return super().__call__(y_backprop, x) relu6_grad_op=ReLU6Grad()
class ReLU6(Primitive):
    r"""
    .. code-block::

        prim = ops.ReLU6()
        out = prim(x)

    is equivalent to

    .. code-block::

        ops.relu6(x)

    Refer to :func:`mindspore.ops.relu6` for more details.
    """

    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, x):
        return super().__call__(x)
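# A minimal, illustrative sketch of ReLU6: clamps values element-wise to the
# range [0, 6], i.e. min(max(x, 0), 6). Assumes eager execution.
def _example_relu6():
    import numpy as np
    from mindspore import Tensor
    x = Tensor(np.array([-1.0, 2.0, 8.0], dtype=np.float32))
    return ReLU6()(x)  # expected values: [0. 2. 6.]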
relu6_op=ReLU6() class ReluGrad(Primitive): r""" Computes gradient for the ReLU activation. Args: y_backprop (Tensor): Input gradients tensor, has the same dtype and shape as `x`. x (Tensor): Origin input tensor. Returns: Tensor, has the same dtype and shape as `x`. """ @prim_arg_register def __init__(self): pass def __call__(self, y_backprop, x): return _convert_stub(pyboost_relu_grad(self, [y_backprop, x])) relu_grad_op=ReluGrad()
class ReLU(Primitive):
    r"""
    .. code-block::

        prim = ops.ReLU()
        out = prim(input)

    is equivalent to

    .. code-block::

        ops.relu(input)

    Refer to :func:`mindspore.ops.relu` for more details.
    """

    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, input):
        return _convert_stub(pyboost_relu(self, [input]))
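# A minimal, illustrative sketch of ReLU: max(x, 0) element-wise. Assumes eager execution.
def _example_relu():
    import numpy as np
    from mindspore import Tensor
    x = Tensor(np.array([-1.0, 0.0, 3.0], dtype=np.float32))
    return ReLU()(x)  # expected values: [0. 0. 3.]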
relu_op=ReLU() class RemainderScalarTensor(Primitive): r""" """ @prim_arg_register def __init__(self): pass def __call__(self, input, other): return _convert_stub(pyboost_remainder_scalar_tensor(self, [input, other])) remainder_scalar_tensor_op=RemainderScalarTensor() class RemainderTensorScalar(Primitive): r""" """ @prim_arg_register def __init__(self): pass def __call__(self, input, other): return _convert_stub(pyboost_remainder_tensor_scalar(self, [input, other])) remainder_tensor_scalar_op=RemainderTensorScalar() class RemainderTensorTensor(Primitive): r""" """ @prim_arg_register def __init__(self): pass def __call__(self, input, other): return _convert_stub(pyboost_remainder_tensor_tensor(self, [input, other])) remainder_tensor_tensor_op=RemainderTensorTensor() class RepeatInterleaveGrad(Primitive): r""" Gradients of RepeatInterleave operation. """ @prim_arg_register def __init__(self): pass def __call__(self, input, repeats, dim): return _convert_stub(pyboost_repeat_interleave_grad(self, [input, repeats, dim])) repeat_interleave_grad_op=RepeatInterleaveGrad() class RepeatInterleaveInt(Primitive): r""" Repeat elements of a tensor along an axis, like `numpy.repeat`. Args: input (Tensor): The tensor to repeat values for. Must be of type: float16, float32, int8, uint8, int16, int32, or int64. repeats (int): The number of times to repeat, must be positive. dim (int, optional): The dim along which to repeat, Default: ``None``. if dims is None, the input Tensor will be flattened and the output will alse be flattened. output_size (int, optional): Total output size for the given axis (e.g. sum of repeats), Default: ``None``. Returns: One tensor with values repeated along the specified dim. If input has shape :math:`(s1, s2, ..., sn)` and dim is i, the output will have shape :math:`(s1, s2, ..., si * repeats, ..., sn)`. The output type will be the same as the type of `input`. Supported Platforms: ``Ascend`` """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('repeats'), sig.make_sig('dim', default=None), sig.make_sig('output_size', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, repeats, dim=None, output_size=None): return _convert_stub(pyboost_repeat_interleave_int(self, [input, repeats, dim, output_size])) repeat_interleave_int_op=RepeatInterleaveInt() class RepeatInterleaveTensor(Primitive): r""" Repeat elements of a tensor along an axis, like `numpy.repeat`. Args: input (Tensor): The tensor to repeat values for. Must be of type: float16, float32, int8, uint8, int16, int32, or int64. repeats (Union[tuple, list, Tensor]): The number of times to repeat, must be positive. dim (int, optional): The dim along which to repeat, Default: ``None``. if dims is None, the input Tensor will be flattened and the output will alse be flattened. output_size (int, optional): Total output size for the given axis (e.g. sum of repeats), Default: ``None``. Returns: One tensor with values repeated along the specified dim. If input has shape :math:`(s1, s2, ..., sn)` and dim is i, the output will have shape :math:`(s1, s2, ..., si * repeats, ..., sn)`. The output type will be the same as the type of `input`. 
Supported Platforms: ``Ascend`` """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('repeats'), sig.make_sig('dim', default=None), sig.make_sig('output_size', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, repeats, dim=None, output_size=None): return _convert_stub(pyboost_repeat_interleave_tensor(self, [input, repeats, dim, output_size])) repeat_interleave_tensor_op=RepeatInterleaveTensor() class ReplicationPad1DGrad(Primitive): r""" """ @prim_arg_register def __init__(self): pass def __call__(self, grad_output, input, padding): return _convert_stub(pyboost_replication_pad_1d_grad(self, [grad_output, input, padding])) replication_pad_1d_grad_op=ReplicationPad1DGrad() class ReplicationPad1D(Primitive): r""" """ @prim_arg_register def __init__(self): pass def __call__(self, input, padding): return _convert_stub(pyboost_replication_pad_1d(self, [input, padding])) replication_pad_1d_op=ReplicationPad1D() class ReplicationPad2DGrad(Primitive): r""" """ @prim_arg_register def __init__(self): pass def __call__(self, grad_output, input, padding): return _convert_stub(pyboost_replication_pad_2d_grad(self, [grad_output, input, padding])) replication_pad_2d_grad_op=ReplicationPad2DGrad() class ReplicationPad2D(Primitive): r""" """ @prim_arg_register def __init__(self): pass def __call__(self, input, padding): return _convert_stub(pyboost_replication_pad_2d(self, [input, padding])) replication_pad_2d_op=ReplicationPad2D() class ReplicationPad3DGrad(Primitive): r""" """ @prim_arg_register def __init__(self): pass def __call__(self, grad_output, input, padding): return _convert_stub(pyboost_replication_pad_3d_grad(self, [grad_output, input, padding])) replication_pad_3d_grad_op=ReplicationPad3DGrad() class ReplicationPad3D(Primitive): r""" """ @prim_arg_register def __init__(self): pass def __call__(self, input, padding): return _convert_stub(pyboost_replication_pad_3d(self, [input, padding])) replication_pad_3d_op=ReplicationPad3D() class ReshapeAndCache(Primitive): r""" .. code-block:: prim = ops.ReshapeAndCache() out = prim(key, value, key_cache, value_cache, slot_mapping) is equivalent to .. code-block:: ops.reshape_and_cache(key, value, key_cache, value_cache, slot_mapping) Refer to :func:`mindspore.ops.reshape_and_cache` for more details. """ __mindspore_signature__ = ( sig.make_sig('key', dtype=sig.sig_dtype.T), sig.make_sig('value', dtype=sig.sig_dtype.T), sig.make_sig('key_cache', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T), sig.make_sig('value_cache', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T), sig.make_sig('slot_mapping', dtype=sig.sig_dtype.T1), ) @prim_arg_register def __init__(self): self.add_prim_attr("side_effect_mem", True) def __call__(self, key, value, key_cache, value_cache, slot_mapping): return super().__call__(key, value, key_cache, value_cache, slot_mapping) reshape_and_cache_op=ReshapeAndCache()
class Reshape(Primitive):
    r"""
    .. code-block::

        prim = ops.Reshape()
        out = prim(input, shape)

    is equivalent to

    .. code-block::

        ops.reshape(input, shape)

    Refer to :func:`mindspore.ops.reshape` for more details.
    """

    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, input, shape):
        return _convert_stub(pyboost_reshape(self, [input, shape]))
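# A minimal, illustrative sketch of Reshape: rearranges a tensor into the
# requested shape; one dimension may be -1 and is inferred from the element
# count. Assumes eager execution.
def _example_reshape():
    import numpy as np
    from mindspore import Tensor
    x = Tensor(np.arange(6, dtype=np.float32))
    return Reshape()(x, (2, -1))  # expected shape: (2, 3)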
reshape_op=Reshape()


class ResizeBicubicGrad(Primitive):
    r"""
    Computes gradients for the ResizeBicubic operation.

    Args:
        grads (Tensor): A Tensor of type float. 4-D with shape [batch, height, width, channels].
            The format must be NHWC.
        image (Tensor): A Tensor. Must be one of the following types: float, double.
            4-D with shape [batch, orig_height, orig_width, channels], the origin image tensor
            that was resized. The format must be NHWC.
        align_corners (bool): If true, the centers of the 4 corner pixels of the input and output
            tensors are aligned, preserving the values at the corner pixels. Default: ``False``.
        half_pixel_centers (bool): An optional bool. Default: ``False``.

    Outputs:
        A 4-D Tensor, with the same shape and data type as `image`.

    Raises:
        TypeError: If the dtype of `grads` is not allowed.
        TypeError: If the dtype of `image` is not allowed.
        ValueError: If `image` dim is not 4.
        ValueError: If `size` dim is not 4.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``
    """

    @prim_arg_register
    def __init__(self, align_corners=False, half_pixel_centers=False):
        self._set_prim_arg("align_corners", align_corners)
        self._set_prim_arg("half_pixel_centers", half_pixel_centers)

    def __call__(self, grads, image):
        return super().__call__(grads, image, self.align_corners, self.half_pixel_centers)
[docs]class ResizeBicubic(Primitive): r""" Resize images to size using bicubic interpolation. .. warning:: This is an experimental API that is subject to change or deletion. Args: align_corners (bool, optional): If ``True`` , the centers of the 4 corner pixels of the input and output tensors are aligned, preserving the values at the corner pixels. Default: ``False`` . half_pixel_centers (bool, optional): Whether to use half-pixel center alignment. If set to ``True`` , `align_corners` should be ``False`` . Default: ``False`` . Inputs: - **images** (Tensor) - The input image must be a 4-D tensor of shape :math:`(batch, channels, height, width)`. The format must be NCHW. Types allowed: float16, float32, float64. - **size** (Union[tuple[int], Tensor[int]]) - A 1-D tensor or tuple with 2 elements: new_height, new_width. Besides, tuple[int] is recommended. Outputs: A 4-D tensor with shape :math:`(batch, channels, new\_height, new\_width)` whose dtype is the same as `images` . Raises: TypeError: If the type of `images` is not allowed. TypeError: If the type of `align_corners` is not bool. TypeError: If the type of `half_pixel_centers` is not bool. ValueError: If the dim of `images` is not 4. ValueError: If the dim of `size` is not 1 when `size` is a tensor. ValueError: If the number of elements in `size` is not 2. ValueError: If any value of `size` is not positive. ValueError: If the values of `align_corners` and `half_pixel_centers` are both ``True`` . Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops, nn >>> class NetResizeBicubic(nn.Cell): ... def __init__(self): ... super(NetResizeBicubic, self).__init__() ... align_corners = False ... half_pixel_centers = False ... self.resize = ops.ResizeBicubic(align_corners, half_pixel_centers) ... ... def construct(self, images, size): ... return self.resize(images, size) ... >>> images = Tensor(np.array([1, 2, 3, 4]).reshape(1, 1, 2, 2).astype(np.float32)) >>> size = Tensor([1, 4], mindspore.int32) >>> resizebicubic = NetResizeBicubic() >>> output = resizebicubic(images, size) >>> print(output) [[[[1. 1.5 2. 2.09375]]]] """ @prim_arg_register def __init__(self, align_corners=False, half_pixel_centers=False): self._set_prim_arg("align_corners", align_corners) self._set_prim_arg("half_pixel_centers", half_pixel_centers) def __call__(self, image, size): return super().__call__(image, size, self.align_corners, self.half_pixel_centers)
class ResizeBilinearGrad(Primitive): r""" Performs grad of ResizeBilinear operation. Args: grads (Tensor): A 4-D Tensor with shape [batch, channel, height, width]. image (Tensor): A 4-D Tensor with shape [batch, channel, height, width], The origin image tensor that was resized. align_corners (bool): If true, the centers of the 4 corner pixels of the input and output tensors are aligned, preserving the values at the corner pixels.Default: ``False``. half_pixel_centers (bool): An optional bool. Default: ``False``. Outputs: A 4-D Tensor , with the same shape and data type as `image`. """ @prim_arg_register def __init__(self, align_corners=False, half_pixel_centers=False): self._set_prim_arg("align_corners", align_corners) self._set_prim_arg("half_pixel_centers", half_pixel_centers) def __call__(self, grads, image): return super().__call__(grads, image, self.align_corners, self.half_pixel_centers)
[docs]class ResizeBilinearV2(Primitive): r""" Resizes an image to a certain size using the bilinear interpolation. The resizing only affects the lower two dimensions which represent the height and width. .. warning:: This is an experimental API that is subject to change or deletion. Args: align_corners (bool, optional): If ``True`` , rescale input by :math:`(new\_height - 1) / (height - 1)`, which exactly aligns the 4 corners of images and resized images. If ``False`` , rescale by :math:`new\_height / height`. Default: ``False`` . half_pixel_centers (bool, optional): Whether half pixel center. If set to ``True`` , `align_corners` should be ``False`` . Default: ``False`` . Inputs: - **x** (Tensor) - Image to be resized. Input images must be a 4-D tensor with shape :math:`(batch, channels, height, width)`, with data type of float32 or float16. - **size** (Union[tuple[int], list[int], Tensor]) - The new size of the images. A tuple or list or Tensor of 2 int elements :math:`(new\_height, new\_width)`. Outputs: Tensor, resized image. 4-D with shape :math:`(batch, channels, new\_height, new\_width)`, with the same data type as input `x`. Raises: TypeError: If `align_corners` is not a bool. TypeError: If `half_pixel_centers` is not a bool. TypeError: If `align_corners` and `half_pixel_centers` are all ``True`` . ValueError: If `half_pixel_centers` is ``True`` and device_target is CPU. ValueError: If dim of `x` is not 4. ValueError: If `size` is Tensor and its dim is not 1. ValueError: If `size` contains other than 2 elements. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> from mindspore import Tensor, ops >>> x = Tensor([[[[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]]]], mindspore.float32) >>> output = ops.ResizeBilinearV2()(x, (5, 5)) >>> print(output) [[[[1. 2. 3. 4. 5.] [1. 2. 3. 4. 5.] [1. 2. 3. 4. 5.] [1. 2. 3. 4. 5.] [1. 2. 3. 4. 5.]]]] """ @prim_arg_register def __init__(self, align_corners=False, half_pixel_centers=False): self._set_prim_arg("align_corners", align_corners) self._set_prim_arg("half_pixel_centers", half_pixel_centers) def __call__(self, image, size): return super().__call__(image, size, self.align_corners, self.half_pixel_centers)
class ResizeLinear1DGrad(Primitive): r""" Compute gradient of `ResizeLinear1D` operator. .. warning:: This is an experimental API that is subject to change. Args: grads (Tensor): A Tensor of type float. 3-D with shape [batch, channel, width]. x (Tensor): A origin input Tensor. 3-D with shape [batch, channel, orig_width], The origin tensor that was resized. coordinate_transformation_mode (string): Default is 'align_corners'. Describes how to transform the coordinate in the resized tensor to the coordinate in the original tensor. Other optional: 'half_pixel'. """ @prim_arg_register def __init__(self, coordinate_transformation_mode='align_corners'): self._set_prim_arg_with_handler("coordinate_transformation_mode", coordinate_transformation_mode, str_to_enum) def __call__(self, grads, x): return super().__call__(grads, x, self.coordinate_transformation_mode) class ResizeLinear1D(Primitive): r""" Using the linear interpolate method resize the input tensor 'x'. For general resize, refer to :func:`mindspore.ops.interpolate` for more details. .. warning:: - This is an experimental API that is subject to change. - Currently, the Ascend platform only supports scenarios where the input `size` is Tuple or List. Args: coordinate_transformation_mode (str): Default is ``'align_corners'`` . Describes how to transform the coordinate in the resized tensor to the coordinate in the original tensor. Other optional: 'half_pixel'. Inputs: - **x** (Tensor) - A 3-D tensor which to resize, with shape [batch, channel, width]. Must be one of the following types: float16, float32, float64. - **size** (Union[Tuple[int], List[int], Tensor[int]]) - describes the new width of `x` . A tuple or list or 1-D tensor with only one int element :math:`(new\_width)`. Outputs: A 3-D tensor which shape is [batch, channel, new_width] with the same type as `x`. Raises: TypeError: If dtype of `x` is not in the support list. TypeError: If `size` is not in Union[Tuple[int], List[int], Tensor[int]]. TypeError: If `coordinate_transformation_mode` is not a string. TypeError: If `coordinate_transformation_mode` is not in the support list. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> x = Tensor([[[1, 2, 3], [4, 5, 6]]], mindspore.float32) >>> size = (6,) >>> resize_linear_1d = ops.ResizeLinear1D(coordinate_transformation_mode="align_corners") >>> output = resize_linear_1d(x, size) >>> print(output) [[[1. 1.4 1.8 2.2 2.6 3.] [4. 4.4 4.8 5.2 5.6 6.]]] """ @prim_arg_register def __init__(self, coordinate_transformation_mode='align_corners'): self._set_prim_arg_with_handler("coordinate_transformation_mode", coordinate_transformation_mode, str_to_enum) def __call__(self, x, size): return super().__call__(x, size, self.coordinate_transformation_mode) class ResizeNearestNeighborGrad(Primitive): r""" Compute gradient of `ResizeNearestNeighbor` operator. Note: The shape of input parameter `size` must be (height, width). Inputs: - **align_corners** (bool) - Whether the centers of the 4 corner pixels of the input and output tensors are aligned. Default: ``False``. - **half_pixel_centers** (bool, optional) - Whether half pixel center. If set to ``True``, `align_corners` should be False. Default: ``False``. """ @prim_arg_register def __init__(self, align_corners=False, half_pixel_centers=False): self._set_prim_arg("align_corners", align_corners) self._set_prim_arg("half_pixel_centers", half_pixel_centers) def __call__(self, grads, size): return super().__call__(grads, size, self.align_corners, self.half_pixel_centers)
[docs]class ResizeNearestNeighbor(Primitive): r""" Resizes the input tensor to a given size by using the nearest neighbor algorithm. The nearest neighbor algorithm selects the value of the nearest point and does not consider the values of neighboring points at all, yielding a piecewise-constant interpolant. Args: size (Union[tuple, list]): The target size. The dimension of size must be 2. align_corners (bool): Whether the centers of the 4 corner pixels of the input and output tensors are aligned. Default: ``False`` . half_pixel_centers (bool, optional): Whether half pixel center. If set to ``True`` , `align_corners` should be False. Default: ``False`` . Inputs: - **input_x** (Tensor) - The input tensor. The shape of the tensor is :math:`(N, C, H, W)`. Outputs: Tensor, the shape of the output tensor is :math:`(N, C, NEW\_H, NEW\_W)`. The data type is the same as the `input_x`. Raises: TypeError: If `size` is neither tuple nor list. TypeError: If `align_corners` is not a bool. ValueError: If length of `size` is not equal to 2. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import numpy as np >>> import mindspore >>> from mindspore import Tensor, ops >>> input_tensor = Tensor(np.array([[[[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]]]), mindspore.float32) >>> size = (2, 2) >>> output = ops.ResizeNearestNeighbor(size=size)(input_tensor) >>> print(output) [[[[-0.1 0.3] [ 0.4 0.5]]]] """ @prim_arg_register def __init__(self, size, align_corners=False, half_pixel_centers=False): self._set_prim_arg("size", type_it('ResizeNearestNeighbor', 'size', size, OpDtype.DT_LIST_INT, OpDtype.DT_TUPLE_INT)) self._set_prim_arg("align_corners", align_corners) self._set_prim_arg("half_pixel_centers", half_pixel_centers) def __call__(self, input_x): return super().__call__(input_x, self.size, self.align_corners, self.half_pixel_centers)
class ResizeNearestNeighborV2Grad(Primitive): r""" Compute gradient of `ResizeNearestNeighborV2` operator. Args: grads (Tensor): A 4-D Tensor with shape [batch, channel, height, width]. size (Union[tuple[int], Tensor]): The size for the input image. 2 elements: [`height, width`]. align_corners (bool): Whether the centers of the 4 corner pixels of the input and output tensors are aligned. Default: ``False``. half_pixel_centers (bool): Default: ``False``. Outputs: A 4-D Tensor , with the same shape and data type as `image`. """ @prim_arg_register def __init__(self, align_corners=False, half_pixel_centers=False): self._set_prim_arg("align_corners", align_corners) self._set_prim_arg("half_pixel_centers", half_pixel_centers) def __call__(self, grads, size): return super().__call__(grads, size, self.align_corners, self.half_pixel_centers) class ResizeNearestNeighborV2(Primitive): r""" Resizes the input tensor to specific size by using the nearest neighbor algorithm. The nearest neighbor algorithm selects the value of the nearest point and does not consider the values of neighboring points at all, yielding a piecewise-constant interpolant. Args: align_corners (bool, optional): If ``True`` , the centers of the 4 corner pixels of the input and output tensors are aligned, preserving the values at the corner pixels. Default: ``False`` . half_pixel_centers (bool, optional): Whether half pixel center. If set to ``True`` , `align_corners` should be False. Default: ``False`` . Inputs: - **x** (Tensor) - 4-D with shape :math:`(batch, channels, height, width)` . - **size** (Tensor) - The new size for the images. A 1-D int32 Tensor of 2 elements: [`new_height, new_width`]. Outputs: - **y** (Tensor) - The resized images. A 4-D with shape :math:`(batch, channels, new\_height, new\_width)`. It has the same dtype as `x`. Raises: TypeError: If `x` or `size` is not a Tensor. TypeError: If the data type of `size` is not int32. TypeError: If `align_corners` or `half_pixel_centers` is not bool. ValueError: If any value of `size` is non positive. ValueError: If the dimension of `x` is not 4. ValueError: If the dimension of `size` is not 1. ValueError: If the elements number of `size` is not 2. ValueError: If attr `half_pixel_centers` and `align_corners` are True at the same time. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import numpy as np >>> from mindspore import Tensor, ops >>> from mindspore import dtype as mstype >>> input_tensor = Tensor(np.ones((1, 1, 4, 4)), mstype.float32) >>> size = Tensor([2, 2], mstype.int32) >>> resize = ops.ResizeNearestNeighborV2() >>> output = resize(input_tensor, size) >>> print(output) [[[[1. 1.] [1. 1.]]]] >>> print(output.shape) (1, 1, 2, 2) """ @prim_arg_register def __init__(self, align_corners=False, half_pixel_centers=False): self._set_prim_arg("align_corners", align_corners) self._set_prim_arg("half_pixel_centers", half_pixel_centers) def __call__(self, image, size): return super().__call__(image, size, self.align_corners, self.half_pixel_centers)
class ReverseV2(Primitive):
    r"""
    .. code-block::

        prim = ops.ReverseV2(axis)
        out = prim(input)

    is equivalent to

    .. code-block::

        ops.flip(input, axis)

    Refer to :func:`mindspore.ops.flip` for more details.
    """

    @prim_arg_register
    def __init__(self, axis):
        self._set_prim_arg("axis", type_it('ReverseV2', 'axis', axis, OpDtype.DT_LIST_INT, OpDtype.DT_TUPLE_INT))

    def __call__(self, input):
        return _convert_stub(pyboost_reverse_v2(self, [input, self.axis]))
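# A minimal, illustrative sketch of ReverseV2: flips the input along the axes
# fixed at construction time. Assumes eager execution.
def _example_reverse_v2():
    import numpy as np
    from mindspore import Tensor
    x = Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32))
    return ReverseV2(axis=[1])(x)  # expected values: [[3. 2. 1.] [6. 5. 4.]]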
class RFFT2(Primitive): r""" .. code-block:: prim = ops.RFFT2() out = prim(input, s, dim, norm) is equivalent to .. code-block:: ops.rfft2(input, s, dim, norm) Refer to :func:`mindspore.ops.rfft2` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('s', default=None), sig.make_sig('dim', default=(-2, -1)), sig.make_sig('norm', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, s=None, dim=(-2, -1), norm=None): return super().__call__(input, s, dim, norm if norm is None else str_to_enum('RFFT2', 'norm', norm)) rfft2_op=RFFT2() class RFFT(Primitive): r""" .. code-block:: prim = ops.RFFT() out = prim(input, n, dim, norm) is equivalent to .. code-block:: ops.rfft(input, n, dim, norm) Refer to :func:`mindspore.ops.rfft` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('n', default=None), sig.make_sig('dim', default=-1), sig.make_sig('norm', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, n=None, dim=-1, norm=None): return super().__call__(input, n, dim, norm if norm is None else str_to_enum('RFFT', 'norm', norm)) rfft_op=RFFT() class RFFTFreq(Primitive): r""" .. code-block:: prim = ops.RFFTFreq() out = prim(n, d, dtype) is equivalent to .. code-block:: ops.rfftfreq(n, d, dtype) Refer to :func:`mindspore.ops.rfftfreq` for more details. """ __mindspore_signature__ = ( sig.make_sig('n'), sig.make_sig('d', default=1.0), sig.make_sig('dtype', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, n, d=1.0, dtype=None): return super().__call__(n, d, dtype if dtype is None else dtype_to_type_id('RFFTFreq', 'dtype', dtype)) rfftfreq_op=RFFTFreq() class RFFTN(Primitive): r""" .. code-block:: prim = ops.RFFTN() out = prim(input, s, dim, norm) is equivalent to .. code-block:: ops.rfftn(input, s, dim, norm) Refer to :func:`mindspore.ops.rfftn` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('s', default=None), sig.make_sig('dim', default=None), sig.make_sig('norm', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, s=None, dim=None, norm=None): return super().__call__(input, s, dim, norm if norm is None else str_to_enum('RFFTN', 'norm', norm)) rfftn_op=RFFTN()
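# A minimal, illustrative sketch of RFFT: the 1-D FFT of a real signal of
# length n yields n // 2 + 1 complex frequency bins along the transformed dim.
# Assumes eager execution; for [1, 0, 1, 0] the mathematical result is [2, 0, 2].
def _example_rfft():
    import numpy as np
    from mindspore import Tensor
    x = Tensor(np.array([1.0, 0.0, 1.0, 0.0], dtype=np.float32))
    return rfft_op(x)  # expected: shape (3,), complex dtype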
[docs]class RightShift(Primitive): r""" Shift the value of each position of Tensor `input_x` to the right by corresponding bits in Tensor `input_y`. The inputs are two tensors, dtypes of them must be consistent, and the shapes of them could be broadcast. .. math:: \begin{aligned} &out_{i} =x_{i} >> y_{i} \end{aligned} .. warning:: This is an experimental API that is subject to change or deletion. Inputs: - **input_x** (Tensor) - The target tensor, will be shifted to the right by `input_y` bits element-wise. Support all int and uint types. - **input_y** (Tensor) - Number of bits shifted, the tensor must have the same type as `input_x`. Outputs: - **output** (Tensor) - The output tensor, has the same type as `input_x`. Raises: TypeError: If `input_x` or `input_y` is not tensor. TypeError: If `input_x` and `input_y` could not be broadcast. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import numpy as np >>> from mindspore import Tensor, ops >>> input_x = Tensor(np.array([1, 2, 3]).astype(np.uint8)) >>> input_y = Tensor(np.array([1, 1, 1]).astype(np.uint8)) >>> output = ops.RightShift()(input_x, input_y) >>> print(output) [0 1 1] """ @prim_arg_register def __init__(self): pass def __call__(self, input_x, input_y): return super().__call__(input_x, input_y)
right_shift_op=RightShift() class RmsNormGrad(Primitive): r""" Calculates the gradient of RmsNorm operation. .. warning:: This is an experimental API that is subject to change or deletion. Inputs: - **dy** (Tensor) - The grad of previous operator, support data type: float16, float32, bfloat16. - **x** (Tensor) - Input data of RmsNorm, support data type: float16, float32, bfloat16. - **rstd** (Tensor) - The second output of RmsNorm, support data type: float16, float32, bfloat16. - **gamma** (Tensor) - Support data type: float16, float32, bfloat16. Returns: - **dx** (Tensor) - Has the same type and shape as `dy`. - **dgamma** (Tensor) - A float32 Tensor with the same shape as `gamma`. Supported Platforms: ``Ascend`` """ @prim_arg_register def __init__(self): pass def __call__(self, dy, x, rstd, gamma): return _convert_stub(pyboost_rms_norm_grad(self, [dy, x, rstd, gamma])) rms_norm_grad_op=RmsNormGrad() class RmsNorm(Primitive): r""" .. code-block:: prim = ops.RmsNorm(epsilon) out = prim(x, gamma) is equivalent to .. code-block:: ops.rms_norm(x, gamma, epsilon) Refer to :func:`mindspore.ops.rms_norm` for more details. """ @prim_arg_register def __init__(self, epsilon=1e-6): self._set_prim_arg("epsilon", epsilon) def __call__(self, x, gamma): return _convert_stub(pyboost_rms_norm(self, [x, gamma, self.epsilon])) class Roll(Primitive): r""" Rolls the elements of a tensor along an axis. Refer to :func:`mindspore.ops.roll` for more details. Args: shift (Union[list(int), tuple(int), int]): Specifies the number of places by which elements are shifted positively (towards larger indices) along the specified dimension. Negative shifts will roll the elements in the opposite direction. axis (Union[list(int), tuple(int), int]): Specifies the dimension indexes of shape to be rolled. Default: ``None`` . Inputs: - **input** (Tensor) - Input tensor. Outputs: Tensor, has the same shape and type as `input`. Supported Platforms: ``Ascend`` ``GPU`` Examples: >>> input = Tensor(np.array([0, 1, 2, 3, 4]).astype(np.float32)) >>> op = ops.Roll(shift=2, axis=0) >>> output = op(input) >>> print(output) [3. 4. 0. 1. 2.] >>> input = Tensor(np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]).astype(np.float32)) >>> op = ops.Roll(shift=-1, axis=0) >>> output = op(input) >>> print(output) [[5. 6. 7. 8. 9.] [0. 1. 2. 3. 4.]] """ @prim_arg_register def __init__(self, shift, axis=None): self._set_prim_arg("shift", type_it('Roll', 'shift', shift, (OpDtype.DT_INT, OpDtype.DT_LIST_INT), OpDtype.DT_TUPLE_INT)) self._set_prim_arg("axis", type_it('Roll', 'axis', axis, (OpDtype.DT_INT, OpDtype.DT_LIST_INT), OpDtype.DT_TUPLE_INT)) def __call__(self, input): return _convert_stub(pyboost_roll(self, [input, self.shift, self.axis])) class RotaryPositionEmbeddingGrad(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('dy'), sig.make_sig('cos'), sig.make_sig('sin'), sig.make_sig('dx', default=None), sig.make_sig('mode', default=0), ) @prim_arg_register def __init__(self): pass def __call__(self, dy, cos, sin, dx=None, mode=0): return _convert_stub(pyboost_rotary_position_embedding_grad(self, [dy, cos, sin, dx, mode])) rotary_position_embedding_grad_op=RotaryPositionEmbeddingGrad() class RotaryPositionEmbedding(Primitive): r""" .. code-block:: prim = ops.RotaryPositionEmbedding() out = prim(x, cos, sin, mode) is equivalent to .. code-block:: ops.rotary_position_embedding(x, cos, sin, mode) Refer to :func:`mindspore.ops.rotary_position_embedding` for more details. 
""" __mindspore_signature__ = ( sig.make_sig('x'), sig.make_sig('cos'), sig.make_sig('sin'), sig.make_sig('mode', default=0), ) @prim_arg_register def __init__(self): pass def __call__(self, x, cos, sin, mode=0): return _convert_stub(pyboost_rotary_position_embedding(self, [x, cos, sin, mode])) rotary_position_embedding_op=RotaryPositionEmbedding() class RotatedIou(Primitive): r""" Calculate the overlap area between rotated rectangles. .. note:: The input data types supported by the Ascend platform include bfloat16, float16, float32. Args: trans (bool): Distinguish the rectangles representations of boxes and query_boxes. If ``True``, the format of boxes and query_boxes is ``'xyxyt'``, else the format is ``'xywht'``. The default value is ``False``. mode (int): Distinguish the calculation mode. If the value is ``1``, the calculation mode is ``'iof'``, else the calculation mode is ``'iou'``. The default value is ``0``. is_cross (bool): If ``True``, use cross-calculation, else use one-to-one calculation. The default value is ``True``. v_threshold (float): Provide condition relaxation for intersection calculation. The default value is ``0.0``. e_threshold (float): Provide condition relaxation for intersection calculation. The default value is ``0.0``. Inputs: boxes (Tensor): The first set of rectangles which has a shape of :math:`(B, N, 5)`. query_boxes (Tensor): The second set of rectangles which has a shape of :math:`(B, K, 5)`. Outputs: Tensor, the shape is :math:`(B, N, K)`. Raises: TypeError: If `boxes` is not a Tensor. TypeError: If `query_boxes` is not a Tensor. ValueError: If `boxes` and `query_boxes` do not has same first dim. ValueError: If the third dimension of `boxes` or `query_boxes` is not ``5``. Supported Platforms: ``Ascend`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> a = np.random.uniform(0,1,(2,2,5)).astype(np.float16) >>> b = np.random.uniform(0,1,(2,3,5)).astype(np.float16) >>> box1 = Tensor(a) >>> box2 = Tensor(b) >>> output = ops.rotated_iou(box1, box2, trans=False, mode=0, is_cross=True) """ __mindspore_signature__ = ( sig.make_sig('boxes'), sig.make_sig('query_boxes'), sig.make_sig('trans', default=False), sig.make_sig('mode', default=0), sig.make_sig('is_cross', default=True), sig.make_sig('v_threshold', default=0.0), sig.make_sig('e_threshold', default=0.0), ) @prim_arg_register def __init__(self): pass def __call__(self, boxes, query_boxes, trans=False, mode=0, is_cross=True, v_threshold=0.0, e_threshold=0.0): return super().__call__(boxes, query_boxes, trans, mode, is_cross, v_threshold, e_threshold) rotated_iou_op=RotatedIou()
[docs]class Round(Primitive): r""" Returns half to even of a tensor element-wise. .. math:: out_i \approx input_i .. note:: The input data types supported by the Ascend platform include bfloat16 (Atlas training series products are not supported), float16, float32, float64, int32, and int64. Inputs: - **input** (Tensor) - The input tensor. - **decimals** (int, optional) - Number of decimal places to round to (default: 0). If decimals is negative, it specifies the number of positions to the left of the decimal point. It supports converting the single-element tensor to an int. Outputs: Tensor, has the same shape and type as the `input`. Raises: TypeError: If `input` is not a Tensor. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> input = Tensor(np.array([0.8, 1.5, 2.3, 2.5, -4.5]), mindspore.float32) >>> round = ops.Round() >>> output = round(input) >>> print(output) [ 1. 2. 2. 2. -4.] """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('decimals', default=0), ) @prim_arg_register def __init__(self): pass def __call__(self, input, decimals=0): return _convert_stub(pyboost_round(self, [input, decimals]))
round_op=Round() class RsqrtGrad(Primitive): r""" Computes gradients for the Rsqrt. Args: y_backprop (Tensor): Input gradients tensor, has the same dtype and shape as `x`. x (Tensor): Origin input tensor. Returns: Tensor, has the same dtype and shape as `x`. """ @prim_arg_register def __init__(self): pass def __call__(self, y_backprop, x): return super().__call__(y_backprop, x) rsqrt_grad_op=RsqrtGrad()
[docs]class Rsqrt(Primitive): r""" .. code-block:: prim = ops.Rsqrt() out = prim(input) is equivalent to .. code-block:: ops.rsqrt(input) Refer to :func:`mindspore.ops.rsqrt` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_rsqrt(self, [input]))
rsqrt_op=Rsqrt() class ScalarCast(Primitive): r""" .. code-block:: prim = ops.ScalarCast() out = prim(input_x, input_y) is equivalent to .. code-block:: ops.scalar_cast(input_x, input_y) Refer to :func:`mindspore.ops.scalar_cast` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input_x, input_y): return super().__call__(input_x, dtype_to_type_id('ScalarCast', 'input_y', input_y)) scalar_cast_op=ScalarCast() class ScatterAddExt(Primitive): r""" Add all elements in `src` to the index specified by `index` to `input` along dimension specified by `dim`. It takes three inputs `input`, `src` and `index` of the same rank r >= 1. For a 3-D tensor, the operation updates input as follows: .. code-block:: input[index[i][j][k]][j][k] += src[i][j][k] # if dim == 0 input[i][index[i][j][k]][k] += src[i][j][k] # if dim == 1 input[i][j][index[i][j][k]] += src[i][j][k] # if dim == 2 Inputs: - **input** (Tensor) - The target tensor. The rank must be at least 1. - **dim** (int) - Which dim to scatter. Accepted range is [-r, r) where r = rank(`input`). Default: ``0``. - **index** (Tensor) - The index of `input` to do scatter operation whose data type must be mindspore.int32 or mindspore.int64. Same rank as `input`. Except for the dimension specified by `dim`, the size of each dimension of `index` must be less than or equal to the size of the corresponding dimension of `input`. - **src** (Tensor) - The tensor doing the scatter operation with `input`, has the same type as `input` and the size of each dimension must be greater than or equal to that of `index`. Outputs: Tensor, has the same shape and type as `input`. Raises: TypeError: If `index` is neither int32 nor int64. ValueError: If anyone of the rank among `input`, `index` and `src` less than 1. ValueError: If the rank of `input`, `index` and `src` is not the same. ValueError: If, outside dimension `dim`, the size of any dimension of `index` is greater than the size of the corresponding dimension of `input` . ValueError: If the size of any dimension of `src` is less than that of `index`. Supported Platforms: ``Ascend`` Examples: >>> import numpy as np >>> import mindspore as ms >>> from mindspore import Tensor, ops >>> input = Tensor(np.array([[1, 2, 3, 4, 5]]), dtype=ms.float32) >>> src = Tensor(np.array([[8, 8]]), dtype=ms.float32) >>> index = Tensor(np.array([[2, 4]]), dtype=ms.int64) >>> out = ops.auto_generate.ScatterAddExt()(input=input, dim=1, index=index, src=src) >>> print(out) [[1. 2. 11. 4. 13.]] >>> input = Tensor(np.zeros((5, 5)), dtype=ms.float32) >>> src = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=ms.float32) >>> index = Tensor(np.array([[0, 0, 0], [2, 2, 2], [4, 4, 4]]), dtype=ms.int64) >>> out = ops.auto_generate.ScatterAddExt()(input=input, dim=0, index=index, src=src) >>> print(out) [[1. 2. 3. 0. 0.] [0. 0. 0. 0. 0.] [4. 5. 6. 0. 0.] [0. 0. 0. 0. 0.] [7. 8. 9. 0. 0.]] >>> input = Tensor(np.zeros((5, 5)), dtype=ms.float32) >>> src = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=ms.float32) >>> index = Tensor(np.array([[0, 2, 4], [0, 2, 4], [0, 2, 4]]), dtype=ms.int64) >>> out = ops.auto_generate.ScatterAddExt()(input=input, dim=1, index=index, src=src) >>> print(out) [[1. 0. 2. 0. 3.] [4. 0. 5. 0. 6.] [7. 0. 8. 0. 9.] [0. 0. 0. 0. 0.] [0. 0. 0. 0. 0.]] """ @prim_arg_register def __init__(self): pass def __call__(self, input, dim, index, src): return _convert_stub(pyboost_scatter_add_ext(self, [input, dim, index, src])) scatter_add_ext_op=ScatterAddExt()
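
# Illustrative sketch (not part of the generated operator definitions): a NumPy
# emulation of the update rule quoted in the ScatterAddExt docstring, handy for
# checking small cases by hand. The helper name is hypothetical.
def _scatter_add_ext_numpy_reference(input_np, dim, index_np, src_np):
    import numpy as np
    out = np.array(input_np, copy=True)
    index = np.asarray(index_np)
    src = np.asarray(src_np)
    dim = dim % out.ndim  # allow negative dims, as the docstring permits
    for pos in np.ndindex(index.shape):
        target = list(pos)
        target[dim] = int(index[pos])  # redirect the coordinate along `dim`
        out[tuple(target)] += src[pos]
    return out
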
[docs]class ScatterNd(Primitive): r""" .. code-block:: prim = ops.ScatterNd() out = prim(indices, updates, shape) is equivalent to .. code-block:: ops.scatter_nd(indices, updates, shape) Refer to :func:`mindspore.ops.scatter_nd` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, indices, updates, shape): return super().__call__(indices, updates, shape)
scatter_nd_op=ScatterNd() class Scatter(Primitive): r""" reverse operation of gather """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('dim'), sig.make_sig('index'), sig.make_sig('src'), sig.make_sig('reduce', default='none'), ) @prim_arg_register def __init__(self): pass def __call__(self, input, dim, index, src, reduce='none'): return _convert_stub(pyboost_scatter(self, [input, dim, index, src, str_to_enum('Scatter', 'reduce', reduce)])) scatter_op=Scatter() class ScatterValue(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('dim'), sig.make_sig('index'), sig.make_sig('src'), sig.make_sig('reduce', default='none'), ) @prim_arg_register def __init__(self): pass def __call__(self, input, dim, index, src, reduce='none'): return _convert_stub(pyboost_scatter_value(self, [input, dim, index, src, str_to_enum('ScatterValue', 'reduce', reduce)])) scatter_value_op=ScatterValue()
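
# Illustrative sketch (not part of the generated operator definitions): assuming
# Scatter with reduce='none' overwrites `input` at the indexed positions (the
# usual inverse of gather, as its docstring suggests), a NumPy reference is:
def _scatter_numpy_reference(input_np, dim, index_np, src_np):
    import numpy as np
    out = np.array(input_np, copy=True)
    index = np.asarray(index_np)
    src = np.asarray(src_np)
    for pos in np.ndindex(index.shape):
        target = list(pos)
        target[dim] = int(index[pos])
        out[tuple(target)] = src[pos]  # overwrite; other reduce modes accumulate
    return out
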
[docs]class SearchSorted(Primitive): r""" Return the position indices such that after inserting the values into the `sorted_sequence`, the order of innermost dimension of the `sorted_sequence` remains unchanged. .. warning:: This is an experimental API that is subject to change or deletion. Refer to :func:`mindspore.ops.searchsorted` for more details. Args: dtype (mindspore.dtype, optional): The specified type of output tensor. Optional values are: ``mstype.int32`` and ``mstype.int64``. Default value: ``mstype.int64``. right (bool, optional): Search Strategy. If ``True`` , return the last suitable index found; if ``False`` , return the first such index. Default: ``False`` . Inputs: - **sorted_sequence** (Tensor) - The input tensor. It must contain a monotonically increasing sequence on the innermost dimension. - **values** (Tensor) - The value that should be inserted. - **sorter** (Tensor, optional) - if provided, a tensor matching the shape of the unsorted sorted_sequence containing a sequence of indices that sort it in the ascending order on the innermost dimension and type must be int64. Default: ``None`` . CPU and GPU can only use default values Outputs: Tensor containing the indices from the innermost dimension of `sorted_sequence` such that, if insert the corresponding value in the `values` Tensor, the order of `sorted_sequence` would be preserved, whose datatype is int32 if out_int32 is ``True`` , otherwise int64, and shape is the same as the shape of `values`. Raises: ValueError: If the dimension of `sorted_sequence` isn't 1 and all dimensions except the last dimension of `sorted_sequence` and `values` are different. ValueError: If `sorted_sequence` value is a scalar. ValueError: If `values` is a scalar when `sorted_sequence` dimension is not 1. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> searchsorted = ops.SearchSorted() >>> sorted_sequence = Tensor(np.array([[0, 1, 3, 5, 7], [2, 4, 6, 8, 10]]), mindspore.float32) >>> values = Tensor(np.array([[3, 6, 9], [3, 6, 9]]), mindspore.float32) >>> output = searchsorted(sorted_sequence, values) >>> print(output) [[2 4 5] [1 2 4]] """ __mindspore_signature__ = ( sig.make_sig('sorted_sequence'), sig.make_sig('values'), sig.make_sig('sorter', default=None), ) @prim_arg_register def __init__(self, dtype=mstype.int64, right=False): self._set_prim_arg_with_handler("dtype", dtype, dtype_to_type_id) self._set_prim_arg("right", right) def __call__(self, sorted_sequence, values, sorter=None): return _convert_stub(pyboost_searchsorted(self, [sorted_sequence, values, sorter, self.dtype, self.right]))
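
# Illustrative sketch (not part of the generated operator definitions): with the
# default right=False, SearchSorted should agree row-by-row with numpy.searchsorted
# using side='left' (right=True corresponds to side='right'). Assumes
# `sorted_sequence` and `values` share their leading batch dimensions, as in the
# docstring example above.
def _searchsorted_numpy_reference(sorted_sequence, values, right=False):
    import numpy as np
    seq = np.asarray(sorted_sequence)
    val = np.asarray(values)
    side = 'right' if right else 'left'
    flat_seq = seq.reshape(-1, seq.shape[-1])
    flat_val = val.reshape(-1, val.shape[-1])
    out = np.stack([np.searchsorted(s, v, side=side)
                    for s, v in zip(flat_seq, flat_val)])
    return out.reshape(val.shape)
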
class SelectExt(Primitive): r""" .. code-block:: prim = ops.SelectExt() out = prim(input, dim, index) is equivalent to .. code-block:: ops.select_ext(input, dim, index) Refer to :func:`mindspore.ops.select_ext` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input, dim, index): return _convert_stub(pyboost_select_ext(self, [input, dim, index])) select_ext_op=SelectExt()
[docs]class Select(Primitive): r""" .. code-block:: prim = ops.Select() out = prim(condition, input, other) is equivalent to .. code-block:: ops.select(condition, input, other) Refer to :func:`mindspore.ops.select` for more details. """ __mindspore_signature__ = ( sig.make_sig('condition', dtype=sig.sig_dtype.T), sig.make_sig('input', dtype=sig.sig_dtype.T1), sig.make_sig('other', dtype=sig.sig_dtype.T1), ) @prim_arg_register def __init__(self): pass def __call__(self, condition, input, other): return _convert_stub(pyboost_select(self, [condition, input, other]))
select_op=Select() class SelectV2(Primitive): r""" .. code-block:: prim = ops.SelectV2() out = prim(condition, input, other) is equivalent to .. code-block:: ops.select_v2(condition, input, other) Refer to :func:`mindspore.ops.select_v2` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, condition, input, other): return _convert_stub(pyboost_select_v2(self, [condition, input, other])) select_v2_op=SelectV2() class SeLUExt(Primitive): r""" .. code-block:: prim = ops.SeLUExt() out = prim(input) is equivalent to .. code-block:: ops.selu_ext(input) Refer to :func:`mindspore.ops.selu_ext` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_selu_ext(self, [input])) selu_ext_op=SeLUExt() class SeluGrad(Primitive): r""" .. code-block:: prim = ops.SeluGrad() out = prim(gradient, result) is equivalent to .. code-block:: ops.selu_grad(gradient, result) Refer to :func:`mindspore.ops.selu_grad` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, gradient, result): return _convert_stub(pyboost_selu_grad(self, [gradient, result])) selu_grad_op=SeluGrad() class SequenceConcat(Primitive): r""" .. code-block:: prim = ops.SequenceConcat(axis) out = prim(x) is equivalent to .. code-block:: ops.sequence_concat(x, axis) Refer to :func:`mindspore.ops.sequence_concat` for more details. """ @prim_arg_register def __init__(self, axis=0): self._set_prim_arg("axis", axis) def __call__(self, x): return super().__call__(x, self.axis) class AShardIdentity(Primitive): r""" .. code-block:: prim = ops.AShardIdentity() out = prim(input) is equivalent to .. code-block:: ops.shard_identity(input) Refer to :func:`mindspore.ops.shard_identity` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return super().__call__(input) shard_identity_op=AShardIdentity() class SigmoidGrad(Primitive): r""" Gets the gradient of Sigmoid operation. """ @prim_arg_register def __init__(self): pass def __call__(self, y, dy): return _convert_stub(pyboost_sigmoid_grad(self, [y, dy])) sigmoid_grad_op=SigmoidGrad()
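
# Illustrative sketch (not part of the generated operator definitions): SigmoidGrad
# is called with the forward output `y` and the incoming gradient `dy`; the analytic
# gradient of sigmoid is y * (1 - y), so a NumPy cross-check can be written as:
def _sigmoid_grad_numpy_reference(y, dy):
    import numpy as np
    y = np.asarray(y)
    return np.asarray(dy) * y * (1.0 - y)
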
[docs]class Sigmoid(Primitive): r""" .. code-block:: prim = ops.Sigmoid() out = prim(input) is equivalent to .. code-block:: ops.sigmoid(input) Refer to :func:`mindspore.ops.sigmoid` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_sigmoid(self, [input]))
sigmoid_op=Sigmoid()
[docs]class Sign(Primitive): r""" .. code-block:: prim = ops.Sign() out = prim(input) is equivalent to .. code-block:: ops.sign(input) Refer to :func:`mindspore.ops.sign` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_sign(self, [input]))
sign_op=Sign() class SilentCheckV2(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('val'), sig.make_sig('input_grad'), sig.make_sig('sfda'), sig.make_sig('step'), sig.make_sig('c_min_steps', default=7), sig.make_sig('c_thresh_l1', default=1000000.0), sig.make_sig('c_coeff_l1', default=100000.0), sig.make_sig('c_thresh_l2', default=10000.0), sig.make_sig('c_coeff_l2', default=5000.0), sig.make_sig('npu_asd_detect', default=1), ) @prim_arg_register def __init__(self): self.add_prim_attr("side_effect_mem", True) def __call__(self, val, input_grad, sfda, step, c_min_steps=7, c_thresh_l1=1000000.0, c_coeff_l1=100000.0, c_thresh_l2=10000.0, c_coeff_l2=5000.0, npu_asd_detect=1): return _convert_stub(pyboost_silent_check_v2(self, [val, input_grad, sfda, step, c_min_steps, c_thresh_l1, c_coeff_l1, c_thresh_l2, c_coeff_l2, npu_asd_detect])) silent_check_v2_op=SilentCheckV2() class SiLUGrad(Primitive): r""" Performs grad of SiLU operation. """ @prim_arg_register def __init__(self): pass def __call__(self, dout, x): return _convert_stub(pyboost_silu_grad(self, [dout, x])) silu_grad_op=SiLUGrad() class SiLU(Primitive): r""" .. code-block:: prim = ops.SiLU() out = prim(input) is equivalent to .. code-block:: ops.silu(input) Refer to :func:`mindspore.ops.silu` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_silu(self, [input])) silu_op=SiLU()
[docs]class Sin(Primitive): r""" .. code-block:: prim = ops.Sin() out = prim(input) is equivalent to .. code-block:: ops.sin(input) Refer to :func:`mindspore.ops.sin` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_sin(self, [input]))
sin_op=Sin()
[docs]class Sinc(Primitive): r""" .. code-block:: prim = ops.Sinc() out = prim(input) is equivalent to .. code-block:: ops.sinc(input) Refer to :func:`mindspore.ops.sinc` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_sinc(self, [input]))
sinc_op=Sinc()
[docs]class Sinh(Primitive): r""" .. code-block:: prim = ops.Sinh() out = prim(input) is equivalent to .. code-block:: ops.sinh(input) Refer to :func:`mindspore.ops.sinh` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_sinh(self, [input]))
sinh_op=Sinh()


class SliceExt(Primitive):
    r"""
    Returns a slice of the input tensor along dimension `dim`, taking elements
    from `start` to `end` (exclusive) with stride `step`.

    Args:
        input (Tensor): the tensor to slice.
        dim (int): the dimension along which to slice.
        start (int): the starting index of the slice.
        end (int): the ending index of the slice (exclusive).
        step (int): the slice step size.

    Returns:
        Tensor.

    Raises:
        ValueError: If `dim` is out of range [-input.ndim, input.ndim).
        ValueError: If `start` is out of range [-input.shape[dim], input.shape[dim]].
        ValueError: If `end` is out of range [start, input.shape[dim]].

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> import mindspore
        >>> from mindspore import ops
        >>> from mindspore import Tensor
        >>> x = Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], mindspore.int32)
        >>> output = ops.SliceExt()(x, 0, 0, 2, 1)
        >>> print(output)
        [[ 1 2 3]
         [ 4 5 6]]
        >>> output = ops.SliceExt()(x, 1, 1, 3, 1)
        >>> print(output)
        [[ 2 3]
         [ 5 6]
         [ 8 9]]
    """
    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, input, dim, start, end, step):
        return _convert_stub(pyboost_slice_ext(self, [input, dim, start, end, step]))

slice_ext_op=SliceExt()


class SmoothL1LossGrad(Primitive):
    r"""
    Computes the gradient with respect to `prediction` for SmoothL1Loss.
    """
    @prim_arg_register
    def __init__(self, beta=1.0, reduction='none'):
        self._set_prim_arg("beta", type_it('SmoothL1LossGrad', 'beta', beta, (OpDtype.DT_INT, OpDtype.DT_BOOL), OpDtype.DT_FLOAT))
        self._set_prim_arg_with_handler("reduction", reduction, str_to_enum)

    def __call__(self, prediction, target, dout):
        return _convert_stub(pyboost_smooth_l1_loss_grad(self, [prediction, target, dout, self.beta, self.reduction]))
[docs]class SmoothL1Loss(Primitive): r""" """ @prim_arg_register def __init__(self, beta=1.0, reduction='none'): self._set_prim_arg("beta", type_it('SmoothL1Loss', 'beta', beta, (OpDtype.DT_INT, OpDtype.DT_BOOL), OpDtype.DT_FLOAT)) self._set_prim_arg_with_handler("reduction", reduction, str_to_enum) def __call__(self, prediction, target): return _convert_stub(pyboost_smooth_l1_loss(self, [prediction, target, self.beta, self.reduction]))
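
# Illustrative sketch (not part of the generated operator definitions): the
# conventional smooth-L1 definition with a `beta` threshold, assumed to match
# this primitive's semantics, written as a NumPy reference.
def _smooth_l1_loss_numpy_reference(prediction, target, beta=1.0, reduction='none'):
    import numpy as np
    diff = np.abs(np.asarray(prediction) - np.asarray(target))
    loss = np.where(diff < beta, 0.5 * diff * diff / beta, diff - 0.5 * beta)
    if reduction == 'mean':
        return loss.mean()
    if reduction == 'sum':
        return loss.sum()
    return loss  # reduction == 'none'
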
class SoftmaxBackward(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('dout'), sig.make_sig('out'), sig.make_sig('dim', default=-1), ) @prim_arg_register def __init__(self): pass def __call__(self, dout, out, dim=-1): return _convert_stub(pyboost_softmax_backward(self, [dout, out, dim])) softmax_backward_op=SoftmaxBackward()
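
# Illustrative sketch (not part of the generated operator definitions): the standard
# softmax backward rule, with `out` the forward softmax output and `dout` the
# incoming gradient (names taken from the signature above).
def _softmax_backward_numpy_reference(dout, out, dim=-1):
    import numpy as np
    dout = np.asarray(dout)
    out = np.asarray(out)
    inner = np.sum(dout * out, axis=dim, keepdims=True)
    return (dout - inner) * out
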
[docs]class Softmax(Primitive): r""" Applies the Softmax operation to the input tensor on the specified axis. Refer to :func:`mindspore.ops.softmax` for more details. Args: axis (Union[int, tuple], optional): The axis to perform the Softmax operation. Default: ``-1`` . Inputs: - **input** (Tensor) - Tensor of shape :math:`(N, *)`, where :math:`*` means, any number of additional dimensions. Outputs: Tensor, with the same type and shape as the input. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> input = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32) >>> softmax = ops.Softmax() >>> output = softmax(input) >>> print(output) [0.01165623 0.03168492 0.08612854 0.23412167 0.6364086 ] """ @prim_arg_register def __init__(self, axis=-1): self._set_prim_arg("axis", type_it('Softmax', 'axis', axis, OpDtype.DT_INT, OpDtype.DT_TUPLE_INT)) def __call__(self, input): return _convert_stub(pyboost_softmax(self, [input, self.axis]))
class SoftplusExt(Primitive): r""" .. code-block:: prim = ops.SoftplusExt() out = prim(input, beta, threshold) is equivalent to .. code-block:: ops.softplus_ext(input, beta, threshold) Refer to :func:`mindspore.ops.softplus_ext` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('beta', default=1), sig.make_sig('threshold', default=20), ) @prim_arg_register def __init__(self): pass def __call__(self, input, beta=1, threshold=20): return _convert_stub(pyboost_softplus_ext(self, [input, beta, threshold])) softplus_ext_op=SoftplusExt() class SoftplusGradExt(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('dout'), sig.make_sig('x'), sig.make_sig('beta', default=1), sig.make_sig('threshold', default=20), ) @prim_arg_register def __init__(self): pass def __call__(self, dout, x, beta=1, threshold=20): return _convert_stub(pyboost_softplus_grad_ext(self, [dout, x, beta, threshold])) softplus_grad_ext_op=SoftplusGradExt() class SoftShrinkGrad(Primitive): r""" .. code-block:: prim = ops.SoftShrinkGrad(lambd) out = prim(input_grad, input_x) is equivalent to .. code-block:: ops.softshrink_grad(input_grad, input_x, lambd) Refer to :func:`mindspore.ops.softshrink_grad` for more details. """ @prim_arg_register def __init__(self, lambd=0.5): self._set_prim_arg("lambd", type_it('SoftShrinkGrad', 'lambd', lambd, (OpDtype.DT_INT, OpDtype.DT_BOOL), OpDtype.DT_FLOAT)) def __call__(self, input_grad, input_x): return _convert_stub(pyboost_softshrink_grad(self, [input_grad, input_x, self.lambd]))
[docs]class SoftShrink(Primitive): r""" .. code-block:: prim = ops.SoftShrink(lambd) out = prim(input) is equivalent to .. code-block:: ops.softshrink(input, lambd) Refer to :func:`mindspore.ops.softshrink` for more details. """ @prim_arg_register def __init__(self, lambd=0.5): self._set_prim_arg("lambd", type_it('SoftShrink', 'lambd', lambd, (OpDtype.DT_INT, OpDtype.DT_BOOL), OpDtype.DT_FLOAT)) def __call__(self, input): return _convert_stub(pyboost_softshrink(self, [input, self.lambd]))
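
# Illustrative sketch (not part of the generated operator definitions): the
# conventional soft-shrinkage rule behind ops.softshrink, usable as a NumPy
# reference for small inputs.
def _softshrink_numpy_reference(x, lambd=0.5):
    import numpy as np
    x = np.asarray(x)
    return np.where(x > lambd, x - lambd,
                    np.where(x < -lambd, x + lambd, 0.0))
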
class SolveTriangular(Primitive): r""" .. code-block:: prim = ops.SolveTriangular() out = prim(a, b, trans, lower, unit_diagonal) is equivalent to .. code-block:: ops.solve_triangular(a, b, trans, lower, unit_diagonal) Refer to :func:`mindspore.ops.solve_triangular` for more details. """ __mindspore_signature__ = ( sig.make_sig('a'), sig.make_sig('b'), sig.make_sig('trans', default=0), sig.make_sig('lower', default=False), sig.make_sig('unit_diagonal', default=False), ) @prim_arg_register def __init__(self): pass def __call__(self, a, b, trans=0, lower=False, unit_diagonal=False): return super().__call__(a, b, trans, lower, unit_diagonal) solve_triangular_op=SolveTriangular() class SortExt(Primitive): r""" .. code-block:: prim = ops.SortExt() out = prim(input, dim, descending, stable) is equivalent to .. code-block:: ops.sort_ext(input, dim, descending, stable) Refer to :func:`mindspore.ops.sort_ext` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('dim', default=-1), sig.make_sig('descending', default=False), sig.make_sig('stable', default=False), ) @prim_arg_register def __init__(self): pass def __call__(self, input, dim=-1, descending=False, stable=False): return _convert_stub(pyboost_sort_ext(self, [input, dim, descending, stable])) sort_ext_op=SortExt()
[docs]class Split(Primitive): r""" Splits the input tensor into output_num of tensors along the given axis and output numbers. Refer to :func:`mindspore.ops.split` for more details. Args: axis (int): Index of the split position. Default: ``0`` . output_num (int): The number of output tensors. Must be positive int. Default: ``1`` . Inputs: - **input_x** (Tensor) - The shape of tensor is :math:`(x_0, x_1, ..., x_{R-1})`, R >= 1. Outputs: tuple[Tensor], the shape of each output tensor is the same, which is :math:`(x_0, x_1, ..., x_{axis}/{output\_num}, ..., x_{R-1})`. And the data type is the same as `input_x`. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> split = ops.Split(1, 2) >>> x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]), mindspore.int32) >>> print(x) [[1 1 1 1] [2 2 2 2]] >>> output = split(x) >>> print(output) (Tensor(shape=[2, 2], dtype=Int32, value= [[1, 1], [2, 2]]), Tensor(shape=[2, 2], dtype=Int32, value= [[1, 1], [2, 2]])) >>> split = ops.Split(1, 4) >>> output = split(x) >>> print(output) (Tensor(shape=[2, 1], dtype=Int32, value= [[1], [2]]), Tensor(shape=[2, 1], dtype=Int32, value= [[1], [2]]), Tensor(shape=[2, 1], dtype=Int32, value= [[1], [2]]), Tensor(shape=[2, 1], dtype=Int32, value= [[1], [2]])) """ @prim_arg_register def __init__(self, axis=0, output_num=1): self._set_prim_arg("axis", axis) self._set_prim_arg("output_num", output_num) def __call__(self, input_x): return super().__call__(input_x, self.axis, self.output_num)
class SplitTensor(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('input_x'), sig.make_sig('split_int'), sig.make_sig('axis', default=0), ) @prim_arg_register def __init__(self): pass def __call__(self, input_x, split_int, axis=0): return _convert_stub(pyboost_split_tensor(self, [input_x, split_int, axis])) split_tensor_op=SplitTensor() class SplitWithSize(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('input_x'), sig.make_sig('split_sections'), sig.make_sig('axis', default=0), ) @prim_arg_register def __init__(self): pass def __call__(self, input_x, split_sections, axis=0): return _convert_stub(pyboost_split_with_size(self, [input_x, split_sections, axis])) split_with_size_op=SplitWithSize() class SqrtGrad(Primitive): r""" Performs grad of Sqrt operation. """ @prim_arg_register def __init__(self): pass def __call__(self, dy, y): return super().__call__(dy, y) sqrt_grad_op=SqrtGrad()
[docs]class Sqrt(Primitive): r""" .. code-block:: prim = ops.Sqrt() out = prim(x) is equivalent to .. code-block:: ops.sqrt(x) Refer to :func:`mindspore.ops.sqrt` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, x): return _convert_stub(pyboost_sqrt(self, [x]))
sqrt_op=Sqrt()
[docs]class Square(Primitive): r""" .. code-block:: prim = ops.Square() out = prim(input) is equivalent to .. code-block:: ops.square(input) Refer to :func:`mindspore.ops.square` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_square(self, [input]))
square_op=Square() class StackExt(Primitive): r""" .. code-block:: prim = ops.StackExt(dim) out = prim(tensors) is equivalent to .. code-block:: ops.stack_ext(tensors, dim) Refer to :func:`mindspore.ops.stack_ext` for more details. """ @prim_arg_register def __init__(self, dim=0): self._set_prim_arg("dim", dim) def __call__(self, tensors): return _convert_stub(pyboost_stack_ext(self, [tensors, self.dim]))
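
# Illustrative sketch (not part of the generated operator definitions): stacking a
# sequence of same-shaped tensors along a new dimension `dim` matches numpy.stack,
# which can serve as a shape/value reference for StackExt.
def _stack_ext_numpy_reference(tensors, dim=0):
    import numpy as np
    return np.stack([np.asarray(t) for t in tensors], axis=dim)
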
[docs]class StridedSlice(Primitive): r""" .. code-block:: prim = ops.StridedSlice(begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask) out = prim(input_x, begin, end, strides) is equivalent to .. code-block:: ops.strided_slice(input_x, begin, end, strides, begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask) Refer to :func:`mindspore.ops.strided_slice` for more details. """ @prim_arg_register def __init__(self, begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0): self._set_prim_arg("begin_mask", begin_mask) self._set_prim_arg("end_mask", end_mask) self._set_prim_arg("ellipsis_mask", ellipsis_mask) self._set_prim_arg("new_axis_mask", new_axis_mask) self._set_prim_arg("shrink_axis_mask", shrink_axis_mask) def __call__(self, input_x, begin, end, strides): return super().__call__(input_x, begin, end, strides, self.begin_mask, self.end_mask, self.ellipsis_mask, self.new_axis_mask, self.shrink_axis_mask)
class SubExt(Primitive): r""" .. code-block:: prim = ops.SubExt() out = prim(input, other, alpha) is equivalent to .. code-block:: ops.sub_ext(input, other, alpha) Refer to :func:`mindspore.ops.sub_ext` for more details. """ __mindspore_signature__ = ( sig.make_sig('input', dtype=sig.sig_dtype.T), sig.make_sig('other', dtype=sig.sig_dtype.T), sig.make_sig('alpha', dtype=sig.sig_dtype.T1, default=1), ) @prim_arg_register def __init__(self): pass def __call__(self, input, other, alpha=1): return _convert_stub(pyboost_sub_ext(self, [input, other, alpha])) sub_ext_op=SubExt() class Sub(Primitive): r""" .. code-block:: prim = ops.Sub() out = prim(input, other) is equivalent to .. code-block:: ops.sub(input, other) Refer to :func:`mindspore.ops.sub` for more details. """ __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T) @prim_arg_register def __init__(self): pass def __call__(self, input, other): return _convert_stub(pyboost_sub(self, [input, other])) sub_op=Sub() class SumExt(Primitive): r""" Alias for :func:`mindspore.mint.transpose` . .. warning:: This is an experimental API that is subject to change or deletion. Refer to :func:`mindspore.mint.transpose` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('dim', default=None), sig.make_sig('keepdim', default=False), sig.make_sig('dtype', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, dim=None, keepdim=False, dtype=None): return _convert_stub(pyboost_sum_ext(self, [input, dim, keepdim, dtype if dtype is None else dtype_to_type_id('SumExt', 'dtype', dtype)])) sum_ext_op=SumExt() class SwigluGrad(Primitive): r""" .. code-block:: prim = ops.SwigluGrad() out = prim(grad_output, input, dim) is equivalent to .. code-block:: ops.swiglu_grad(grad_output, input, dim) Refer to :func:`mindspore.ops.swiglu_grad` for more details. """ __mindspore_signature__ = ( sig.make_sig('grad_output'), sig.make_sig('input'), sig.make_sig('dim', default=-1), ) @prim_arg_register def __init__(self): pass def __call__(self, grad_output, input, dim=-1): return _convert_stub(pyboost_swiglu_grad(self, [grad_output, input, dim])) swiglu_grad_op=SwigluGrad() class Swiglu(Primitive): r""" .. code-block:: prim = ops.Swiglu() out = prim(input, dim) is equivalent to .. code-block:: ops.swiglu(input, dim) Refer to :func:`mindspore.ops.swiglu` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('dim', default=-1), ) @prim_arg_register def __init__(self): pass def __call__(self, input, dim=-1): return _convert_stub(pyboost_swiglu(self, [input, dim])) swiglu_op=Swiglu() class TExt(Primitive): r""" .. code-block:: prim = ops.TExt() out = prim(input) is equivalent to .. code-block:: ops.t_ext(input) Refer to :func:`mindspore.ops.t_ext` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_t_ext(self, [input])) t_ext_op=TExt() class Tan(Primitive): r""" .. code-block:: prim = ops.Tan() out = prim(input) is equivalent to .. code-block:: ops.tan(input) Refer to :func:`mindspore.ops.tan` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_tan(self, [input])) tan_op=Tan() class TanhGrad(Primitive): r""" Computes TanhGrad of input element-wise. Returns: Tensor, has the same type as input. """ @prim_arg_register def __init__(self): pass def __call__(self, y, dy): return _convert_stub(pyboost_tanh_grad(self, [y, dy])) tanh_grad_op=TanhGrad()
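
# Illustrative sketch (not part of the generated operator definitions): based on its
# (input, other, alpha) signature, SubExt is assumed to compute input - alpha * other
# with alpha defaulting to 1; this NumPy reference encodes that assumption only.
def _sub_ext_numpy_reference(input_np, other_np, alpha=1):
    import numpy as np
    return np.asarray(input_np) - alpha * np.asarray(other_np)
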
[docs]class Tanh(Primitive): r""" .. code-block:: prim = ops.Tanh() out = prim(input) is equivalent to .. code-block:: ops.tanh(input) Refer to :func:`mindspore.ops.tanh` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_tanh(self, [input]))
tanh_op=Tanh() class TensorCopySlices(Primitive): r""" Copy continues memory. Inputs: - **x** (Tensor) - The target Tensor. - **value** (Tensor) - The tensor to update x. - **begin** (tuple[int]) - A tuple which represents the location where to start. Only constant value is allowed. - **end** (tuple[int]) - A tuple or which represents the maximum location where to end. Only constant value is allowed. - **strides** (tuple[int]) - A tuple which represents the stride is continuously added before reaching the maximum location. Only constant value is allowed. Outputs: - **y** (Tensor), has the same shape and data type of x. Examples: >>> import numpy as np >>> from mindspore.ops.operations import _inner_ops >>> copy_slices = _inner_ops.TensorCopySlices() >>> out = copy_slices(Tensor(np.zeros((5, 5))), Tensor(np.ones((2, 5))), (3, 0), (5, 5), (1, 1)) >>> print(out) [[1., 1., 1., 1., 1.], [1., 1., 1., 1., 1.], [1., 1., 1., 1., 1.], [0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.]] Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` """ @prim_arg_register def __init__(self): pass def __call__(self, x, value, begin, end, strides): return super().__call__(x, value, begin, end, strides) tensor_copy_slices_op=TensorCopySlices() class TensorScatterElements(Primitive): r""" .. code-block:: prim = ops.TensorScatterElements(axis, reduce) out = prim(data, indices, updates) is equivalent to .. code-block:: ops.tensor_scatter_elements(data, indices, updates, axis, reduce) Refer to :func:`mindspore.ops.tensor_scatter_elements` for more details. """ @prim_arg_register def __init__(self, axis=0, reduce='none'): self._set_prim_arg("axis", type_it('TensorScatterElements', 'axis', axis, OpDtype.DT_TENSOR, OpDtype.DT_INT)) self._set_prim_arg_with_handler("reduce", reduce, str_to_enum) def __call__(self, data, indices, updates): return super().__call__(data, indices, updates, self.axis, self.reduce)
[docs]class TensorShape(Primitive): r""" Returns the shape of the input tensor. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> input_x = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32) >>> output = ops.TensorShape()(input_x) >>> print(output) [3 2 1] """ @prim_arg_register def __init__(self): pass def __call__(self, input_x): return super().__call__(input_x)
tensor_shape_op=TensorShape() class TopkExt(Primitive): r""" .. code-block:: prim = ops.TopkExt() out = prim(input, k, dim, largest, sorted) is equivalent to .. code-block:: ops.topk_ext(input, k, dim, largest, sorted) Refer to :func:`mindspore.ops.topk_ext` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('k'), sig.make_sig('dim', default=-1), sig.make_sig('largest', default=True), sig.make_sig('sorted', default=True), ) @prim_arg_register def __init__(self): pass def __call__(self, input, k, dim=-1, largest=True, sorted=True): return _convert_stub(pyboost_topk_ext(self, [input, k, dim, largest, sorted])) topk_ext_op=TopkExt() class TopKRouter(Primitive): r""" .. code-block:: prim = ops.TopKRouter() out = prim(input, capacity, expert_num, drop_type) is equivalent to .. code-block:: ops.topkrouter(input, capacity, expert_num, drop_type) Refer to :func:`mindspore.ops.topkrouter` for more details. """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('capacity'), sig.make_sig('expert_num'), sig.make_sig('drop_type', default=0), ) @prim_arg_register def __init__(self): pass def __call__(self, input, capacity, expert_num, drop_type=0): return super().__call__(input, capacity, expert_num, drop_type) topkrouter_op=TopKRouter() class TraceExt(Primitive): r""" .. code-block:: prim = ops.TraceExt() out = prim(input) is equivalent to .. code-block:: ops.trace_ext(input) Refer to :func:`mindspore.ops.trace_ext` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_trace_ext(self, [input])) trace_ext_op=TraceExt()
[docs]class Trace(Primitive): r""" .. code-block:: prim = ops.Trace() out = prim(input) is equivalent to .. code-block:: ops.trace(input) Refer to :func:`mindspore.ops.trace` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return super().__call__(input)
trace_op=Trace() class TraceV2Grad(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('dout'), sig.make_sig('shape'), sig.make_sig('offset', default=0), sig.make_sig('axis1', default=1), sig.make_sig('axis2', default=0), ) @prim_arg_register def __init__(self): pass def __call__(self, dout, shape, offset=0, axis1=1, axis2=0): return super().__call__(dout, shape, offset, axis1, axis2) tracev2_grad_op=TraceV2Grad() class TraceV2(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('offset', default=0), sig.make_sig('axis1', default=1), sig.make_sig('axis2', default=0), sig.make_sig('dtype', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input, offset=0, axis1=1, axis2=0, dtype=None): return super().__call__(input, offset, axis1, axis2, dtype if dtype is None else dtype_to_type_id('TraceV2', 'dtype', dtype)) trace_v2_op=TraceV2() class TransposeExt(Primitive): r""" .. code-block:: prim = ops.TransposeExt() out = prim(input, dim0, dim1) is equivalent to .. code-block:: ops.transpose_ext(input, dim0, dim1) Refer to :func:`mindspore.ops.transpose_ext` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input, dim0, dim1): return _convert_stub(pyboost_transpose_ext(self, [input, dim0, dim1])) transpose_ext_op=TransposeExt()
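
# Illustrative sketch (not part of the generated operator definitions): judging by
# its (input, dim0, dim1) signature, TransposeExt swaps two dimensions, which in
# NumPy terms is swapaxes; this is an assumption, not taken from the kernel.
def _transpose_ext_numpy_reference(x, dim0, dim1):
    import numpy as np
    return np.swapaxes(np.asarray(x), dim0, dim1)
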
[docs]class Transpose(Primitive): r""" .. code-block:: prim = ops.Transpose() out = prim(input, input_perm) is equivalent to .. code-block:: ops.transpose(input, input_perm) Refer to :func:`mindspore.ops.transpose` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input, input_perm): return _convert_stub(pyboost_transpose(self, [input, input_perm]))
transpose_op=Transpose() class TrilExt(Primitive): r""" .. code-block:: prim = ops.TrilExt(diagonal) out = prim(input) is equivalent to .. code-block:: ops.tril_ext(input, diagonal) Refer to :func:`mindspore.ops.tril_ext` for more details. """ @prim_arg_register def __init__(self, diagonal=0): self._set_prim_arg("diagonal", type_it('TrilExt', 'diagonal', diagonal, OpDtype.DT_TENSOR, OpDtype.DT_INT)) def __call__(self, input): return _convert_stub(pyboost_tril_ext(self, [input, self.diagonal]))
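
# Illustrative sketch (not part of the generated operator definitions): the lower
# triangle with a `diagonal` offset matches numpy.tril, which can be used as a
# reference when checking TrilExt outputs on 2-D inputs.
def _tril_ext_numpy_reference(x, diagonal=0):
    import numpy as np
    return np.tril(np.asarray(x), k=diagonal)
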
[docs]class Triu(Primitive): r""" .. code-block:: prim = ops.Triu(diagonal) out = prim(input) is equivalent to .. code-block:: ops.triu(input, diagonal) Refer to :func:`mindspore.ops.triu` for more details. """ @prim_arg_register def __init__(self, diagonal=0): self._set_prim_arg("diagonal", type_it('Triu', 'diagonal', diagonal, OpDtype.DT_TENSOR, OpDtype.DT_INT)) def __call__(self, input): return _convert_stub(pyboost_triu(self, [input, self.diagonal]))
[docs]class Trunc(Primitive): r""" .. code-block:: prim = ops.Trunc() out = prim(input) is equivalent to .. code-block:: ops.trunc(input) Refer to :func:`mindspore.ops.trunc` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input): return _convert_stub(pyboost_trunc(self, [input]))
trunc_op=Trunc() class TupleToTensor(Primitive): r""" .. code-block:: prim = ops.TupleToTensor() out = prim(input_tuple, dtype) is equivalent to .. code-block:: ops.tuple_to_tensor(input_tuple, dtype) Refer to :func:`mindspore.ops.tuple_to_tensor` for more details. """ __mindspore_signature__ = ( sig.make_sig('input_tuple'), sig.make_sig('dtype', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, input_tuple, dtype=None): return super().__call__(input_tuple, dtype if dtype is None else dtype_to_type_id('TupleToTensor', 'dtype', dtype)) tuple_to_tensor_op=TupleToTensor() class TypeAs(Primitive): r""" .. code-block:: prim = ops.TypeAs() out = prim(input, tensor) is equivalent to .. code-block:: ops.type_as(input, tensor) Refer to :func:`mindspore.ops.type_as` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input, tensor): return _convert_stub(pyboost_type_as(self, [input, tensor])) type_as_op=TypeAs() class UniformExt(Primitive): r""" Generates random numbers according to the Uniform random number distribution. Inputs: - **tensor** (Tensor) - The tensor of random tensor to be generated. - **a** (number) - Lower bound of the random numbers. Default: 0.0. - **b** (number) - Upper bound of the random numbers. Default: 0.0. - **seed** (int) - Seed for random number generation. Default: 0. - **offset** (int) - Positional offset in the tensor to start filling with random numbers. Default: 0. Raises: TypeError: If `a` or `b` is not a float. TypeError: If `tensor` is not a Tensor. ValueError: If `a` is larger than `b`. Outputs: - **output** (Tensor) - With the same type and shape as the 'tensor'. Supported Platforms: ``Ascend`` Examples: >>> import numpy as np >>> from mindspore import Tensor >>> from mindspore.ops.operations.random_ops import UniformExt >>> x = Tensor(np.random.randn(3,4), mstype.float64) >>> uniform = UniformExt() >>> y = uniform(x, a=1.0, b=2.0, seed=10, offset=5) >>> print(y.shape) (3, 4) """ @prim_arg_register def __init__(self): pass def __call__(self, tensor, a, b, seed, offset): return _convert_stub(pyboost_uniform_ext(self, [tensor, a, b, seed, offset])) uniform_ext_op=UniformExt() class Unique2(Primitive): r""" Returns the unique elements of input tensor. when `return_inverse=True`, also return a tensor containing the index of each value of input tensor corresponding to the output unique tensor. when `return_counts=True`, also return a tensor containing the number of occurrences for each unique value or tensor Inputs: - **input**(Tensor) - The input tensor. - **sorted**(bool) - Whether to sort the unique elements in ascending order before returning as output. Default: ``True`` . - **return_inverse**(bool) - Whether to also return the indices for where elements in the original input ended up in the returned unique list. Default: ``False`` . - **return_counts**(bool) - Whether to also return the counts for each unique element. Default: ``False`` . Returns: A tensor or a tuple of tensors containing some of tensor objects (`output`, `inverse_indices`, `counts`). - **output**(Tensor) - the output list of unique scalar elements. - **inverse_indices**(Tensor) - Return when ``return_inverse`` is True. It represents the indices for where elements in the original input map to in the output; The shape is input.shape[dim]. - **counts**(Tensor) - Return when ``return_counts`` is True. It represents the number of occurrences for each unique value or tensor. The shape is output.shape(dim). 
Raises: TypeError: If `input` is not a Tensor. Supported Platforms: ``Ascend`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, nn >>> from mindspore import ops >>> x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32) >>> unique = ops.auto_generate.Unique2() >>> output = unique(x, return_inverse=True) >>> print(output) (Tensor(shape=[3], dtype=Int32, value= [1, 2, 5]), Tensor(shape=[4], dtype=Int32, value= [0, 1, 2, 1])) >>> y = output[0] >>> print(y) [1 2 5] >>> idx = output[1] >>> print(idx) [0 1 2 1] """ __mindspore_signature__ = ( sig.make_sig('input'), sig.make_sig('sorted', default=True), sig.make_sig('return_inverse', default=False), sig.make_sig('return_counts', default=False), ) @prim_arg_register def __init__(self): pass def __call__(self, input, sorted=True, return_inverse=False, return_counts=False): return _convert_stub(pyboost_unique2(self, [input, sorted, return_inverse, return_counts])) unique2_op=Unique2() class UniqueDim(Primitive): r""" Returns the unique elements of input tensor. when `return_inverse=True`, also return a tensor containing the index of each value of input tensor corresponding to the output unique tensor. Inputs: - **input**(Tensor) - The input tensor. - **sorted**(bool) - Whether to sort the unique elements in ascending order before returning as output. - **return_inverse**(bool) - Whether to also return the indices for where elements in the original input ended up in the returned unique list. - **dim**(int) - the dimension to operate upon. Returns: A tensor or a tuple of tensors containing some of tensor objects (`output`, `inverse_indices`, `counts`). - **output**(Tensor) - the output list of unique scalar elements. - **inverse_indices**(Tensor) - Return when ``return_inverse`` is True. It represents the indices for where elements in the original input map to in the output; The shape is input.shape[dim]. - **counts**(Tensor) - Return the number of occurrences for each unique value or tensor. The shape is output.shape(dim). Raises: TypeError: If `input` is not a Tensor. Supported Platforms: ``Ascend`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, nn >>> from mindspore import ops >>> x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32) >>> unique = ops.auto_generate.UniqueDim() >>> output = unique(x, sorted=True, return_inverse=True, dim=0) >>> print(output) (Tensor(shape=[3], dtype=Int32, value= [1, 2, 5]), Tensor(shape=[4], dtype=Int32, value= [0, 1, 2, 1])) >>> y = output[0] >>> print(y) [1 2 5] >>> idx = output[1] >>> print(idx) [0 1 2 1] >>> counts = output[1] >>> print(counts) [1 2 1] """ @prim_arg_register def __init__(self): pass def __call__(self, input, sorted, return_inverse, dim): return _convert_stub(pyboost_unique_dim(self, [input, sorted, return_inverse, dim])) unique_dim_op=UniqueDim()
[docs]class UnsortedSegmentSum(Primitive): r""" .. code-block:: prim = ops.UnsortedSegmentSum() out = prim(input_x, segment_ids, num_segments) is equivalent to .. code-block:: ops.unsorted_segment_sum(input_x, segment_ids, num_segments) Refer to :func:`mindspore.ops.unsorted_segment_sum` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input_x, segment_ids, num_segments): return super().__call__(input_x, segment_ids, num_segments)
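
# Illustrative sketch (not part of the generated operator definitions): a NumPy
# emulation of unsorted segment sum, accumulating slices of `input_x` into their
# segment slot; segments that receive nothing stay zero. Out-of-range segment ids
# are not handled here.
def _unsorted_segment_sum_numpy_reference(input_x, segment_ids, num_segments):
    import numpy as np
    x = np.asarray(input_x)
    ids = np.asarray(segment_ids)
    out = np.zeros((num_segments,) + x.shape[ids.ndim:], dtype=x.dtype)
    for pos in np.ndindex(ids.shape):
        out[int(ids[pos])] += x[pos]
    return out
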
unsorted_segment_sum_op=UnsortedSegmentSum() class UpsampleBicubic2DGrad(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('dy'), sig.make_sig('input_size'), sig.make_sig('output_size', default=None), sig.make_sig('scales', default=None), sig.make_sig('align_corners', default=False), ) @prim_arg_register def __init__(self): pass def __call__(self, dy, input_size, output_size=None, scales=None, align_corners=False): return _convert_stub(pyboost_upsample_bicubic2d_grad(self, [dy, input_size, output_size, scales, align_corners])) upsample_bicubic2d_grad_op=UpsampleBicubic2DGrad() class UpsampleBicubic2D(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('x'), sig.make_sig('output_size', default=None), sig.make_sig('scales', default=None), sig.make_sig('align_corners', default=False), ) @prim_arg_register def __init__(self): pass def __call__(self, x, output_size=None, scales=None, align_corners=False): return _convert_stub(pyboost_upsample_bicubic2d(self, [x, output_size, scales, align_corners])) upsample_bicubic2d_op=UpsampleBicubic2D() class UpsampleBilinear2DGrad(Primitive): r""" Upsample the 2-D gradient data with bilinear interpolation algorithm. Note: One of 'scales' and 'output_size' must be specified. And it is an error if both are specified. Inputs: - **dy** (Tensor) - Tensor of shape [N, C, H, W]. Must be one of the following types: float16, float32, float64. - **input_size** (Union[tuple[int], list[int]]): An required tuple[int] which contains 4 elements: [batch, channels, height, width]. Must: input_size[0] == dy.shape[0] input_size[1] == dy.shape[1]. - **output_size** (Union[tuple[int], list[int]]): An optional tuple[int]. Default: ``None``. It contains 2 elements: height, width, whose elements should be the same as `dy`. Must: dy.shape[2] == output_size[0], dy.shape[3] == output_size[1]. - **scales** (Union[tuple[float], list[float]]): An optional tuple[float]. Default: ``None``. The scale array along each dimension, contain 2 elements: scale_height, scale_width. Must: dy.shape[2] == floor(input_size[2] * scales[0], dy.shape[3] == floor(input_size[3] * scales[1]. - **align_corners** (bool): An optional bool. Default: ``False``. Outputs: - **dx** (Tensor) - A Tensor with shape depending on intput_size, and its' dtype is the same as `dy`. """ __mindspore_signature__ = ( sig.make_sig('dy'), sig.make_sig('input_size'), sig.make_sig('output_size', default=None), sig.make_sig('scales', default=None), sig.make_sig('align_corners', default=False), ) @prim_arg_register def __init__(self): pass def __call__(self, dy, input_size, output_size=None, scales=None, align_corners=False): return _convert_stub(pyboost_upsample_bilinear2d_grad(self, [dy, input_size, output_size, scales, align_corners])) upsample_bilinear2d_grad_op=UpsampleBilinear2DGrad() class UpsampleBilinear2D(Primitive): r""" Performs upsampling with trilinear interpolation across 2dims for 4dim input Tensor. This operator scale up the volumetric input with specified `output_size` or `scales` factors, using trilinear upscaling algorithm. Note: One of `scales` and `output_size` must be specified. And it is an error if both are specified. Inputs: - **x** (Tensor) - 4D tensor of shape :math:`(N, C, H_{in}, W_{in})`. Supporting types: float16, float32, float64]. - **output_size** (Union[tuple[int], list[int]]): A tuple or list of 2 int elements :math:`(output\_height, output\_width)`. Default: ``None``. 
- **scales** (Union[tuple[float], list[float]]): A tuple or list of 2 float elements :math:`(scale\_height, scale\_width)`. Default: ``None``. - **align_corners** (bool, optional): An optional bool. Default: ``False``. If ``True``, the input and output tensors are aligned by the center points of their corner pixels, preserving the values at the corner pixels. If ``False`` , the input and output tensors are aligned by the corner points of their corner pixels, and the interpolation use edge value padding for out of boundary values. Outputs: - **y** (Tensor) - Upsampled output with the same data type as `x`, whose shape is :math:`(N, C, H_{out}, W_{out})`. Raises: TypeError: When `output_size` is not ``None`` and `output_size` is not list[int] or tuple[int]. TypeError: When `scales` is not ``None`` and `scales` is not list[float] or tuple[float]. TypeError: If dtype of `x` is not in [float16, float32, float64]. TypeError: If type of `align_corners` is not bool. ValueError: If any value of `output_size` is negative or zero when `output_size` is not ``None``. ValueError: If any value of `scales` is negative or zero when `scales` is not ``None``. ValueError: If shape of `x` is not 4D. ValueError: If none of `scales` and `output_size` is specified or both specified. ValueError: If size of `scales` is not equal 2 when `scales` is specified. ValueError: If size of `output_size` is not equal 2 when `output_size` is specified. Supported Platforms: ``Ascend`` Examples: >>> import numpy as np >>> from mindspore import Tensor, ops >>> net = ops.UpsampleTrilinear3D() >>> in_x = Tensor(input_data=np.random.randn(2, 3, 4, 512, 256)) >>> output_size=[4, 64, 48] >>> out = net(in_x, output_size, None) >>> print(out.shape) (2, 3, 4, 64, 48) >>> >>> net = ops.auto_generate.UpsampleBilinear2D() >>> in_x = Tensor(np.array([[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], [[0.7, 0.8, 0.9], [1.0, 1.1, 1.2]]]]).astype(np.float32)) >>> output_size=[4, 5] >>> out = net(in_x, output_size, None, True) >>> print(out) [[[[0.1000, 0.1500, 0.2000, 0.2500, 0.3000], [0.2000, 0.2500, 0.3000, 0.3500, 0.4000], [0.3000, 0.3500, 0.4000, 0.4500, 0.5000], [0.4000, 0.4500, 0.5000, 0.5500, 0.6000]], [[0.7000, 0.7500, 0.8000, 0.8500, 0.9000], [0.8000, 0.8500, 0.9000, 0.9500, 1.0000], [0.9000, 0.9500, 1.0000, 1.0500, 1.1000], [1.0000, 1.0500, 1.1000, 1.1500, 1.2000]]]] """ __mindspore_signature__ = ( sig.make_sig('x'), sig.make_sig('output_size', default=None), sig.make_sig('scales', default=None), sig.make_sig('align_corners', default=False), ) @prim_arg_register def __init__(self): pass def __call__(self, x, output_size=None, scales=None, align_corners=False): return _convert_stub(pyboost_upsample_bilinear2d(self, [x, output_size, scales, align_corners])) upsample_bilinear2d_op=UpsampleBilinear2D() class UpsampleLinear1DGrad(Primitive): r""" Upsample the 1-D gradient data with linear interpolation algorithm. Note: One of 'scales' and 'output_size' must be specified. And it is an error if both are specified. Inputs: - **dy** (Tensor) - Tensor of shape [N, C, L]. Must be one of the following types: float16, float32, float64. - **input_size** (Union[tuple[int], list[int]]): An required tuple[int] which contains 3 elements: [batch, channels, length]. Must: input_size[0] == dy.shape[0] input_size[1] == dy.shape[1]. - **output_size** (Union[tuple[int], list[int]]): An optional tuple[int]. Default: ``None``. It contains 1 elements: length, whose elements should be the same as `dy`. Must: dy.shape[2] == output_size[0]. 
- **scales** (Union[tuple[float], list[float]]): An optional tuple[float]. Default: ``None``. The scale array along each dimension, contain 1 elements: length_depth. Must: dy.shape[2] == floor(input_size[2] * scales[0]. - **align_corners** (bool): An optional bool. Default: ``False``. Outputs: - **dx** (Tensor) - A Tensor with shape depending on intput_size, and its' dtype is the same as `dy`. """ __mindspore_signature__ = ( sig.make_sig('dy'), sig.make_sig('input_size'), sig.make_sig('output_size', default=None), sig.make_sig('scales', default=None), sig.make_sig('align_corners', default=False), ) @prim_arg_register def __init__(self): pass def __call__(self, dy, input_size, output_size=None, scales=None, align_corners=False): return _convert_stub(pyboost_upsample_linear1d_grad(self, [dy, input_size, output_size, scales, align_corners])) upsample_linear1d_grad_op=UpsampleLinear1DGrad() class UpsampleLinear1D(Primitive): r""" """ __mindspore_signature__ = ( sig.make_sig('x'), sig.make_sig('output_size', default=None), sig.make_sig('scales', default=None), sig.make_sig('align_corners', default=False), ) @prim_arg_register def __init__(self): pass def __call__(self, x, output_size=None, scales=None, align_corners=False): return _convert_stub(pyboost_upsample_linear1d(self, [x, output_size, scales, align_corners])) upsample_linear1d_op=UpsampleLinear1D() class UpsampleNearest1DGrad(Primitive): r""" Upsample the 1-D gradient data with the nearest neighbor interpolation algorithm. Note: Only one of 'scales' and 'output_size' can be specified, and it is an error if both are specified. Inputs: - **dy** (Tensor) - Tensor of shape [N, C, L], Must be one of the following types: float16, float32, float64. - **input_size** (tuple[int]): An required tuple[int], which contain 3 elements: [min_batch, channels, length]. Must: input_size[0] == dy.shape[0], input_size[1] == dy.shape[1]. - **output_size** (tuple[int]): An optional tuple[int]. Default: ``None``. It contains 1 elements: length, whose elements should be the same as `dy`. Must: dy.shape[2] == output_size[0]. - **scales** (tuple[float]): An optional tuple[float]. Default: ``None``. The scale array along each dimension, contain 1 elements: scale_length. Must: dy.shape[2] == floor(input_size[2] * scales[0]. Outputs: - **dx**- (Tensor) - A 3-D tensor. Has the same type as `dy`, shape depends on `input_size`. """ __mindspore_signature__ = ( sig.make_sig('dy'), sig.make_sig('input_size'), sig.make_sig('output_size', default=None), sig.make_sig('scales', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, dy, input_size, output_size=None, scales=None): return _convert_stub(pyboost_upsample_nearest1d_grad(self, [dy, input_size, output_size, scales])) upsample_nearest1d_grad_op=UpsampleNearest1DGrad() class UpsampleNearest1D(Primitive): r""" Performs nearest neighbor upsampling operation. This operator scale up the volumetric input with specified `output_size` or `scales` factors, using nearest neighbor algorithm. One of `output_size` or `scales` must be given, and can not specified both at the same time. Inputs: - **x** (Tensor) - 3D tensor of shape :math:`(N, C, L_{in})`. Supporting types: [uint8, float16, float32, float64]. - **output_size** (Union[tuple[int], list[int]]): A tuple or list of int specifying the output volumetric size. Default: ``None``. - **scales** (Union[tuple[float], list[float]]): A tuple or list of float specifying the upsampling factors. Default: ``None``. 
Outputs: - **y** (Tensor) - Upsampled output with the same type as `x` , whose shape is :math:`(N, C, L_{out})`. Raises: TypeError: When `output_size` is not ``None`` and `output_size` is not list[int] or tuple[int]. TypeError: When `scales` is not ``None`` and `scales` is not list[float] or tuple[float]. TypeError: If dtype of `x` is not int [uint8, float16, float32, float64]. ValueError: If any value of `output_size` is negative or zero when `output_size` is not ``None``. ValueError: If any value of `scales` is negative or zero when `scales` is not ``None``. ValueError: If shape of `x` is not 3D. ValueError: If none of `scales` and `output_size` is specified or both specified. ValueError: If size of `scales` is not equal 1 when `scales` is specified. ValueError: If size of `output_size` is not equal 1 when `output_size` is specified. Supported Platforms: ``Ascend`` Examples: >>> import numpy as np >>> from mindspore import Tensor, ops >>> from mindspore import dtype as mstype >>> x = Tensor(np.arange(10).reshape(1, 2, 5), mstype.float32) >>> output_size = [8,] >>> net = ops.auto_generate.UpsampleNearest1D() >>> output = net(x, output_size, None) >>> print(output) [[[0., 0., 1., 1., 2., 3., 3., 4.], [5., 5., 6., 6., 7., 8., 8., 9.]]] """ __mindspore_signature__ = ( sig.make_sig('x'), sig.make_sig('output_size', default=None), sig.make_sig('scales', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, x, output_size=None, scales=None): return _convert_stub(pyboost_upsample_nearest1d(self, [x, output_size, scales])) upsample_nearest1d_op=UpsampleNearest1D() class UpsampleNearest2DGrad(Primitive): r""" Upsample the 2-D gradient data with the nearest neighbor interpolation algorithm. Note: Only one of 'scales' and 'output_size' can be specified, and it is an error if both are specified. Inputs: - **dy** (Tensor) - Tensor of shape [N, C, H, W], Must be one of the following types: float16, float32, float64. - **input_size** (tuple[int]): An required tuple[int], which contain 4 elements: [min_batch, channels, height, width]. Must: input_size[0] == dy.shape[0], input_size[1] == dy.shape[1]. - **output_size** (tuple[int]): An optional tuple[int]. Default: ``None``. It contains 2 elements: height, width, whose elements should be the same as `dy`. Must: dy.shape[2] == output_size[0], dy.shape[3] == output_size[1]. - **scales** (tuple[float]): An optional tuple[float]. Default: ``None``. The scale array along each dimension, contain 2 elements: scale_height, scale_width. Must: dy.shape[2] == floor(input_size[2] * scales[0], dy.shape[3] == floor(input_size[3] * scales[1]. Outputs: - **dx**- (Tensor) - A 4-D tensor. Has the same type as `dy`, shape depends on `input_size`. """ __mindspore_signature__ = ( sig.make_sig('dy'), sig.make_sig('input_size'), sig.make_sig('output_size', default=None), sig.make_sig('scales', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, dy, input_size, output_size=None, scales=None): return _convert_stub(pyboost_upsample_nearest2d_grad(self, [dy, input_size, output_size, scales])) upsample_nearest2d_grad_op=UpsampleNearest2DGrad() class UpsampleNearest2D(Primitive): r""" Performs nearest neighbor upsampling operation. This operator scale up the volumetric input with specified `output_size` or `scales` factors, using nearest neighbor algorithm. One of `output_size` or `scales` must be given, and can not specified both at the same time. Inputs: - **x** (Tensor) - 4D tensor of shape :math:`(N, C, H_{in}, W_{in})`. 
          Supported types: [uint8, float16, float32, float64].
        - **output_size** (Union[tuple[int], list[int]]): A tuple or list of int specifying the output size.
          Default: ``None``.
        - **scales** (Union[tuple[float], list[float]]): A tuple or list of float specifying the
          upsampling factors. Default: ``None``.

    Outputs:
        - **y** (Tensor) - Upsampled output with the same type as `x`, whose shape is
          :math:`(N, C, H_{out}, W_{out})`.

    Raises:
        TypeError: When `output_size` is not ``None`` and `output_size` is not list[int] or tuple[int].
        TypeError: When `scales` is not ``None`` and `scales` is not list[float] or tuple[float].
        TypeError: If dtype of `x` is not in [uint8, float16, float32, float64].
        ValueError: If any value of `output_size` is negative or zero when `output_size` is not ``None``.
        ValueError: If any value of `scales` is negative or zero when `scales` is not ``None``.
        ValueError: If shape of `x` is not 4D.
        ValueError: If neither `scales` nor `output_size` is specified, or both are specified.
        ValueError: If size of `scales` is not equal to 2 when `scales` is specified.
        ValueError: If size of `output_size` is not equal to 2 when `output_size` is specified.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> from mindspore import dtype as mstype
        >>> x = Tensor(np.arange(12).astype(np.float32).reshape(1, 2, 2, 3))
        >>> output_size = [4, 4]
        >>> net = ops.auto_generate.UpsampleNearest2D()
        >>> output = net(x, output_size, None)
        >>> print(output)
        [[[[0., 0., 1., 2.],
           [0., 0., 1., 2.],
           [3., 3., 4., 5.],
           [3., 3., 4., 5.]],
          [[6., 6., 7., 8.],
           [6., 6., 7., 8.],
           [9., 9., 10., 11.],
           [9., 9., 10., 11.]]]]
    """
    __mindspore_signature__ = (
        sig.make_sig('x'),
        sig.make_sig('output_size', default=None),
        sig.make_sig('scales', default=None),
    )

    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, x, output_size=None, scales=None):
        return _convert_stub(pyboost_upsample_nearest2d(self, [x, output_size, scales]))

upsample_nearest2d_op=UpsampleNearest2D()


class UpsampleNearest3DGrad(Primitive):
    r"""
    Upsamples the 3-D gradient data with the nearest neighbor interpolation algorithm.

    Note:
        Only one of 'scales' and 'output_size' can be specified, and it is an error if both are specified.

    Inputs:
        - **dy** (Tensor) - Tensor of shape [N, C, D, H, W]. Must be one of the following types:
          float16, float32, float64.
        - **input_size** (tuple[int]): A required tuple[int] containing 5 elements:
          [min_batch, channels, depth, height, width]. Must: input_size[0] == dy.shape[0],
          input_size[1] == dy.shape[1].
        - **output_size** (tuple[int]): An optional tuple[int]. Default: ``None``.
          It contains 3 elements: depth, height, width. Must: dy.shape[2] == output_size[0],
          dy.shape[3] == output_size[1], dy.shape[4] == output_size[2].
        - **scales** (tuple[float]): An optional tuple[float]. Default: ``None``.
          The scale array along each dimension, containing 3 elements: scale_depth, scale_height,
          scale_width. Must: dy.shape[2] == floor(input_size[2] * scales[0]),
          dy.shape[3] == floor(input_size[3] * scales[1]), dy.shape[4] == floor(input_size[4] * scales[2]).

    Outputs:
        - **dx** (Tensor) - A 5-D tensor. Has the same type as `dy`, shape depends on `input_size`.
""" __mindspore_signature__ = ( sig.make_sig('dy'), sig.make_sig('input_size'), sig.make_sig('output_size', default=None), sig.make_sig('scales', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, dy, input_size, output_size=None, scales=None): return _convert_stub(pyboost_upsample_nearest3d_grad(self, [dy, input_size, output_size, scales])) upsample_nearest3d_grad_op=UpsampleNearest3DGrad()
[docs]class UpsampleNearest3D(Primitive): r""" Performs nearest neighbor upsampling operation. This operator scale up the volumetric input with specified `output_size` or `scales` factors, using nearest neighbor algorithm. One of `output_size` or `scales` must be given, and can not specified both at the same time. Inputs: - **x** (Tensor) - 5D tensor of shape :math:`(N, C, D_{in}, H_{in}, W_{in})`. Supporting types: [float16, float32, float64]. - **output_size** (Union[tuple[int], list[int]]): A tuple or list of int specifying the output volumetric size. Default: ``None``. - **scales** (Union[tuple[float], list[float]]): A tuple or list of float specifying the upsampling factors. Default: ``None``. Outputs: - **y** (Tensor) - Upsampled output with the same type as `x` , whose shape is :math:`(N, C, D_{out}, H_{out}, W_{out})`. Raises: TypeError: When `output_size` is not ``None`` and `output_size` is not list[int] or tuple[int]. TypeError: When `scales` is not ``None`` and `scales` is not list[float] or tuple[float]. TypeError: If dtype of `x` is not int [float16, float32, float64]. ValueError: If any value of `output_size` is negative or zero when `output_size` is not ``None``. ValueError: If any value of `scales` is negative or zero when `scales` is not ``None``. ValueError: If shape of `x` is not 5D. ValueError: If none of `scales` and `output_size` is specified or both specified. ValueError: If size of `scales` is not equal 3 when `scales` is specified. ValueError: If size of `output_size` is not equal 3 when `output_size` is specified. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import numpy as np >>> from mindspore import Tensor, ops >>> from mindspore import dtype as mstype >>> x = Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]) ... .reshape([1, 1, 2, 2, 4]), mstype.float32) >>> output_size = [3, 4, 5] >>> net = ops.UpsampleNearest3D() >>> output = net(x, output_size, None) >>> print(output) [[[[[ 1. 1. 2. 3. 4.] [ 1. 1. 2. 3. 4.] [ 5. 5. 6. 7. 8.] [ 5. 5. 6. 7. 8.]] [[ 1. 1. 2. 3. 4.] [ 1. 1. 2. 3. 4.] [ 5. 5. 6. 7. 8.] [ 5. 5. 6. 7. 8.]] [[ 9. 9. 10. 11. 12.] [ 9. 9. 10. 11. 12.] [13. 13. 14. 15. 16.] [13. 13. 14. 15. 16.]]]]] """ __mindspore_signature__ = ( sig.make_sig('x'), sig.make_sig('output_size', default=None), sig.make_sig('scales', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, x, output_size=None, scales=None): return _convert_stub(pyboost_upsample_nearest3d(self, [x, output_size, scales]))
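# A hedged sketch (illustrative only, not generated documentation): the same primitive can be driven
# by `scales` instead of `output_size`. Following the floor relation documented for the grad primitive
# (output length = floor(input length * scale)), an input of shape (1, 1, 2, 2, 4) with scales
# (1.5, 2.0, 1.25) would produce shape (1, 1, 3, 4, 5); whether a given backend accepts exactly this
# tuple-of-float form is an assumption. Reuses `x` from the Examples block above.
# >>> out = ops.UpsampleNearest3D()(x, None, (1.5, 2.0, 1.25))
# >>> print(out.shape)
# (1, 1, 3, 4, 5)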
upsample_nearest3d_op=UpsampleNearest3D()


class UpsampleTrilinear3DGrad(Primitive):
    r"""
    Upsamples the 3-D gradient data with the trilinear interpolation algorithm.

    Note:
        One of 'scales' and 'output_size' must be specified, and it is an error if both are specified.

    Inputs:
        - **dy** (Tensor) - Tensor of shape [N, C, D, H, W]. Must be one of the following types:
          float16, float32, float64.
        - **input_size** (Union[tuple[int], list[int]]): A required tuple[int] containing 5 elements:
          [batch, channels, depth, height, width]. Must: input_size[0] == dy.shape[0],
          input_size[1] == dy.shape[1].
        - **output_size** (Union[tuple[int], list[int]]): An optional tuple[int]. Default: ``None``.
          It contains 3 elements: depth, height, width. Must: dy.shape[2] == output_size[0],
          dy.shape[3] == output_size[1], dy.shape[4] == output_size[2].
        - **scales** (Union[tuple[float], list[float]]): An optional tuple[float]. Default: ``None``.
          The scale array along each dimension, containing 3 elements: scale_depth, scale_height,
          scale_width. Must: dy.shape[2] == floor(input_size[2] * scales[0]),
          dy.shape[3] == floor(input_size[3] * scales[1]), dy.shape[4] == floor(input_size[4] * scales[2]).
        - **align_corners** (bool): An optional bool. Default: ``False``.

    Outputs:
        - **dx** (Tensor) - A Tensor with shape depending on input_size, and its dtype is the same as `dy`.
    """
    __mindspore_signature__ = (
        sig.make_sig('dy'),
        sig.make_sig('input_size'),
        sig.make_sig('output_size', default=None),
        sig.make_sig('scales', default=None),
    )

    @prim_arg_register
    def __init__(self, align_corners=False):
        self._set_prim_arg("align_corners", align_corners)

    def __call__(self, dy, input_size, output_size=None, scales=None):
        return _convert_stub(pyboost_upsample_trilinear3d_grad(self, [dy, input_size, output_size, scales, self.align_corners]))
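# A hedged sketch (the shapes are assumptions, not verified documentation): pairing
# UpsampleTrilinear3DGrad with the second forward example of UpsampleTrilinear3D below, where an input
# of size (1, 1, 1, 2, 2) is upsampled to output_size (2, 4, 4); `dy` carries the forward output's
# shape and `dx` returns with the forward input's shape.
# >>> import numpy as np
# >>> from mindspore import Tensor, dtype as mstype
# >>> grad_op = UpsampleTrilinear3DGrad(align_corners=False)
# >>> dy = Tensor(np.ones((1, 1, 2, 4, 4)), mstype.float32)
# >>> dx = grad_op(dy, (1, 1, 1, 2, 2), (2, 4, 4), None)
# >>> print(dx.shape)
# (1, 1, 1, 2, 2)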
[docs]class UpsampleTrilinear3D(Primitive): r""" Performs upsampling with trilinear interpolation across 3dims for 5dim input Tensor. This operator scale up the volumetric input with specified `output_size` or `scales` factors, using trilinear upscaling algorithm. Note: One of `scales` and `output_size` must be specified. And it is an error if both are specified. Args: align_corners (bool, optional): An optional bool. Default: ``False``. If ``True``, the input and output tensors are aligned by the center points of their corner pixels, preserving the values at the corner pixels. If ``False`` , the input and output tensors are aligned by the corner points of their corner pixels, and the interpolation use edge value padding for out of boundary values. Inputs: - **x** (Tensor) - 5D tensor of shape :math:`(N, C, D_{in}, H_{in}, W_{in})`. Supporting types: [float16, float32, float64]. - **output_size** (Union[tuple[int], list[int]]): A tuple or list of 3 int elements :math:`(output\_depth, output\_height, output\_width)`. Default: ``None``. - **scales** (Union[tuple[float], list[float]]): A tuple or list of 3 float elements :math:`(scale\_depth, scale\_height, scale\_width)`. Default: ``None``. Outputs: - **y** (Tensor) - Upsampled output with the same data type as `x`, whose shape is :math:`(N, C, D_{out}, H_{out}, W_{out})`. Raises: TypeError: When `output_size` is not ``None`` and `output_size` is not list[int] or tuple[int]. TypeError: When `scales` is not ``None`` and `scales` is not list[float] or tuple[float]. TypeError: If dtype of `x` is not in [float16, float32, float64]. TypeError: If type of `align_corners` is not bool. ValueError: If any value of `output_size` is negative or zero when `output_size` is not ``None``. ValueError: If any value of `scales` is negative or zero when `scales` is not ``None``. ValueError: If shape of `x` is not 5D. ValueError: If none of `scales` and `output_size` is specified or both specified. ValueError: If size of `scales` is not equal 3 when `scales` is specified. ValueError: If size of `output_size` is not equal 3 when `output_size` is specified. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import numpy as np >>> from mindspore import Tensor, ops >>> net = ops.UpsampleTrilinear3D() >>> in_x = Tensor(input_data=np.random.randn(2, 3, 4, 512, 256)) >>> output_size=[4, 64, 48] >>> out = net(in_x, output_size, None) >>> print(out.shape) (2, 3, 4, 64, 48) >>> >>> net = ops.UpsampleTrilinear3D() >>> in_x = Tensor(np.arange(1, 5, dtype=np.float32).reshape((1, 1, 1, 2, 2))) >>> output_size=[2, 4, 4] >>> out = net(in_x, output_size, None) >>> print(out) [[[[[1. 1.25 1.75 2. ] [1.5 1.75 2.25 2.5 ] [2.5 2.75 3.25 3.5 ] [3. 3.25 3.75 4. ]] [[1. 1.25 1.75 2. ] [1.5 1.75 2.25 2.5 ] [2.5 2.75 3.25 3.5 ] [3. 3.25 3.75 4. ]]]]] """ __mindspore_signature__ = ( sig.make_sig('x'), sig.make_sig('output_size', default=None), sig.make_sig('scales', default=None), ) @prim_arg_register def __init__(self, align_corners=False): self._set_prim_arg("align_corners", align_corners) def __call__(self, x, output_size=None, scales=None): return _convert_stub(pyboost_upsample_trilinear3d(self, [x, output_size, scales, self.align_corners]))
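# A brief, hedged usage note (illustrative only): `align_corners` is a constructor argument rather
# than a call argument, so switching interpolation behaviour means building a new primitive instance.
# The output shape is unchanged; only the interpolated values differ. Reuses `in_x`, `output_size` and
# `out` from the second Examples snippet above.
# >>> net_ac = ops.UpsampleTrilinear3D(align_corners=True)
# >>> out_ac = net_ac(in_x, output_size, None)
# >>> print(out_ac.shape == out.shape)
# True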
class ViewAs(Primitive): r""" .. code-block:: prim = ops.ViewAs() out = prim(input, other) is equivalent to .. code-block:: ops.view_as(input, other) Refer to :func:`mindspore.ops.view_as` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input, other): return _convert_stub(pyboost_view_as(self, [input, other])) view_as_op=ViewAs() class View(Primitive): r""" .. code-block:: prim = ops.View() out = prim(input, shape) is equivalent to .. code-block:: ops.view(input, shape) Refer to :func:`mindspore.ops.view` for more details. """ @prim_arg_register def __init__(self): pass def __call__(self, input, shape): return super().__call__(input, shape) view_op=View() class XLogYScalarOther(Primitive): r""" Computes the first input tensor multiplied by the logarithm of second input tensor element-wise. Returns zero when `input` is zero. .. math:: out_i = input_{i}\ln{other_{i}} Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent. Inputs: - **input** (Tensor) - The first input is a tensor. - **other** (number) - The second input is a number. Outputs: - **y** (Tensor) - the shape is the same as the first input, and the data type is the one with higher precision or higher digits among the two inputs. Raises: TypeError: If `input` is not a Tensor. TypeError: If `other` is not a number. Supported Platforms: ``Ascend`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> from mindspore.ops.auto_generate import XLogYScalarOther >>> input = Tensor(np.array([-5, 0, 4]), mindspore.float32) >>> other = 2 >>> op = XLogYScalarOther() >>> output = op(input, other) >>> print(output) [-3.465736 0. 2.7725887] """ @prim_arg_register def __init__(self): pass def __call__(self, input, other): return _convert_stub(pyboost_xlogy_scalar_other(self, [input, other])) xlogy_scalar_other_op=XLogYScalarOther() class XLogYScalarSelf(Primitive): r""" Computes the first input tensor multiplied by the logarithm of second input tensor element-wise. Returns zero when `input` is zero. .. math:: out_i = input_{i}\ln{other_{i}} Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent. Inputs: - **input** (number) - The first input is a number. - **other** (Tensor) - The second input is a tensor. Outputs: - **y** (Tensor) - the shape is the same as the second input, and the data type is the one with higher precision or higher digits among the two inputs. Raises: TypeError: If `input` is not a number. TypeError: If `other` is not a Tensor. Supported Platforms: ``Ascend`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor >>> from mindspore.ops.auto_generate import XLogYScalarSelf >>> input = 3 >>> other = Tensor(np.array([2, 2, 2]), mindspore.float32) >>> op = XLogYScalarSelf() >>> output = op(input, other) >>> print(output) [2.07944155 2.07944155 2.07944155] """ @prim_arg_register def __init__(self): pass def __call__(self, input, other): return _convert_stub(pyboost_xlogy_scalar_self(self, [input, other])) xlogy_scalar_self_op=XLogYScalarSelf()
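# A hedged sketch for the two view primitives above (neither ships an Examples block; the shapes here
# are assumptions chosen so the element counts match): `View` reshapes to an explicit target shape,
# while `ViewAs` borrows the shape of a second tensor, mirroring :func:`mindspore.ops.view` and
# :func:`mindspore.ops.view_as`.
# >>> import numpy as np
# >>> from mindspore import Tensor, dtype as mstype
# >>> a = Tensor(np.arange(6), mstype.float32)
# >>> b = view_op(a, (2, 3))
# >>> c = view_as_op(a, Tensor(np.zeros((3, 2)), mstype.float32))
# >>> print(b.shape, c.shape)
# (2, 3) (3, 2)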
[docs]class Xlogy(Primitive): r""" Computes the first input tensor multiplied by the logarithm of second input tensor element-wise. Returns zero when `input` is zero. .. math:: out_i = input_{i}\ln{other_{i}} Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent. Inputs: - **input** (Tensor, numbers.Number, bool) - The first input is a numbers.Number or a bool or a tensor whose data type is `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_. - **other** (Tensor, numbers.Number, bool) - The second input is a numbers.Number or a bool or a tensor whose data type is `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_. Outputs: - **y** (Tensor) - the shape is the broadcast of `input` and `other`, and the data type is the one with higher precision or higher digits among the two inputs. Raises: TypeError: If `input` is not a Tensor, number or bool. TypeError: If `other` is not a Tensor, number or bool. ValueError: If `input` and `other` can not broadcast. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> input = Tensor(np.array([-5, 0, 4]), mindspore.float32) >>> other = Tensor(np.array([2, 2, 2]), mindspore.float32) >>> op = ops.Xlogy() >>> output = op(input, other) >>> print(output) [-3.465736 0. 2.7725887] """ __mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T) @prim_arg_register def __init__(self): pass def __call__(self, input, other): return _convert_stub(pyboost_xlogy(self, [input, other]))
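# A small, hedged follow-up to the Xlogy example above (reusing `op` and `input` from that snippet):
# because the inputs follow implicit type conversion and broadcasting, a 0-D tensor broadcasts against
# the vector and the values still follow out_i = input_i * ln(other_i).
# >>> scalar_other = Tensor(np.array(2.0), mindspore.float32)
# >>> print(op(input, scalar_other))
# [-3.465736   0.         2.7725887]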
xlogy_op=Xlogy()


class ZerosLikeExt(Primitive):
    r"""
    Returns a Tensor filled with the value 0, whose shape and data type are the same as the input.

    Refer to :func:`mindspore.ops.zeros_like` for more details.

    Args:
        - **input** (Tensor) - Tensor of any dimension.
        - **dtype** (mindspore.dtype, optional) - The desired data type of the output Tensor.
          If ``None``, the data type of `input` is used. Default: ``None``.

    Returns:
        Tensor, filled with the value 0, with the same shape as `input`.

    Supported Platforms:
        ``Ascend``
    """
    __mindspore_signature__ = (
        sig.make_sig('input'),
        sig.make_sig('dtype', default=None),
    )

    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, input, dtype=None):
        return _convert_stub(pyboost_zeros_like_ext(self, [input, dtype if dtype is None else dtype_to_type_id('ZerosLikeExt', 'dtype', dtype)]))

zeros_like_ext_op=ZerosLikeExt()
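# A hedged sketch for ZerosLikeExt (the dtype override is read off the signature above rather than a
# separately documented contract): passing `dtype` produces a zero-filled result in that type while
# keeping the input's shape.
# >>> import numpy as np
# >>> from mindspore import Tensor, dtype as mstype
# >>> x = Tensor(np.ones((2, 3)), mstype.float32)
# >>> z = zeros_like_ext_op(x, dtype=mstype.int32)
# >>> print(z.shape)
# (2, 3)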
class ZerosLike(Primitive):
    r"""
    Returns a Tensor filled with the value 0, whose shape and data type are the same as the input.

    Inputs:
        - **input_x** (Tensor) - Input Tensor of any dimension.

    Outputs:
        Tensor, has the same shape and data type as `input_x` but filled with zeros.

    Raises:
        TypeError: If `input_x` is not a Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor, ops
        >>> zeroslike = ops.ZerosLike()
        >>> input_x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
        >>> output = zeroslike(input_x)
        >>> print(output)
        [[0. 0.]
         [0. 0.]]
    """

    @prim_arg_register
    def __init__(self):
        pass

    def __call__(self, x):
        return super().__call__(x)
zeros_like_op=ZerosLike() class DynamicQuantExt(Primitive): r""" Performs dynamic quantization on the input tensor. Note: - Dynamic quantization is performed by adjusting the scale of the input tensor dynamically. - The `smooth_scales` tensor provides a mechanism to smooth out the scaling factors to avoid sudden changes. - The input tensor `x` must be at least 1-dimensional, with shape :math:`(batches, n)`. - The `smooth_scales` tensor must have shape `(n)`. - The output `scale` tensor has shape `(batches)`. .. math:: \begin{array}{ll} \\ \text{scale} = \frac{\max(\left| x \right|, \text{axis}=-1)}{127} \\ \text{y} = \text{round}\left(\frac{x}{\text{scale}}\right) \\ \end{array} Inputs: x (Tensor): The first input is a tensor of data type float16 or bfloat16. It contains the data to be quantized. smooth_scales (Tensor): The second input is a tensor of data type float16 or bfloat16. It contains the scaling factors used for dynamic quantization. Outputs: tuple[Tensor], tuple of 2 tensors, representing the quantized values and the scales used. - **y** (Tensor) - The quantized tensor. - **scale** (Tensor) - The scales used for quantization. Raises: ValueError: If the rank of `x` is not at least 1. Supported Platforms: ``Ascend`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, ops >>> input = Tensor(np.random.rand(2, 3), mindspore.float16) >>> smooth_scales = Tensor(np.random.rand(3), mindspore.float16) >>> output = ops.auto_generate.DynamicQuantExt()(input, smooth_scales) >>> print(output.shape) (2, 3) """ __mindspore_signature__ = ( sig.make_sig('x'), sig.make_sig('smooth_scales', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, x, smooth_scales=None): return _convert_stub(pyboost_dynamic_quant_ext(self, [x, smooth_scales])) dynamic_quant_ext_op=DynamicQuantExt() class FusedInferAttentionScore(Primitive): r""" The interface for fully inference. .. warning:: This is an experimental API that is subject to change or deletion. Args: num_heads (int): The number of heads, equal to `N` when input_layout is `BNSD`. scale_value (float): The scale value indicating the scale coefficient, which is used as the scalar of Muls in the calculation. Generally, the value is 1.0 / (D ** 0.5). Default: ``1.0``. pre_tokens (int): Parameter for sparse computation, represents how many tokens are counted forward. Default: ``2147483647``. Invalid when Q_S is 1. next_tokens (int): Parameter for sparse computation, represents how many tokens are counted backward. Default: ``2147483647``. Invalid when Q_S is 1. input_layout (str): Specifies the layout of input `query`, key and value. "BSH", "BNSD" or "BSND" is supported. Default "BSH". num_key_value_heads (int): head numbers of key/value which are used in GQA algorithm. Default: ``0``. The value 0 indicates if the key and value have the same head nums, num_heads must be divisible by num_key_value_heads. sparse_mode (int): Indicates sparse mode. Default ``0``. - 0: Indicates the defaultMask mode. If attn_mask is not passed, the mask operation is not performed, and preTokens and nextTokens(internally assigned as INT_MAX) are ignored. If passed in, the full attn_mask matrix (S1 * S2) needs to be passed in, indicating that the part between preTokens and nextTokens needs to be calculated. - 1: Represents allMask, that is, passing in the complete attn_mask matrix. 
- 2: Representing the leftUpCausal mode corresponds to the lower triangle scenario divided by the left vertex, and the optimized attn_mask matrix (2048*2048) is required. - 3: Representing the rightDownCausal model corresponds to the lower triangle scene divided by the lower right vertex, and the optimized attn_mask matrix (2048*2048) is required. - 4: Represents the band scenario, that is, the part between counting preTokens and nextTokens, and the optimized attn_mask matrix (2048*2048) is required. - 5: Represents the prefix scenario, not implemented yet. - 6: Represents the global scenario, not implemented yet. - 7: Represents the dilated scenario, not implemented yet. - 8: Represents the block_local scenario, not implemented yet. inner_precise (int): There are four modes: 0, 1, 2, and 3. Only support 0 and 1 when Q_S is 1. Default: ``1``. - 0: Enable high-precise mode, without row invalid correction. - 1: High-performance mode, without row invalid correction. - 2: Enable high-precise mode, with row invalid correction. - 3: High-performance mode, with row invalid correction. block_size (int): Maximum number of tokens per block in the KV cache block for PageAttention. Default: ``0``. antiquant_mode (int): Pseudo-quantization mode, 0: per-channel, 1: per-token. This parameter is invalid when Q_S greater than or equal to 2. Default: ``0``. softmax_lse_flag (bool): Whether to output softmax_lse. Default: ``False``. Inputs: - **query** (Tensor) - The query tensor with data type of Int8, float16 or BFloat16. Input tensor of shape :math:`(B, S, H)`, :math:`(B, N, S, D)`, or :math:`(B, S, N, D)`. - **key** (TensorList) - The key tensor with data type of float16 or BFloat16. Input tensor of shape :math:`(B, S, H)`, :math:`(B, N, S, D)`, or :math:`(B, S, N, D)`. - **value** (TensorList) - The value tensor with data type of float16 or BFloat16. Input tensor of shape :math:`(B, S, H)`, :math:`(B, N, S, D)`, or :math:`(B, S, N, D)`. - **pse_shift** (Tensor) - The padding mask tensor with data type of float16 or BFloat16. Default: ``None``. - **attn_mask** (Tensor) - The attention mask tensor with data type of int8, uint8 or bool. For each element, 0 indicates retention and 1 indicates discard. Default: ``None``. - **actual_seq_lengths** (Tensor) - Describe actual sequence length of each input with data type of int64. Default: ``None``. - **actual_seq_lengths_kv** (Tensor) - Describe actual sequence length of each input with data type of int64. Default: ``None``. - **dequant_scale1** (Tensor) - Quantization factor for inverse quantization after BMM1 with data type of uint64. Default: ``None``. - **quant_scale1** (Tensor) - Quantization factors for quantization before BMM2 with data type of float32. Default: ``None``. - **dequant_scale2** (Tensor) - Quantization factors for quantification after BMM2 with data type of uint64. Default: ``None``. - **quant_scale2** (Tensor) - Quantization factors for output quantization with data type of float32, BFloat16. Default: ``None``. - **quant_offset2** (Tensor) - Quantization offset for output quantization with data type of float32, BFloat16. Default: ``None``. - **antiquant_scale** (Tensor) - Inverse quantization factor with data type of float16, float32, BFloat16. Only support float16 when Q_S greater than or equal to 2. Default: ``None``. - **antiquant_offset** (Tensor) - Inverse quantization offset with data type of float16, float32, BFloat16. Only support float16 when Q_S greater than or equal to 2. Default: ``None``. 
- **block_table** (Tensor) - Block mapping table in KV cache for PageAttention. Default: ``None``. - **query_padding_size** (Tensor) - Whether each batch of data in the Query is right-aligned. If yes, the number of alignment times is provided. Reserved parameter, not supported yet. Default: ``None``. - **kv_padding_size** (Tensor) - Whether each batch of data in the Key/Value is right-aligned. If yes, the number of alignment times is provided. Valid only when Q_S is 1. Default: ``None``. Outputs: - **attention_out** (Tensor) - Input tensor, and the shape is :math:`(B, S, H)`, :math:`(B, N, S, D)`, or :math:`(B, S, N, D)`. - **softmas_lse** (Tensor[Float32]) - Shape is `(B, N, Q_S, 1)`. The softmas_lse is calculated only if softmax_lse_flag is 1. Supported Platforms: ``Ascend`` Examples: >>> from mindspore.ops.operations import _infer_ops as infer_ops >>> from mindspore import Tensor >>> import numpy as np >>> B = 1 >>> N = 16 >>> S = 256 >>> D = 16 >>> query = Tensor(np.ones((B, N, S, D), dtype=np.float16)) >>> key = [Tensor(np.ones((B, N, S, D), dtype=np.float16))] >>> value = [Tensor(np.ones((B, N, S, D), dtype=np.float16))] >>> fias = infer_ops.FusedInferAttentionScore(num_heads=N, input_layout='BNSD') >>> out = fias(query, key, value, None, None, ... None, None, None, None, None, ... None, None, None, None, None, ... None, None) >>> print(out[0].shape) (1, 16, 256, 16) """ __mindspore_signature__ = ( sig.make_sig('query'), sig.make_sig('key'), sig.make_sig('value'), sig.make_sig('pse_shift', default=None), sig.make_sig('attn_mask', default=None), sig.make_sig('actual_seq_lengths', default=None), sig.make_sig('actual_seq_lengths_kv', default=None), sig.make_sig('dequant_scale1', default=None), sig.make_sig('quant_scale1', default=None), sig.make_sig('dequant_scale2', default=None), sig.make_sig('quant_scale2', default=None), sig.make_sig('quant_offset2', default=None), sig.make_sig('antiquant_scale', default=None), sig.make_sig('antiquant_offset', default=None), sig.make_sig('block_table', default=None), sig.make_sig('query_padding_size', default=None), sig.make_sig('kv_padding_size', default=None), ) @prim_arg_register def __init__(self, num_heads, scale_value=1.0, pre_tokens=2147483647, next_tokens=2147483647, input_layout='BSH', num_key_value_heads=0, sparse_mode=0, inner_precise=1, block_size=0, antiquant_mode=0, softmax_lse_flag=False): self._set_prim_arg("num_heads", num_heads) self._set_prim_arg("scale_value", scale_value) self._set_prim_arg("pre_tokens", pre_tokens) self._set_prim_arg("next_tokens", next_tokens) self._set_prim_arg_with_handler("input_layout", input_layout, str_to_enum) self._set_prim_arg("num_key_value_heads", num_key_value_heads) self._set_prim_arg("sparse_mode", sparse_mode) self._set_prim_arg("inner_precise", inner_precise) self._set_prim_arg("block_size", block_size) self._set_prim_arg("antiquant_mode", antiquant_mode) self._set_prim_arg("softmax_lse_flag", softmax_lse_flag) def __call__(self, query, key, value, pse_shift=None, attn_mask=None, actual_seq_lengths=None, actual_seq_lengths_kv=None, dequant_scale1=None, quant_scale1=None, dequant_scale2=None, quant_scale2=None, quant_offset2=None, antiquant_scale=None, antiquant_offset=None, block_table=None, query_padding_size=None, kv_padding_size=None): return super().__call__(query, key, value, pse_shift, attn_mask, actual_seq_lengths, actual_seq_lengths_kv, dequant_scale1, quant_scale1, dequant_scale2, quant_scale2, quant_offset2, antiquant_scale, antiquant_offset, block_table, query_padding_size, 
kv_padding_size, self.num_heads, self.scale_value, self.pre_tokens, self.next_tokens, self.input_layout, self.num_key_value_heads, self.sparse_mode, self.inner_precise, self.block_size, self.antiquant_mode, self.softmax_lse_flag) class GroupedMatmul(Primitive): r""" .. code-block:: prim = ops.GroupedMatmul(split_item, group_type) out = prim(x, weight, bias, scale, offset, antiquant_scale, antiquant_offset, group_list) is equivalent to .. code-block:: ops.grouped_matmul(x, weight, bias, scale, offset, antiquant_scale, antiquant_offset, group_list, split_item, group_type) Refer to :func:`mindspore.ops.grouped_matmul` for more details. """ __mindspore_signature__ = ( sig.make_sig('x'), sig.make_sig('weight'), sig.make_sig('bias', default=None), sig.make_sig('scale', default=None), sig.make_sig('offset', default=None), sig.make_sig('antiquant_scale', default=None), sig.make_sig('antiquant_offset', default=None), sig.make_sig('group_list', default=None), ) @prim_arg_register def __init__(self, split_item=0, group_type=-1): self._set_prim_arg("split_item", split_item) self._set_prim_arg("group_type", group_type) def __call__(self, x, weight, bias=None, scale=None, offset=None, antiquant_scale=None, antiquant_offset=None, group_list=None): return _convert_stub(pyboost_grouped_matmul(self, [x, weight, bias, scale, offset, antiquant_scale, antiquant_offset, group_list, self.split_item, self.group_type])) class KVCacheScatterUpdate(Primitive): r""" .. code-block:: prim = ops.KVCacheScatterUpdate() out = prim(var, indices, updates, axis, reduce) is equivalent to .. code-block:: ops.kv_cache_scatter_update(var, indices, updates, axis, reduce) Refer to :func:`mindspore.ops.kv_cache_scatter_update` for more details. """ __mindspore_signature__ = ( sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T), sig.make_sig('indices', dtype=sig.sig_dtype.T1), sig.make_sig('updates', dtype=sig.sig_dtype.T), sig.make_sig('axis', dtype=sig.sig_dtype.T2), sig.make_sig('reduce', dtype=sig.sig_dtype.T3, default='none'), ) @prim_arg_register def __init__(self): self.add_prim_attr("side_effect_mem", True) def __call__(self, var, indices, updates, axis, reduce='none'): return super().__call__(var, indices, updates, axis, str_to_enum('KVCacheScatterUpdate', 'reduce', reduce)) kv_cache_scatter_update_op=KVCacheScatterUpdate() class MoeFinalizeRouting(Primitive): r""" .. code-block:: prim = ops.MoeFinalizeRouting() out = prim(expanded_x, x1, x2, bias, scales, expanded_row_idx, expanded_expert_idx) is equivalent to .. code-block:: ops.moe_finalize_routing(expanded_x, x1, x2, bias, scales, expanded_row_idx, expanded_expert_idx) Refer to :func:`mindspore.ops.moe_finalize_routing` for more details. """ __mindspore_signature__ = ( sig.make_sig('expanded_x'), sig.make_sig('x1'), sig.make_sig('x2', default=None), sig.make_sig('bias', default=None), sig.make_sig('scales', default=None), sig.make_sig('expanded_row_idx', default=None), sig.make_sig('expanded_expert_idx', default=None), ) @prim_arg_register def __init__(self): pass def __call__(self, expanded_x, x1, x2=None, bias=None, scales=None, expanded_row_idx=None, expanded_expert_idx=None): return _convert_stub(pyboost_moe_finalize_routing(self, [expanded_x, x1, x2, bias, scales, expanded_row_idx, expanded_expert_idx])) moe_finalize_routing_op=MoeFinalizeRouting() class QuantBatchMatmul(Primitive): r""" .. code-block:: prim = ops.QuantBatchMatmul(transpose_x1, transpose_x2, dtype) out = prim(x1, x2, scale, offset, bias, pertokenScaleOptional) is equivalent to .. 
code-block:: ops.quant_batch_matmul(x1, x2, scale, offset, bias, pertokenScaleOptional, transpose_x1, transpose_x2, dtype) Refer to :func:`mindspore.ops.quant_batch_matmul` for more details. """ __mindspore_signature__ = ( sig.make_sig('x1', dtype=sig.sig_dtype.T), sig.make_sig('x2', dtype=sig.sig_dtype.T), sig.make_sig('scale', dtype=sig.sig_dtype.T1), sig.make_sig('offset', dtype=sig.sig_dtype.T2, default=None), sig.make_sig('bias', dtype=sig.sig_dtype.T3, default=None), sig.make_sig('pertokenScaleOptional', dtype=sig.sig_dtype.T4, default=None), ) @prim_arg_register def __init__(self, transpose_x1=False, transpose_x2=False, dtype=mstype.float16): self._set_prim_arg("transpose_x1", transpose_x1) self._set_prim_arg("transpose_x2", transpose_x2) self._set_prim_arg_with_handler("dtype", dtype, dtype_to_type_id) def __call__(self, x1, x2, scale, offset=None, bias=None, pertokenScaleOptional=None): return _convert_stub(pyboost_quant_batch_matmul(self, [x1, x2, scale, offset, bias, pertokenScaleOptional, self.transpose_x1, self.transpose_x2, self.dtype])) class QuantLinearSparse(Primitive): r""" Matmul with a8w8 quant and weight compressed. .. warning:: This is an experimental API that is subject to change or deletion. Note: - The input `weight` and `compressIdx` should be generated by the compress tool of model_slim. - Only support Ascend 310p. Inputs: x (Tensor): The left matrix with data type of int8. weight (Tensor): The compressed 1-D weight with data type of int8. deq_scale (Tensor): The dequant scale with data type of int64. compress_idx (Tensor): The index for decompress weight with data type of int8. bias (Tensor): The bias with data type of int32. Outputs: A 2-D Tensor with data type of float16. Supported Platforms: ``Ascend`` """ @prim_arg_register def __init__(self): pass def __call__(self, x, weight, deq_scale, compress_idx, bias): return super().__call__(x, weight, deq_scale, compress_idx, bias) quant_linear_sparse_op=QuantLinearSparse() class QuantV2(Primitive): r""" Returns the quantized value of input x. If `sqrt_mode` is False: .. math:: y = round(scale * x + offset) If `sqrt_mode` is True: .. math:: y = round(scale * x * scale + offset) Inputs: x (Tensor) : Input tensor. Its data type must be mindspore.float16, mindspore.float32 or mindspore.bfloat32. scale (Tensor) : Scaling ratio tensor in quantization. Its type is the same as x. Its shape in the last axis must equal to the shape of x in the last axis, and shapes of other dimensions must be 1. offset (Tensor) : Offset tensor in quantization. Its type is the same as x. Its shape in the last axis must equal to the shape of x in the last axis, and shapes of other dimensions must be 1. sqrt_mode (bool) : Specifies whether to perform square root on `scale`. Only support: ``False``. rounding_mode (str) : Specifies the way to round. Only support: "ROUND". dst_type (Type) : Specifies the output type. Only support ``int8``. Returns: Tensor, the quantized output tensor of type mindspore.int8. Its shape is the same as x. Raises: TypeError: If input, scale or offset is not a Tensor. ValueError: The shape of scale or offset in the last axis is different from the shape of x in the last axis. 
Supported Platforms: ``Ascend`` Examples: >>> from mindspore import Tensor >>> from mindspore.ops.operations import _infer_ops as infer_ops >>> x = Tensor([100.0, 150.0], mstype.float32) >>> scale = Tensor([80.0, 40.0], mstype.float32) >>> offset = Tensor([0.0, 2.0], mstype.float32) >>> quant = infer_ops.QuantV2() >>> y = quant(x, scale, offset, False, "Round", mstype.int8) [127 127] """ __mindspore_signature__ = ( sig.make_sig('x'), sig.make_sig('scale'), sig.make_sig('offset'), sig.make_sig('sqrt_mode', default=False), sig.make_sig('rounding_mode', default='ROUND'), sig.make_sig('dst_type', default=mstype.int8), ) @prim_arg_register def __init__(self): pass def __call__(self, x, scale, offset, sqrt_mode=False, rounding_mode='ROUND', dst_type=mstype.int8): return _convert_stub(pyboost_quant_v2(self, [x, scale, offset, sqrt_mode, str_to_enum('QuantV2', 'rounding_mode', rounding_mode), dtype_to_type_id('QuantV2', 'dst_type', dst_type)])) quant_v2_op=QuantV2() class WeightQuantBatchMatmul(Primitive): r""" .. code-block:: prim = ops.WeightQuantBatchMatmul(transpose_x, transpose_weight, antiquant_group_size) out = prim(x, weight, antiquant_scale, antiquant_offset, quant_scale, quant_offset, bias) is equivalent to .. code-block:: ops.weight_quant_batch_matmul(x, weight, antiquant_scale, antiquant_offset, quant_scale, quant_offset, bias, transpose_x, transpose_weight, antiquant_group_size) Refer to :func:`mindspore.ops.weight_quant_batch_matmul` for more details. """ __mindspore_signature__ = ( sig.make_sig('x'), sig.make_sig('weight'), sig.make_sig('antiquant_scale'), sig.make_sig('antiquant_offset', default=None), sig.make_sig('quant_scale', default=None), sig.make_sig('quant_offset', default=None), sig.make_sig('bias', default=None), ) @prim_arg_register def __init__(self, transpose_x=False, transpose_weight=False, antiquant_group_size=0): self._set_prim_arg("transpose_x", transpose_x) self._set_prim_arg("transpose_weight", transpose_weight) self._set_prim_arg("antiquant_group_size", antiquant_group_size) def __call__(self, x, weight, antiquant_scale, antiquant_offset=None, quant_scale=None, quant_offset=None, bias=None): return _convert_stub(pyboost_weight_quant_batch_matmul(self, [x, weight, antiquant_scale, antiquant_offset, quant_scale, quant_offset, bias, self.transpose_x, self.transpose_weight, self.antiquant_group_size]))
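# An assumption-heavy sketch (WeightQuantBatchMatmul carries no Examples block; the dtypes and shapes
# below are illustrative guesses, not a verified recipe): a float16 activation is multiplied by an
# int8 weight that is de-quantized on the fly using per-output-channel antiquant parameters.
# >>> import numpy as np
# >>> from mindspore import Tensor
# >>> x = Tensor(np.random.randn(4, 16).astype(np.float16))
# >>> w = Tensor(np.random.randint(-8, 8, (16, 32)).astype(np.int8))
# >>> aq_scale = Tensor(np.ones((32,)).astype(np.float16))
# >>> wqbmm = WeightQuantBatchMatmul(transpose_x=False, transpose_weight=False)
# >>> out = wqbmm(x, w, aq_scale)
# >>> print(out.shape)
# (4, 32)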