# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Operators definition generated by gen_ops.py, includes primitive classes."""
from mindspore.ops.primitive import Primitive, prim_arg_register
from mindspore.ops import signature as sig
from mindspore.common import dtype as mstype
from mindspore.common._decorator import deprecated
from mindspore.ops._primitive_cache import _get_cache_prim
from mindspore.ops._utils.arg_dtype_cast import type_it
from mindspore.ops._utils.arg_handler import *
from mindspore._c_expression import OpDtype
from mindspore.common._stub_tensor import _convert_stub
from mindspore.common.jit_context import jit_context
from mindspore._checkparam import is_stub_tensor
from mindspore._c_expression import pyboost_select_v2
from mindspore._c_expression import pyboost_atan_ext
from mindspore._c_expression import pyboost_argmin_with_value
from mindspore._c_expression import pyboost_adaptive_avg_pool2d_ext
from mindspore._c_expression import pyboost_log_softmax
from mindspore._c_expression import pyboost_embedding_dense_backward
from mindspore._c_expression import pyboost_max
from mindspore._c_expression import pyboost_elu_ext
from mindspore._c_expression import pyboost_pow_tensor_scalar
from mindspore._c_expression import pyboost_exp
from mindspore._c_expression import pyboost_matmul_ext
from mindspore._c_expression import pyboost_moe_token_unpermute
from mindspore._c_expression import pyboost_convolution_grad
from mindspore._c_expression import pyboost_softplus_ext
from mindspore._c_expression import pyboost_rms_norm_grad
from mindspore._c_expression import pyboost_reduce_max
from mindspore._c_expression import pyboost_dist_comm_barrier
from mindspore._c_expression import pyboost_index_fill_scalar
from mindspore._c_expression import pyboost_eye
from mindspore._c_expression import pyboost_min
from mindspore._c_expression import pyboost_not_equal
from mindspore._c_expression import pyboost_gather_d_grad_v2
from mindspore._c_expression import pyboost_inplace_masked_fill_tensor
from mindspore._c_expression import pyboost_batch_norm_grad_ext
from mindspore._c_expression import pyboost_add_layer_norm_grad
from mindspore._c_expression import pyboost_roll
from mindspore._c_expression import pyboost_hswish_grad
from mindspore._c_expression import pyboost_dropout_ext
from mindspore._c_expression import pyboost_gcd
from mindspore._c_expression import pyboost_inplace_normal
from mindspore._c_expression import pyboost_type_as
from mindspore._c_expression import pyboost_upsample_nearest3d
from mindspore._c_expression import pyboost_logical_xor
from mindspore._c_expression import pyboost_remainder_tensor_scalar
from mindspore._c_expression import pyboost_dist_comm_broadcast
from mindspore._c_expression import pyboost_inplace_fill_tensor
from mindspore._c_expression import pyboost_unique_consecutive
from mindspore._c_expression import pyboost_tan
from mindspore._c_expression import pyboost_inplace_scatter_value
from mindspore._c_expression import pyboost_inplace_relu
from mindspore._c_expression import pyboost_softplus_grad_ext
from mindspore._c_expression import pyboost_expand_dims
from mindspore._c_expression import pyboost_logical_not
from mindspore._c_expression import pyboost_batch_norm_reduce_grad
from mindspore._c_expression import pyboost_reshape
from mindspore._c_expression import pyboost_l1_loss_backward_ext
from mindspore._c_expression import pyboost_dist_comm_isend
from mindspore._c_expression import pyboost_batch_norm_ext
from mindspore._c_expression import pyboost_polar
from mindspore._c_expression import pyboost_acosh_ext
from mindspore._c_expression import pyboost_upsample_linear1d
from mindspore._c_expression import pyboost_upsample_nearest1d_grad
from mindspore._c_expression import pyboost_xlogy
from mindspore._c_expression import pyboost_scatter_value
from mindspore._c_expression import pyboost_max_dim
from mindspore._c_expression import pyboost_conv3d_padding
from mindspore._c_expression import pyboost_broadcast_to
from mindspore._c_expression import pyboost_fill_tensor
from mindspore._c_expression import pyboost_pow_scalar_tensor
from mindspore._c_expression import pyboost_floor_div_scalar
from mindspore._c_expression import pyboost_ones_like_ext
from mindspore._c_expression import pyboost_adaptive_avg_pool3d_ext
from mindspore._c_expression import pyboost_reflection_pad_1d_grad
from mindspore._c_expression import pyboost_asinh_ext
from mindspore._c_expression import pyboost_abs
from mindspore._c_expression import pyboost_rand_like_ext
from mindspore._c_expression import pyboost_minimum
from mindspore._c_expression import pyboost_inplace_hardtanh
from mindspore._c_expression import pyboost_log
from mindspore._c_expression import pyboost_upsample_bilinear2d
from mindspore._c_expression import pyboost_frac
from mindspore._c_expression import pyboost_max_pool_with_indices
from mindspore._c_expression import pyboost_pow
from mindspore._c_expression import pyboost_bitwise_and_scalar
from mindspore._c_expression import pyboost_mv
from mindspore._c_expression import pyboost_divmod
from mindspore._c_expression import pyboost_inplace_divmod
from mindspore._c_expression import pyboost_inner_comm_all_reduce
from mindspore._c_expression import pyboost_gelu_grad
from mindspore._c_expression import pyboost_bitwise_or_scalar
from mindspore._c_expression import pyboost_soft_margin_loss
from mindspore._c_expression import pyboost_ffn_ext
from mindspore._c_expression import pyboost_inplace_fill_scalar
from mindspore._c_expression import pyboost_log10
from mindspore._c_expression import pyboost_inner_non_zero
from mindspore._c_expression import pyboost_dist_comm_all_gather_into_tensor
from mindspore._c_expression import pyboost_new_ones
from mindspore._c_expression import pyboost_contiguous
from mindspore._c_expression import pyboost_inplace_elu
from mindspore._c_expression import pyboost_equal
from mindspore._c_expression import pyboost_histc_ext
from mindspore._c_expression import pyboost_threshold
from mindspore._c_expression import pyboost_adamw
from mindspore._c_expression import pyboost_erf
from mindspore._c_expression import pyboost_randint_like
from mindspore._c_expression import pyboost_atan2_ext
from mindspore._c_expression import pyboost_avg_pool3d_ext
from mindspore._c_expression import pyboost_smooth_l1_loss
from mindspore._c_expression import pyboost_inplace_floor
from mindspore._c_expression import pyboost_multinomial_ext
from mindspore._c_expression import pyboost_split
from mindspore._c_expression import pyboost_bitwise_xor_tensor
from mindspore._c_expression import pyboost_matmul
from mindspore._c_expression import pyboost_avg_pool2d_grad
from mindspore._c_expression import pyboost_select_ext
from mindspore._c_expression import pyboost_mse_loss_grad_ext
from mindspore._c_expression import pyboost_rsqrt
from mindspore._c_expression import pyboost_concat
from mindspore._c_expression import pyboost_logaddexp
from mindspore._c_expression import pyboost_std_mean
from mindspore._c_expression import pyboost_fmod_scalar
from mindspore._c_expression import pyboost_adaptive_max_pool2d
from mindspore._c_expression import pyboost_randn_like
from mindspore._c_expression import pyboost_inner_inplace_index_put
from mindspore._c_expression import pyboost_col2im_ext
from mindspore._c_expression import pyboost_silu_grad
from mindspore._c_expression import pyboost_normal_tensor_float
from mindspore._c_expression import pyboost_layer_norm_ext
from mindspore._c_expression import pyboost_prod_ext
from mindspore._c_expression import pyboost_inplace_div
from mindspore._c_expression import pyboost_add
from mindspore._c_expression import pyboost_log_softmax_grad
from mindspore._c_expression import pyboost_inner_comm_irecv
from mindspore._c_expression import pyboost_conv1d_padding
from mindspore._c_expression import pyboost_grid_sampler_3d_grad
from mindspore._c_expression import pyboost_sort_ext
from mindspore._c_expression import pyboost_fill_scalar
from mindspore._c_expression import pyboost_greater
from mindspore._c_expression import pyboost_slice
from mindspore._c_expression import pyboost_speed_fusion_attention_grad
from mindspore._c_expression import pyboost_kl_div_grad
from mindspore._c_expression import pyboost_inplace_scatter_src
from mindspore._c_expression import pyboost_silent_check_v3
from mindspore._c_expression import pyboost_binary_cross_entropy_with_logits_backward
from mindspore._c_expression import pyboost_randint
from mindspore._c_expression import pyboost_conv3d_ext
from mindspore._c_expression import pyboost_im2col_ext
from mindspore._c_expression import pyboost_linalg_qr
from mindspore._c_expression import pyboost_log1p
from mindspore._c_expression import pyboost_replication_pad_1d
from mindspore._c_expression import pyboost_slice_ext
from mindspore._c_expression import pyboost_cummax
from mindspore._c_expression import pyboost_inplace_mul
from mindspore._c_expression import pyboost_lin_space_ext
from mindspore._c_expression import pyboost_convolution
from mindspore._c_expression import pyboost_inplace_index_put
from mindspore._c_expression import pyboost_cast
from mindspore._c_expression import pyboost_flatten_ext
from mindspore._c_expression import pyboost_softshrink_grad
from mindspore._c_expression import pyboost_inplace_copy
from mindspore._c_expression import pyboost_maximum
from mindspore._c_expression import pyboost_inplace_tanh
from mindspore._c_expression import pyboost_avg_pool3d_grad_ext
from mindspore._c_expression import pyboost_meshgrid
from mindspore._c_expression import pyboost_nan_to_num
from mindspore._c_expression import pyboost_cumsum_ext
from mindspore._c_expression import pyboost_sqrt
from mindspore._c_expression import pyboost_xlogy_scalar_self
from mindspore._c_expression import pyboost_floor_div
from mindspore._c_expression import pyboost_masked_select_grad
from mindspore._c_expression import pyboost_leaky_relu_grad_ext
from mindspore._c_expression import pyboost_exp2
from mindspore._c_expression import pyboost_inplace_index_add
from mindspore._c_expression import pyboost_cos
from mindspore._c_expression import pyboost_mean_ext
from mindspore._c_expression import pyboost_smooth_l1_loss_grad
from mindspore._c_expression import pyboost_generator
from mindspore._c_expression import pyboost_selu_ext
from mindspore._c_expression import pyboost_count_nonzero
from mindspore._c_expression import pyboost_divmods
from mindspore._c_expression import pyboost_var
from mindspore._c_expression import pyboost_group_norm
from mindspore._c_expression import pyboost_cross
from mindspore._c_expression import pyboost_zeros
from mindspore._c_expression import pyboost_convolution_str_grad
from mindspore._c_expression import pyboost_max_pool_grad_with_indices
from mindspore._c_expression import pyboost_upsample_trilinear3d_grad
from mindspore._c_expression import pyboost_mul
from mindspore._c_expression import pyboost_sub_ext
from mindspore._c_expression import pyboost_dist_comm_gather_into_tensor
from mindspore._c_expression import pyboost_reduce_all
from mindspore._c_expression import pyboost_view_as
from mindspore._c_expression import pyboost_matrix_inverse_ext
from mindspore._c_expression import pyboost_inplace_clamp_tensor
from mindspore._c_expression import pyboost_dropout_gen_mask_ext
from mindspore._c_expression import pyboost_replication_pad_2d
from mindspore._c_expression import pyboost_prelu
from mindspore._c_expression import pyboost_logical_or
from mindspore._c_expression import pyboost_logical_and
from mindspore._c_expression import pyboost_moe_token_unpermute_grad
from mindspore._c_expression import pyboost_reduce_min
from mindspore._c_expression import pyboost_conv2d_padding
from mindspore._c_expression import pyboost_ceil
from mindspore._c_expression import pyboost_square
from mindspore._c_expression import pyboost_index
from mindspore._c_expression import pyboost_replication_pad_3d
from mindspore._c_expression import pyboost_batch_norm_elemt_grad
from mindspore._c_expression import pyboost_layer_norm_grad_ext
from mindspore._c_expression import pyboost_unique_dim
from mindspore._c_expression import pyboost_sinh
from mindspore._c_expression import pyboost_addbmm
from mindspore._c_expression import pyboost_greater_equal_scalar
from mindspore._c_expression import pyboost_argsort
from mindspore._c_expression import pyboost_fmod_tensor
from mindspore._c_expression import pyboost_bitwise_and_tensor
from mindspore._c_expression import pyboost_add_layernorm_v2
from mindspore._c_expression import pyboost_l1_loss_ext
from mindspore._c_expression import pyboost_dist_comm_all_gather
from mindspore._c_expression import pyboost_adaptive_avg_pool2d_grad_ext
from mindspore._c_expression import pyboost_selu_grad
from mindspore._c_expression import pyboost_cosh
from mindspore._c_expression import pyboost_uniform_ext
from mindspore._c_expression import pyboost_inplace_exp
from mindspore._c_expression import pyboost_tanh_grad
from mindspore._c_expression import pyboost_median_ext
from mindspore._c_expression import pyboost_all_finite
from mindspore._c_expression import pyboost_prelu_grad
from mindspore._c_expression import pyboost_dist_comm_scatter
from mindspore._c_expression import pyboost_rand_ext
from mindspore._c_expression import pyboost_inplace_scatter_add
from mindspore._c_expression import pyboost_elu_grad_ext
from mindspore._c_expression import pyboost_randperm_ext
from mindspore._c_expression import pyboost_hswish
from mindspore._c_expression import pyboost_inplace_erfinv
from mindspore._c_expression import pyboost_bincount_ext
from mindspore._c_expression import pyboost_transpose
from mindspore._c_expression import pyboost_atanh
from mindspore._c_expression import pyboost_reflection_pad_3d_grad
from mindspore._c_expression import pyboost_index_add_ext
from mindspore._c_expression import pyboost_less
from mindspore._c_expression import pyboost_isfinite
from mindspore._c_expression import pyboost_index_select
from mindspore._c_expression import pyboost_logaddexp2
from mindspore._c_expression import pyboost_max_unpool2d_ext
from mindspore._c_expression import pyboost_group_norm_grad
from mindspore._c_expression import pyboost_cummin_ext
from mindspore._c_expression import pyboost_as_strided
from mindspore._c_expression import pyboost_reflection_pad_3d
from mindspore._c_expression import pyboost_asin_ext
from mindspore._c_expression import pyboost_ne_scalar
from mindspore._c_expression import pyboost_masked_select
from mindspore._c_expression import pyboost_dist_comm_all_to_all_v
from mindspore._c_expression import pyboost_binary_cross_entropy_with_logits
from mindspore._c_expression import pyboost_rotary_position_embedding
from mindspore._c_expression import pyboost_searchsorted
from mindspore._c_expression import pyboost_avg_pool1d
from mindspore._c_expression import pyboost_inplace_divs
from mindspore._c_expression import pyboost_inplace_stop_gradient
from mindspore._c_expression import pyboost_glu_grad
from mindspore._c_expression import pyboost_bernoulli_ext
from mindspore._c_expression import pyboost_inplace_scatter_value_reduce
from mindspore._c_expression import pyboost_dist_comm_reduce_scatter
from mindspore._c_expression import pyboost_median_dim
from mindspore._c_expression import pyboost_flash_attention_score
from mindspore._c_expression import pyboost_add_rms_norm
from mindspore._c_expression import pyboost_sinc
from mindspore._c_expression import pyboost_hardtanh_grad
from mindspore._c_expression import pyboost_max_pool_grad_with_mask
from mindspore._c_expression import pyboost_narrow
from mindspore._c_expression import pyboost_gmm_backward
from mindspore._c_expression import pyboost_erfc
from mindspore._c_expression import pyboost_topk_ext
from mindspore._c_expression import pyboost_bitwise_xor_scalar
from mindspore._c_expression import pyboost_dropout_do_mask_ext
from mindspore._c_expression import pyboost_upsample_trilinear3d
from mindspore._c_expression import pyboost_softshrink
from mindspore._c_expression import pyboost_lerp_scalar
from mindspore._c_expression import pyboost_avg_pool2d
from mindspore._c_expression import pyboost_add_ext
from mindspore._c_expression import pyboost_trunc
from mindspore._c_expression import pyboost_remainder_scalar_tensor
from mindspore._c_expression import pyboost_std
from mindspore._c_expression import pyboost_inplace_addmm
from mindspore._c_expression import pyboost_dropout_grad_ext
from mindspore._c_expression import pyboost_baddbmm
from mindspore._c_expression import pyboost_nllloss_2d_grad
from mindspore._c_expression import pyboost_mish_ext
from mindspore._c_expression import pyboost_binary_cross_entropy
from mindspore._c_expression import pyboost_reduce_any
from mindspore._c_expression import pyboost_reverse_v2
from mindspore._c_expression import pyboost_sign
from mindspore._c_expression import pyboost_acos_ext
from mindspore._c_expression import pyboost_inplace_muls
from mindspore._c_expression import pyboost_dist_comm_reduce
from mindspore._c_expression import pyboost_normal_tensor_tensor
from mindspore._c_expression import pyboost_upsample_nearest2d_grad
from mindspore._c_expression import pyboost_batch_norm_elemt
from mindspore._c_expression import pyboost_nllloss_2d
from mindspore._c_expression import pyboost_clamp_scalar
from mindspore._c_expression import pyboost_divs
from mindspore._c_expression import pyboost_hsigmoid
from mindspore._c_expression import pyboost_bitwise_not
from mindspore._c_expression import pyboost_threshold_grad
from mindspore._c_expression import pyboost_split_tensor
from mindspore._c_expression import pyboost_stack_ext
from mindspore._c_expression import pyboost_randn
from mindspore._c_expression import pyboost_repeat_interleave_int
from mindspore._c_expression import pyboost_replication_pad_3d_grad
from mindspore._c_expression import pyboost_triangular_solve
from mindspore._c_expression import pyboost_silu
from mindspore._c_expression import pyboost_argmin_ext
from mindspore._c_expression import pyboost_scatter
from mindspore._c_expression import pyboost_upsample_linear1d_grad
from mindspore._c_expression import pyboost_dist_comm_gather
from mindspore._c_expression import pyboost_neg
from mindspore._c_expression import pyboost_sigmoid_grad
from mindspore._c_expression import pyboost_conv1d_ext
from mindspore._c_expression import pyboost_embedding
from mindspore._c_expression import pyboost_flash_attention_score_grad
from mindspore._c_expression import pyboost_mse_loss_ext
from mindspore._c_expression import pyboost_logsigmoid
from mindspore._c_expression import pyboost_split_with_size
from mindspore._c_expression import pyboost_triu
from mindspore._c_expression import pyboost_normal_float_float
from mindspore._c_expression import pyboost_multi_scale_deformable_attn
from mindspore._c_expression import pyboost_mish_grad_ext
from mindspore._c_expression import pyboost_reflection_pad_1d
from mindspore._c_expression import pyboost_all_gather_matmul
from mindspore._c_expression import pyboost_sub_scalar
from mindspore._c_expression import pyboost_adaptive_avg_pool1d
from mindspore._c_expression import pyboost_equal_ext
from mindspore._c_expression import pyboost_convolution_str
from mindspore._c_expression import pyboost_log_softmax_ext
from mindspore._c_expression import pyboost_inplace_floor_divides
from mindspore._c_expression import pyboost_inner_comm_isend
from mindspore._c_expression import pyboost_inplace_fill_diagonal
from mindspore._c_expression import pyboost_leaky_relu_ext
from mindspore._c_expression import pyboost_hshrink
from mindspore._c_expression import pyboost_swiglu_grad
from mindspore._c_expression import pyboost_diag_ext
from mindspore._c_expression import pyboost_inner_comm_all_to_all_v
from mindspore._c_expression import pyboost_logsigmoid_grad
from mindspore._c_expression import pyboost_expm1
from mindspore._c_expression import pyboost_swiglu
from mindspore._c_expression import pyboost_round
from mindspore._c_expression import pyboost_reflection_pad_2d_grad
from mindspore._c_expression import pyboost_tile
from mindspore._c_expression import pyboost_erfinv
from mindspore._c_expression import pyboost_tanh
from mindspore._c_expression import pyboost_addmv
from mindspore._c_expression import pyboost_logsumexp
from mindspore._c_expression import pyboost_unique2
from mindspore._c_expression import pyboost_log2
from mindspore._c_expression import pyboost_squeeze
from mindspore._c_expression import pyboost_binary_cross_entropy_grad
from mindspore._c_expression import pyboost_unstack_ext
from mindspore._c_expression import pyboost_less_equal
from mindspore._c_expression import pyboost_dist_comm_all_to_all_v_single
from mindspore._c_expression import pyboost_dist_comm_batch_isend_irecv
from mindspore._c_expression import pyboost_gather_d
from mindspore._c_expression import pyboost_div
from mindspore._c_expression import pyboost_add_scalar
from mindspore._c_expression import pyboost_speed_fusion_attention
from mindspore._c_expression import pyboost_argmax_with_value
from mindspore._c_expression import pyboost_softmax_backward
from mindspore._c_expression import pyboost_full_like
from mindspore._c_expression import pyboost_relu
from mindspore._c_expression import pyboost_inner_comm_reduce_scatter
from mindspore._c_expression import pyboost_xlogy_scalar_other
from mindspore._c_expression import pyboost_sub
from mindspore._c_expression import pyboost_chunk
from mindspore._c_expression import pyboost_floor
from mindspore._c_expression import pyboost_reflection_pad_2d
from mindspore._c_expression import pyboost_silent_check_v2
from mindspore._c_expression import pyboost_greater_equal
from mindspore._c_expression import pyboost_var_mean
from mindspore._c_expression import pyboost_isneginf
from mindspore._c_expression import pyboost_dist_comm_irecv
from mindspore._c_expression import pyboost_mm_ext
from mindspore._c_expression import pyboost_conv2d_ext
from mindspore._c_expression import pyboost_sin
from mindspore._c_expression import pyboost_multi_scale_deformable_attn_grad
from mindspore._c_expression import pyboost_inplace_random
from mindspore._c_expression import pyboost_non_zero
from mindspore._c_expression import pyboost_inplace_zero
from mindspore._c_expression import pyboost_relu_grad
from mindspore._c_expression import pyboost_batch_norm_gather_stats_with_counts
from mindspore._c_expression import pyboost_hardtanh
from mindspore._c_expression import pyboost_inplace_divmods
from mindspore._c_expression import pyboost_sigmoid
from mindspore._c_expression import pyboost_isclose
from mindspore._c_expression import pyboost_dist_comm_all_reduce
from mindspore._c_expression import pyboost_rms_norm
from mindspore._c_expression import pyboost_grid_sampler_3d
from mindspore._c_expression import pyboost_inner_strided_slice
from mindspore._c_expression import pyboost_inplace_put
from mindspore._c_expression import pyboost_nllloss
from mindspore._c_expression import pyboost_t_ext
from mindspore._c_expression import pyboost_bitwise_or_tensor
from mindspore._c_expression import pyboost_new_zeros
from mindspore._c_expression import pyboost_upsample_nearest1d
from mindspore._c_expression import pyboost_matmul_reduce_scatter
from mindspore._c_expression import pyboost_inplace_add_ext
from mindspore._c_expression import pyboost_softmax
from mindspore._c_expression import pyboost_upsample_bicubic2d
from mindspore._c_expression import pyboost_inplace_uniform
from mindspore._c_expression import pyboost_inner_index
from mindspore._c_expression import pyboost_col2im_grad
from mindspore._c_expression import pyboost_adaptive_max_pool1d
from mindspore._c_expression import pyboost_clamp_tensor
from mindspore._c_expression import pyboost_tril_ext
from mindspore._c_expression import pyboost_isinf
from mindspore._c_expression import pyboost_normal_float_tensor
from mindspore._c_expression import pyboost_bmm_ext
from mindspore._c_expression import pyboost_incre_flash_attention
from mindspore._c_expression import pyboost_inplace_masked_fill_scalar
from mindspore._c_expression import pyboost_replication_pad_1d_grad
from mindspore._c_expression import pyboost_reciprocal
from mindspore._c_expression import pyboost_non_zero_ext
from mindspore._c_expression import pyboost_grid_sampler_2d
from mindspore._c_expression import pyboost_expand_as
from mindspore._c_expression import pyboost_inplace_sub_scalar
from mindspore._c_expression import pyboost_repeat_interleave_tensor
from mindspore._c_expression import pyboost_inplace_sub_ext
from mindspore._c_expression import pyboost_upsample_nearest2d
from mindspore._c_expression import pyboost_ones
from mindspore._c_expression import pyboost_batch_norm_stats
from mindspore._c_expression import pyboost_trace_ext
from mindspore._c_expression import pyboost_inplace_threshold
from mindspore._c_expression import pyboost_grid_sampler_2d_grad
from mindspore._c_expression import pyboost_conv_transpose2d
from mindspore._c_expression import pyboost_norm
from mindspore._c_expression import pyboost_muls
from mindspore._c_expression import pyboost_dot
from mindspore._c_expression import pyboost_hsigmoid_grad
from mindspore._c_expression import pyboost_remainder_tensor_tensor
from mindspore._c_expression import pyboost_copy
from mindspore._c_expression import pyboost_index_fill_tensor
from mindspore._c_expression import pyboost_inner_comm_all_gather
from mindspore._c_expression import pyboost_sum_ext
from mindspore._c_expression import pyboost_nllloss_grad
from mindspore._c_expression import pyboost_inplace_clamp_scalar
from mindspore._c_expression import pyboost_hshrink_grad
from mindspore._c_expression import pyboost_dist_comm_reduce_scatter_tensor
from mindspore._c_expression import pyboost_adaptive_avg_pool3d_grad_ext
from mindspore._c_expression import pyboost_inplace_adds_ext
from mindspore._c_expression import pyboost_linalg_vector_norm
from mindspore._c_expression import pyboost_lerp
from mindspore._c_expression import pyboost_nansum
from mindspore._c_expression import pyboost_inplace_floor_divide
from mindspore._c_expression import pyboost_upsample_nearest3d_grad
from mindspore._c_expression import pyboost_batch_mat_mul
from mindspore._c_expression import pyboost_upsample_bicubic2d_grad
from mindspore._c_expression import pyboost_one_hot_ext
from mindspore._c_expression import pyboost_upsample_bilinear2d_grad
from mindspore._c_expression import pyboost_dense
from mindspore._c_expression import pyboost_zeros_like_ext
from mindspore._c_expression import pyboost_constant_pad_nd
from mindspore._c_expression import pyboost_arange
from mindspore._c_expression import pyboost_gmm_v2_backward
from mindspore._c_expression import pyboost_glu
from mindspore._c_expression import pyboost_transpose_ext
from mindspore._c_expression import pyboost_max_pool_with_mask
from mindspore._c_expression import pyboost_clone
from mindspore._c_expression import pyboost_min_dim
from mindspore._c_expression import pyboost_select
from mindspore._c_expression import pyboost_inplace_scatter_src_reduce
from mindspore._c_expression import pyboost_gelu
from mindspore._c_expression import pyboost_outer
from mindspore._c_expression import pyboost_replication_pad_2d_grad
from mindspore._c_expression import pyboost_scatter_add_ext
from mindspore._c_expression import pyboost_masked_fill
from mindspore._c_expression import pyboost_rotary_position_embedding_grad
from mindspore._c_expression import pyboost_soft_margin_loss_grad
from mindspore._c_expression import pyboost_prompt_flash_attention
from mindspore._c_expression import pyboost_take
from mindspore._c_expression import pyboost_kl_div
from mindspore._c_expression import pyboost_repeat_interleave_grad
from mindspore._c_expression import pyboost_addmm
from mindspore._c_expression import pyboost_dist_comm_scatter_tensor
from mindspore._c_expression import pyboost_argmax_ext
from mindspore._c_expression import pyboost_grouped_matmul
from mindspore._c_expression import pyboost_quant_batch_matmul
from mindspore._c_expression import pyboost_grouped_matmul_v4
from mindspore._c_expression import pyboost_quant_v2
from mindspore._c_expression import pyboost_grouped_matmul_v2
from mindspore._c_expression import pyboost_add_rmsnorm_quant_v2
from mindspore._c_expression import pyboost_weight_quant_batch_matmul
from mindspore._c_expression import pyboost_matmul_allreduce_add_rmsnorm
from mindspore._c_expression import pyboost_dynamic_quant_ext
from mindspore._c_expression import pyboost_moe_init_routing
from mindspore._c_expression import pyboost_moe_gating_top_k_softmax
from mindspore._c_expression import pyboost_moe_finalize_routing
from mindspore._c_expression import pyboost_kv_cache_scatter_update
from mindspore._c_expression import pyboost_moe_compute_expert_tokens
from mindspore._c_expression import pyboost_fused_infer_attention_score
class SelectV2(Primitive):
r"""
.. code-block::
prim = ops.SelectV2()
out = prim(condition, input, other)
is equivalent to
.. code-block::
ops.select_v2(condition, input, other)
Refer to :func:`mindspore.ops.select_v2` for more details.
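Examples:
A minimal usage sketch (illustrative only; dtype/shape support follows :func:`mindspore.ops.select_v2`):
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> cond = Tensor(np.array([True, False, True]))
>>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> y = Tensor(np.array([10, 20, 30]), mindspore.int32)
>>> out = ops.SelectV2()(cond, x, y)  # expected [1, 20, 3]: x where cond is True, y elsewhere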
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, condition, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_select_v2(self, [condition, input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, condition, input, other)
return res
select_v2_op=SelectV2()
class Diag(Primitive):
r"""
.. code-block::
prim = ops.Diag()
out = prim(input)
is equivalent to
.. code-block::
ops.diag(input)
Refer to :func:`mindspore.ops.diag` for more details.
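Examples:
A minimal usage sketch (illustrative only; see :func:`mindspore.ops.diag` for the full contract):
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> out = ops.Diag()(x)  # expected a 3x3 tensor with [1, 2, 3] on the main diagonal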
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
diag_op=Diag()
class AtanExt(Primitive):
r"""
.. code-block::
prim = ops.AtanExt()
out = prim(input)
is equivalent to
.. code-block::
ops.atan_ext(input)
Refer to :func:`mindspore.ops.atan_ext` for more details.
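Examples:
A minimal usage sketch (illustrative only; behavior follows :func:`mindspore.ops.atan_ext`):
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([0.0, 1.0]), mindspore.float32)
>>> out = ops.AtanExt()(x)  # element-wise arctangent, roughly [0.0, 0.7854]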
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_atan_ext(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
atan_ext_op=AtanExt()
class ArgMinWithValue(Primitive):
r"""
Calculates the minimum value along with the given axis for the input tensor, and returns the minimum values and
indices.
Note:
In auto_parallel and semi_auto_parallel mode, the first output index can not be used.
.. warning::
- If there are multiple minimum values, the index of the first minimum value is used.
- The value range of `axis` is [-dims, dims - 1]. "dims" is the dimension length of `input`.
Also see :func:`mindspore.ops.min`.
Args:
axis (int): The dimension to reduce. Default: ``0`` .
keep_dims (bool): Whether to keep the reduced dimension. If ``True``, the output keeps the same number of
dimensions as the input; if ``False``, the reduced dimension is removed. Default: ``False`` .
Inputs:
- **input** (Tensor) - The input tensor, which can be of any dimension. Set the shape of the input tensor as
:math:`(input_1, input_2, ..., input_N)` . Complex tensors are not supported.
Outputs:
tuple (Tensor), tuple of 2 tensors, containing the corresponding index and the minimum value of the input
tensor.
- **index** (Tensor) - The index for the minimum value of the input tensor, with dtype int64. If `keep_dims`
is ``True`` , the shape of output tensors is :math:`(input_1, input_2, ..., input_{axis-1}, 1, input_{axis+1}, ..., input_N)`.
Otherwise, the shape is :math:`(input_1, input_2, ..., input_{axis-1}, input_{axis+1}, ..., input_N)` .
- **values** (Tensor) - The minimum value of input tensor, with the same
shape as `index`, and same dtype as `input`.
Raises:
TypeError: If `input` is not Tensor.
TypeError: If `keep_dims` is not a bool.
TypeError: If `axis` is not an int.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
>>> index, output = ops.ArgMinWithValue()(x)
>>> print(index, output)
0 0.0
>>> index, output = ops.ArgMinWithValue(keep_dims=True)(x)
>>> print(index, output)
[0] [0.0]
"""
@prim_arg_register
def __init__(self, axis=0, keep_dims=False):
self._set_prim_arg("axis", axis)
self._set_prim_arg("keep_dims", keep_dims)
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_argmin_with_value(self, [input, self.axis, self.keep_dims]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, self.axis, self.keep_dims)
return res
class AdaptiveAvgPool2DExt(Primitive):
r"""
Performs 2D adaptive average pooling on a multi-plane input signal.
That is, for any input size, the size of the specified output is H x W.
The number of output features is equal to the number of input features.
The input and output data format can be "NCHW" and "CHW". N is the batch size, C is the number of channels,
H is the feature height, and W is the feature width.
For adaptive average pooling for 2D:
.. math::
\begin{align}
h_{start} &= floor(i * H_{in} / H_{out})\\
h_{end} &= ceil((i + 1) * H_{in} / H_{out})\\
w_{start} &= floor(j * W_{in} / W_{out})\\
w_{end} &= ceil((j + 1) * W_{in} / W_{out})\\
Output(i,j) &= \frac{\sum Input[h_{start}:h_{end}, w_{start}:w_{end}]}{(h_{end}- h_{start})
* (w_{end}- w_{start})}
\end{align}
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
input (Tensor): The input of adaptive_avg_pool2d, which is a 3D or 4D tensor,
with float16 or float32 data type.
output_size (Union[int, tuple]): The target output size. `output_size` can be a tuple :math:`(H, W)`,
or an int H for :math:`(H, H)`. :math:`H` and :math:`W` can be int or None.
If it is None, it means the output size is the same as the input size.
Returns:
Tensor, with the same type as the `input`.
Shape of the output is `input_shape[:len(input_shape) - len(out_shape)] + out_shape`.
.. math::
out\_shape = \begin{cases}
input\_shape[-2] + output\_size[1], & \text{if } output\_size \text{ is (None, w);}\\
output\_size[0] + input\_shape[-1], & \text{if } output\_size \text{ is (h, None);}\\
input\_shape[-2:], & \text{if } output\_size \text{ is (None, None);}\\
(h, h), & \text{if } output\_size \text{ is h;}\\
(h, w), & \text{if } output\_size \text{ is (h, w)}
\end{cases}
Raises:
ValueError: If `output_size` is a tuple and the length of `output_size` is not 2.
TypeError: If `input` is not a Tensor.
TypeError: If dtype of `input` is not float16, float32 or float64.
ValueError: If the dimension of `input` is less than or equal to the dimension of `output_size`.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, mint
>>> # case 1: output_size=(None, 2)
>>> input = Tensor(np.array([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],
... [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],
... [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]), mindspore.float32)
>>> output = mint.nn.functional.adaptive_avg_pool2d(input, (None, 2))
>>> print(output)
[[[1.5 2.5]
[4.5 5.5]
[7.5 8.5]]
[[1.5 2.5]
[4.5 5.5]
[7.5 8.5]]
[[1.5 2.5]
[4.5 5.5]
[7.5 8.5]]]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, output_size):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_adaptive_avg_pool2d_ext(self, [input, to_2d_paddings('AdaptiveAvgPool2DExt', 'output_size', output_size)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, to_2d_paddings('AdaptiveAvgPool2DExt', 'output_size', output_size))
return res
adaptive_avg_pool2d_ext_op=AdaptiveAvgPool2DExt()
class TraceV2(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('offset', default=0),
sig.make_sig('axis1', default=1),
sig.make_sig('axis2', default=0),
sig.make_sig('dtype', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, offset=0, axis1=1, axis2=0, dtype=None):
return super().__call__(input, offset, axis1, axis2, dtype if dtype is None else dtype_to_type_id('TraceV2', 'dtype', dtype))
trace_v2_op=TraceV2()
class LogSoftmax(Primitive):
r"""
.. code-block::
prim = ops.LogSoftmax(axis)
out = prim(logits)
is equivalent to
.. code-block::
ops.log_softmax(logits, axis)
Refer to :func:`mindspore.ops.log_softmax` for more details.
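Examples:
A minimal usage sketch (illustrative only; see :func:`mindspore.ops.log_softmax` for the full contract):
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> logits = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
>>> out = ops.LogSoftmax(axis=-1)(logits)  # roughly [-2.4076, -1.4076, -0.4076]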
"""
@prim_arg_register
def __init__(self, axis=-1):
self._set_prim_arg("axis", axis)
def __call__(self, logits):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_log_softmax(self, [logits, self.axis]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, logits, self.axis)
return res
class EmbeddingFeatureMappingExport(Primitive):
r"""
.. code-block::
prim = ops.EmbeddingFeatureMappingExport()
out = prim(file_path, table_name, global_step, values, embedding_dim, feature_id, offset_id)
is equivalent to
.. code-block::
ops.embedding_feature_mapping_export(file_path, table_name, global_step, values, embedding_dim, feature_id, offset_id)
Refer to :func:`mindspore.ops.embedding_feature_mapping_export` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, file_path, table_name, global_step, values, embedding_dim, feature_id, offset_id):
return super().__call__(file_path, table_name, global_step, values, embedding_dim, feature_id, offset_id)
embedding_feature_mapping_export_op=EmbeddingFeatureMappingExport()
class NPUGetFloatStatusV2(Primitive):
r"""
Get the flag for storage overflow status. This flag is located in a register at a
fixed address on the `Ascend` device, and overflow information is automatically
written to this register.
The flag is a one-dimensional Tensor with shape :math:`(8,)` and data type `mindspore.dtype.int32`.
If the value of the flag is zero, no overflow has occurred; otherwise, overflow has occurred.
When performing overflow detection on the network, you should first call `NPUClearFloatStatusV2` to
reset the register before the detection, and then call `NPUGetFloatStatusV2` to get the register
status after the network execution is completed.
Note:
- In order to avoid mis-optimization by the compiler, an additional input is added to
this operator. The input is defined as a Tensor with shape :math:`(8,)` and data type
`mindspore.dtype.int32`, and it has no actual meaning.
- Since this op lacks contextual dependencies with parameters in the network,
:class:`mindspore.ops.Depend` needs to be used to ensure order of execution.
Inputs:
- **input** Tensor, an additional input created to avoid compiler optimization, is specified as shape :math:`(8,)`,
data type is `mindspore.dtype.int32`, and has no actual meaning.
Usually use the output of `NPUClearFloatStatusV2`.
Outputs:
- **output** Tensor, shape and data type are the same as input. If all are zero, it means no overflow, otherwise, overflow.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore as ms
>>> import numpy as np
>>> from mindspore import ops, nn, Tensor
>>> from mindspore.ops import NPUGetFloatStatusV2, NPUClearFloatStatusV2
>>> class Net(nn.Cell):
... def __init__(self):
... super().__init__()
... self.clear_status = NPUClearFloatStatusV2()
... self.get_status = NPUGetFloatStatusV2()
... self.sub = ops.Sub()
... self.neg = ops.Neg()
... self.equal = ops.Equal()
... self.reduce_all = ops.ReduceAll(keep_dims=False)
... self.base = Tensor([0], dtype=ms.int32)
... self.logic_not = ops.LogicalNot()
...
... def construct(self, x):
... init = Tensor([0]*8, dtype=ms.int32)
... clear_status = self.clear_status(init)
... x = ops.depend(x, clear_status)
... res = self.sub(x, self.neg(x))
... init = ops.depend(init, res)
... get_status = self.get_status(init)
... flag = self.equal(self.base, get_status)
... overall_finite = self.reduce_all(flag)
... overflow = self.logic_not(overall_finite)
... return overflow
...
>>> value = 65504
>>> data = np.full((2, 3), value, dtype=np.float16)
>>> x = Tensor(data, dtype=ms.float16)
>>> net = Net()
>>> res = net(x)
>>> print(res)
True
>>> value = 10
>>> data = np.full((2, 3), value, dtype=np.float16)
>>> x = Tensor(data, dtype=ms.float16)
>>> net = Net()
>>> res = net(x)
>>> print(res)
False
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
npu_get_float_status_v2_op=NPUGetFloatStatusV2()
class EmbeddingDenseBackward(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('grad'),
sig.make_sig('indices'),
sig.make_sig('num_weights'),
sig.make_sig('padding_idx', default=None),
sig.make_sig('scale_grad_by_freq', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, grad, indices, num_weights, padding_idx=None, scale_grad_by_freq=False):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_embedding_dense_backward(self, [grad, indices, num_weights, padding_idx, scale_grad_by_freq]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, grad, indices, num_weights, padding_idx, scale_grad_by_freq)
return res
embedding_dense_backward_op=EmbeddingDenseBackward()
class AssignAdd(Primitive):
r"""
.. code-block::
prim = ops.AssignAdd()
out = prim(variable, value)
is equivalent to
.. code-block::
ops.assign_add(variable, value)
Refer to :func:`mindspore.ops.assign_add` for more details.
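Examples:
A minimal usage sketch (illustrative only; `variable` is expected to be a Parameter, as in :func:`mindspore.ops.assign_add`):
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, Parameter, ops
>>> variable = Parameter(Tensor(np.array([1, 2, 3]), mindspore.int32), name="variable")
>>> value = Tensor(np.array([4, 5, 6]), mindspore.int32)
>>> out = ops.AssignAdd()(variable, value)  # variable is updated in place to [5, 7, 9]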
"""
__mindspore_signature__ = (
sig.make_sig('variable', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('value', dtype=sig.sig_dtype.T),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, variable, value):
return super().__call__(variable, value)
assign_add_op=AssignAdd()
class EmbeddingApplySgd(Primitive):
r"""
.. code-block::
prim = ops.EmbeddingApplySgd()
out = prim(var_handle, lr, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num)
is equivalent to
.. code-block::
ops.embedding_apply_sgd(var_handle, lr, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num)
Refer to :func:`mindspore.ops.embedding_apply_sgd` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('var_handle'),
sig.make_sig('lr'),
sig.make_sig('grad'),
sig.make_sig('keys'),
sig.make_sig('global_step'),
sig.make_sig('embedding_dim'),
sig.make_sig('mask_zero', default=(0,)),
sig.make_sig('padding_key', default=(0,)),
sig.make_sig('padding_key_mask', default=(1,)),
sig.make_sig('completion_key', default=(0,)),
sig.make_sig('completion_key_mask', default=(1,)),
sig.make_sig('_embedding_dim', default=1),
sig.make_sig('_max_key_num', default=1),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("_process_node_engine_id", 'PS')
def __call__(self, var_handle, lr, grad, keys, global_step, embedding_dim, mask_zero=(0,), padding_key=(0,), padding_key_mask=(1,), completion_key=(0,), completion_key_mask=(1,), _embedding_dim=1, _max_key_num=1):
return super().__call__(var_handle, lr, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num)
embedding_apply_sgd_op=EmbeddingApplySgd()
class IRFFTN(Primitive):
r"""
.. code-block::
prim = ops.IRFFTN()
out = prim(input, s, dim, norm)
is equivalent to
.. code-block::
ops.irfftn(input, s, dim, norm)
Refer to :func:`mindspore.ops.irfftn` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('s', default=None),
sig.make_sig('dim', default=None),
sig.make_sig('norm', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, s=None, dim=None, norm=None):
return super().__call__(input, s, dim, norm if norm is None else str_to_enum('IRFFTN', 'norm', norm))
irfftn_op=IRFFTN()
class Max(Primitive):
r"""
.. code-block::
prim = ops.Max()
out = prim(input)
is equivalent to
.. code-block::
ops.max_(input)
Refer to :func:`mindspore.ops.max_` for more details.
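Examples:
A minimal usage sketch (illustrative only; assumes the all-element reduction behavior of :func:`mindspore.ops.max_`):
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([[1.0, 2.0], [3.0, 4.0]]), mindspore.float32)
>>> out = ops.Max()(x)  # expected a scalar tensor holding 4.0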
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_max(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
max_op=Max()
class EluExt(Primitive):
r"""
.. code-block::
prim = ops.EluExt(alpha)
out = prim(input)
is equivalent to
.. code-block::
ops.elu_ext(input, alpha)
Refer to :func:`mindspore.ops.elu_ext` for more details.
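Examples:
A minimal usage sketch (illustrative only; ELU semantics follow :func:`mindspore.ops.elu_ext`):
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([-1.0, 0.0, 1.0]), mindspore.float32)
>>> out = ops.EluExt(alpha=1.0)(x)  # roughly [-0.6321, 0.0, 1.0]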
"""
@prim_arg_register
def __init__(self, alpha=1.0):
self._set_prim_arg("alpha", alpha)
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_elu_ext(self, [input, self.alpha]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, self.alpha)
return res
class FastGeLUGrad(Primitive):
r"""
Gradients of FastGeLU operation.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, dy, x):
return super().__call__(dy, x)
fast_gelu_grad_op=FastGeLUGrad()
class Asinh(Primitive):
r"""
.. code-block::
prim = ops.Asinh()
out = prim(input)
is equivalent to
.. code-block::
ops.asinh(input)
Refer to :func:`mindspore.ops.asinh` for more details.
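Examples:
A minimal usage sketch (illustrative only; behavior follows :func:`mindspore.ops.asinh`):
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([0.0, 1.0]), mindspore.float32)
>>> out = ops.Asinh()(x)  # element-wise inverse hyperbolic sine, roughly [0.0, 0.8814]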
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
asinh_op=Asinh()
class PowTensorScalar(Primitive):
r"""
Calculates the `exponent` power of each element in `input`.
When `exponent` is a Tensor, the shapes of `input` and `exponent` must be broadcastable.
.. math::
out_{i} = input_{i} ^{ exponent_{i}}
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
input (Tensor): The first input is a tensor whose data type is
`number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
`bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
exponent (Number): The second input is a Number.
Returns:
Tensor, the shape is the same as the one after broadcasting,
and the data type is the one with higher precision or higher digits among the two inputs.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
>>> exponent = 3.0
>>> output = ops.pow_ext(input, exponent)
>>> print(output)
[ 1. 8. 64.]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, exponent):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_pow_tensor_scalar(self, [input, exponent]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, exponent)
return res
pow_tensor_scalar_op=PowTensorScalar()
class Exp(Primitive):
r"""
.. code-block::
prim = ops.Exp()
out = prim(input)
is equivalent to
.. code-block::
ops.exp(input)
Refer to :func:`mindspore.ops.exp` for more details.
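Examples:
A minimal usage sketch (illustrative only; see :func:`mindspore.ops.exp` for the full contract):
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([1.0, 2.0]), mindspore.float32)
>>> out = ops.Exp()(x)  # element-wise exponential, roughly [2.7183, 7.3891]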
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_exp(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
exp_op=Exp()
class MatMulExt(Primitive):
r"""
.. code-block::
prim = ops.MatMulExt()
out = prim(input, other)
is equivalent to
.. code-block::
ops.matmul_ext(input, other)
Refer to :func:`mindspore.ops.matmul_ext` for more details.
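Examples:
A minimal usage sketch (illustrative only; broadcasting rules follow :func:`mindspore.ops.matmul_ext`):
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> a = Tensor(np.ones((2, 3)), mindspore.float32)
>>> b = Tensor(np.ones((3, 4)), mindspore.float32)
>>> out = ops.MatMulExt()(a, b)
>>> out.shape
(2, 4)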
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_matmul_ext(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
matmul_ext_op=MatMulExt()
class TensorScatterElements(Primitive):
r"""
.. code-block::
prim = ops.TensorScatterElements(axis, reduce)
out = prim(data, indices, updates)
is equivalent to
.. code-block::
ops.tensor_scatter_elements(data, indices, updates, axis, reduce)
Refer to :func:`mindspore.ops.tensor_scatter_elements` for more details.
"""
@prim_arg_register
def __init__(self, axis=0, reduce='none'):
self._set_prim_arg("axis", type_it('TensorScatterElements', 'axis', axis, OpDtype.DT_TENSOR, OpDtype.DT_INT))
self._set_prim_arg_with_handler("reduce", reduce, str_to_enum)
def __call__(self, data, indices, updates):
return super().__call__(data, indices, updates, self.axis, self.reduce)
class MoeTokenUnpermute(Primitive):
r"""
.. code-block::
prim = ops.MoeTokenUnpermute()
out = prim(permuted_tokens, sorted_indices, probs, padded_mode, restore_shape)
is equivalent to
.. code-block::
ops.moe_token_unpermute(permuted_tokens, sorted_indices, probs, padded_mode, restore_shape)
Refer to :func:`mindspore.ops.moe_token_unpermute` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('permuted_tokens'),
sig.make_sig('sorted_indices'),
sig.make_sig('probs', default=None),
sig.make_sig('padded_mode', default=False),
sig.make_sig('restore_shape', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, permuted_tokens, sorted_indices, probs=None, padded_mode=False, restore_shape=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_moe_token_unpermute(self, [permuted_tokens, sorted_indices, probs, padded_mode, restore_shape]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, permuted_tokens, sorted_indices, probs, padded_mode, restore_shape)
return res
moe_token_unpermute_op=MoeTokenUnpermute()
class LstsqV2(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('A'),
sig.make_sig('B'),
sig.make_sig('driver', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, A, B, driver=None):
return super().__call__(A, B, driver if driver is None else str_to_enum('LstsqV2', 'driver', driver))
lstsq_v2_op=LstsqV2()
class ConvolutionGrad(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('dout'),
sig.make_sig('input'),
sig.make_sig('weight'),
sig.make_sig('bias', default=None),
sig.make_sig('stride', default=1),
sig.make_sig('padding', default=0),
sig.make_sig('dilation', default=(1, 1)),
sig.make_sig('transposed', default=False),
sig.make_sig('output_padding', default=0),
sig.make_sig('groups', default=1),
sig.make_sig('output_mask', default=()),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, dout, input, weight, bias=None, stride=1, padding=0, dilation=(1, 1), transposed=False, output_padding=0, groups=1, output_mask=()):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_convolution_grad(self, [dout, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, output_mask]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, dout, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, output_mask)
return res
convolution_grad_op=ConvolutionGrad()
class SoftplusExt(Primitive):
r"""
.. code-block::
prim = ops.SoftplusExt()
out = prim(input, beta, threshold)
is equivalent to
.. code-block::
ops.softplus_ext(input, beta, threshold)
Refer to :func:`mindspore.ops.softplus_ext` for more details.
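Examples:
A minimal usage sketch (illustrative only; assumes softplus(x) = (1 / beta) * log(1 + exp(beta * x)) with a linear
fallback above `threshold`, per :func:`mindspore.ops.softplus_ext`):
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([0.0, 1.0]), mindspore.float32)
>>> out = ops.SoftplusExt()(x, beta=1, threshold=20)  # roughly [0.6931, 1.3133]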
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('beta', default=1),
sig.make_sig('threshold', default=20),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, beta=1, threshold=20):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_softplus_ext(self, [input, beta, threshold]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, beta, threshold)
return res
softplus_ext_op=SoftplusExt()
class EmbeddingFeatureMappingImport(Primitive):
r"""
.. code-block::
prim = ops.EmbeddingFeatureMappingImport()
out = prim(file_path, teble_name, feature_size, global_step, embedding_dim, only_offset_flag, num)
is equivalent to
.. code-block::
ops.embedding_feature_mapping_import(file_path, teble_name, feature_size, global_step, embedding_dim, only_offset_flag, num)
Refer to :func:`mindspore.ops.embedding_feature_mapping_import` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('file_path'),
sig.make_sig('teble_name'),
sig.make_sig('feature_size'),
sig.make_sig('global_step'),
sig.make_sig('embedding_dim'),
sig.make_sig('only_offset_flag', default=True),
sig.make_sig('num', default=1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, file_path, teble_name, feature_size, global_step, embedding_dim, only_offset_flag=True, num=1):
return super().__call__(file_path, teble_name, feature_size, global_step, embedding_dim, only_offset_flag, num)
embedding_feature_mapping_import_op=EmbeddingFeatureMappingImport()
class RmsNormGrad(Primitive):
r"""
Calculates the gradient of RmsNorm operation.
.. warning::
This is an experimental API that is subject to change or deletion.
Inputs:
- **dy** (Tensor) - The grad of previous operator, support data type: float16, float32, bfloat16.
- **x** (Tensor) - Input data of RmsNorm, support data type: float16, float32, bfloat16.
- **rstd** (Tensor) - The second output of RmsNorm, support data type: float16, float32, bfloat16.
- **gamma** (Tensor) - Support data type: float16, float32, bfloat16.
Returns:
- **dx** (Tensor) - Has the same type and shape as `dy`.
- **dgamma** (Tensor) - A float32 Tensor with the same shape as `gamma`.
Supported Platforms:
``Ascend``
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, dy, x, rstd, gamma):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_rms_norm_grad(self, [dy, x, rstd, gamma]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, dy, x, rstd, gamma)
return res
rms_norm_grad_op=RmsNormGrad()
class RFFTN(Primitive):
r"""
.. code-block::
prim = ops.RFFTN()
out = prim(input, s, dim, norm)
is equivalent to
.. code-block::
ops.rfftn(input, s, dim, norm)
Refer to :func:`mindspore.ops.rfftn` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('s', default=None),
sig.make_sig('dim', default=None),
sig.make_sig('norm', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, s=None, dim=None, norm=None):
return super().__call__(input, s, dim, norm if norm is None else str_to_enum('RFFTN', 'norm', norm))
rfftn_op=RFFTN()
class ReduceMax(Primitive):
r"""
Reduces a dimension of a tensor by taking the maximum value in that dimension. By default, all dimensions
are reduced; a specific dimension of `x` can be reduced along `axis`. Whether the output keeps the same
number of dimensions as the input is controlled by `keep_dims`.
Note:
The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
Args:
keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
If ``False`` , don't keep these dimensions. Default: ``False`` .
Inputs:
- **x** (Tensor[Number]) - The input tensor.
- **axis** (Union[int, tuple(int), list(int), tensor]) - The dimensions to reduce. Default: ``()`` ,
reduce all dimensions. Must be in the range [-r, r).
Outputs:
output(Tensor): has the same dtype as the `x`.
- If `axis` is ``()`` , and `keep_dims` is ``False`` ,
the output is a 0-D tensor representing the maximum of all elements in the input tensor.
- If `axis` is int, set as 1, and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_0, x_2, ..., x_R)`.
- If `axis` is tuple(int) or list(int), set as (1, 2), and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_0, x_3, ..., x_R)`.
- If `axis` is 1-D Tensor, set as [1, 2], and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_0, x_3, ..., x_R)`.
Raises:
TypeError: If `keep_dims` is not a bool.
TypeError: If `x` is not a Tensor.
TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
ValueError: If `axis` is out of range.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> output = ops.ReduceMax(keep_dims=True)(x, 1)
>>> result = output.shape
>>> print(result)
(3, 1, 5, 6)
>>> # case 1: Reduces a dimension by the maximum value of all elements in the dimension.
>>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
>>> output = ops.ReduceMax(keep_dims=True)(x, ())
>>> print(output)
[[[9.]]]
>>> print(output.shape)
(1, 1, 1)
>>> # case 2: Reduces a dimension along axis 0.
>>> output = ops.ReduceMax(keep_dims=True)(x, 0)
>>> print(output)
[[[7. 7. 7. 7. 7. 7.]
[8. 8. 8. 8. 8. 8.]
[9. 9. 9. 9. 9. 9.]]]
>>> # case 3: Reduces a dimension along axis 1.
>>> output = ops.ReduceMax(keep_dims=True)(x, 1)
>>> print(output)
[[[3. 3. 3. 3. 3. 3.]]
[[6. 6. 6. 6. 6. 6.]]
[[9. 9. 9. 9. 9. 9.]]]
>>> # case 4: Reduces a dimension along axis 2.
>>> output = ops.ReduceMax(keep_dims=True)(x, 2)
>>> print(output)
[[[1.]
[2.]
[3.]]
[[4.]
[5.]
[6.]]
[[7.]
[8.]
[9.]]]
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('axis', default=()),
)
@prim_arg_register
def __init__(self, keep_dims=False):
self._set_prim_arg("keep_dims", keep_dims)
def __call__(self, x, axis=()):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_reduce_max(self, [x, axis, self.keep_dims]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x, axis, self.keep_dims)
return res
class Diagonal(Primitive):
r"""
.. code-block::
prim = ops.Diagonal(offset, dim1, dim2)
out = prim(input)
is equivalent to
.. code-block::
ops.diagonal(input, offset, dim1, dim2)
Refer to :func:`mindspore.ops.diagonal` for more details.
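Examples:
A small hedged sketch; the printed result follows standard diagonal-extraction semantics and is an
assumption for illustration, not copied from the official documentation.
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([[0, 1], [2, 3]]), mindspore.float32)
>>> out = ops.Diagonal(offset=0, dim1=0, dim2=1)(x)
>>> print(out)
[0. 3.]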
"""
@prim_arg_register
def __init__(self, offset=0, dim1=0, dim2=1):
self._set_prim_arg("offset", offset)
self._set_prim_arg("dim1", dim1)
self._set_prim_arg("dim2", dim2)
def __call__(self, input):
return super().__call__(input, self.offset, self.dim1, self.dim2)
class ExtractImagePatches(Primitive):
r"""
.. code-block::
prim = ops.ExtractImagePatches(ksizes, strides, rates, padding)
out = prim(input_x)
is equivalent to
.. code-block::
ops.extract_image_patches(input_x, ksizes, strides, rates, padding)
Refer to :func:`mindspore.ops.extract_image_patches` for more details.
"""
@prim_arg_register
def __init__(self, ksizes, strides, rates, padding='VALID'):
self._set_prim_arg_with_handler("ksizes", type_it('ExtractImagePatches', 'ksizes', ksizes, OpDtype.DT_LIST_INT, OpDtype.DT_TUPLE_INT), to_kernel_size)
self._set_prim_arg_with_handler("strides", type_it('ExtractImagePatches', 'strides', strides, OpDtype.DT_LIST_INT, OpDtype.DT_TUPLE_INT), to_strides)
self._set_prim_arg_with_handler("rates", type_it('ExtractImagePatches', 'rates', rates, OpDtype.DT_LIST_INT, OpDtype.DT_TUPLE_INT), to_rates)
self._set_prim_arg_with_handler("padding", padding, str_to_enum)
def __call__(self, input_x):
return super().__call__(input_x, self.ksizes, self.strides, self.rates, self.padding)
class DistCommBarrier(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, group):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_dist_comm_barrier(self, [group]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, group)
return res
dist_comm_barrier_op=DistCommBarrier()
class IndexFillScalar(Primitive):
r"""
.. code-block::
prim = ops.IndexFillScalar()
out = prim(input, dim, index, value)
is equivalent to
.. code-block::
ops.index_fill_scalar(input, dim, index, value)
Refer to :func:`mindspore.ops.index_fill_scalar` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim, index, value):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_index_fill_scalar(self, [input, dim, index, value]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim, index, value)
return res
index_fill_scalar_op=IndexFillScalar()
class AtanGrad(Primitive):
r"""
Computes AtanGrad of input element-wise.
Returns:
Tensor, has the same type as input.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, dout):
return super().__call__(x, dout)
atan_grad_op=AtanGrad()
class Eye(Primitive):
r"""
Creates a tensor with ones on the diagonal and zeros in the rest.
Refer to :func:`mindspore.ops.eye` for more details.
Note:
The data type of returned tensor can be float16, float32, int8, int16, int32, int64, uint8 or bool on Ascend platforms.
Inputs:
- **n** (int) - The number of rows of returned tensor. Constant value only.
- **m** (int) - The number of columns of returned tensor. Constant value only.
- **t** (mindspore.dtype) - MindSpore's dtype, the data type of the returned tensor.
Default: ``None`` , the data type of the returned tensor is mindspore.float32.
Outputs:
Tensor, a tensor with ones on the diagonal and the rest of elements are zero. The shape of `output` depends on
the user's Inputs `n` and `m`. And the data type depends on Inputs `t`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import ops
>>> eye = ops.Eye()
>>> output = eye(2, 2, mindspore.int32)
>>> print(output)
[[1 0]
[0 1]]
>>> print(output.dtype)
Int32
>>> output = eye(1, 2, mindspore.float32)
>>> print(output)
[[1. 0.]]
>>> print(output.dtype)
Float32
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, n, m, dtype):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_eye(self, [n, m, dtype_to_type_id('Eye', 'dtype', dtype)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, n, m, dtype_to_type_id('Eye', 'dtype', dtype))
return res
eye_op=Eye()
class Min(Primitive):
r"""
.. code-block::
prim = ops.Min()
out = prim(input)
is equivalent to
.. code-block::
ops.min_(input)
Refer to :func:`mindspore.ops.min_` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_min(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
min_op=Min()
class NotEqual(Primitive):
r"""
.. code-block::
prim = ops.NotEqual()
out = prim(input, other)
is equivalent to
.. code-block::
ops.not_equal(input, other)
Refer to :func:`mindspore.ops.not_equal` for more details.
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_not_equal(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
not_equal_op=NotEqual()
class GatherDGradV2(Primitive):
r"""
Computes gradient for the GatherD operation. Note that the operator "GatherDGrad" has been abandoned.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, dim, index, dout):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_gather_d_grad_v2(self, [x, dim, index, dout]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x, dim, index, dout)
return res
gather_d_grad_v2_op=GatherDGradV2()
class InplaceMaskedFillTensor(Primitive):
r"""
.. code-block::
prim = ops.InplaceMaskedFillTensor()
out = prim(input, mask, value)
is equivalent to
.. code-block::
ops.masked_fill_tensor_(input, mask, value)
Refer to :func:`mindspore.ops.masked_fill_tensor_` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('mask'),
sig.make_sig('value'),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, mask, value):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_masked_fill_tensor(self, [input, mask, value]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, mask, value)
return res
inplace_masked_fill_tensor_op=InplaceMaskedFillTensor()
class BatchNormGradExt(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('dout'),
sig.make_sig('input'),
sig.make_sig('weight'),
sig.make_sig('running_mean', default=None),
sig.make_sig('running_var', default=None),
sig.make_sig('saved_mean', default=None),
sig.make_sig('saved_rstd', default=None),
sig.make_sig('output_mask', default=(1, 1, 1)),
)
@prim_arg_register
def __init__(self, training=False, eps=1e-5):
self._set_prim_arg("training", training)
self._set_prim_arg("eps", eps)
def __call__(self, dout, input, weight, running_mean=None, running_var=None, saved_mean=None, saved_rstd=None, output_mask=(1, 1, 1)):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_batch_norm_grad_ext(self, [dout, input, weight, running_mean, running_var, saved_mean, saved_rstd, output_mask, self.training, self.eps]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, dout, input, weight, running_mean, running_var, saved_mean, saved_rstd, output_mask, self.training, self.eps)
return res
class ApplyCamePart3(Primitive):
r"""
Computes Part 3 of the CAME Optimizer.
Args:
- **u** (Parameter) - The shape = 2D :math:`(..., n, m)`.
A Tensor of types: float16, float32, bfloat16.
- **m** (Parameter) - The shape = 2D :math:`(..., n, m)`.
A Tensor of types: float16, float32, bfloat16.
- **eps** (float) - data type must be float.
- **beta1** (float) - data type must be float.
- **clip_threshold** (float) - data type must be float.
- **sum_square_u** (Tensor) - The shape = 1D :math:`(1)`.
A Tensor of types: float32.
- **global_shape** (Tensor) - the shape = 1D :math:`(2)`.
'None' is currently supported. A Tensor of types: int64.
- **use_first_moment** (bool).
Returns:
- **m** (Tensor) - A Tensor of shape :math:`(..., n, m)`
- **sum_u_r** (Tensor) - A Tensor of shape :math:`(..., n)`
- **sum_u_c** (Tensor) - A Tensor of shape :math:`(..., m)`
- **sum_u_rc** (Tensor) - A Tensor of shape :math:`(...)`
Raises:
TypeError: If `u` is not a Tensor.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore as ms
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore.ops.operations import _inner_ops as P
>>> apply_came_part3 = P.ApplyCamePart3()
>>> u = Tensor(np.ones([1024, 64]), dtype=ms.float32)
>>> m = Tensor(np.ones([1024, 64]), dtype=ms.float32)
>>> eps = 0.8
>>> beta1 = 0.5
>>> clip_threshold = 0.5
>>> sum_square_u = Tensor(np.array([128]), dtype=ms.float32)
>>> global_shape = (1024, 64)
>>> use_first_moment = False
>>> output = apply_came_part3(u, m, eps, beta1, clip_threshold, sum_square_u, global_shape, use_first_moment)
>>> print(output[0].shape)
(1024, 64)
"""
__mindspore_signature__ = (
sig.make_sig('u'),
sig.make_sig('m'),
sig.make_sig('eps'),
sig.make_sig('beta1'),
sig.make_sig('clip_threshold'),
sig.make_sig('sum_square_u'),
sig.make_sig('global_shape', default=None),
sig.make_sig('use_first_moment', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, u, m, eps, beta1, clip_threshold, sum_square_u, global_shape=None, use_first_moment=False):
return super().__call__(u, m, eps, beta1, clip_threshold, sum_square_u, global_shape, use_first_moment)
apply_came_part3_op=ApplyCamePart3()
class IFFT2(Primitive):
r"""
.. code-block::
prim = ops.IFFT2()
out = prim(input, s, dim, norm)
is equivalent to
.. code-block::
ops.ifft2(input, s, dim, norm)
Refer to :func:`mindspore.ops.ifft2` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('s', default=None),
sig.make_sig('dim', default=(-2, -1)),
sig.make_sig('norm', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, s=None, dim=(-2, -1), norm=None):
return super().__call__(input, s, dim, norm if norm is None else str_to_enum('IFFT2', 'norm', norm))
ifft2_op=IFFT2()
class AddLayerNormGrad(Primitive):
r"""
.. code-block::
prim = ops.AddLayerNormGrad()
out = prim(dy, x1, x2, rstd, mean, gamma, dsumOptional)
is equivalent to
.. code-block::
ops.add_layer_norm_grad(dy, x1, x2, rstd, mean, gamma, dsumOptional)
Refer to :func:`mindspore.ops.add_layer_norm_grad` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, dy, x1, x2, rstd, mean, gamma, dsumOptional):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_add_layer_norm_grad(self, [dy, x1, x2, rstd, mean, gamma, dsumOptional]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, dy, x1, x2, rstd, mean, gamma, dsumOptional)
return res
add_layer_norm_grad_op=AddLayerNormGrad()
class Atan2(Primitive):
r"""
.. code-block::
prim = ops.Atan2()
out = prim(input, other)
is equivalent to
.. code-block::
ops.atan2(input, other)
Refer to :func:`mindspore.ops.atan2` for more details.
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
return super().__call__(input, other)
atan2_op=Atan2()
class Roll(Primitive):
r"""
.. code-block::
prim = ops.Roll(shifts, dims)
out = prim(input)
is equivalent to
.. code-block::
ops.roll(input, shifts, dims)
Refer to :func:`mindspore.ops.roll` for more details.
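Examples:
A small hedged sketch; the printed result follows the usual circular-shift semantics and is an
assumption for illustration, not copied from the official documentation.
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([0, 1, 2, 3, 4]), mindspore.float32)
>>> out = ops.Roll(shifts=2, dims=0)(x)
>>> print(out)
[3. 4. 0. 1. 2.]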
"""
@prim_arg_register
def __init__(self, shifts, dims=None):
self._set_prim_arg("shifts", type_it('Roll', 'shifts', shifts, (OpDtype.DT_INT, OpDtype.DT_LIST_INT), OpDtype.DT_TUPLE_INT))
self._set_prim_arg("dims", type_it('Roll', 'dims', dims, (OpDtype.DT_INT, OpDtype.DT_LIST_INT), OpDtype.DT_TUPLE_INT))
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_roll(self, [input, self.shifts, self.dims]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, self.shifts, self.dims)
return res
class HSwishGrad(Primitive):
r"""
Gets the gradient of HSwish operation.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, y_grad, x):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_hswish_grad(self, [y_grad, x]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, y_grad, x)
return res
hswish_grad_op=HSwishGrad()
class DropoutExt(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_hidden", True)
def __call__(self, input, p, seed, offset):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_dropout_ext(self, [input, p, seed, offset]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, p, seed, offset)
return res
dropout_ext_op=DropoutExt()
class Gcd(Primitive):
r"""
.. code-block::
prim = ops.Gcd()
out = prim(input, other)
is equivalent to
.. code-block::
ops.gcd(input, other)
Refer to :func:`mindspore.ops.gcd` for more details.
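Examples:
A small hedged sketch; the printed values follow the ordinary greatest-common-divisor definition and
are an assumption for illustration, not copied from the official documentation.
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([4, 6, 8]), mindspore.int32)
>>> other = Tensor(np.array([2, 4, 12]), mindspore.int32)
>>> output = ops.Gcd()(input, other)
>>> print(output)
[2 2 4]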
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_gcd(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
gcd_op=Gcd()
class InplaceNormal(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('mean'),
sig.make_sig('std'),
sig.make_sig('seed'),
sig.make_sig('offset'),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, mean, std, seed, offset):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_normal(self, [input, mean, std, seed, offset]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, mean, std, seed, offset)
return res
inplace_normal_op=InplaceNormal()
class CholeskyGrad(Primitive):
r"""
Computes the reverse mode backpropagated gradient of the Cholesky algorithm.
Args:
- **x** (Tensor) - A tensor with float32 or float64 data type.
- **grad** (Tensor) - A tensor with float32 or float64 data type. `grad` should have
the same dtype with `x`.
Outputs:
Tensor, has the same dtype as `x` and `grad`.
Raises:
TypeError: If x is not Tensor.
TypeError: If grad is not Tensor.
TypeError: If dtype of input x and grad is neither float64 nor float32.
TypeError: If x has a different dtype from grad.
ValueError: If input tensor's last two dims are not equal.
ValueError: If the shape of x and grad mismatch.
Supported Platforms:
``Ascend``
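Examples:
A minimal shape-only sketch; the identity-matrix inputs are illustrative assumptions, and only the
documented dtype/shape relationship with `x` is relied on.
>>> import numpy as np
>>> from mindspore import Tensor
>>> # Assumes CholeskyGrad is imported from this generated module.
>>> x = Tensor(np.eye(3).astype(np.float32))
>>> grad = Tensor(np.ones([3, 3]).astype(np.float32))
>>> output = CholeskyGrad()(x, grad)
>>> print(output.shape)
(3, 3)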
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, grad):
return super().__call__(x, grad)
cholesky_grad_op=CholeskyGrad()
class UpsampleNearest3D(Primitive):
r"""
Performs nearest neighbor upsampling operation.
This operator scales up the volumetric input with the specified `output_size` or `scales` factors, using the
nearest neighbor algorithm.
One of `output_size` or `scales` must be given, and they cannot both be specified at the same time.
Inputs:
- **x** (Tensor) - 5D tensor of shape :math:`(N, C, D_{in}, H_{in}, W_{in})`.
Supporting types: [float16, float32, float64].
- **output_size** (Union[tuple[int], list[int]]): A tuple or list of int specifying the output volumetric size.
Default: ``None``.
- **scales** (Union[tuple[float], list[float]]): A tuple or list of float specifying the upsampling factors.
Default: ``None``.
Outputs:
- **y** (Tensor) - Upsampled output with the same type as `x` , whose shape is
:math:`(N, C, D_{out}, H_{out}, W_{out})`.
Raises:
TypeError: When `output_size` is not ``None`` and `output_size` is not list[int] or tuple[int].
TypeError: When `scales` is not ``None`` and `scales` is not list[float] or tuple[float].
TypeError: If dtype of `x` is not in [float16, float32, float64].
ValueError: If any value of `output_size` is negative or zero when `output_size` is not ``None``.
ValueError: If any value of `scales` is negative or zero when `scales` is not ``None``.
ValueError: If shape of `x` is not 5D.
ValueError: If none of `scales` and `output_size` is specified or both specified.
ValueError: If size of `scales` is not equal 3 when `scales` is specified.
ValueError: If size of `output_size` is not equal 3 when `output_size` is specified.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> from mindspore import dtype as mstype
>>> x = Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16])
... .reshape([1, 1, 2, 2, 4]), mstype.float32)
>>> output_size = [3, 4, 5]
>>> net = ops.UpsampleNearest3D()
>>> output = net(x, output_size, None)
>>> print(output)
[[[[[ 1. 1. 2. 3. 4.]
[ 1. 1. 2. 3. 4.]
[ 5. 5. 6. 7. 8.]
[ 5. 5. 6. 7. 8.]]
[[ 1. 1. 2. 3. 4.]
[ 1. 1. 2. 3. 4.]
[ 5. 5. 6. 7. 8.]
[ 5. 5. 6. 7. 8.]]
[[ 9. 9. 10. 11. 12.]
[ 9. 9. 10. 11. 12.]
[13. 13. 14. 15. 16.]
[13. 13. 14. 15. 16.]]]]]
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('output_size', default=None),
sig.make_sig('scales', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, output_size=None, scales=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_upsample_nearest3d(self, [x, output_size, scales]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x, output_size, scales)
return res
upsample_nearest3d_op=UpsampleNearest3D()
class LogicalXor(Primitive):
r"""
Computes the "logical XOR" of two tensors element-wise.
.. warning::
This is an experimental API that is subject to change or deletion.
Refer to :func:`mindspore.ops.logical_xor` for more details.
Inputs:
- **x** (Union[Tensor, bool]) - The first input is a bool or a tensor whose data type can be implicitly
converted to bool.
- **y** (Union[Tensor, bool]) - The second input is a bool when the first input is a tensor or
a tensor whose data type can be implicitly converted to bool.
Outputs:
Tensor, the shape is the same as the `x` and `y` after broadcasting, and the data type is bool.
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([True, False, True]), mindspore.bool_)
>>> y = Tensor(np.array([True, True, False]), mindspore.bool_)
>>> logical_xor = ops.LogicalXor()
>>> output = logical_xor(x, y)
>>> print(output)
[ False True True]
>>> x = Tensor(1, mindspore.bool_)
>>> y = Tensor(0, mindspore.bool_)
>>> output = ops.LogicalXor()(x, y)
>>> print(output)
True
>>> x = True
>>> y = Tensor(0, mindspore.bool_)
>>> output = ops.LogicalXor()(x, y)
>>> print(output)
True
>>> x = True
>>> y = Tensor(np.array([True, False]), mindspore.bool_)
>>> output = ops.LogicalXor()(x, y)
>>> print(output)
[False True]
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_logical_xor(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
logical_xor_op=LogicalXor()
class RemainderTensorScalar(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_remainder_tensor_scalar(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
remainder_tensor_scalar_op=RemainderTensorScalar()
class DistCommBroadcast(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, src, group):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_dist_comm_broadcast(self, [input, src, group]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, src, group)
return res
dist_comm_broadcast_op=DistCommBroadcast()
class InplaceFillTensor(Primitive):
r"""
.. code-block::
prim = ops.InplaceFillTensor()
out = prim(input, value)
is equivalent to
.. code-block::
ops.inplace_fill_tensor(input, value)
Refer to :func:`mindspore.ops.inplace_fill_tensor` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('value'),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, value):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_fill_tensor(self, [input, value]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, value)
return res
inplace_fill_tensor_op=InplaceFillTensor()
class UniqueConsecutive(Primitive):
r"""
Returns the elements that are unique in each consecutive group of equivalent elements in the input tensor.
.. warning::
This is an experimental API that is subject to change or deletion.
Refer to :func:`mindspore.ops.unique_consecutive` for more details.
Args:
return_inverse (bool, optional): Whether to return the index of where the element in the original input
maps to the position in the output. Default: ``False`` .
return_counts (bool, optional): Whether to return the counts of each unique element. Default: ``False`` .
dim (int, optional): The dimension to apply unique. If ``None`` , the unique of the flattened input is
returned. If specified, it must be int32 or int64. Default: ``None`` .
Inputs:
- **x** (Tensor) - The input tensor.
Outputs:
A tensor or a tuple of tensors containing tensor objects (`output`, `idx`, `counts`).
- `output` has the same type as `x` and is used to represent the output list of unique scalar elements.
- If `return_inverse` is True, there will be an additional returned tensor, `idx`,
which has the same shape as `x` and represents
the index of where the element in the original input maps to the position in the output.
- If `return_counts` is True, there will be an additional returned tensor, `counts`,
which represents the number of occurrences for each unique value or tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> from mindspore import dtype as mstype
>>> x = Tensor(np.array([1, 1, 2, 2, 3, 1, 1, 2]), mstype.int32)
>>> unique_consecutive = ops.UniqueConsecutive(True, True, None)
>>> output, idx, counts = unique_consecutive(x)
>>> print(output)
[1 2 3 1 2]
>>> print(idx)
[0 0 1 1 2 3 3 4]
>>> print(counts)
[2 2 1 2 1]
"""
@prim_arg_register
def __init__(self, return_inverse=False, return_counts=False, dim=None):
self._set_prim_arg("return_inverse", return_inverse)
self._set_prim_arg("return_counts", return_counts)
self._set_prim_arg("dim", dim)
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_unique_consecutive(self, [input, self.return_inverse, self.return_counts, self.dim]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, self.return_inverse, self.return_counts, self.dim)
return res
class IHFFTN(Primitive):
r"""
.. code-block::
prim = ops.IHFFTN()
out = prim(input, s, dim, norm)
is equivalent to
.. code-block::
ops.ihfftn(input, s, dim, norm)
Refer to :func:`mindspore.ops.ihfftn` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('s', default=None),
sig.make_sig('dim', default=None),
sig.make_sig('norm', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, s=None, dim=None, norm=None):
return super().__call__(input, s, dim, norm if norm is None else str_to_enum('IHFFTN', 'norm', norm))
ihfftn_op=IHFFTN()
class ApplyCamePart2(Primitive):
r"""
Computes Part 2 of the CAME Optimizer.
Args:
- **grad** (Tensor) - The shape = 2D :math:`(..., n, m)`.
A Tensor of types: float16, float32, bfloat16.
- **sum_grad_r** (Tensor) - The shape = 1D :math:`(..., n)`.
A Tensor of types: float32.
- **sum_grad_c** (Tensor) - The shape = 1D :math:`(..., m)`.
A Tensor of types: float32.
- **sum_grad_rc** (Tensor) - The shape = 1D :math:`(...)`.
A Tensor of types: float32.
- **r** (Tensor) - The shape = 1D :math:`(..., n)`.
The Tensor has the same data type as `grad`.
- **c** (Tensor) - The shape = 1D :math:`(..., m)`.
The Tensor has the same data type as `grad`.
- **beta2** (float) - data type must be float.
- **sum_r** (Tensor) - The shape = 1D :math:`(..., 1)`.
'None' is currently supported. A Tensor of types: float32.
- **global_shape** (Tensor) - the shape = 1D :math:`(2)`.
'None' is currently supported. A Tensor of types: int64.
Returns:
- **r** (Tensor) - A Tensor of shape :math:`(..., n)`
- **c** (Tensor) - A Tensor of shape :math:`(..., m)`
- **u** (Tensor) - A Tensor of shape :math:`(..., n, m)`
- **sum_square_u** (Tensor) - A Tensor of shape :math:`(1)`
Raises:
TypeError: If `grad` is not a Tensor.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore as ms
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore.ops.operations import _inner_ops as P
>>> apply_came_part2 = P.ApplyCamePart2()
>>> grad = Tensor(np.ones([1024, 64]), dtype=ms.float32)
>>> sum_grad_r = Tensor(np.ones([1024]), dtype=ms.float32)
>>> sum_grad_c = Tensor(np.ones([64]), dtype=ms.float32)
>>> sum_grad_rc = Tensor(np.array([64]), dtype=ms.float32)
>>> r = Tensor(np.ones([1024]), dtype=ms.float32)
>>> c = Tensor(np.ones([64]), dtype=ms.float32)
>>> beta2 = 0.5
>>> sum_r = Tensor(np.array([128]), dtype=ms.float32)
>>> global_shape = (1024, 64)
>>> output = apply_came_part2(grad, sum_grad_r, sum_grad_c, sum_grad_rc, r, c, beta2, sum_r, global_shape)
>>> print(output[0].shape)
(1024,)
"""
__mindspore_signature__ = (
sig.make_sig('grad'),
sig.make_sig('sum_grad_r'),
sig.make_sig('sum_grad_c'),
sig.make_sig('sum_grad_rc'),
sig.make_sig('r'),
sig.make_sig('c'),
sig.make_sig('beta2'),
sig.make_sig('sum_r', default=None),
sig.make_sig('global_shape', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, grad, sum_grad_r, sum_grad_c, sum_grad_rc, r, c, beta2, sum_r=None, global_shape=None):
return super().__call__(grad, sum_grad_r, sum_grad_c, sum_grad_rc, r, c, beta2, sum_r, global_shape)
apply_came_part2_op=ApplyCamePart2()
class RFFT2(Primitive):
r"""
.. code-block::
prim = ops.RFFT2()
out = prim(input, s, dim, norm)
is equivalent to
.. code-block::
ops.rfft2(input, s, dim, norm)
Refer to :func:`mindspore.ops.rfft2` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('s', default=None),
sig.make_sig('dim', default=(-2, -1)),
sig.make_sig('norm', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, s=None, dim=(-2, -1), norm=None):
return super().__call__(input, s, dim, norm if norm is None else str_to_enum('RFFT2', 'norm', norm))
rfft2_op=RFFT2()
class Tan(Primitive):
r"""
.. code-block::
prim = ops.Tan()
out = prim(input)
is equivalent to
.. code-block::
ops.tan(input)
Refer to :func:`mindspore.ops.tan` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_tan(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
tan_op=Tan()
class InplaceScatterValue(Primitive):
r"""
InplaceScatterValue is used for :func:`mindspore.Tensor.scatter_` when the source element is a scalar and no reduce is applied.
For details, please refer to :func:`mindspore.Tensor.scatter_`.
Examples:
>>> from mindspore import Tensor, int64, float32
>>> this_tensor = Tensor([[1, 2], [3, 4]], dtype=float32)
>>> index = Tensor([[0], [1]], dtype=int64)
>>> this_tensor.scatter_(0, index, 10)
>>> print(this_tensor)
[[10., 2.],
[10., 4.]]
Supported Platforms:
``Ascend``
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('dim'),
sig.make_sig('index'),
sig.make_sig('value'),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, dim, index, value):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_scatter_value(self, [input, dim, index, value]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim, index, value)
return res
inplace_scatter_value_op=InplaceScatterValue()
class FloorMod(Primitive):
r"""
.. code-block::
prim = ops.FloorMod()
out = prim(x, y)
is equivalent to
.. code-block::
ops.floor_mod(x, y)
Refer to :func:`mindspore.ops.floor_mod` for more details.
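Examples:
A small hedged sketch; the printed values follow floor-division remainder semantics and are an
assumption for illustration, not copied from the official documentation.
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([7, -7]), mindspore.int32)
>>> y = Tensor(np.array([3, 3]), mindspore.int32)
>>> output = ops.FloorMod()(x, y)
>>> print(output)
[1 2]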
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, y):
return super().__call__(x, y)
floor_mod_op=FloorMod()
class InplaceReLU(Primitive):
r"""
Computes ReLU (Rectified Linear Unit activation function) in place on the input tensor element-wise.
It returns :math:`\max(input,\ 0)` element-wise. Specially, the neurons with the negative output
will be suppressed and the active neurons will stay the same.
.. math::
ReLU(input) = (input)^+ = \max(0, input)
ReLU Activation Function Graph:
.. image:: ../images/ReLU.png
:align: center
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
input (Tensor): The input Tensor.
Returns:
Tensor, with the same dtype and shape as the `input`.
Raises:
TypeError: If dtype of `input` is not Number type.
TypeError: If `input` is not a Tensor.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
>>> ops.relu_(input)
>>> print(input)
[[0. 4. 0.]
[2. 0. 9.]]
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_relu(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
inplace_relu_op=InplaceReLU()
class SoftplusGradExt(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('dout'),
sig.make_sig('x'),
sig.make_sig('beta', default=1),
sig.make_sig('threshold', default=20),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, dout, x, beta=1, threshold=20):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_softplus_grad_ext(self, [dout, x, beta, threshold]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, dout, x, beta, threshold)
return res
softplus_grad_ext_op=SoftplusGradExt()
class Addcmul(Primitive):
r"""
Adds the element-wise product of `x1` by `x2`, multiplied by `value` to `input_data`.
It computes the following operation:
.. math::
output[i] = input\_data[i] + value[i] * (x1[i] * x2[i])
Inputs:
- **input_data** (Tensor) - The tensor to be added.
- **x1** (Tensor) - The tensor to be multiplied.
- **x2** (Tensor) - The tensor to be multiplied.
- **value** (Tensor) - The multiplier for tensor x1*x2.
Outputs:
Tensor, has the same shape and dtype as x1*x2.
Raises:
TypeError: If `x1`, `x2`, `value` or `input_data` is not a Tensor.
TypeError: If dtypes of `x1`, `x2`, `value` and `input_data` are not the same.
ValueError: If `x1` could not be broadcast to `x2`.
ValueError: If `value` could not be broadcast to `x1` * `x2`.
ValueError: If `input_data` could not be broadcast to `value*(x1*x2)`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_data = Tensor(np.array([1, 1, 1]), mindspore.float32)
>>> x1 = Tensor(np.array([[1], [2], [3]]), mindspore.float32)
>>> x2 = Tensor(np.array([[1, 2, 3]]), mindspore.float32)
>>> value = Tensor([1], mindspore.float32)
>>> addcmul = ops.Addcmul()
>>> y = addcmul(input_data, x1, x2, value)
>>> print(y)
[[ 2. 3. 4.]
[ 3. 5. 7.]
[ 4. 7. 10.]]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, tensor1, tensor2, value):
return super().__call__(input, tensor1, tensor2, value)
addcmul_op=Addcmul()
class Acosh(Primitive):
r"""
.. code-block::
prim = ops.Acosh()
out = prim(input)
is equivalent to
.. code-block::
ops.acosh(input)
Refer to :func:`mindspore.ops.acosh` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
acosh_op=Acosh()
class CdistGrad(Primitive):
r"""
"""
@prim_arg_register
def __init__(self, p=2.0):
self._set_prim_arg("p", p)
def __call__(self, grad, input_x, input_y, cdist):
return super().__call__(grad, input_x, input_y, cdist, self.p)
class ExpandDims(Primitive):
r"""
.. code-block::
prim = ops.ExpandDims()
out = prim(input_x, axis)
is equivalent to
.. code-block::
ops.expand_dims(input_x, axis)
Refer to :func:`mindspore.ops.expand_dims` for more details.
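Examples:
A minimal shape-only sketch; the input shape is an illustrative assumption.
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.ones([2, 3]), mindspore.float32)
>>> output = ops.ExpandDims()(input_x, 0)
>>> print(output.shape)
(1, 2, 3)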
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input_x, axis):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_expand_dims(self, [input_x, axis]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input_x, axis)
return res
expand_dims_op=ExpandDims()
class LogicalNot(Primitive):
r"""
Computes the "logical NOT" of a tensor element-wise.
Refer to :func:`mindspore.ops.logical_not` for more details.
Inputs:
- **x** (Tensor) - The input tensor.
Outputs:
Tensor, the shape is the same as the `x`, and the dtype is bool.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([True, False, True]), mindspore.bool_)
>>> logical_not = ops.LogicalNot()
>>> output = logical_not(x)
>>> print(output)
[False True False]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_logical_not(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
logical_not_op=LogicalNot()
class BatchNormReduceGrad(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, dout, input, mean, invstd, weight, input_g, weight_g, bias_g):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_batch_norm_reduce_grad(self, [dout, input, mean, invstd, weight, input_g, weight_g, bias_g]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, dout, input, mean, invstd, weight, input_g, weight_g, bias_g)
return res
batch_norm_reduce_grad_op=BatchNormReduceGrad()
class ResizeNearestNeighborV2Grad(Primitive):
r"""
Compute gradient of `ResizeNearestNeighborV2` operator.
Args:
grads (Tensor): A 4-D Tensor with shape [batch, channel, height, width].
size (Union[tuple[int], Tensor]): The size for the input image. 2 elements: [`height, width`].
align_corners (bool): Whether the centers of the 4 corner pixels of the input
and output tensors are aligned. Default: ``False``.
half_pixel_centers (bool): Default: ``False``.
Outputs:
A 4-D Tensor with the same data type as `grads`, whose height and width are given by `size` (the shape of the original input image).
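Examples:
A minimal shape-only sketch; the shapes below are illustrative assumptions, and the output spatial
size simply reflects the `size` argument as described above.
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore import dtype as mstype
>>> # Assumes ResizeNearestNeighborV2Grad is imported from this generated module.
>>> grads = Tensor(np.ones([1, 1, 4, 4]), mstype.float32)
>>> output = ResizeNearestNeighborV2Grad()(grads, (2, 2))
>>> print(output.shape)
(1, 1, 2, 2)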
"""
@prim_arg_register
def __init__(self, align_corners=False, half_pixel_centers=False):
self._set_prim_arg("align_corners", align_corners)
self._set_prim_arg("half_pixel_centers", half_pixel_centers)
def __call__(self, grads, size):
return super().__call__(grads, size, self.align_corners, self.half_pixel_centers)
class Reshape(Primitive):
r"""
.. code-block::
prim = ops.Reshape()
out = prim(input, shape)
is equivalent to
.. code-block::
ops.reshape(input, shape)
Refer to :func:`mindspore.ops.reshape` for more details.
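Examples:
A minimal shape-only sketch; the shapes are illustrative assumptions.
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.arange(6).reshape(2, 3), mindspore.float32)
>>> output = ops.Reshape()(input, (3, 2))
>>> print(output.shape)
(3, 2)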
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, shape):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_reshape(self, [input, shape]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, shape)
return res
reshape_op=Reshape()
class AcoshGrad(Primitive):
r"""
Performs grad of Acosh operation.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, out, dout):
return super().__call__(out, dout)
acosh_grad_op=AcoshGrad()
class L1LossBackwardExt(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('grad_output'),
sig.make_sig('input'),
sig.make_sig('target'),
sig.make_sig('reduction', default='mean'),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, grad_output, input, target, reduction='mean'):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_l1_loss_backward_ext(self, [grad_output, input, target, str_to_enum('L1LossBackwardExt', 'reduction', reduction)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, grad_output, input, target, str_to_enum('L1LossBackwardExt', 'reduction', reduction))
return res
l1_loss_backward_ext_op=L1LossBackwardExt()
class LstsqV2Grad(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, gX, A, B):
return super().__call__(gX, A, B)
lstsq_v2_grad_op=LstsqV2Grad()
class ReduceProd(Primitive):
r"""
Reduces a dimension of a tensor by multiplying all elements in the dimension, by default. And also can
reduce a dimension of `x` along the `axis`. Determine whether the dimensions of the output and input are the
same by controlling `keep_dims`.
Note:
The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
Args:
keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
If ``False`` , don't keep these dimensions. Default: ``False`` .
Inputs:
- **x** (Tensor[Number]) - The input tensor.
- **axis** (Union[int, tuple(int), list(int), Tensor]) - The dimensions to reduce. Default: ``()`` ,
reduce all dimensions. Only constant value is allowed. Must be in the range [-r, r).
Outputs:
Tensor, has the same dtype as the `x`.
- If `axis` is ``()`` , and `keep_dims` is ``False`` ,
the output is a 0-D tensor representing the product of all elements in the input tensor.
- If `axis` is int, set as 1, and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_0, x_2, ..., x_R)`.
- If `axis` is tuple(int), set as (1, 2), and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_0, x_3, ..., x_R)`.
- If `axis` is 1-D Tensor, set as [1, 2], and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_0, x_3, ..., x_R)`.
Raises:
TypeError: If `keep_dims` is not a bool.
TypeError: If `x` is not a Tensor.
TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
ValueError: If `axis` is out of range.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> op = ops.ReduceProd(keep_dims=True)
>>> output = op(x, 1)
>>> result = output.shape
>>> print(result)
(3, 1, 5, 6)
>>> # case 1: Reduces a dimension by multiplying all elements in the dimension.
>>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
>>> output = op(x)
>>> print(output)
[[[2.2833798e+33]]]
>>> print(output.shape)
(1, 1, 1)
>>> # case 2: Reduces a dimension along axis 0.
>>> output = op(x, 0)
>>> print(output)
[[[ 28. 28. 28. 28. 28. 28.]
[ 80. 80. 80. 80. 80. 80.]
[162. 162. 162. 162. 162. 162.]]]
>>> # case 3: Reduces a dimension along axis 1.
>>> output = op(x, 1)
>>> print(output)
[[[ 6. 6. 6. 6. 6. 6.]]
[[120. 120. 120. 120. 120. 120.]]
[[504. 504. 504. 504. 504. 504.]]]
>>> # case 4: Reduces a dimension along axis 2.
>>> output = op(x, 2)
>>> print(output)
[[[1.00000e+00]
[6.40000e+01]
[7.29000e+02]]
[[4.09600e+03]
[1.56250e+04]
[4.66560e+04]]
[[1.17649e+05]
[2.62144e+05]
[5.31441e+05]]]
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('axis', default=()),
)
@prim_arg_register
def __init__(self, keep_dims=False):
self._set_prim_arg("keep_dims", keep_dims)
def __call__(self, x, axis=()):
return super().__call__(x, axis, self.keep_dims)
class MaximumGrad(Primitive):
r"""
Grad for maximum.
"""
@prim_arg_register
def __init__(self, grad_x=True, grad_y=True):
self._set_prim_arg("grad_x", grad_x)
self._set_prim_arg("grad_y", grad_y)
def __call__(self, x, y, grads):
return super().__call__(x, y, grads, self.grad_x, self.grad_y)
class ResizeBilinearV2(Primitive):
r"""
Resizes an image to a certain size using the bilinear interpolation.
The resizing only affects the lower two dimensions which represent the height and width.
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
align_corners (bool, optional): If ``True`` , rescale input by :math:`(new\_height - 1) / (height - 1)`,
which exactly aligns the 4 corners of images and resized images. If ``False`` ,
rescale by :math:`new\_height / height`. Default: ``False`` .
half_pixel_centers (bool, optional): Whether half pixel center. If set to ``True`` , `align_corners` should be
``False`` . Default: ``False`` .
Inputs:
- **x** (Tensor) - Image to be resized. Input images must be a 4-D tensor with shape
:math:`(batch, channels, height, width)`, with data type of float32 or float16.
- **size** (Union[tuple[int], list[int], Tensor]) - The new size of the images.
A tuple or list or Tensor of 2 int elements :math:`(new\_height, new\_width)`.
Outputs:
Tensor, resized image. 4-D with shape :math:`(batch, channels, new\_height, new\_width)`,
with the same data type as input `x`.
Raises:
TypeError: If `align_corners` is not a bool.
TypeError: If `half_pixel_centers` is not a bool.
TypeError: If `align_corners` and `half_pixel_centers` are both ``True`` .
ValueError: If `half_pixel_centers` is ``True`` and device_target is CPU.
ValueError: If dim of `x` is not 4.
ValueError: If `size` is Tensor and its dim is not 1.
ValueError: If `size` contains other than 2 elements.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> x = Tensor([[[[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]]]], mindspore.float32)
>>> output = ops.ResizeBilinearV2()(x, (5, 5))
>>> print(output)
[[[[1. 2. 3. 4. 5.]
[1. 2. 3. 4. 5.]
[1. 2. 3. 4. 5.]
[1. 2. 3. 4. 5.]
[1. 2. 3. 4. 5.]]]]
"""
@prim_arg_register
def __init__(self, align_corners=False, half_pixel_centers=False):
self._set_prim_arg("align_corners", align_corners)
self._set_prim_arg("half_pixel_centers", half_pixel_centers)
def __call__(self, image, size):
return super().__call__(image, size, self.align_corners, self.half_pixel_centers)
class DistCommIsend(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dst, group, tag):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_dist_comm_isend(self, [input, dst, group, tag]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dst, group, tag)
return res
dist_comm_isend_op=DistCommIsend()
class BatchNormExt(Primitive):
r"""
Batch Normalization for input data and updated parameters.
Batch Normalization is widely used in convolutional neural networks. This operation
applies Batch Normalization over inputs to avoid internal covariate shift as described
in the paper `Batch Normalization: Accelerating Deep Network Training by Reducing Internal
Covariate Shift <https://arxiv.org/abs/1502.03167>`_. It rescales and recenters the
features using a mini-batch of data and the learned parameters can be described
in the following formula,
.. math::
y = \frac{x - mean}{\sqrt{variance + \epsilon}} * \gamma + \beta
where :math:`\gamma` is weight, :math:`\beta` is bias, :math:`\epsilon` is epsilon,
:math:`mean` is the mean of :math:`x`,
:math:`variance` is the variance of :math:`x`.
Args:
input (Tensor): Tensor of shape :math:`(N, C, *)`, where :math:`*` means, any number of additional
dimensions, with bfloat16, float16 or float32 data type. For Atlas training products, the shape must be
2-4 dimensions currently.
weight (Tensor): Tensor of shape :math:`(C,)`, with bfloat16, float16 or float32 data type.
bias (Tensor): Tensor of shape :math:`(C,)`, with bfloat16, float16 or float32 data type.
running_mean (Tensor): Tensor of shape :math:`(C,)`, with bfloat16, float16 or float32 data type.
running_var (Tensor): Tensor of shape :math:`(C,)`, with bfloat16, float16 or float32 data type.
training (bool, optional): If `training` is ``True`` , `mean` and `variance` are computed during
training. If `training` is ``False`` , they're loaded from checkpoint during inference. Default: ``False`` .
momentum (float, optional): The hyper parameter to compute moving average for running_mean and
running_var (e.g. :math:`new\_running\_mean = (1 - momentum) * running\_mean + momentum * current\_mean`).
Default: ``0.1``
epsilon (float, optional): A small value added for numerical stability. Default: ``1e-5``.
Returns:
Tensor, the normalized inputs, has the same shape and dtype as `input`.
Raises:
TypeError: If `training` is not a bool.
TypeError: If dtype of `epsilon` or `momentum` is not float.
TypeError: If `input`, `weight`, `bias`, `running_mean` or `running_var` is not a Tensor.
TypeError: If dtype of `input`, `weight` is not bfloat16, float16 or float32.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.ones([2, 2]), mindspore.float32)
>>> weight = Tensor(np.ones([2]), mindspore.float32)
>>> bias = Tensor(np.ones([2]), mindspore.float32)
>>> running_mean = Tensor(np.ones([2]), mindspore.float32)
>>> running_var = Tensor(np.ones([2]), mindspore.float32)
>>> output = ops.batch_norm_ext(input_x, weight, bias, running_mean, running_var)
>>> print(output)
[[1. 1.]
[1. 1.]]
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('weight'),
sig.make_sig('bias'),
sig.make_sig('running_mean', default=None),
sig.make_sig('runnning_var', default=None),
sig.make_sig('training', default=False),
sig.make_sig('momentum', default=0.1),
sig.make_sig('epsilon', default=1e-5),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, weight, bias, running_mean=None, runnning_var=None, training=False, momentum=0.1, epsilon=1e-5):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_batch_norm_ext(self, [input, weight, bias, running_mean, runnning_var, training, momentum, epsilon]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, weight, bias, running_mean, runnning_var, training, momentum, epsilon)
return res
batch_norm_ext_op=BatchNormExt()
class ResizeLinear1D(Primitive):
r"""
Using the linear interpolate method resize the input tensor 'x'.
For general resize, refer to :func:`mindspore.ops.interpolate` for more details.
.. warning::
- This is an experimental API that is subject to change.
- Currently, the Ascend platform only supports scenarios where the input `size` is Tuple or List.
Args:
coordinate_transformation_mode (str): Default is ``'align_corners'`` . Describes how to transform the
coordinate in the resized tensor to the coordinate in the original tensor. The other option is ``'half_pixel'``.
Inputs:
- **x** (Tensor) - A 3-D tensor which to resize, with shape [batch, channel, width]. Must be one of the
following types: float16, float32, float64.
- **size** (Union[Tuple[int], List[int], Tensor[int]]) - describes the new width of `x` .
A tuple or list or 1-D tensor with only one int element :math:`(new\_width)`.
Outputs:
A 3-D tensor which shape is [batch, channel, new_width] with the same type as `x`.
Raises:
TypeError: If dtype of `x` is not in the support list.
TypeError: If `size` is not in Union[Tuple[int], List[int], Tensor[int]].
TypeError: If `coordinate_transformation_mode` is not a string.
TypeError: If `coordinate_transformation_mode` is not in the support list.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> x = Tensor([[[1, 2, 3], [4, 5, 6]]], mindspore.float32)
>>> size = (6,)
>>> resize_linear_1d = ops.ResizeLinear1D(coordinate_transformation_mode="align_corners")
>>> output = resize_linear_1d(x, size)
>>> print(output)
[[[1. 1.4 1.8 2.2 2.6 3.]
[4. 4.4 4.8 5.2 5.6 6.]]]
"""
@prim_arg_register
def __init__(self, coordinate_transformation_mode='align_corners'):
self._set_prim_arg_with_handler("coordinate_transformation_mode", coordinate_transformation_mode, str_to_enum)
def __call__(self, x, size):
return super().__call__(x, size, self.coordinate_transformation_mode)
class Polar(Primitive):
r"""
Converts polar coordinates to Cartesian coordinates.
Returns a complex tensor, its elements are Cartesian coordinates constructed with the polar
coordinates which is specified by radial distance `abs` and polar angle `angle`.
Refer to :func:`mindspore.ops.polar` for more details.
.. math::
y_{i} = abs_{i} * \cos(angle_{i}) + abs_{i} * \sin(angle_{i}) * j
.. warning::
This is an experimental API that is subject to change.
Inputs:
- **abs** (Tensor, float) - Radial distance. Tensor of any dimension,
with dtype required to be float32.
- **angle** (Tensor, float) - Polar angle. It has the same shape and dtype as `abs`.
Outputs:
Tensor, with the same shape as `abs` and the dtype is complex64.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore import ops
>>> x1 = Tensor(np.array([1, 2]), mindspore.float32)
>>> x2 = Tensor(np.array([3, 4]), mindspore.float32)
>>> op_polar = ops.Polar()
>>> output = op_polar(x1, x2)
>>> print(output)
[-0.9899925 +0.14112002j -1.3072872-1.5136049j]
>>> x1 = Tensor(2.1, mindspore.float32)
>>> x2 = Tensor(2.1, mindspore.float32)
>>> output = op_polar(x1, x2)
>>> print(output)
(-1.0601765+1.8127397j)
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, abs, angle):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_polar(self, [abs, angle]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, abs, angle)
return res
polar_op=Polar()
class ReduceStd(Primitive):
r"""
Returns the standard-deviation and mean of the input Tensor along
dimension(s) specified by `axis`.
Note:
The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
Args:
axis (Union[int, tuple(int), list(int), Tensor], optional): The dimensions to reduce.
Default: ``()`` , reduce all dimensions. Only constant value is allowed.
Let `r` be rank of `input_x`, it should be in the range :math:`[-r,r)`.
unbiased (bool, optional): Whether to use Bessel's correction.
If ``True`` , will use the Bessel correction unbiased estimation.
If ``False`` , the biased estimation will be used to calculate the standard deviation.
Default: ``True`` .
keep_dims (bool, optional): Whether the output Tensor has dim retained or not.
If ``True`` , keep these reduced dimensions specified by `axis` and the length is 1.
If ``False`` , don't keep these dimensions.
Default: ``False`` .
Inputs:
- **input_x** (Tensor[Number]) - The input Tensor with shape
:math:`(N, *)` where :math:`*` means any number of additional dimensions.
Supported dtypes: float16, float32.
Outputs:
Tuple(output_std, output_mean) containing the standard deviation and mean.
Raises:
TypeError: If `keep_dims` is not a bool.
TypeError: If `input_x` is not a Tensor.
ValueError: If `axis` is not one of the following: int, tuple, list or Tensor.
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import ops, Tensor
>>> input_x = Tensor(np.array([[1, 2, 3], [-1, 1, 4]]).astype(np.float32))
>>> op = ops.ReduceStd(axis=1, unbiased=True, keep_dims=False)
>>> output = op(input_x)
>>> output_std, output_mean = output[0], output[1]
>>> print(output_std)
[1. 2.5166113]
>>> print(output_mean)
[2. 1.3333334]
"""
@prim_arg_register
def __init__(self, axis=[], unbiased=True, keep_dims=False):
self._set_prim_arg("axis", type_it('ReduceStd', 'axis', axis, (OpDtype.DT_INT, OpDtype.DT_LIST_INT, OpDtype.DT_TENSOR), OpDtype.DT_TUPLE_INT))
self._set_prim_arg("unbiased", unbiased)
self._set_prim_arg("keep_dims", keep_dims)
def __call__(self, x):
return super().__call__(x, self.axis, self.unbiased, self.keep_dims)
class AcoshExt(Primitive):
r"""
.. code-block::
prim = ops.AcoshExt()
out = prim(input)
is equivalent to
.. code-block::
ops.acosh_ext(input)
Refer to :func:`mindspore.ops.acosh_ext` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_acosh_ext(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
acosh_ext_op=AcoshExt()
class UpsampleLinear1D(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('output_size', default=None),
sig.make_sig('scales', default=None),
sig.make_sig('align_corners', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, output_size=None, scales=None, align_corners=False):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_upsample_linear1d(self, [x, output_size, scales, align_corners]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x, output_size, scales, align_corners)
return res
upsample_linear1d_op=UpsampleLinear1D()
class UpsampleNearest1DGrad(Primitive):
r"""
Upsample the 1-D gradient data with the nearest neighbor interpolation algorithm.
Note:
Only one of 'scales' and 'output_size' can be specified, and it is an error if both are specified.
Inputs:
- **dy** (Tensor) - Tensor of shape [N, C, L], Must be one of the following types:
float16, float32, float64.
- **input_size** (tuple[int]): A required tuple[int], which contains 3 elements:
[min_batch, channels, length].
Must: input_size[0] == dy.shape[0], input_size[1] == dy.shape[1].
- **output_size** (tuple[int]): An optional tuple[int]. Default: ``None``.
It contains 1 element: length, which should match the length dimension of `dy`. Must:
dy.shape[2] == output_size[0].
- **scales** (tuple[float]): An optional tuple[float]. Default: ``None``.
The scale array along each dimension, containing 1 element: scale_length. Must:
dy.shape[2] == floor(input_size[2] * scales[0]).
Outputs:
- **dx** (Tensor) - A 3-D tensor. Has the same type as `dy`; its shape is determined by `input_size`.
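Examples:
A minimal shape-only sketch, assuming this grad primitive is reachable as
``ops.auto_generate.UpsampleNearest1DGrad`` like the other operators in this module; the sizes are
illustrative and only the output shape is checked.
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> dy = Tensor(np.ones((1, 1, 8)).astype(np.float32))
>>> op = ops.auto_generate.UpsampleNearest1DGrad()
>>> dx = op(dy, (1, 1, 4), (8,))  # assumed values: input_size=(1, 1, 4), output_size=(8,)
>>> print(dx.shape)
(1, 1, 4)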
"""
__mindspore_signature__ = (
sig.make_sig('dy'),
sig.make_sig('input_size'),
sig.make_sig('output_size', default=None),
sig.make_sig('scales', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, dy, input_size, output_size=None, scales=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_upsample_nearest1d_grad(self, [dy, input_size, output_size, scales]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, dy, input_size, output_size, scales)
return res
upsample_nearest1d_grad_op=UpsampleNearest1DGrad()
class SolveTriangular(Primitive):
r"""
.. code-block::
prim = ops.SolveTriangular()
out = prim(a, b, trans, lower, unit_diagonal)
is equivalent to
.. code-block::
ops.solve_triangular(a, b, trans, lower, unit_diagonal)
Refer to :func:`mindspore.ops.solve_triangular` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('a'),
sig.make_sig('b'),
sig.make_sig('trans', default=0),
sig.make_sig('lower', default=False),
sig.make_sig('unit_diagonal', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, a, b, trans=0, lower=False, unit_diagonal=False):
return super().__call__(a, b, trans, lower, unit_diagonal)
solve_triangular_op=SolveTriangular()
class Xlogy(Primitive):
r"""
Computes the first input tensor multiplied by the logarithm of second input tensor element-wise.
Returns zero when `input` is zero.
.. math::
out_i = input_{i}\ln{other_{i}}
Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
Inputs:
- **input** (Tensor, numbers.Number, bool) - The first input is a numbers.Number or a bool or a tensor whose data
type is `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
`bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
- **other** (Tensor, numbers.Number, bool) - The second input is a numbers.Number or a bool or a tensor whose data
type is `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
`bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
Outputs:
- **y** (Tensor) - the shape is the broadcast of `input` and `other`,
and the data type is the one with higher precision or higher digits among the two inputs.
Raises:
TypeError: If `input` is not a Tensor, number or bool.
TypeError: If `other` is not a Tensor, number or bool.
ValueError: If `input` and `other` can not broadcast.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([-5, 0, 4]), mindspore.float32)
>>> other = Tensor(np.array([2, 2, 2]), mindspore.float32)
>>> op = ops.Xlogy()
>>> output = op(input, other)
>>> print(output)
[-3.465736 0. 2.7725887]
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_xlogy(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
xlogy_op=Xlogy()
class MatrixExp(Primitive):
r"""
.. code-block::
prim = ops.MatrixExp()
out = prim(input)
is equivalent to
.. code-block::
ops.matrix_exp(input)
Refer to :func:`mindspore.ops.matrix_exp` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
matrix_exp_op=MatrixExp()
class ScatterValue(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('dim'),
sig.make_sig('index'),
sig.make_sig('src'),
sig.make_sig('reduce', default='none'),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim, index, src, reduce='none'):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_scatter_value(self, [input, dim, index, src, str_to_enum('ScatterValue', 'reduce', reduce)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim, index, src, str_to_enum('ScatterValue', 'reduce', reduce))
return res
scatter_value_op=ScatterValue()
class MaxDim(Primitive):
r"""
Calculates the maximum value along with the given dim for the input tensor, and returns the maximum values and
indices.
Args:
input (Tensor): The input tensor, can be any dimension. Set the shape of input tensor as
:math:`(input_1, input_2, ..., input_N)` . Complex tensors are not supported.
dim (int): The dimension to reduce.
keepdim (bool, optional): Whether to keep the reduced dimension. If ``True`` , the output keeps the same
number of dimensions as `input`; if ``False`` , the reduced dimension is removed. Default: ``False``.
Returns:
tuple (Tensor), tuple of 2 tensors, containing the maximum value of the self tensor along the given
dimension `dim` and the corresponding index.
- **values** (Tensor) - The maximum value of input tensor, with the same shape as `index`, and same dtype as `input`.
- **index** (Tensor) - The index for the maximum value of the input tensor, with dtype int64. If `keepdim`
is ``True`` , the shape of output tensors is :math:`(input_1, input_2, ..., input_{dim-1}, 1, input_{dim+1}, ..., input_N)`.
Otherwise, the shape is :math:`(input_1, input_2, ..., input_{dim-1}, input_{dim+1}, ..., input_N)` .
Raises:
TypeError: If `input` is not Tensor.
TypeError: If `keepdim` is not a bool.
TypeError: If `dim` is not an int.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
>>> output, index = ops.auto_generate.MaxDim()(x, 0, False)
>>> print(output, index)
0.7 3
>>> output, index = ops.auto_generate.MaxDim()(x, 0, True)
>>> print(output, index)
[0.7] [3]
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('dim'),
sig.make_sig('keepdim', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim, keepdim=False):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_max_dim(self, [input, dim, keepdim]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim, keepdim)
return res
max_dim_op=MaxDim()
class Conv3DPadding(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('weight'),
sig.make_sig('bias', default=None),
sig.make_sig('stride', default=1),
sig.make_sig('padding', default='valid'),
sig.make_sig('dilation', default=1),
sig.make_sig('groups', default=1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, weight, bias=None, stride=1, padding='valid', dilation=1, groups=1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_conv3d_padding(self, [input, weight, bias, stride, str_to_enum('Conv3DPadding', 'padding', padding), dilation, groups]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, weight, bias, stride, str_to_enum('Conv3DPadding', 'padding', padding), dilation, groups)
return res
conv3d_padding_op=Conv3DPadding()
class BroadcastTo(Primitive):
r"""
.. code-block::
prim = ops.BroadcastTo(shape)
out = prim(input)
is equivalent to
.. code-block::
ops.broadcast_to(input, shape)
Refer to :func:`mindspore.ops.broadcast_to` for more details.
"""
@prim_arg_register
def __init__(self, shape):
self._set_prim_arg("shape", type_it('BroadcastTo', 'shape', shape, (OpDtype.DT_LIST_INT, OpDtype.DT_TENSOR), OpDtype.DT_TUPLE_INT))
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_broadcast_to(self, [input, self.shape]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, self.shape)
return res
class FillTensor(Primitive):
r"""
Create a Tensor of the specified shape and fill it with the specified tensor value.
Args:
size (Union[tuple[int], list[int]]): The specified shape of the output tensor.
fill_value (Tensor): Value to fill the returned tensor. Complex numbers are not supported for now. Must be a
scalar Tensor or a 1-D Tensor with shape [1].
Keyword Args:
dtype (mindspore.dtype): The specified type of output tensor. `bool_` and `number` are supported, for
details, please refer to :class:`mindspore.dtype` . Default: ``None`` .
Returns:
Tensor.
Raises:
TypeError: If `size` is not a tuple or list.
ValueError: If any element in `size` is less than 0.
Supported Platforms:
``Ascend``
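Examples:
A minimal usage sketch, assuming this primitive is reachable as ``ops.auto_generate.FillTensor``
like the other operators in this module; the printed formatting is approximate.
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> fill_value = Tensor(3.0, mindspore.float32)  # scalar Tensor used as the fill value
>>> output = ops.auto_generate.FillTensor()((2, 3), fill_value)
>>> print(output)
[[3. 3. 3.]
[3. 3. 3.]]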
"""
__mindspore_signature__ = (
sig.make_sig('size'),
sig.make_sig('fill_value'),
sig.make_sig('dtype', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, size, fill_value, dtype=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_fill_tensor(self, [size, fill_value, dtype if dtype is None else dtype_to_type_id('FillTensor', 'dtype', dtype)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, size, fill_value, dtype if dtype is None else dtype_to_type_id('FillTensor', 'dtype', dtype))
return res
fill_tensor_op=FillTensor()
class PowScalarTensor(Primitive):
r"""
Calculates the `exponent` power of `input`.
When `exponent` is a Tensor, the shapes of `input` and `exponent` must be broadcastable.
.. math::
out_{i} = input_{i} ^{ exponent_{i}}
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
input (Number): The first input is a Number.
exponent (Tensor): The second input is a tensor whose data type is
`number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
`bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
Returns:
Tensor, the shape is the same as the one after broadcasting,
and the data type is the one with higher precision or higher digits among the two inputs.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = 3.0
>>> exponent = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
>>> output = ops.pow_ext(input, exponent)
>>> print(output)
[ 3. 9. 81.]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, exponent):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_pow_scalar_tensor(self, [input, exponent]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, exponent)
return res
pow_scalar_tensor_op=PowScalarTensor()
class FloorDivScalar(Primitive):
r"""
.. code-block::
prim = ops.FloorDivScalar()
out = prim(input, other)
is equivalent to
.. code-block::
ops.floor_div_scalar(input, other)
Refer to :func:`mindspore.ops.floor_div_scalar` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_floor_div_scalar(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
floor_div_scalar_op=FloorDivScalar()
class Addcdiv(Primitive):
r"""
Adds the element-wise division of `x1` by `x2`, multiplied by `value` to `input_data`.
It computes the following operation:
.. math::
y[i] = input\_data[i] + value[i] * (x1[i] / x2[i])
Inputs:
- **input_data** (Tensor) - The tensor to be added.
- **x1** (Tensor) - The numerator tensor.
- **x2** (Tensor) - The denominator tensor.
- **value** (Tensor) - The multiplier for tensor x1/x2.
Outputs:
Tensor, has the same shape and dtype as x1/x2.
Raises:
TypeError: If `x1`, `x2`, `value` or `input_data` is not a Tensor.
TypeError: If the dtypes of `x1`, `x2`, `value` and `input_data` are not the same.
ValueError: If `x1` could not be broadcast to `x2`.
ValueError: If `value` could not be broadcast to `x1/x2`.
ValueError: If `input_data` could not be broadcast to `value*(x1/x2)`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_data = Tensor(np.array([1, 1, 1, 1]), mindspore.float32)
>>> x1 = Tensor(np.array([1, 2, 3, 4]), mindspore.float32)
>>> x2 = Tensor(np.array([4, 3, 2, 1]), mindspore.float32)
>>> value = Tensor([1], mindspore.float32)
>>> addcdiv = ops.Addcdiv()
>>> y = addcdiv(input_data, x1, x2, value)
>>> print(y)
[1.25 1.6666667 2.5 5. ]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, tensor1, tensor2, value):
return super().__call__(input, tensor1, tensor2, value)
addcdiv_op=Addcdiv()
class FFTShift(Primitive):
r"""
.. code-block::
prim = ops.FFTShift()
out = prim(input, dim)
is equivalent to
.. code-block::
ops.fftshift(input, dim)
Refer to :func:`mindspore.ops.fftshift` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('dim', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim=None):
return super().__call__(input, dim)
fftshift_op=FFTShift()
class OnesLikeExt(Primitive):
r"""
Returns a Tensor filled with the value 1, whose shape and data type are the same as the input.
Refer to :func:`mindspore.ops.ones_like` for more details.
Args:
- **input** (Tensor) - Tensor of any dimension.
- **dtype** (mindspore.dtype, optional) - The specified dtype of the output Tensor. Default: ``None`` ,
meaning the output has the same dtype as `input`.
Returns:
Tensor, filled with ones, with the same shape as `input`; the dtype is `dtype` if specified, otherwise the same as `input`.
Supported Platforms:
``Ascend``
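Examples:
A minimal usage sketch, assuming this primitive is reachable as ``ops.auto_generate.OnesLikeExt``
like the other operators in this module.
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([[0, 1], [2, 1]]), mindspore.int32)
>>> output = ops.auto_generate.OnesLikeExt()(input)  # dtype left as None, output keeps int32
>>> print(output)
[[1 1]
[1 1]]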
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('dtype', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dtype=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_ones_like_ext(self, [input, dtype if dtype is None else dtype_to_type_id('OnesLikeExt', 'dtype', dtype)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dtype if dtype is None else dtype_to_type_id('OnesLikeExt', 'dtype', dtype))
return res
ones_like_ext_op=OnesLikeExt()
class Cummin(Primitive):
r"""
Returns the cumulative minimum of elements and the index.
.. warning::
This is an experimental API that is subject to change or deletion.
Refer to :func:`mindspore.ops.cummin` for more detail.
Args:
axis (int): The axis to accumulate the tensor's value. Must be in the range [-rank(input), rank(input)).
Inputs:
- **input** (Tensor) - The input tensor.
Outputs:
A tuple of 2 Tensors(values, indices), containing the cumulative minimum of elements and the index,
the shape of each output tensor is the same as that of `input`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> from mindspore import Tensor, ops
>>> import mindspore
>>> a = Tensor([-0.2284, -0.6628, 0.0975, 0.2680, -1.3298, -0.4220], mindspore.float32)
>>> axis = 0
>>> output = ops.Cummin(axis)(a)
>>> print(output[0])
[-0.2284 -0.6628 -0.6628 -0.6628 -1.3298 -1.3298]
>>> print(output[1])
[0 1 1 1 4 4]
"""
@prim_arg_register
def __init__(self, axis):
self._set_prim_arg("axis", axis)
def __call__(self, input):
return super().__call__(input, self.axis)
class AdaptiveAvgPool3DExt(Primitive):
r"""
.. code-block::
prim = ops.AdaptiveAvgPool3DExt()
out = prim(input, output_size)
is equivalent to
.. code-block::
ops.adaptive_avg_pool3d_ext(input, output_size)
Refer to :func:`mindspore.ops.adaptive_avg_pool3d_ext` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, output_size):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_adaptive_avg_pool3d_ext(self, [input, output_size]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, output_size)
return res
adaptive_avg_pool3d_ext_op=AdaptiveAvgPool3DExt()
class ReflectionPad1DGrad(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, grad_output, input, padding):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_reflection_pad_1d_grad(self, [grad_output, input, padding]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, grad_output, input, padding)
return res
reflection_pad_1d_grad_op=ReflectionPad1DGrad()
class AsinhExt(Primitive):
r"""
.. code-block::
prim = ops.AsinhExt()
out = prim(input)
is equivalent to
.. code-block::
ops.asinh_ext(input)
Refer to :func:`mindspore.ops.asinh_ext` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_asinh_ext(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
asinh_ext_op=AsinhExt()
class IFFTShift(Primitive):
r"""
.. code-block::
prim = ops.IFFTShift()
out = prim(input, dim)
is equivalent to
.. code-block::
ops.ifftshift(input, dim)
Refer to :func:`mindspore.ops.ifftshift` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('dim', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim=None):
return super().__call__(input, dim)
ifftshift_op=IFFTShift()
class Abs(Primitive):
r"""
.. code-block::
prim = ops.Abs()
out = prim(input)
is equivalent to
.. code-block::
ops.abs(input)
Refer to :func:`mindspore.ops.abs` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_abs(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
abs_op=Abs()
class RandLikeExt(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('tensor'),
sig.make_sig('seed'),
sig.make_sig('offset'),
sig.make_sig('dtype', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, tensor, seed, offset, dtype=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_rand_like_ext(self, [tensor, seed, offset, dtype if dtype is None else dtype_to_type_id('RandLikeExt', 'dtype', dtype)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, tensor, seed, offset, dtype if dtype is None else dtype_to_type_id('RandLikeExt', 'dtype', dtype))
return res
rand_like_ext_op=RandLikeExt()
class Minimum(Primitive):
r"""
.. code-block::
prim = ops.Minimum()
out = prim(input, other)
is equivalent to
.. code-block::
ops.minimum(input, other)
Refer to :func:`mindspore.ops.minimum` for more details.
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_minimum(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
minimum_op=Minimum()
class InplaceHardtanh(Primitive):
r"""
.. code-block::
prim = ops.InplaceHardtanh()
out = prim(input, min_val, max_val)
is equivalent to
.. code-block::
ops.inplace_hardtanh(input, min_val, max_val)
Refer to :func:`mindspore.ops.inplace_hardtanh` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('min_val', default=-1),
sig.make_sig('max_val', default=1),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, min_val=-1, max_val=1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_hardtanh(self, [input, min_val, max_val]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, min_val, max_val)
return res
inplace_hardtanh_op=InplaceHardtanh()
class Log(Primitive):
r"""
.. code-block::
prim = ops.Log()
out = prim(input)
is equivalent to
.. code-block::
ops.log(input)
Refer to :func:`mindspore.ops.log` for more details.
"""
@prim_arg_register
def __init__(self):
self.add_prim_attr("cust_aicpu", 'Log')
self.add_prim_attr("base", -1.0)
self.add_prim_attr("scale", 1.0)
self.add_prim_attr("shift", 0.0)
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_log(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
log_op=Log()
class UpsampleBilinear2D(Primitive):
r"""
Performs upsampling with bilinear interpolation across 2 dims for a 4-dim input Tensor.
This operator scales up the input with the specified `output_size` or `scales` factors,
using the bilinear upscaling algorithm.
Note:
One of `scales` and `output_size` must be specified. And it is an error if both are specified.
Inputs:
- **x** (Tensor) - 4D tensor of shape :math:`(N, C, H_{in}, W_{in})`. Supporting types:
float16, float32, float64.
- **output_size** (Union[tuple[int], list[int]]): A tuple or list of 2 int elements
:math:`(output\_height, output\_width)`. Default: ``None``.
- **scales** (Union[tuple[float], list[float]]): A tuple or list of 2 float
elements :math:`(scale\_height, scale\_width)`. Default: ``None``.
- **align_corners** (bool, optional): An optional bool. Default: ``False``.
If ``True``, the input and output tensors are aligned by the center points of their corner pixels,
preserving the values at the corner pixels.
If ``False`` , the input and output tensors are aligned by the corner points of their corner pixels,
and the interpolation uses edge value padding for out-of-boundary values.
Outputs:
- **y** (Tensor) - Upsampled output with the same data type as `x`, whose shape is
:math:`(N, C, H_{out}, W_{out})`.
Raises:
TypeError: When `output_size` is not ``None`` and `output_size` is not list[int] or tuple[int].
TypeError: When `scales` is not ``None`` and `scales` is not list[float] or tuple[float].
TypeError: If dtype of `x` is not in [float16, float32, float64].
TypeError: If type of `align_corners` is not bool.
ValueError: If any value of `output_size` is negative or zero when `output_size` is not ``None``.
ValueError: If any value of `scales` is negative or zero when `scales` is not ``None``.
ValueError: If shape of `x` is not 4D.
ValueError: If none of `scales` and `output_size` is specified or both specified.
ValueError: If size of `scales` is not equal 2 when `scales` is specified.
ValueError: If size of `output_size` is not equal 2 when `output_size` is specified.
Supported Platforms:
``Ascend``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> net = ops.auto_generate.UpsampleBilinear2D()
>>> in_x = Tensor(input_data=np.random.randn(2, 3, 512, 256))
>>> output_size=[64, 48]
>>> out = net(in_x, output_size, None)
>>> print(out.shape)
(2, 3, 64, 48)
>>>
>>> net = ops.auto_generate.UpsampleBilinear2D()
>>> in_x = Tensor(np.array([[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], [[0.7, 0.8, 0.9], [1.0, 1.1, 1.2]]]]).astype(np.float32))
>>> output_size=[4, 5]
>>> out = net(in_x, output_size, None, True)
>>> print(out)
[[[[0.1000, 0.1500, 0.2000, 0.2500, 0.3000],
[0.2000, 0.2500, 0.3000, 0.3500, 0.4000],
[0.3000, 0.3500, 0.4000, 0.4500, 0.5000],
[0.4000, 0.4500, 0.5000, 0.5500, 0.6000]],
[[0.7000, 0.7500, 0.8000, 0.8500, 0.9000],
[0.8000, 0.8500, 0.9000, 0.9500, 1.0000],
[0.9000, 0.9500, 1.0000, 1.0500, 1.1000],
[1.0000, 1.0500, 1.1000, 1.1500, 1.2000]]]]
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('output_size', default=None),
sig.make_sig('scales', default=None),
sig.make_sig('align_corners', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, output_size=None, scales=None, align_corners=False):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_upsample_bilinear2d(self, [x, output_size, scales, align_corners]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x, output_size, scales, align_corners)
return res
upsample_bilinear2d_op=UpsampleBilinear2D()
class NextAfter(Primitive):
r"""
.. code-block::
prim = ops.NextAfter()
out = prim(input, other)
is equivalent to
.. code-block::
ops.nextafter(input, other)
Refer to :func:`mindspore.ops.nextafter` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
return super().__call__(input, other)
next_after_op=NextAfter()
class Frac(Primitive):
r"""
.. code-block::
prim = ops.Frac()
out = prim(input)
is equivalent to
.. code-block::
ops.frac_ext(input)
Refer to :func:`mindspore.ops.frac_ext` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_frac(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
frac_op=Frac()
class MaxPoolWithIndices(Primitive):
r"""
Performs max pooling on the input Tensor and returns both max values and indices.
Typically the input is of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})`, MaxPool outputs
regional maximum in the :math:`(H_{in}, W_{in})`-dimension. Given kernel size
:math:`(h_{ker}, w_{ker})` and stride :math:`(s_0, s_1)`, the operation is as follows:
.. math::
\text{output}(N_i, C_j, h, w) = \max_{m=0, \ldots, h_{ker}-1} \max_{n=0, \ldots, w_{ker}-1}
\text{input}(N_i, C_j, s_0 \times h + m, s_1 \times w + n)
.. warning::
This is an experimental API that is subject to change or deletion. It is only supported on Atlas training series products.
Args:
kernel_size (Union[int, tuple[int]]): The size of kernel used to take the maximum value and argmax
value, is an int number that represents height and width of the kernel, or a tuple of
two int numbers that represent height and width respectively.
strides (Union[int, tuple[int]], optional): The distance of kernel moving, an int number that represents
not only the height of movement but also the width of movement, or a tuple of two int numbers that
represent height and width of movement respectively. Default: ``None`` , meaning that
`strides = kernel_size`.
pads (Union[int, tuple[int]], optional): An int number that represents the padding added to both the
height and the width dimensions, or a tuple of two int numbers that represent the padding on the
height and width respectively.
Default: 0.
dilation (Union[int, tuple[int]], optional): Control the stride of elements in the kernel. Default: ``(1, 1)`` .
ceil_mode (bool, optional): Whether to use ceil instead of floor to calculate output shape. Default: ``False`` .
argmax_type (mindspore.dtype, optional) : The dtype for argmax.
Default: ``mstype.int64`` . [Disabled in Ascend.]
Inputs:
- **x** (Tensor) - Tensor of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})` with data type of float32 in Ascend.
Outputs:
Tuple of 2 Tensors, representing the maxpool result and where the max values are generated.
- **output** (Tensor) - Maxpooling result, with shape :math:`(N_{out}, C_{out}, H_{out}, W_{out})`.
It has the same data type as `x`.
.. math::
H_{out} = \left\lfloor\frac{H_{in} + 2 * \text{pads[0]} - \text{dilation[0]}
\times (\text{kernel_size[0]} - 1) - 1}{\text{strides[0]}} + 1\right\rfloor
.. math::
W_{out} = \left\lfloor\frac{W_{in} + 2 * \text{pads[1]} - \text{dilation[1]}
\times (\text{kernel_size[1]} - 1) - 1}{\text{strides[1]}} + 1\right\rfloor
- **argmax** (Tensor) - Index corresponding to the maximum value. Data type is int32 in Ascend.
Raises:
TypeError: If `x` is not a Tensor.
ValueError: If length of shape of `x` is not equal to 4.
TypeError: If `kernel_size` , `strides` , `pads` or `dilation` is not int or tuple.
ValueError: If `kernel_size`, `strides` or `dilation` is less than 1.
ValueError: If `pads` is less than 0.
ValueError: If `pads` is more than half of `kernel_size`.
TypeError: If `ceil_mode` is not bool.
Supported Platforms:
``Ascend``
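Examples:
A minimal shape-only sketch, assuming this primitive is reachable as
``ops.auto_generate.MaxPoolWithIndices`` like the other operators in this module; only the output
shapes are checked, since the exact argmax layout is backend-specific.
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.arange(16).reshape(1, 1, 4, 4).astype(np.float32))
>>> op = ops.auto_generate.MaxPoolWithIndices(kernel_size=2, strides=2)
>>> output, argmax = op(x)  # 2x2 windows with stride 2 halve the spatial dimensions
>>> print(output.shape, argmax.shape)
(1, 1, 2, 2) (1, 1, 2, 2)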
"""
@prim_arg_register
def __init__(self, kernel_size, strides=None, pads=0, dilation=(1, 1), ceil_mode=False, argmax_type=mstype.int64):
self._set_prim_arg_with_handler("kernel_size", kernel_size, to_kernel_size)
self._set_prim_arg_with_handler("strides", strides, to_strides)
self._set_prim_arg_with_handler("pads", pads, to_output_padding)
self._set_prim_arg_with_handler("dilation", dilation, to_dilations)
self._set_prim_arg("ceil_mode", ceil_mode)
self._set_prim_arg_with_handler("argmax_type", argmax_type, dtype_to_type_id)
def __call__(self, x):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_max_pool_with_indices(self, [x, self.kernel_size, self.strides, self.pads, self.dilation, self.ceil_mode, self.argmax_type]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x, self.kernel_size, self.strides, self.pads, self.dilation, self.ceil_mode, self.argmax_type)
return res
class Pow(Primitive):
r"""
.. code-block::
prim = ops.Pow()
out = prim(input, exponent)
is equivalent to
.. code-block::
ops.pow(input, exponent)
Refer to :func:`mindspore.ops.pow` for more details.
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, exponent):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_pow(self, [input, exponent]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, exponent)
return res
pow_op=Pow()
class LayerNormV3(Primitive):
r"""
Applies the Layer Normalization to the input tensor.
This operator will normalize the input tensor on given axis. LayerNormV3 is described in the paper
`Layer Normalization <https://arxiv.org/abs/1607.06450>`_.
.. math::
y = \frac{x - mean}{\sqrt{variance + \epsilon}} * \gamma + \beta
where :math:`\gamma` is scale, :math:`\beta` is bias, :math:`\epsilon` is epsilon.
Args:
begin_norm_axis (int): The begin axis of the `input_x` to apply LayerNormV3,
the value must be in [-1, rank(input_x)). Default: ``1`` .
begin_params_axis (int): The begin axis of the parameter input (`gamma`, `beta`) to
apply LayerNormV3, the value must be in [-1, rank(input_x)). Default: ``1`` .
epsilon (float): A value added to the denominator for numerical stability(:math:`\epsilon`). Default: ``1e-7`` .
Inputs:
- **input_x** (Tensor) - Tensor with shape :math:`(N, \ldots)`.
The input of LayerNormV3. Supported dtypes: float16, float32, bfloat16.
- **gamma** (Tensor) - Tensor with shape `input_x_shape[begin_params_axis:]`.
- **beta** (Tensor) - Tensor with shape `input_x_shape[begin_params_axis:]`.
Outputs:
tuple[Tensor], tuple of 3 tensors, the normalized input and the updated parameters.
- **output_x** (Tensor) - The normalized input, has the same type and shape as the `input_x`.
- **mean** (Tensor) - The first `begin_norm_axis` dimensions of `mean` shape is the same as `input_x`,
and the remaining dimensions are 1. Suppose the shape of the `input_x` is :math:`(x_1, x_2, \ldots, x_R)`,
the shape of the `mean` is :math:`(x_1, \ldots, x_{begin\_norm\_axis}, 1, \ldots, 1)`
(when `begin_norm_axis=0`, the shape of `mean` is :math:`(1, \ldots, 1)` ).
- **rstd** (Tensor) - Shape is the same as `mean` .
Raises:
TypeError: If `begin_norm_axis` or `begin_params_axis` is not an int.
TypeError: If `epsilon` is not a float.
TypeError: If `input_x`, `gamma` or `beta` is not a Tensor.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.array([[1, 2, 3], [1, 2, 3]]), mindspore.float32)
>>> gamma = Tensor(np.ones([3]), mindspore.float32)
>>> beta = Tensor(np.ones([3]), mindspore.float32)
>>> layer_norm = ops.LayerNormV3()
>>> output, mean, variance = layer_norm(input_x, gamma, beta)
>>> print(output)
[[-0.22474468 1. 2.22474468]
[-0.22474468 1. 2.22474468]]
>>> print(mean)
[[2.]
[2.]]
>>> print(variance)
[[1.2247447]
[1.2247447]]
"""
@prim_arg_register
def __init__(self, begin_norm_axis=1, begin_params_axis=1, epsilon=1e-7):
self._set_prim_arg("begin_norm_axis", begin_norm_axis)
self._set_prim_arg("begin_params_axis", begin_params_axis)
self._set_prim_arg("epsilon", epsilon)
def __call__(self, input_x, gamma, beta):
return super().__call__(input_x, gamma, beta, self.begin_norm_axis, self.begin_params_axis, self.epsilon)
class BitwiseAndScalar(Primitive):
r"""
Returns bitwise `and` of tensor and scalar element-wise.
Inputs:
- **input** (Tensor) - The input tensor must be of integral or Boolean types.
- **other** (number.Number) - The second input scalar with same type as the `input`.
Outputs:
Tensor, has the same type as the `input`.
Supported Platforms:
``Ascend``
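Examples:
A minimal usage sketch, assuming this primitive is reachable as ``ops.auto_generate.BitwiseAndScalar``
like the other operators in this module.
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> output = ops.auto_generate.BitwiseAndScalar()(input, 1)  # element-wise AND with the scalar 1
>>> print(output)
[1 0 1]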
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_bitwise_and_scalar(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
bitwise_and_scalar_op=BitwiseAndScalar()
class Range(Primitive):
r"""
.. code-block::
prim = ops.Range(maxlen)
out = prim(start, end, step)
is equivalent to
.. code-block::
ops.range(start, end, step, maxlen)
Refer to :func:`mindspore.ops.range` for more details.
"""
@prim_arg_register
def __init__(self, maxlen=1000000):
self._set_prim_arg("maxlen", maxlen)
def __call__(self, start, end, step):
return super().__call__(start, end, step, self.maxlen)
class Mv(Primitive):
r"""
.. code-block::
prim = ops.Mv()
out = prim(input, vec)
is equivalent to
.. code-block::
ops.mv(input, vec)
Refer to :func:`mindspore.ops.mv` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, vec):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_mv(self, [input, vec]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, vec)
return res
mv_op=Mv()
class DivMod(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input', dtype=sig.sig_dtype.T),
sig.make_sig('other', dtype=sig.sig_dtype.T),
sig.make_sig('rounding_mode', dtype=sig.sig_dtype.T1, default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other, rounding_mode=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_divmod(self, [input, other, rounding_mode if rounding_mode is None else str_to_enum('DivMod', 'rounding_mode', rounding_mode)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other, rounding_mode if rounding_mode is None else str_to_enum('DivMod', 'rounding_mode', rounding_mode))
return res
divmod_op=DivMod()
class InplaceDivMod(Primitive):
r"""
.. code-block::
prim = ops.InplaceDivMod()
out = prim(input, other, rounding_mode)
is equivalent to
.. code-block::
ops.divmod_tensor_(input, other, rounding_mode)
Refer to :func:`mindspore.ops.divmod_tensor_` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('other'),
sig.make_sig('rounding_mode', default=None),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, other, rounding_mode=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_divmod(self, [input, other, rounding_mode if rounding_mode is None else str_to_enum('InplaceDivMod', 'rounding_mode', rounding_mode)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other, rounding_mode if rounding_mode is None else str_to_enum('InplaceDivMod', 'rounding_mode', rounding_mode))
return res
inplace_divmod_op=InplaceDivMod()
class InnerCommAllReduce(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, op_type, group):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inner_comm_all_reduce(self, [input, op_type, group]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, op_type, group)
return res
inner_comm_all_reduce_op=InnerCommAllReduce()
class GeLUGrad(Primitive):
r"""
Gradients of GeLU operation.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, dy, x, y):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_gelu_grad(self, [dy, x, y]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, dy, x, y)
return res
gelu_grad_op=GeLUGrad()
class BitwiseOrScalar(Primitive):
r"""
Returns bitwise `or` of tensor and scalar element-wise.
Inputs:
- **input** (Tensor) - The input tensor must be of integral or Boolean types.
- **other** (number.Number) - The second input scalar with same type as the `input`.
Outputs:
Tensor, has the same type as the `input`.
Supported Platforms:
``Ascend``
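Examples:
A minimal usage sketch, assuming this primitive is reachable as ``ops.auto_generate.BitwiseOrScalar``
like the other operators in this module.
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> output = ops.auto_generate.BitwiseOrScalar()(input, 1)  # element-wise OR with the scalar 1
>>> print(output)
[1 3 3]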
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_bitwise_or_scalar(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
bitwise_or_scalar_op=BitwiseOrScalar()
class SoftMarginLoss(Primitive):
r"""
.. code-block::
prim = ops.SoftMarginLoss(reduction)
out = prim(input, target)
is equivalent to
.. code-block::
ops.soft_margin_loss(input, target, reduction)
Refer to :func:`mindspore.ops.soft_margin_loss` for more details.
"""
@prim_arg_register
def __init__(self, reduction='mean'):
self._set_prim_arg_with_handler("reduction", reduction, str_to_enum)
def __call__(self, input, target):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_soft_margin_loss(self, [input, target, self.reduction]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, target, self.reduction)
return res
class FFNExt(Primitive):
r"""
.. code-block::
prim = ops.FFNExt(activation, inner_precise)
out = prim(x, weight1, weight2, expertTokens, bias1, bias2, scale, offset, deqScale1, deqScale2, antiquant_scale1, antiquant_scale2, antiquant_offset1, antiquant_offset2)
is equivalent to
.. code-block::
ops.ffn_ext(x, weight1, weight2, expertTokens, bias1, bias2, scale, offset, deqScale1, deqScale2, antiquant_scale1, antiquant_scale2, antiquant_offset1, antiquant_offset2, activation, inner_precise)
Refer to :func:`mindspore.ops.ffn_ext` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('weight1'),
sig.make_sig('weight2'),
sig.make_sig('expertTokens', default=None),
sig.make_sig('bias1', default=None),
sig.make_sig('bias2', default=None),
sig.make_sig('scale', default=None),
sig.make_sig('offset', default=None),
sig.make_sig('deqScale1', default=None),
sig.make_sig('deqScale2', default=None),
sig.make_sig('antiquant_scale1', default=None),
sig.make_sig('antiquant_scale2', default=None),
sig.make_sig('antiquant_offset1', default=None),
sig.make_sig('antiquant_offset2', default=None),
)
@prim_arg_register
def __init__(self, activation='fastgelu', inner_precise=0):
self._set_prim_arg_with_handler("activation", activation, str_to_enum)
self._set_prim_arg("inner_precise", inner_precise)
def __call__(self, x, weight1, weight2, expertTokens=None, bias1=None, bias2=None, scale=None, offset=None, deqScale1=None, deqScale2=None, antiquant_scale1=None, antiquant_scale2=None, antiquant_offset1=None, antiquant_offset2=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_ffn_ext(self, [x, weight1, weight2, expertTokens, bias1, bias2, scale, offset, deqScale1, deqScale2, antiquant_scale1, antiquant_scale2, antiquant_offset1, antiquant_offset2, self.activation, self.inner_precise]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x, weight1, weight2, expertTokens, bias1, bias2, scale, offset, deqScale1, deqScale2, antiquant_scale1, antiquant_scale2, antiquant_offset1, antiquant_offset2, self.activation, self.inner_precise)
return res
class InplaceFillScalar(Primitive):
r"""
.. code-block::
prim = ops.InplaceFillScalar()
out = prim(input, value)
is equivalent to
.. code-block::
ops.inplace_fill_scalar(input, value)
Refer to :func:`mindspore.ops.inplace_fill_scalar` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('value'),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, value):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_fill_scalar(self, [input, value]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, value)
return res
inplace_fill_scalar_op=InplaceFillScalar()
class ResizeLinear1DGrad(Primitive):
r"""
Compute gradient of `ResizeLinear1D` operator.
.. warning::
This is an experimental API that is subject to change.
Args:
grads (Tensor): A Tensor of type float. 3-D with shape [batch, channel, width].
x (Tensor): A origin input Tensor. 3-D with shape [batch, channel, orig_width], The origin tensor that was resized.
coordinate_transformation_mode (string): Default is 'align_corners'. Describes how to transform the coordinate
in the resized tensor to the coordinate in the original tensor. Another option: 'half_pixel'.
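Examples:
A minimal shape-only sketch, assuming this grad primitive is reachable as
``ops.auto_generate.ResizeLinear1DGrad`` like the other operators in this module; the gradient with
respect to `x` has the same shape as `x`, and only that shape is checked.
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> grads = Tensor(np.ones((1, 2, 6)), mindspore.float32)  # gradients w.r.t. the resized output
>>> x = Tensor(np.ones((1, 2, 3)), mindspore.float32)  # original (pre-resize) input
>>> op = ops.auto_generate.ResizeLinear1DGrad(coordinate_transformation_mode="align_corners")
>>> dx = op(grads, x)
>>> print(dx.shape)
(1, 2, 3)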
"""
@prim_arg_register
def __init__(self, coordinate_transformation_mode='align_corners'):
self._set_prim_arg_with_handler("coordinate_transformation_mode", coordinate_transformation_mode, str_to_enum)
def __call__(self, grads, x):
return super().__call__(grads, x, self.coordinate_transformation_mode)
class Log10(Primitive):
r"""
.. code-block::
prim = ops.Log10()
out = prim(input)
is equivalent to
.. code-block::
ops.log10_ext(input)
Refer to :func:`mindspore.ops.log10_ext` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_log10(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
log10_op=Log10()
class Trace(Primitive):
r"""
.. code-block::
prim = ops.Trace()
out = prim(input)
is equivalent to
.. code-block::
ops.trace(input)
Refer to :func:`mindspore.ops.trace` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
trace_op=Trace()
class InnerNonZero(Primitive):
r"""
Return a Tensor of the positions of all non-zero values.
.. warning::
This is an internal and non-standard interface. The target interface is aclnn.
Inputs:
- **input** (Tensor) - The input Tensor.
Outputs:
Tensor, a 2-D Tensor whose data type is int64, containing the positions of all non-zero values of the input.
If the dimension of `input` is `D` and the number of non-zero in `input` is `N`, then the shape of output is `D*N` .
Raises:
TypeError: If `input` is not Tensor.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([1, 0, 2, 0, 3]), mindspore.int32)
>>> output = ops.auto_generate.InnerNonZero()(input)
>>> print(output)
[[0 2 4]]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inner_non_zero(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
inner_non_zero_op=InnerNonZero()
class DistCommAllGatherIntoTensor(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, other, input, rank_size, group):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_dist_comm_all_gather_into_tensor(self, [other, input, rank_size, group]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, other, input, rank_size, group)
return res
dist_comm_all_gather_into_tensor_op=DistCommAllGatherIntoTensor()
class NewOnes(Primitive):
r"""
Return a tensor of `size` filled with ones.
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
size (Union[int, tuple(int), list(int)]): An int, list or tuple of integers defining the output shape.
dtype (:class:`mindspore.dtype`, optional): The desired dtype of the output tensor. If None, the returned
tensor has the same dtype as `input`. Default: ``None``.
Inputs:
- **input** (Tensor) - Tensor of any dimension.
Outputs:
Tensor, the shape and dtype is defined above and filled with ones.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If `size` is neither an int nor a tuple/list of int.
TypeError: If `dtype` is not a MindSpore dtype.
ValueError: If `size` contains negative values.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> x = Tensor((), mindspore.int32)
>>> ops.auto_generate.NewOnes()(x, (2, 3))
Tensor(shape=[2, 3], dtype=Int32, value=
[[1, 1, 1],
[1, 1, 1]])
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('size'),
sig.make_sig('dtype', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, size, dtype=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_new_ones(self, [input, size, dtype if dtype is None else dtype_to_type_id('NewOnes', 'dtype', dtype)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, size, dtype if dtype is None else dtype_to_type_id('NewOnes', 'dtype', dtype))
return res
new_ones_op=NewOnes()
class Contiguous(Primitive):
r"""
.. code-block::
prim = ops.Contiguous()
out = prim(input)
is equivalent to
.. code-block::
ops.contiguous(input)
Refer to :func:`mindspore.ops.contiguous` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_contiguous(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
contiguous_op=Contiguous()
class InplaceElu(Primitive):
r"""
.. code-block::
prim = ops.InplaceElu()
out = prim(input, alpha)
is equivalent to
.. code-block::
ops.inplace_elu(input, alpha)
Refer to :func:`mindspore.ops.inplace_elu` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('alpha', default=1.0),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, alpha=1.0):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_elu(self, [input, alpha]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, alpha)
return res
inplace_elu_op=InplaceElu()
class Equal(Primitive):
r"""
.. code-block::
prim = ops.Equal()
out = prim(input, other)
is equivalent to
.. code-block::
ops.equal(input, other)
Refer to :func:`mindspore.ops.equal` for more details.
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_equal(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
equal_op=Equal()
class HistcExt(Primitive):
r"""
.. code-block::
prim = ops.HistcExt()
out = prim(input, bins, min, max)
is equivalent to
.. code-block::
ops.histc_ext(input, bins, min, max)
Refer to :func:`mindspore.ops.histc_ext` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('bins', default=100),
sig.make_sig('min', default=0),
sig.make_sig('max', default=0),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, bins=100, min=0, max=0):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_histc_ext(self, [input, bins, min, max]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, bins, min, max)
return res
histc_ext_op=HistcExt()
class Threshold(Primitive):
r"""
.. code-block::
prim = ops.Threshold()
out = prim(input, threshold, value)
is equivalent to
.. code-block::
ops.threshold(input, threshold, value)
Refer to :func:`mindspore.ops.threshold` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, threshold, value):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_threshold(self, [input, threshold, value]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, threshold, value)
return res
threshold_op=Threshold()
class AdamW(Primitive):
r"""
Implements Adam Weight Decay algorithm.
.. math::
\begin{aligned}
&\textbf{input} : \gamma \text{(lr)}, \: \beta_1, \beta_2
\text{(betas)}, \: \theta_0 \text{(params)}, \: f(\theta) \text{(objective)},
\: \epsilon \text{ (epsilon)} \\
&\hspace{13mm} \lambda \text{(weight decay)}, \: \textit{amsgrad},
\: \textit{maximize} \\
&\textbf{initialize} : m_0 \leftarrow 0 \text{ (first moment)}, v_0 \leftarrow 0
\text{ ( second moment)}, \: \widehat{v_0}^{max}\leftarrow 0 \\[-1.ex]
&\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\
&\hspace{5mm}\textbf{if} \: \textit{maximize}: \\
&\hspace{10mm}g_t \leftarrow -\nabla_{\theta} f_t (\theta_{t-1}) \\
&\hspace{5mm}\textbf{else} \\
&\hspace{10mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\
&\hspace{5mm} \theta_t \leftarrow \theta_{t-1} - \gamma \lambda \theta_{t-1} \\
&\hspace{5mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\
&\hspace{5mm}v_t \leftarrow \beta_2 v_{t-1} + (1-\beta_2) g^2_t \\
&\hspace{5mm}\widehat{m_t} \leftarrow m_t/\big(1-\beta_1^t \big) \\
&\hspace{5mm}\widehat{v_t} \leftarrow v_t/\big(1-\beta_2^t \big) \\
&\hspace{5mm}\textbf{if} \: amsgrad \\
&\hspace{10mm}\widehat{v_t}^{max} \leftarrow \mathrm{max}(\widehat{v_t}^{max},
\widehat{v_t}) \\
&\hspace{10mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/
\big(\sqrt{\widehat{v_t}^{max}} + \epsilon \big) \\
&\hspace{5mm}\textbf{else} \\
&\hspace{10mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/
\big(\sqrt{\widehat{v_t}} + \epsilon \big) \\
&\bf{return} \: \theta_t \\[-1.ex]
\end{aligned}
.. warning::
This is an experimental optimizer API that is subject to change.
This module must be used with lr scheduler module in `LRScheduler Class
<https://www.mindspore.cn/docs/en/master/api_python/mindspore.experimental.html#lrscheduler-class>`_ .
Inputs:
- **var** (Union[Parameter, Tensor]) - Weights to be updated. The shape is :math:`(N, *)` where :math:`*` means
any number of additional dimensions. The data type can be float16 or float32.
- **m** (Union[Parameter, Tensor]) - The 1st moment vector in the updating formula,
it should have the same shape as `var`. The data type can be float16 or float32.
- **v** (Union[Parameter, Tensor]) - The 2nd moment vector in the updating formula,
it should have the same shape as `m`.
- **max_v** (Union[Parameter, Tensor]) - The 2nd moment vector in the updating formula,
it should have the same shape as `m`.
- **gradient** (Tensor) - Gradient, has the same shape as `var`.
- **step** (Tensor) - The current optimization step :math:`t`, used for bias correction.
- **lr** (float) - Learning rate, :math:`\gamma` in the updating formula,
the data type should be float.
- **beta1** (float) - The exponential decay rate for the 1st moment estimations,
the data type should be float. The paper suggested value is :math:`0.9`
- **beta2** (float) - The exponential decay rate for the 2nd moment estimations,
the data type should be float. The paper suggested value is :math:`0.999`
- **decay** (float) - weight decay (L2 penalty), must be a scalar tensor with float data type.
- **eps** (float) - Term added to the denominator to improve numerical stability,
the data type should be float.
- **amsgrad** (bool) - whether to use the AMSGrad algorithm. Default: ``False``.
- **maximize** (bool) - maximize the params based on the objective, instead of minimizing.
Default: ``False``.
Outputs:
Tuple of 3 Tensor, the updated parameters.
- **var** (Tensor) - The same shape and data type as `var`.
- **m** (Tensor) - The same shape and data type as `m`.
- **v** (Tensor) - The same shape and data type as `v`.
Supported Platforms:
``Ascend``
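Examples:
An illustrative sketch added here (it is not part of the generated documentation); it assumes the
primitive is importable as ``ops.auto_generate.AdamW`` and that `step` accepts a float32 scalar
Tensor. The hyper-parameters below are placeholders and numerical results are omitted.
>>> import numpy as np
>>> import mindspore as ms
>>> from mindspore import Tensor, Parameter, ops
>>> adamw = ops.auto_generate.AdamW()
>>> var = Parameter(Tensor(np.ones((2, 2)), ms.float32), name="var")
>>> m = Parameter(Tensor(np.zeros((2, 2)), ms.float32), name="m")
>>> v = Parameter(Tensor(np.zeros((2, 2)), ms.float32), name="v")
>>> max_v = Parameter(Tensor(np.zeros((2, 2)), ms.float32), name="max_v")
>>> gradient = Tensor(np.ones((2, 2)), ms.float32)
>>> step = Tensor(1, ms.float32)
>>> out = adamw(var, m, v, max_v, gradient, step, 1e-3, 0.9, 0.999, 1e-2, 1e-8)
>>> print(var.shape)
(2, 2)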
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T1),
sig.make_sig('v', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T1),
sig.make_sig('max_v', dtype=sig.sig_dtype.T1),
sig.make_sig('gradient', dtype=sig.sig_dtype.T),
sig.make_sig('step', dtype=sig.sig_dtype.T2),
sig.make_sig('lr', dtype=sig.sig_dtype.T3),
sig.make_sig('beta1', dtype=sig.sig_dtype.T3),
sig.make_sig('beta2', dtype=sig.sig_dtype.T3),
sig.make_sig('decay', dtype=sig.sig_dtype.T3),
sig.make_sig('eps', dtype=sig.sig_dtype.T3),
sig.make_sig('amsgrad', dtype=sig.sig_dtype.T4, default=False),
sig.make_sig('maximize', dtype=sig.sig_dtype.T5, default=False),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, var, m, v, max_v, gradient, step, lr, beta1, beta2, decay, eps, amsgrad=False, maximize=False):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_adamw(self, [var, m, v, max_v, gradient, step, lr, beta1, beta2, decay, eps, amsgrad, maximize]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, var, m, v, max_v, gradient, step, lr, beta1, beta2, decay, eps, amsgrad, maximize)
return res
adamw_op=AdamW()
class Erf(Primitive):
r"""
.. code-block::
prim = ops.Erf()
out = prim(input)
is equivalent to
.. code-block::
ops.erf(input)
Refer to :func:`mindspore.ops.erf` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_erf(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
erf_op=Erf()
class RandIntLike(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('low'),
sig.make_sig('high'),
sig.make_sig('seed'),
sig.make_sig('offset'),
sig.make_sig('dtype', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, low, high, seed, offset, dtype=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_randint_like(self, [input, low, high, seed, offset, dtype if dtype is None else dtype_to_type_id('RandIntLike', 'dtype', dtype)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, low, high, seed, offset, dtype if dtype is None else dtype_to_type_id('RandIntLike', 'dtype', dtype))
return res
randint_like_op=RandIntLike()
class Atan2Ext(Primitive):
r"""
.. code-block::
prim = ops.Atan2Ext()
out = prim(input, other)
is equivalent to
.. code-block::
ops.atan2_ext(input, other)
Refer to :func:`mindspore.ops.atan2_ext` for more details.
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_atan2_ext(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
atan2_ext_op=Atan2Ext()
class AvgPool3DExt(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('kernel_size'),
sig.make_sig('stride', default=None),
sig.make_sig('padding', default=0),
sig.make_sig('ceil_mode', default=False),
sig.make_sig('count_include_pad', default=True),
sig.make_sig('divisor_override', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_avg_pool3d_ext(self, [input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override)
return res
avg_pool3d_ext_op=AvgPool3DExt()
class AsinhGrad(Primitive):
r"""
Performs grad of Asinh operation.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, out, dout):
return super().__call__(out, dout)
asinh_grad_op=AsinhGrad()
class SmoothL1Loss(Primitive):
r"""
Calculates the smooth L1 loss, which is robust to outliers.
Refer to :func:`mindspore.ops.smooth_l1_loss` for more details.
.. warning::
This API has poor performance on CPU, and it is recommended to run it on Ascend/GPU.
Args:
beta (number, optional): A parameter used to control the point where the function changes from
L1 to L2 loss. Default: ``1.0`` .
- Ascend: The value should be equal to or greater than zero.
- CPU/GPU: The value should be greater than zero.
reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
``'sum'`` . Default: ``'none'`` .
- ``'none'``: no reduction will be applied.
- ``'mean'``: compute and return the mean of elements in the output.
- ``'sum'``: the output elements will be summed.
Inputs:
- **logits** (Tensor) - Input Tensor of any dimension. Supported dtypes:
- Ascend: float16, float32, bfloat16.
- CPU/GPU: float16, float32, float64.
- **labels** (Tensor) - Ground truth data.
- CPU/Ascend: has the same shape as the `logits`, `logits` and `labels` comply with the implicit type conversion rules to make the data types consistent.
- GPU: has the same shape and dtype as the `logits`.
Outputs:
Tensor, if `reduction` is ``'none'``, then output is a tensor with the same shape as `logits`. Otherwise the shape of output tensor is :math:`()`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> loss = ops.SmoothL1Loss()
>>> logits = Tensor(np.array([1, 2, 3]), mindspore.float32)
>>> labels = Tensor(np.array([1, 2, 2]), mindspore.float32)
>>> output = loss(logits, labels)
>>> print(output)
[0. 0. 0.5]
"""
@prim_arg_register
def __init__(self, beta=1.0, reduction='none'):
self._set_prim_arg("beta", type_it('SmoothL1Loss', 'beta', beta, (OpDtype.DT_INT, OpDtype.DT_BOOL), OpDtype.DT_FLOAT))
self._set_prim_arg_with_handler("reduction", reduction, str_to_enum)
def __call__(self, prediction, target):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_smooth_l1_loss(self, [prediction, target, self.beta, self.reduction]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, prediction, target, self.beta, self.reduction)
return res
class InplaceFloor(Primitive):
r"""
.. code-block::
prim = ops.InplaceFloor()
out = prim(input)
is equivalent to
.. code-block::
ops.floor_(input)
Refer to :func:`mindspore.ops.floor_` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_floor(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
inplace_floor_op=InplaceFloor()
class CumProd(Primitive):
r"""
Computes the cumulative product of the tensor x along axis.
For example, if input is a vector of size N, the result will also be a vector of size N, with elements.
.. math::
y_i = x_1 * x_2 * x_3 * ... * x_i
Args:
exclusive (bool): If ``True`` , perform exclusive cumulative product. Default: ``False`` .
reverse (bool): If ``True`` , reverse the result along axis. Default: ``False`` .
Inputs:
- **x** (Tensor[Number]) - The input Tensor with shape
:math:`(N, *)` where :math:`*` means any number of additional dimensions.
- **axis** (int) - The dimensions to compute the cumulative product.
Only constant value is allowed.
Outputs:
Tensor, has the same shape and dtype as the `x`.
Raises:
TypeError: If `exclusive` or `reverse` is not a bool.
TypeError: If `axis` is not an int.
ValueError: If `axis` is None.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> a, b, c, = 1, 2, 3
>>> x = Tensor(np.array([a, b, c]).astype(np.float32))
>>> op0 = ops.CumProd()
>>> output0 = op0(x, 0) # output=[a, a * b, a * b * c]
>>> op1 = ops.CumProd(exclusive=True)
>>> output1 = op1(x, 0) # output=[1, a, a * b]
>>> op2 = ops.CumProd(reverse=True)
>>> output2 = op2(x, 0) # output=[a * b * c, b * c, c]
>>> op3 = ops.CumProd(exclusive=True, reverse=True)
>>> output3 = op3(x, 0) # output=[b * c, c, 1]
>>> print(output0)
[1. 2. 6.]
>>> print(output1)
[1. 1. 2.]
>>> print(output2)
[6. 6. 3.]
>>> print(output3)
[6. 3. 1.]
>>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [5, 3, 5]]).astype(np.float32))
>>> output4 = op0(x, 0)
>>> output5 = op0(x, 1)
>>> print(output4)
[[ 1. 2. 3.]
[ 4. 10. 18.]
[20. 30. 90.]]
>>> print(output5)
[[ 1. 2. 6.]
[ 4. 20. 120.]
[ 5. 15. 75.]]
"""
@prim_arg_register
def __init__(self, exclusive=False, reverse=False):
self._set_prim_arg("exclusive", exclusive)
self._set_prim_arg("reverse", reverse)
def __call__(self, x, axis):
return super().__call__(x, axis, self.exclusive, self.reverse)
class MultinomialExt(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, num_samples, replacement, seed, offset):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_multinomial_ext(self, [input, num_samples, replacement, seed, offset]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, num_samples, replacement, seed, offset)
return res
multinomial_ext_op=MultinomialExt()
class Split(Primitive):
r"""
Splits the input tensor into `output_num` tensors along the given axis.
Refer to :func:`mindspore.ops.split` for more details.
Args:
axis (int): Index of the split position. Default: ``0`` .
output_num (int): The number of output tensors. Must be positive int. Default: ``1`` .
Inputs:
- **input_x** (Tensor) - The shape of tensor is :math:`(x_0, x_1, ..., x_{R-1})`, R >= 1.
Outputs:
tuple[Tensor], the shape of each output tensor is the same, which is
:math:`(x_0, x_1, ..., x_{axis}/{output\_num}, ..., x_{R-1})`.
And the data type is the same as `input_x`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> split = ops.Split(1, 2)
>>> x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]), mindspore.int32)
>>> print(x)
[[1 1 1 1]
[2 2 2 2]]
>>> output = split(x)
>>> print(output)
(Tensor(shape=[2, 2], dtype=Int32, value=
[[1, 1],
[2, 2]]), Tensor(shape=[2, 2], dtype=Int32, value=
[[1, 1],
[2, 2]]))
>>> split = ops.Split(1, 4)
>>> output = split(x)
>>> print(output)
(Tensor(shape=[2, 1], dtype=Int32, value=
[[1],
[2]]), Tensor(shape=[2, 1], dtype=Int32, value=
[[1],
[2]]), Tensor(shape=[2, 1], dtype=Int32, value=
[[1],
[2]]), Tensor(shape=[2, 1], dtype=Int32, value=
[[1],
[2]]))
"""
@prim_arg_register
def __init__(self, axis=0, output_num=1):
self._set_prim_arg("axis", axis)
self._set_prim_arg("output_num", output_num)
def __call__(self, input_x):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_split(self, [input_x, self.axis, self.output_num]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input_x, self.axis, self.output_num)
return res
class BitwiseXorTensor(Primitive):
r"""
Returns bitwise `xor` of two tensors element-wise.
Inputs:
- **input** (Tensor) - The input tensor must be of integral or Boolean types.
- **other** (Tensor) - The second input tensor with same type as the `input`.
Outputs:
Tensor, has the same type as the `input`.
Supported Platforms:
``Ascend``
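Examples:
An illustrative sketch added here (not part of the generated documentation); it assumes the
primitive is importable as ``ops.auto_generate.BitwiseXorTensor``.
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> bitwise_xor = ops.auto_generate.BitwiseXorTensor()
>>> input = Tensor(np.array([1, 2, 3]).astype(np.int32))
>>> other = Tensor(np.array([1, 1, 1]).astype(np.int32))
>>> output = bitwise_xor(input, other)
>>> print(output)
[0 3 2]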
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_bitwise_xor_tensor(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
bitwise_xor_tensor_op=BitwiseXorTensor()
class EmbeddingFeatureMappingFileSize(Primitive):
r"""
.. code-block::
prim = ops.EmbeddingFeatureMappingFileSize()
out = prim(file_path, table_name, global_step, embedding_dim, only_offset_flag)
is equivalent to
.. code-block::
ops.embedding_feature_mapping_file_size(file_path, table_name, global_step, embedding_dim, only_offset_flag)
Refer to :func:`mindspore.ops.embedding_feature_mapping_file_size` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('file_path'),
sig.make_sig('table_name'),
sig.make_sig('global_step'),
sig.make_sig('embedding_dim'),
sig.make_sig('only_offset_flag', default=True),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, file_path, table_name, global_step, embedding_dim, only_offset_flag=True):
return super().__call__(file_path, table_name, global_step, embedding_dim, only_offset_flag)
embedding_feature_mapping_file_size_op=EmbeddingFeatureMappingFileSize()
class MatMul(Primitive):
r"""
Multiplies matrix `a` and matrix `b`.
.. math::
(Output)_{i j}=\sum_{k=1}^{p} a_{i k} b_{k j}=a_{i 1} b_{1 j}+a_{i 2} b_{2 j}+\cdots+a_{i p} b_{p j}, p\in N
where the :math:`i,j` indicates the output of the i-th row and j-th column element.
Note:
- If :math:`N * M` cannot be divided by 16, the performance will be poor in the Ascend environment.
- The dtypes of the inputs must be the same.
- On Ascend, float64 is not supported.
Args:
transpose_a (bool): If ``True`` , `a` is transposed before multiplication. Default: ``False`` .
transpose_b (bool): If ``True`` , `b` is transposed before multiplication. Default: ``False`` .
Inputs:
- **a** (Tensor) - The first tensor to be multiplied. The shape of the tensor is :math:`(N, C)`. If
`transpose_a` is ``True`` , its shape must be :math:`(C, N)` after transpose.
- **b** (Tensor) - The second tensor to be multiplied. The shape of the tensor is :math:`(C, M)`. If
`transpose_b` is ``True`` , its shape must be :math:`(M, C)` after transpose.
Outputs:
Tensor, the shape of the output tensor is :math:`(N, M)`.
Raises:
TypeError: If `transpose_a` or `transpose_b` is not a bool.
TypeError: If the dtype of `a` and the dtype of `b` are not the same.
ValueError: If the column of matrix dimensions of `a` is not equal to
the row of matrix dimensions of `b`.
ValueError: If length of shape of `a` or `b` is not equal to 2.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> a = Tensor(np.ones(shape=[1, 3]), mindspore.float32)
>>> b = Tensor(np.ones(shape=[3, 4]), mindspore.float32)
>>> matmul = ops.MatMul()
>>> output = matmul(a, b)
>>> print(output)
[[3. 3. 3. 3.]]
"""
@prim_arg_register
def __init__(self, transpose_a=False, transpose_b=False):
self._set_prim_arg("transpose_a", transpose_a)
self._set_prim_arg("transpose_b", transpose_b)
def __call__(self, input, mat2):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_matmul(self, [input, mat2, self.transpose_a, self.transpose_b]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, mat2, self.transpose_a, self.transpose_b)
return res
class AvgPool2DGrad(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('grad'),
sig.make_sig('image'),
sig.make_sig('kernel_size'),
sig.make_sig('stride'),
sig.make_sig('padding', default=0),
sig.make_sig('ceil_mode', default=False),
sig.make_sig('count_include_pad', default=True),
sig.make_sig('divisor_override', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, grad, image, kernel_size, stride, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_avg_pool2d_grad(self, [grad, image, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, grad, image, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override)
return res
avg_pool2d_grad_op=AvgPool2DGrad()
class NPUClearFloatStatusV2(Primitive):
r"""
Compare with `NPUClearFloatStatus`.
Clear the flag for storage overflow status. This flag is located in a register at a
fixed address on the `Ascend` device, and overflow information is automatically
written to this register.
The flag is a one-dimensional Tensor with shape :math:`(8,)` and data type `mindspore.dtype.int32`.
If the value of the flag is zero, no overflow has occurred; otherwise, an overflow has occurred.
When performing overflow detection on the network, you should first call `NPUClearFloatStatusV2` to
reset the register before the detection, and then call `NPUGetFloatStatusV2` to get the register
status after the network execution is completed.
Note:
- In order to avoid mis-optimization by the compiler, an additional input and output are added to
this operator. The input and output are defined as Tensors with shape :math:`(8,)` and data type
`mindspore.dtype.int32`, and they carry no meaningful value.
- Since this op lacks contextual dependencies with parameters in the network,
:class:`mindspore.ops.Depend` needs to be used to ensure order of execution.
Inputs:
- **input** (Tensor) - An additional input created to avoid compiler optimization, with shape :math:`(8,)`
and data type `mindspore.dtype.int32`; it has no actual meaning.
Outputs:
- **output** (Tensor) - Has the same shape and data type as `input`, and is likewise meaningless.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore as ms
>>> import numpy as np
>>> from mindspore import ops, nn, Tensor
>>> from mindspore.ops import NPUGetFloatStatusV2, NPUClearFloatStatusV2
>>> class Net(nn.Cell):
... def __init__(self):
... super().__init__()
... self.clear_status = NPUClearFloatStatusV2()
... self.get_status = NPUGetFloatStatusV2()
... self.sub = ops.Sub()
... self.neg = ops.Neg()
... self.equal = ops.Equal()
... self.reduce_all = ops.ReduceAll(keep_dims=False)
... self.base = Tensor([0], dtype=ms.int32)
... self.logic_not = ops.LogicalNot()
...
... def construct(self, x):
... init = Tensor([0]*8, dtype=ms.int32)
... clear_status = self.clear_status(init)
... x = ops.depend(x, clear_status)
... res = self.sub(x, self.neg(x))
... init = ops.depend(init, res)
... get_status = self.get_status(init)
... flag = self.equal(self.base, get_status)
... overall_finite = self.reduce_all(flag)
... overflow = self.logic_not(overall_finite)
... return overflow
...
>>> value = 65504
>>> data = np.full((2, 3), value, dtype=np.float16)
>>> x = Tensor(data, dtype=ms.float16)
>>> net = Net()
>>> res = net(x)
>>> print(res)
True
>>> value = 10
>>> data = np.full((2, 3), value, dtype=np.float16)
>>> x = Tensor(data, dtype=ms.float16)
>>> net = Net()
>>> res = net(x)
>>> print(res)
False
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
npu_clear_float_status_v2_op=NPUClearFloatStatusV2()
class RightShift(Primitive):
r"""
Shift the value of each position of Tensor `input_x` to the right by corresponding bits in Tensor `input_y`.
The inputs are two tensors, dtypes of them must be consistent, and the
shapes of them could be broadcast.
.. math::
\begin{aligned}
&out_{i} =x_{i} >> y_{i}
\end{aligned}
.. warning::
This is an experimental API that is subject to change or deletion.
Inputs:
- **input_x** (Tensor) - The target tensor, will be shifted to the right
by `input_y` bits element-wise. Support all int and uint types.
- **input_y** (Tensor) - Number of bits shifted, the tensor must have the same type as `input_x`.
Outputs:
- **output** (Tensor) - The output tensor, has the same type as `input_x`.
Raises:
TypeError: If `input_x` or `input_y` is not a Tensor.
TypeError: If `input_x` and `input_y` could not be broadcast.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.array([1, 2, 3]).astype(np.uint8))
>>> input_y = Tensor(np.array([1, 1, 1]).astype(np.uint8))
>>> output = ops.RightShift()(input_x, input_y)
>>> print(output)
[0 1 1]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input_x, input_y):
return super().__call__(input_x, input_y)
right_shift_op=RightShift()
class SelectExt(Primitive):
r"""
.. code-block::
prim = ops.SelectExt()
out = prim(input, dim, index)
is equivalent to
.. code-block::
ops.select_ext(input, dim, index)
Refer to :func:`mindspore.ops.select_ext` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim, index):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_select_ext(self, [input, dim, index]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim, index)
return res
select_ext_op=SelectExt()
class LpNormV2(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('p', default=2),
sig.make_sig('dim', default=None),
sig.make_sig('keepdim', default=False),
sig.make_sig('epsilon', default=1e-12),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, p=2, dim=None, keepdim=False, epsilon=1e-12):
return super().__call__(input, p, dim, keepdim, epsilon)
lp_norm_v2_op=LpNormV2()
class IFFTN(Primitive):
r"""
.. code-block::
prim = ops.IFFTN()
out = prim(input, s, dim, norm)
is equivalent to
.. code-block::
ops.ifftn(input, s, dim, norm)
Refer to :func:`mindspore.ops.ifftn` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('s', default=None),
sig.make_sig('dim', default=None),
sig.make_sig('norm', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, s=None, dim=None, norm=None):
return super().__call__(input, s, dim, norm if norm is None else str_to_enum('IFFTN', 'norm', norm))
ifftn_op=IFFTN()
class MSELossGradExt(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('dout'),
sig.make_sig('x'),
sig.make_sig('target'),
sig.make_sig('reduction', default='mean'),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, dout, x, target, reduction='mean'):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_mse_loss_grad_ext(self, [dout, x, target, str_to_enum('MSELossGradExt', 'reduction', reduction)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, dout, x, target, str_to_enum('MSELossGradExt', 'reduction', reduction))
return res
mse_loss_grad_ext_op=MSELossGradExt()
class Rsqrt(Primitive):
r"""
.. code-block::
prim = ops.Rsqrt()
out = prim(input)
is equivalent to
.. code-block::
ops.rsqrt(input)
Refer to :func:`mindspore.ops.rsqrt` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_rsqrt(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
rsqrt_op=Rsqrt()
class Concat(Primitive):
r"""
.. code-block::
prim = ops.Concat(axis)
out = prim(tensors)
is equivalent to
.. code-block::
ops.cat(tensors, axis)
Refer to :func:`mindspore.ops.cat` for more details.
"""
@prim_arg_register
def __init__(self, axis=0):
self._set_prim_arg("axis", axis)
def __call__(self, tensors):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_concat(self, [tensors, self.axis]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, tensors, self.axis)
return res
class LogAddExp(Primitive):
r"""
.. code-block::
prim = ops.LogAddExp()
out = prim(input, other)
is equivalent to
.. code-block::
ops.logaddexp_ext(input, other)
Refer to :func:`mindspore.ops.logaddexp_ext` for more details.
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_logaddexp(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
logaddexp_op=LogAddExp()
class StdMean(Primitive):
r"""
By default, return the standard deviation and mean of all elements in the Tensor.
If `dim` is a list of dimensions, calculate the standard deviation and mean over the corresponding dimensions.
The standard deviation (:math:`\sigma`) is calculated as:
.. math::
\sigma = \sqrt{\frac{1}{N - \delta N} \sum_{j=0}^{N-1} \left(x_{ij} - \overline{x_{i}}\right)^{2}}
where :math:`x` is the sample set of elements, :math:`\bar{x}` is the sample mean,
:math:`N` is the number of samples and :math:`\delta N` is the `correction` .
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
input (Tensor): The input tensor. Supported dtypes: float16, float32.
dim (Union[int, tuple(int), list(int)], optional):
Specify the dimensions for calculating standard deviation and mean. Default value: ``None``.
Keyword Args:
correction (int, optional): Difference between the sample size and sample degrees of freedom.
Defaults to Bessel's correction. Default: ``1``.
keepdim (bool, optional): Whether to preserve the dimensions of the output Tensor.
If True, retain the reduced dimension with a size of 1. Otherwise, remove the dimensions.
Default value: ``False``.
Returns:
A tuple of standard deviation and mean.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If `dim` is not one of the following data types: int, tuple, list, or Tensor.
TypeError: If `keepdim` is not a bool.
ValueError: If `dim` is out of range.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore as ms
>>> input = ms.Tensor([[1, 2, 3, 4], [-1, 1, 4, -10]], ms.float32)
>>> output_std, output_mean = ms.mint.std_mean(input, 1, correction=2, keepdim=True)
>>> print(output_std)
[[1.5811388]
[7.3824115]]
>>> print(output_mean)
[[ 2.5]
[-1.5]]
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('dim', default=None),
sig.make_sig('correction', default=1),
sig.make_sig('keepdim', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim=None, correction=1, keepdim=False):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_std_mean(self, [input, dim, correction, keepdim]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim, correction, keepdim)
return res
std_mean_op=StdMean()
class FmodScalar(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_fmod_scalar(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
fmod_scalar_op=FmodScalar()
class AdaptiveMaxPool2D(Primitive):
r"""
Performs 2D adaptive max pooling on a multi-plane input signal.
Refer to :func:`mindspore.ops.adaptive_max_pool2d` for more details.
Inputs:
- **input** (Tensor) - The input of AdaptiveMaxPool2D, which is a 3D or 4D tensor, with float16, float32 or float64 data type.
- **output_size** (tuple) - The target output size. `output_size` can be a tuple :math:`(H, W)`, :math:`H` and :math:`W` should be int.
Outputs:
Tensor, with the same type as the `input`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> # case 3: output_size=(1, 2)
>>> input = Tensor(np.array([[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],
... [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],
... [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]]), mindspore.float32)
>>> adaptive_max_pool_2d = ops.AdaptiveMaxPool2D((1, 2))
>>> output = adaptive_max_pool_2d(input)
>>> print(output[0])
[[[[8. 9.]]
[[8. 9.]]
[[8. 9.]]]]
>>> print(output[1])
[[[[7 8]]
[[7 8]]
[[7 8]]]]
"""
@prim_arg_register
def __init__(self, output_size):
self._set_prim_arg("output_size", type_it('AdaptiveMaxPool2D', 'output_size', output_size, OpDtype.DT_LIST_INT, OpDtype.DT_TUPLE_INT))
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_adaptive_max_pool2d(self, [input, self.output_size]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, self.output_size)
return res
class RandnLike(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('seed'),
sig.make_sig('offset'),
sig.make_sig('dtype', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, seed, offset, dtype=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_randn_like(self, [input, seed, offset, dtype if dtype is None else dtype_to_type_id('RandnLike', 'dtype', dtype)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, seed, offset, dtype if dtype is None else dtype_to_type_id('RandnLike', 'dtype', dtype))
return res
randn_like_op=RandnLike()
class InnerInplaceIndexPut(Primitive):
r"""
Returns a Tensor. According to the index number of `indices` ,
replace the value corresponding to the `input` with the value in `values`.
.. warning::
This is an internal and non-standard interface. The target interface is aclnn.
Inputs:
- **input** (Tensor) - The input Tensor.
- **indices** (tuple[Tensor], list[Tensor]) - Index tensors of type bool, uint8, int32 or int64,
used to index into the "self Tensor". Each tensor in `indices` should be 1-D, the number of tensors
in `indices` should be <= the rank of the "self Tensor", and the tensors in `indices` should be broadcastable.
- **values** (Tensor) - A 1-D Tensor of the same type as the "self Tensor". If its size is 1, it will be broadcast.
- **accumulate** (bool, optional) - If `accumulate` is True, the elements in values are added to "self Tensor",
else the elements in `values` replace the corresponding element in the "self Tensor".
Default: ``False``.
Outputs:
Tensor, with the same type and shape as the "self Tensor".
Raises:
TypeError: If the dtype of the "self Tensor" is not equal to the dtype of `values`.
TypeError: If the dtype of `indices` is not tuple[Tensor], list[Tensor].
TypeError: If the dtype of tensors in `indices` are not bool, uint8, int32 or int64.
TypeError: If the dtypes of tensors in `indices` are inconsistent.
TypeError: If the dtype of `accumulate` is not bool.
ValueError: If `values` is not a 1-D Tensor.
ValueError: If size(`values`) is not 1 or max size of the tensors in `indices` when
rank("self Tensor") == size(`indices`).
ValueError: If size(`values`) is not 1 or "self Tensor".shape[-1] when
rank("self Tensor") > size(`indices`).
ValueError: If the tensors in `indices` are not 1-D.
ValueError: If the tensors in `indices` are not broadcastable.
ValueError: If size(`indices`) > rank("self Tensor").
Supported Platforms:
``Ascend``
Examples:
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6]]).astype(np.int32))
>>> values = Tensor(np.array([3]).astype(np.int32))
>>> indices = [Tensor(np.array([0, 1, 1]).astype(np.int32)), Tensor(np.array([1, 2, 1]).astype(np.int32))]
>>> accumulate = True
>>> ops.auto_generate.InnerInplaceIndexPut()(x, indices, values, accumulate)
>>> print(x)
[[1 5 3]
[4 8 9]]
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('indices'),
sig.make_sig('values'),
sig.make_sig('accumulate', default=False),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, indices, values, accumulate=False):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inner_inplace_index_put(self, [input, indices, values, accumulate]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, indices, values, accumulate)
return res
inner_inplace_index_put_op=InnerInplaceIndexPut()
class Col2ImExt(Primitive):
r"""
.. code-block::
prim = ops.Col2ImExt()
out = prim(input, output_size, kernel_size, dilation, padding, stride)
is equivalent to
.. code-block::
ops.fold_ext(input, output_size, kernel_size, dilation, padding, stride)
Refer to :func:`mindspore.ops.fold_ext` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('output_size'),
sig.make_sig('kernel_size'),
sig.make_sig('dilation', default=1),
sig.make_sig('padding', default=0),
sig.make_sig('stride', default=1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, output_size, kernel_size, dilation=1, padding=0, stride=1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_col2im_ext(self, [input, to_pair('Col2ImExt', 'output_size', output_size), to_pair('Col2ImExt', 'kernel_size', kernel_size), to_pair('Col2ImExt', 'dilation', dilation), to_pair('Col2ImExt', 'padding', padding), to_pair('Col2ImExt', 'stride', stride)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, to_pair('Col2ImExt', 'output_size', output_size), to_pair('Col2ImExt', 'kernel_size', kernel_size), to_pair('Col2ImExt', 'dilation', dilation), to_pair('Col2ImExt', 'padding', padding), to_pair('Col2ImExt', 'stride', stride))
return res
col2im_ext_op=Col2ImExt()
class IHFFT2(Primitive):
r"""
.. code-block::
prim = ops.IHFFT2()
out = prim(input, s, dim, norm)
is equivalent to
.. code-block::
ops.ihfft2(input, s, dim, norm)
Refer to :func:`mindspore.ops.ihfft2` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('s', default=None),
sig.make_sig('dim', default=(-2, -1)),
sig.make_sig('norm', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, s=None, dim=(-2, -1), norm=None):
return super().__call__(input, s, dim, norm if norm is None else str_to_enum('IHFFT2', 'norm', norm))
ihfft2_op=IHFFT2()
class SiLUGrad(Primitive):
r"""
Performs grad of SiLU operation.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, dout, x):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_silu_grad(self, [dout, x]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, dout, x)
return res
silu_grad_op=SiLUGrad()
class BatchNormGradGrad(Primitive):
r"""
Performs grad of BatchNormGrad operation.
"""
@prim_arg_register
def __init__(self, is_training=False, epsilon=1e-5, data_format='NCHW'):
self._set_prim_arg("is_training", is_training)
self._set_prim_arg("epsilon", epsilon)
self._set_prim_arg_with_handler("data_format", data_format, str_to_enum)
def __call__(self, x, dy, scale, saved_mean, saved_variance, dout_dx, dout_dscale, dout_dbias):
return super().__call__(x, dy, scale, saved_mean, saved_variance, dout_dx, dout_dscale, dout_dbias, self.is_training, self.epsilon, self.data_format)
class NormalTensorFloat(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, mean, std, seed, offset):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_normal_tensor_float(self, [mean, std, seed, offset]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, mean, std, seed, offset)
return res
normal_tensor_float_op=NormalTensorFloat()
class LayerNormExt(Primitive):
r"""
Applies the Layer Normalization to the input tensor.
This operator will normalize the input tensor on given axis. LayerNorm is described in the paper
`Layer Normalization <https://arxiv.org/abs/1607.06450>`_.
.. math::
y = \frac{x - mean}{\sqrt{variance + \epsilon}} * \gamma + \beta
where :math:`\gamma` is weight, :math:`\beta` is bias, :math:`\epsilon` is eps.
Args:
input (Tensor): Tensor of shape :math:`(N, \ldots)`. The input of LayerNorm.
normalized_shape (Union(tuple[int], list[int])): The normalized shape of `input` for LayerNorm.
weight (Tensor, optional): Learnable parameter :math:`\gamma` . Tensor of shape `normalized_shape`. Default: ``None`` .
bias (Tensor, optional): Learnable parameter :math:`\beta` . Tensor of shape `normalized_shape`. Default: ``None`` .
eps (float, optional): A value added to the denominator for numerical stability(:math:`\epsilon`). Default: ``1e-5`` .
Returns:
tuple[Tensor], tuple of 3 tensors, the normalized input and the updated parameters.
- **output_x** (Tensor) - The normalized input, has the same type and shape as `input`.
- **mean** (Tensor) - The mean of `input`. Its shape is the same as `input`, except that the dimensions
covered by `normalized_shape` are reduced to 1. For example, if `input` has shape :math:`(x_1, x_2, x_3)`
and `normalized_shape` is :math:`(x_3,)`, the shape of `mean` is :math:`(x_1, x_2, 1)`.
- **rstd** (Tensor) - Shape is the same as `mean` .
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If `normalized_shape` is not an integer, a list or a tuple.
TypeError: If `eps` is not a float.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.array([[1, 2, 3], [1, 2, 3]]), mindspore.float32)
>>> normalized_shape = (3,)
>>> gamma = Tensor(np.ones(normalized_shape), mindspore.float32)
>>> beta = Tensor(np.zeros(normalized_shape), mindspore.float32)
>>> eps = 1e-7
>>> layer_norm = ops.LayerNormExt()
>>> output, mean, rstd = layer_norm(input_x, normalized_shape, gamma, beta, eps)
>>> print(output)
[[-1.2247448 0. 1.2247448]
[-1.2247448 0. 1.2247448]]
>>> print(mean)
[[2.]
[2.]]
>>> print(rstd)
[[1.2247447]
[1.2247447]]
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('normalized_shape'),
sig.make_sig('weight', default=None),
sig.make_sig('bias', default=None),
sig.make_sig('eps', default=1e-5),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, normalized_shape, weight=None, bias=None, eps=1e-5):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_layer_norm_ext(self, [input, normalized_shape, weight, bias, eps]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, normalized_shape, weight, bias, eps)
return res
layer_norm_ext_op=LayerNormExt()
class LayerNorm(Primitive):
r"""
Applies the Layer Normalization to the input tensor.
This operator will normalize the input tensor on given axis. LayerNorm is described in the paper
`Layer Normalization <https://arxiv.org/abs/1607.06450>`_.
.. math::
y = \frac{x - mean}{\sqrt{variance + \epsilon}} * \gamma + \beta
where :math:`\gamma` is scale, :math:`\beta` is bias, :math:`\epsilon` is epsilon.
Args:
begin_norm_axis (int): The begin axis of the `input_x` to apply LayerNorm,
the value must be in [-1, rank(input_x)). Default: ``1`` .
begin_params_axis (int): The begin axis of the parameter input (`gamma`, `beta`) to
apply LayerNorm, the value must be in [-1, rank(input_x)). Default: ``1`` .
Note: On the Ascend platform, the value of `begin_params_axis` needs to be equal to the value of `begin_norm_axis` .
epsilon (float): A value added to the denominator for numerical stability(:math:`\epsilon`). Default: ``1e-7`` .
Inputs:
- **input_x** (Tensor) - Tensor of shape :math:`(N, \ldots)`.
The input of LayerNorm. Supported dtypes: float16, float32, float64.
- **gamma** (Tensor) - Learnable parameter :math:`\gamma` . Tensor of shape `input_x_shape[begin_params_axis:]`. Supported dtypes: float16, float32, float64.
- **beta** (Tensor) - Learnable parameter :math:`\beta` . Tensor of shape `input_x_shape[begin_params_axis:]`. Supported dtypes: float16, float32, float64.
Outputs:
tuple[Tensor], tuple of 3 tensors, the normalized input and the updated parameters.
- **output_x** (Tensor) - The normalized input, has the same type and shape as the `input_x`.
- **mean** (Tensor) - The first `begin_norm_axis` dimensions of `mean` shape is the same as `input_x`,
and the remaining dimensions are 1. Suppose the shape of the `input_x` is :math:`(x_1, x_2, \ldots, x_R)`,
the shape of the `mean` is :math:`(x_1, \ldots, x_{begin\_norm\_axis}, 1, \ldots, 1)`
(when `begin_norm_axis=0`, the shape of `mean` is :math:`(1, \ldots, 1)` ).
- **rstd** (Tensor) - The reciprocal of the input standard deviation. Shape is the same as `mean` .
Raises:
TypeError: If `begin_norm_axis` or `begin_params_axis` is not an int.
TypeError: If `epsilon` is not a float.
TypeError: If `input_x`, `gamma` or `beta` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.array([[1, 2, 3], [1, 2, 3]]), mindspore.float32)
>>> gamma = Tensor(np.ones([3]), mindspore.float32)
>>> beta = Tensor(np.ones([3]), mindspore.float32)
>>> layer_norm = ops.LayerNorm()
>>> output, _, _ = layer_norm(input_x, gamma, beta)
>>> print(output)
[[-0.2247448 1. 2.2247448]
[-0.2247448 1. 2.2247448]]
"""
@prim_arg_register
def __init__(self, begin_norm_axis=1, begin_params_axis=1, epsilon=1e-7):
self._set_prim_arg("begin_norm_axis", begin_norm_axis)
self._set_prim_arg("begin_params_axis", begin_params_axis)
self._set_prim_arg("epsilon", epsilon)
def __call__(self, input_x, gamma, beta):
return super().__call__(input_x, gamma, beta, self.begin_norm_axis, self.begin_params_axis, self.epsilon)
class ProdExt(Primitive):
r"""
.. code-block::
prim = ops.ProdExt()
out = prim(input, dim, keepdim, dtype)
is equivalent to
.. code-block::
ops.prod_ext(input, dim, keepdim, dtype)
Refer to :func:`mindspore.ops.prod_ext` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('dim', default=None),
sig.make_sig('keepdim', default=False),
sig.make_sig('dtype', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim=None, keepdim=False, dtype=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_prod_ext(self, [input, dim, keepdim, dtype if dtype is None else dtype_to_type_id('ProdExt', 'dtype', dtype)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim, keepdim, dtype if dtype is None else dtype_to_type_id('ProdExt', 'dtype', dtype))
return res
prod_ext_op=ProdExt()
class InplaceDiv(Primitive):
r"""
.. code-block::
prim = ops.InplaceDiv()
out = prim(input, other)
is equivalent to
.. code-block::
ops.div_tensor_(input, other)
Refer to :func:`mindspore.ops.div_tensor_` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('other'),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_div(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
inplace_div_op=InplaceDiv()
class EmbeddingFeatureMappingInsert(Primitive):
r"""
.. code-block::
prim = ops.EmbeddingFeatureMappingInsert()
out = prim(table_name, num, feature_id, offset_id)
is equivalent to
.. code-block::
ops.embedding_feature_mapping_insert(table_name, num, feature_id, offset_id)
Refer to :func:`mindspore.ops.embedding_feature_mapping_insert` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, table_name, num, feature_id, offset_id):
return super().__call__(table_name, num, feature_id, offset_id)
embedding_feature_mapping_insert_op=EmbeddingFeatureMappingInsert()
class Add(Primitive):
r"""
.. code-block::
prim = ops.Add()
out = prim(input, other)
is equivalent to
.. code-block::
ops.add(input, other)
Refer to :func:`mindspore.ops.add` for more details.
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_add(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
add_op=Add()
class LogSoftmaxGrad(Primitive):
r"""
Computes gradient for the Log Softmax activation.
"""
@prim_arg_register
def __init__(self, axis=-1):
self._set_prim_arg("axis", axis)
def __call__(self, logits, grad):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_log_softmax_grad(self, [logits, grad, self.axis]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, logits, grad, self.axis)
return res
class InnerCommIrecv(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, tag, src, shape, group, dtype):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inner_comm_irecv(self, [tag, src, shape, group, dtype_to_type_id('InnerCommIrecv', 'dtype', dtype)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, tag, src, shape, group, dtype_to_type_id('InnerCommIrecv', 'dtype', dtype))
return res
inner_comm_irecv_op=InnerCommIrecv()
class Conv1DPadding(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('weight'),
sig.make_sig('bias', default=None),
sig.make_sig('stride', default=1),
sig.make_sig('padding', default='valid'),
sig.make_sig('dilation', default=1),
sig.make_sig('groups', default=1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, weight, bias=None, stride=1, padding='valid', dilation=1, groups=1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_conv1d_padding(self, [input, weight, bias, stride, str_to_enum('Conv1DPadding', 'padding', padding), dilation, groups]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, weight, bias, stride, str_to_enum('Conv1DPadding', 'padding', padding), dilation, groups)
return res
conv1d_padding_op=Conv1DPadding()
class GridSampler3DGrad(Primitive):
r"""
Computes gradients for GridSampler3D operation.
Args:
- **grad** (Tensor) - A 5-D tensor whose dtype is float32 or float64 and whose shape is :math:`(N, C, D_{out},
H_{out}, W_{out})`. The shape must be consistent with the shape of the output of the forward calculation.
- **input_x** (Tensor) - A 5-D tensor whose dtype is the same as `grad` and whose shape is :math:`(N, C,
D_{in}, H_{in}, W_{in})`.
- **grid** (Tensor) - A 5-D tensor whose dtype is the same as `grad` and whose shape is :math:`(N, D_{out},
H_{out}, W_{out}, 3)`.
interpolation_mode (str): An optional string specifying the interpolation method. The optional values are
"bilinear" or "nearest". Default: "bilinear".
padding_mode (str): An optional string specifying the pad method. The optional values are "zeros", "border" or
"reflection". Default: "zeros".
align_corners (bool): An optional bool. If ``True``, the centers of the corner pixels of the input and output
tensors are aligned. Default: ``False``.
Returns:
- **dx** (Tensor) - A 5-D tensor whose dtype and shape are the same as `input_x`.
- **dgrid** (Tensor) - A 5-D tensor whose dtype and shape are the same as `grid`.
Raises:
TypeError: If `grad`, `input_x` or `grid` is not a Tensor.
TypeError: If the dtypes of `grad`, `input_x` and `grid` are inconsistent.
TypeError: If the dtype of `grad`, `input_x` or `grid` is not a valid type.
TypeError: If `align_corners` is not a boolean value.
ValueError: If the rank of `grad`, `input_x` or `grid` is not equal to 5.
ValueError: If the first dimension of `grad`, `input_x` and `grid` are inconsistent.
ValueError: If the last dimension of `grid` is not equal to 3.
ValueError: If `interpolation_mode` is not "bilinear", "nearest" or a string value.
ValueError: If `padding_mode` is not "zeros", "border", "reflection" or a string value.
ValueError: If the shape of `grad` is inconsistent with the shape of the output result of forward calculation.
Supported Platforms:
``GPU`` ``CPU``
"""
@prim_arg_register
def __init__(self, interpolation_mode='bilinear', padding_mode='zeros', align_corners=False, output_mask=(1, 1)):
self._set_prim_arg_with_handler("interpolation_mode", interpolation_mode, str_to_enum)
self._set_prim_arg_with_handler("padding_mode", padding_mode, str_to_enum)
self._set_prim_arg("align_corners", align_corners)
self._set_prim_arg("output_mask", output_mask)
def __call__(self, grad, input_x, grid):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_grid_sampler_3d_grad(self, [grad, input_x, grid, self.interpolation_mode, self.padding_mode, self.align_corners, self.output_mask]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, grad, input_x, grid, self.interpolation_mode, self.padding_mode, self.align_corners, self.output_mask)
return res
class SortExt(Primitive):
r"""
.. code-block::
prim = ops.SortExt()
out = prim(input, dim, descending, stable)
is equivalent to
.. code-block::
ops.sort_ext(input, dim, descending, stable)
Refer to :func:`mindspore.ops.sort_ext` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('dim', default=-1),
sig.make_sig('descending', default=False),
sig.make_sig('stable', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim=-1, descending=False, stable=False):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_sort_ext(self, [input, dim, descending, stable]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim, descending, stable)
return res
sort_ext_op=SortExt()
class ScatterNd(Primitive):
r"""
.. code-block::
prim = ops.ScatterNd()
out = prim(indices, updates, shape)
is equivalent to
.. code-block::
ops.scatter_nd(indices, updates, shape)
Refer to :func:`mindspore.ops.scatter_nd` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, indices, updates, shape):
return super().__call__(indices, updates, shape)
scatter_nd_op=ScatterNd()
class FillScalar(Primitive):
r"""
Create a Tensor of the specified shape and fill it with the specified scalar value.
Args:
size (Union(tuple[int], list[int])): The specified shape of output tensor.
fill_value (number.Number): Value to fill the returned tensor. Complex numbers are not supported for now.
Keyword Args:
dtype (mindspore.dtype): The specified type of output tensor. `bool_` and `number` are supported, for
details, please refer to :class:`mindspore.dtype` . Default: ``None`` .
Returns:
Tensor.
Raises:
TypeError: If `size` is not a tuple or list.
ValueError: If an element in `size` is less than 0.
Supported Platforms:
``Ascend``
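Examples:
An illustrative sketch added here (not part of the generated documentation); it assumes the
primitive is importable as ``ops.auto_generate.FillScalar``.
>>> import mindspore
>>> from mindspore import ops
>>> fill = ops.auto_generate.FillScalar()
>>> output = fill((2, 3), 5, dtype=mindspore.int32)
>>> print(output)
[[5 5 5]
[5 5 5]]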
"""
__mindspore_signature__ = (
sig.make_sig('size'),
sig.make_sig('fill_value'),
sig.make_sig('dtype', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, size, fill_value, dtype=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_fill_scalar(self, [size, fill_value, dtype if dtype is None else dtype_to_type_id('FillScalar', 'dtype', dtype)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, size, fill_value, dtype if dtype is None else dtype_to_type_id('FillScalar', 'dtype', dtype))
return res
fill_scalar_op=FillScalar()
class HFFT2(Primitive):
r"""
.. code-block::
prim = ops.HFFT2()
out = prim(input, s, dim, norm)
is equivalent to
.. code-block::
ops.hfft2(input, s, dim, norm)
Refer to :func:`mindspore.ops.hfft2` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('s', default=None),
sig.make_sig('dim', default=(-2, -1)),
sig.make_sig('norm', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, s=None, dim=(-2, -1), norm=None):
return super().__call__(input, s, dim, norm if norm is None else str_to_enum('HFFT2', 'norm', norm))
hfft2_op=HFFT2()
class Greater(Primitive):
r"""
.. code-block::
prim = ops.Greater()
out = prim(input, other)
is equivalent to
.. code-block::
ops.greater(input, other)
Refer to :func:`mindspore.ops.greater` for more details.
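Examples:
A minimal illustrative sketch (the values follow directly from the element-wise greater-than comparison):
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input = Tensor([1, 2, 3], mindspore.int32)
>>> other = Tensor([1, 1, 4], mindspore.int32)
>>> output = ops.Greater()(input, other)
>>> print(output)
[False  True False]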
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_greater(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
greater_op=Greater()
class ResizeNearestNeighborV2(Primitive):
r"""
Resizes the input tensor to a specific size by using the nearest neighbor algorithm.
The nearest neighbor algorithm selects the value of the nearest point and does not consider the
values of neighboring points at all, yielding a piecewise-constant interpolant.
Args:
align_corners (bool, optional): If ``True`` , the centers of the 4 corner pixels of the input and output
tensors are aligned, preserving the values at the corner pixels. Default: ``False`` .
half_pixel_centers (bool, optional): Whether to use half-pixel centers. If set to ``True`` ,
`align_corners` should be ``False``. Default: ``False`` .
Inputs:
- **x** (Tensor) - 4-D with shape :math:`(batch, channels, height, width)` .
- **size** (Tensor) - The new size for the images. A 1-D int32 Tensor
of 2 elements: [`new_height, new_width`].
Outputs:
- **y** (Tensor) - The resized images. A 4-D with shape
:math:`(batch, channels, new\_height, new\_width)`. It has the same dtype as `x`.
Raises:
TypeError: If `x` or `size` is not a Tensor.
TypeError: If the data type of `size` is not int32.
TypeError: If `align_corners` or `half_pixel_centers` is not bool.
ValueError: If any value of `size` is non-positive.
ValueError: If the dimension of `x` is not 4.
ValueError: If the dimension of `size` is not 1.
ValueError: If the elements number of `size` is not 2.
ValueError: If `half_pixel_centers` and `align_corners` are both ``True``.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> from mindspore import dtype as mstype
>>> input_tensor = Tensor(np.ones((1, 1, 4, 4)), mstype.float32)
>>> size = Tensor([2, 2], mstype.int32)
>>> resize = ops.ResizeNearestNeighborV2()
>>> output = resize(input_tensor, size)
>>> print(output)
[[[[1. 1.]
[1. 1.]]]]
>>> print(output.shape)
(1, 1, 2, 2)
"""
@prim_arg_register
def __init__(self, align_corners=False, half_pixel_centers=False):
self._set_prim_arg("align_corners", align_corners)
self._set_prim_arg("half_pixel_centers", half_pixel_centers)
def __call__(self, image, size):
return super().__call__(image, size, self.align_corners, self.half_pixel_centers)
class Slice(Primitive):
r"""
Slices a tensor in the specified shape.
Refer to :func:`mindspore.ops.slice` for more details.
Inputs:
- **input_x** (Tensor): The target tensor. The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
- **begin** (Union[tuple, list]): The beginning of the slice. Only constant value(>=0) is allowed.
- **size** (Union[tuple, list]): The size of the slice. Only constant value is allowed.
Outputs:
Tensor, the shape is the same as that of `size`, the data type is the same as `input_x`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> from mindspore import Tensor
>>> from mindspore import ops
>>> import numpy as np
>>> data = Tensor(np.array([[[1, 1, 1], [2, 2, 2]],
... [[3, 3, 3], [4, 4, 4]],
... [[5, 5, 5], [6, 6, 6]]]).astype(np.int32))
>>> slice_op = ops.Slice()
>>> output = slice_op(data, (1, 0, 0), (1, 1, 3))
>>> print(output)
[[[3 3 3]]]
>>> output = slice_op(data, (1, 0, 0), (1, 1, 2))
>>> print(output)
[[[3 3]]]
>>> output = slice_op(data, (1, 0, 0), (1, 1, 1))
>>> print(output)
[[[3]]]
>>> output = slice_op(data, (1, 1, 0), (1, 1, 3))
>>> print(output)
[[[4 4 4]]]
>>> output = slice_op(data, (1, 0, 1), (1, 1, 2))
>>> print(output)
[[[3 3]]]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, begin, size):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_slice(self, [input, begin, size]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, begin, size)
return res
slice_op=Slice()
class SpeedFusionAttentionGrad(Primitive):
r"""
Calculates the gradient of SpeedFusionAttention operation.
.. warning::
This is an experimental API that is subject to change or deletion.
Only support on Atlas A2 training series.
Supported Platforms:
``Ascend``
"""
__mindspore_signature__ = (
sig.make_sig('query'),
sig.make_sig('key'),
sig.make_sig('value'),
sig.make_sig('dy'),
sig.make_sig('head_num'),
sig.make_sig('input_layout'),
sig.make_sig('pse', default=None),
sig.make_sig('padding_mask', default=None),
sig.make_sig('atten_mask', default=None),
sig.make_sig('softmax_max', default=None),
sig.make_sig('softmax_sum', default=None),
sig.make_sig('softmax_in', default=None),
sig.make_sig('attention_in', default=None),
sig.make_sig('scale_value', default=1.0),
sig.make_sig('keep_prob', default=1.0),
sig.make_sig('pre_tokens', default=2147483647),
sig.make_sig('next_tokens', default=2147483647),
sig.make_sig('inner_precise', default=0),
sig.make_sig('seed', default=None),
sig.make_sig('offset', default=None),
sig.make_sig('numels', default=None),
sig.make_sig('prefix', default=None),
sig.make_sig('actual_seq_qlen', default=None),
sig.make_sig('actual_seq_kvlen', default=None),
sig.make_sig('sparse_mode', default=0),
sig.make_sig('gen_mask_parallel', default=True),
sig.make_sig('sync', default=False),
sig.make_sig('pse_type', default=1),
sig.make_sig('q_start_idx', default=None),
sig.make_sig('kv_start_idx', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, query, key, value, dy, head_num, input_layout, pse=None, padding_mask=None, atten_mask=None, softmax_max=None, softmax_sum=None, softmax_in=None, attention_in=None, scale_value=1.0, keep_prob=1.0, pre_tokens=2147483647, next_tokens=2147483647, inner_precise=0, seed=None, offset=None, numels=None, prefix=None, actual_seq_qlen=None, actual_seq_kvlen=None, sparse_mode=0, gen_mask_parallel=True, sync=False, pse_type=1, q_start_idx=None, kv_start_idx=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_speed_fusion_attention_grad(self, [query, key, value, dy, head_num, str_to_enum('SpeedFusionAttentionGrad', 'input_layout', input_layout), pse, padding_mask, atten_mask, softmax_max, softmax_sum, softmax_in, attention_in, scale_value, keep_prob, pre_tokens, next_tokens, inner_precise, seed, offset, numels, prefix, actual_seq_qlen, actual_seq_kvlen, sparse_mode, gen_mask_parallel, sync, pse_type, q_start_idx, kv_start_idx]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, query, key, value, dy, head_num, str_to_enum('SpeedFusionAttentionGrad', 'input_layout', input_layout), pse, padding_mask, atten_mask, softmax_max, softmax_sum, softmax_in, attention_in, scale_value, keep_prob, pre_tokens, next_tokens, inner_precise, seed, offset, numels, prefix, actual_seq_qlen, actual_seq_kvlen, sparse_mode, gen_mask_parallel, sync, pse_type, q_start_idx, kv_start_idx)
return res
speed_fusion_attention_grad_op=SpeedFusionAttentionGrad()
class KLDivGrad(Primitive):
r"""
.. code-block::
prim = ops.KLDivGrad()
out = prim(grad_output, input, target, reduction, log_target)
is equivalent to
.. code-block::
ops.kl_div_grad(grad_output, input, target, reduction, log_target)
Refer to :func:`mindspore.ops.kl_div_grad` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('grad_output'),
sig.make_sig('input'),
sig.make_sig('target'),
sig.make_sig('reduction', default='mean'),
sig.make_sig('log_target', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, grad_output, input, target, reduction='mean', log_target=False):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_kl_div_grad(self, [grad_output, input, target, str_to_enum('KLDivGrad', 'reduction', reduction), log_target]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, grad_output, input, target, str_to_enum('KLDivGrad', 'reduction', reduction), log_target)
return res
kl_div_grad_op=KLDivGrad()
class InplaceScatterSrc(Primitive):
r"""
InplaceScatterSrc implements `scatter_` when a Tensor is used as the source element and no reduce is applied.
For details, please refer to :func:`mindspore.Tensor.scatter_`.
Examples:
>>> from mindspore import Tensor, int64, float32
>>> this_tensor = Tensor([[1, 2], [3, 4]], dtype=float32)
>>> index = Tensor([[1, 0], [1, 0]], dtype=int64)
>>> src = Tensor([[4, 3], [2, 1]], dtype=float32)
>>> this_tensor.scatter_(1, index, src)
>>> print(this_tensor)
[[3., 4.],
[1., 2.]]
Supported Platforms:
``Ascend``
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('dim'),
sig.make_sig('index'),
sig.make_sig('src'),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, dim, index, src):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_scatter_src(self, [input, dim, index, src]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim, index, src)
return res
inplace_scatter_src_op=InplaceScatterSrc()
class SilentCheckV3(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('val'),
sig.make_sig('max'),
sig.make_sig('avg'),
sig.make_sig('input_grad'),
sig.make_sig('step'),
sig.make_sig('c_thresh_l1', default=1000000.0),
sig.make_sig('c_thresh_l2', default=10000.0),
sig.make_sig('beta1', default=0.0),
sig.make_sig('npu_asd_detect', default=1),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, val, max, avg, input_grad, step, c_thresh_l1=1000000.0, c_thresh_l2=10000.0, beta1=0.0, npu_asd_detect=1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_silent_check_v3(self, [val, max, avg, input_grad, step, c_thresh_l1, c_thresh_l2, beta1, npu_asd_detect]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, val, max, avg, input_grad, step, c_thresh_l1, c_thresh_l2, beta1, npu_asd_detect)
return res
silent_check_v3_op=SilentCheckV3()
class BinaryCrossEntropyWithLogitsBackward(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('grad_output'),
sig.make_sig('input'),
sig.make_sig('target'),
sig.make_sig('weight', default=None),
sig.make_sig('posWeight', default=None),
sig.make_sig('reduction', default='mean'),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, grad_output, input, target, weight=None, posWeight=None, reduction='mean'):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_binary_cross_entropy_with_logits_backward(self, [grad_output, input, target, weight, posWeight, str_to_enum('BinaryCrossEntropyWithLogitsBackward', 'reduction', reduction)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, grad_output, input, target, weight, posWeight, str_to_enum('BinaryCrossEntropyWithLogitsBackward', 'reduction', reduction))
return res
binary_cross_entropy_with_logits_backward_op=BinaryCrossEntropyWithLogitsBackward()
class RandInt(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('low'),
sig.make_sig('high'),
sig.make_sig('shape'),
sig.make_sig('seed'),
sig.make_sig('offset'),
sig.make_sig('dtype', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, low, high, shape, seed, offset, dtype=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_randint(self, [low, high, shape, seed, offset, dtype if dtype is None else dtype_to_type_id('RandInt', 'dtype', dtype)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, low, high, shape, seed, offset, dtype if dtype is None else dtype_to_type_id('RandInt', 'dtype', dtype))
return res
randint_op=RandInt()
class Conv3DExt(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('weight'),
sig.make_sig('bias', default=None),
sig.make_sig('stride', default=1),
sig.make_sig('padding', default=0),
sig.make_sig('dilation', default=1),
sig.make_sig('groups', default=1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_conv3d_ext(self, [input, weight, bias, stride, padding, dilation, groups]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, weight, bias, stride, padding, dilation, groups)
return res
conv3d_ext_op=Conv3DExt()
class EmbeddingApplyAdam(Primitive):
r"""
.. code-block::
prim = ops.EmbeddingApplyAdam()
out = prim(var_handle, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num)
is equivalent to
.. code-block::
ops.embedding_apply_adam(var_handle, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num)
Refer to :func:`mindspore.ops.embedding_apply_adam` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('var_handle'),
sig.make_sig('beta1_power'),
sig.make_sig('beta2_power'),
sig.make_sig('lr'),
sig.make_sig('beta1'),
sig.make_sig('beta2'),
sig.make_sig('epsilon'),
sig.make_sig('grad'),
sig.make_sig('keys'),
sig.make_sig('global_step'),
sig.make_sig('embedding_dim'),
sig.make_sig('mask_zero', default=(0,)),
sig.make_sig('padding_key', default=(0,)),
sig.make_sig('padding_key_mask', default=(1,)),
sig.make_sig('completion_key', default=(0,)),
sig.make_sig('completion_key_mask', default=(1,)),
sig.make_sig('_embedding_dim', default=1),
sig.make_sig('_max_key_num', default=1),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("_process_node_engine_id", 'PS')
def __call__(self, var_handle, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, keys, global_step, embedding_dim, mask_zero=(0,), padding_key=(0,), padding_key_mask=(1,), completion_key=(0,), completion_key_mask=(1,), _embedding_dim=1, _max_key_num=1):
return super().__call__(var_handle, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num)
embedding_apply_adam_op=EmbeddingApplyAdam()
class Im2ColExt(Primitive):
r"""
.. code-block::
prim = ops.Im2ColExt()
out = prim(input, kernel_size, dilation, padding, stride)
is equivalent to
.. code-block::
ops.unfold_ext(input, kernel_size, dilation, padding, stride)
Refer to :func:`mindspore.ops.unfold_ext` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('kernel_size'),
sig.make_sig('dilation', default=1),
sig.make_sig('padding', default=0),
sig.make_sig('stride', default=1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, kernel_size, dilation=1, padding=0, stride=1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_im2col_ext(self, [input, to_pair('Im2ColExt', 'kernel_size', kernel_size), to_pair('Im2ColExt', 'dilation', dilation), to_pair('Im2ColExt', 'padding', padding), to_pair('Im2ColExt', 'stride', stride)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, to_pair('Im2ColExt', 'kernel_size', kernel_size), to_pair('Im2ColExt', 'dilation', dilation), to_pair('Im2ColExt', 'padding', padding), to_pair('Im2ColExt', 'stride', stride))
return res
im2col_ext_op=Im2ColExt()
class LinalgQr(Primitive):
r"""
.. code-block::
prim = ops.LinalgQr()
out = prim(A, mode)
is equivalent to
.. code-block::
ops.linalg_qr(A, mode)
Refer to :func:`mindspore.ops.linalg_qr` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('A'),
sig.make_sig('mode', default='reduced'),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, A, mode='reduced'):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_linalg_qr(self, [A, str_to_enum('LinalgQr', 'mode', mode)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, A, str_to_enum('LinalgQr', 'mode', mode))
return res
linalg_qr_op=LinalgQr()
class Log1p(Primitive):
r"""
.. code-block::
prim = ops.Log1p()
out = prim(input)
is equivalent to
.. code-block::
ops.log1p(input)
Refer to :func:`mindspore.ops.log1p` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_log1p(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
log1p_op=Log1p()
class ReplicationPad1D(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, padding):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_replication_pad_1d(self, [input, padding]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, padding)
return res
replication_pad_1d_op=ReplicationPad1D()
class SliceExt(Primitive):
r"""
Returns a slice of the input tensor along dimension `dim`,
taking elements from `start` to `end` (exclusive) with step `step`.
Args:
input (Tensor): the tensor to slice.
dim (int): dimension along which to slice.
start (int): the starting index along `dim`.
end (int): the ending index along `dim` (exclusive).
step (int): the slice step size.
Returns:
Tensor.
Raises:
ValueError: If dim is out of range [-input.ndim, input.ndim).
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> from mindspore import ops
>>> from mindspore import Tensor
>>> x = Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], mindspore.int32)
>>> output = ops.SliceExt()(x, 0, 0, 2, 1)
>>> print(output)
[[ 1 2 3]
[ 4 5 6]]
>>> output = ops.SliceExt()(x, 1, 1, 3, 1)
>>> print(output)
[[ 2 3]
[ 5 6]
[ 8 9]]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim, start, end, step):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_slice_ext(self, [input, dim, start, end, step]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim, start, end, step)
return res
slice_ext_op=SliceExt()
class Dropout(Primitive):
r"""
During training, randomly zeroes some of the elements of the input tensor
with probability :math:`1 - keep\_prob` from a Bernoulli distribution. It plays the
role of reducing neuron correlation and avoid overfitting.
Refer to :func:`mindspore.ops.dropout` for more details.
.. warning::
The Ascend backend does not support the reproducibility of random numbers, so
the `Seed0` and `Seed1` parameters have no effect.
Args:
keep_prob (float, optional): The keep rate, between 0 and 1, e.g. keep_prob = 0.9,
means dropping out 10% of input units. Default: ``0.5`` .
Seed0 (int, optional): Seed0 value for random generating. Default: ``0`` .
Seed1 (int, optional): Seed1 value for random generating. Default: ``0`` .
Inputs:
- **x** (Tensor) - The input Tensor of shape :math:`(*, N)`, with data type of float16, float32 or float64.
Outputs:
- **output** (Tensor) - With the same shape and data type as `x`.
- **mask** (Tensor) - The mask applied to `x`.
- On GPU and CPU, `mask` has the same shape and data type as `x`.
- On Ascend, to achieve a better performance, it is denoted as a 1-D Tensor
with Uint8 data type. It has shape :math:`(byte\_counts, )` where :math:`byte\_counts` is the
number of bytes needed to mask the input `x`; :math:`byte\_counts` is calculated using the
following formula:
.. math::
byte\_counts = \text{ceil}(\text{cumprod}(x.shape) / 128) * 16
If shape of `x` is :math:`(2, 3, 4, 5, 6)`, the shape of `mask` will be :math:`(96, )`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> dropout = ops.Dropout(keep_prob=0.5)
>>> x = Tensor(np.ones([1, 2, 3, 4, 5]), mindspore.float32)
>>> output, mask = dropout(x)
>>> print(output.shape, mask.shape, mask.dtype)
(1, 2, 3, 4, 5) (16,) UInt8
"""
@prim_arg_register
def __init__(self, keep_prob=0.5, Seed0=0, Seed1=0):
self._set_prim_arg("keep_prob", keep_prob)
self._set_prim_arg("Seed0", Seed0)
self._set_prim_arg("Seed1", Seed1)
self.add_prim_attr("side_effect_hidden", True)
def __call__(self, x):
return super().__call__(x, self.keep_prob, self.Seed0, self.Seed1)
class GatherNd(Primitive):
r"""
.. code-block::
prim = ops.GatherNd()
out = prim(input_x, indices)
is equivalent to
.. code-block::
ops.gather_nd(input_x, indices)
Refer to :func:`mindspore.ops.gather_nd` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input_x, indices):
return super().__call__(input_x, indices)
gather_nd_op=GatherNd()
class ApplyRotaryPosEmb(Primitive):
r"""
.. code-block::
prim = ops.ApplyRotaryPosEmb(cos_format)
out = prim(query, key, cos, sin, position_ids)
is equivalent to
.. code-block::
ops.apply_rotary_pos_emb_(query, key, cos, sin, position_ids, cos_format)
Refer to :func:`mindspore.ops.apply_rotary_pos_emb_` for more details.
"""
@prim_arg_register
def __init__(self, cos_format=0):
self._set_prim_arg("cos_format", cos_format)
def __call__(self, query, key, cos, sin, position_ids):
return super().__call__(query, key, cos, sin, position_ids, self.cos_format)
class Cummax(Primitive):
r"""
.. code-block::
prim = ops.Cummax(axis)
out = prim(input)
is equivalent to
.. code-block::
ops.cummax(input, axis)
Refer to :func:`mindspore.ops.cummax` for more details.
"""
@prim_arg_register
def __init__(self, axis):
self._set_prim_arg("axis", axis)
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_cummax(self, [input, self.axis]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, self.axis)
return res
class InplaceMul(Primitive):
r"""
.. code-block::
prim = ops.InplaceMul()
out = prim(input, other)
is equivalent to
.. code-block::
ops.inplace_mul(input, other)
Refer to :func:`mindspore.ops.inplace_mul` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('other'),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_mul(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
inplace_mul_op=InplaceMul()
class LinSpaceExt(Primitive):
r"""
Returns a Tensor of `steps` evenly spaced values in the interval from `start` to `end` (including `start` and
`end`); the length of the output Tensor is `steps`.
.. math::
\begin{aligned}
&step = (end - start)/(steps - 1)\\
&output = [start, start+step, start+2*step, ... , end]
\end{aligned}
.. warning::
Atlas training series does not support int16 dtype currently.
Inputs:
- **start** (Union[float, int]) - Start value of interval.
It can be a float or integer.
- **end** (Union[float, int]) - Last value of interval.
It can be a float or integer.
- **steps** (int) - Number of ticks in the interval, inclusive of start and end.
Must be a positive integer.
- **dtype** (mindspore.dtype, optional) - The output Tensor data type. Default: ``None`` , where the data type of output
Tensor is float32.
Outputs:
Tensor, has the shape of :math:`(steps,)`, with dtype specified by `dtype`.
Raises:
TypeError: If the dtype of `start` or `end` is not supported.
ValueError: If `steps` is not a positive integer.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore as ms
>>> from mindspore import ops
>>> start = 1
>>> end = 10
>>> steps = 5
>>> output = ops.auto_generate.LinSpaceExt()(start, end, steps, dtype=ms.float32)
>>> print(output)
[ 1. 3.25 5.5 7.75 10. ]
"""
__mindspore_signature__ = (
sig.make_sig('start'),
sig.make_sig('end'),
sig.make_sig('steps'),
sig.make_sig('dtype', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, start, end, steps, dtype=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_lin_space_ext(self, [start, end, steps, dtype if dtype is None else dtype_to_type_id('LinSpaceExt', 'dtype', dtype)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, start, end, steps, dtype if dtype is None else dtype_to_type_id('LinSpaceExt', 'dtype', dtype))
return res
lin_space_ext_op=LinSpaceExt()
class IRFFTDouble(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('n'),
sig.make_sig('dim', default=-1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, n, dim=-1):
return super().__call__(input, n, dim)
irfft_double_op=IRFFTDouble()
class Convolution(Primitive):
r"""
.. code-block::
prim = ops.Convolution()
out = prim(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups)
is equivalent to
.. code-block::
ops.convolution(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups)
Refer to :func:`mindspore.ops.convolution` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('weight'),
sig.make_sig('bias', default=None),
sig.make_sig('stride', default=1),
sig.make_sig('padding', default=0),
sig.make_sig('dilation', default=1),
sig.make_sig('transposed', default=False),
sig.make_sig('output_padding', default=0),
sig.make_sig('groups', default=1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, weight, bias=None, stride=1, padding=0, dilation=1, transposed=False, output_padding=0, groups=1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_convolution(self, [input, weight, bias, stride, padding, dilation, transposed, output_padding, groups]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups)
return res
convolution_op=Convolution()
class InplaceIndexPut(Primitive):
r"""
.. code-block::
prim = ops.InplaceIndexPut()
out = prim(input, indices, values, accumulate)
is equivalent to
.. code-block::
ops.index_put_(input, indices, values, accumulate)
Refer to :func:`mindspore.ops.index_put_` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('indices'),
sig.make_sig('values'),
sig.make_sig('accumulate', default=False),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, indices, values, accumulate=False):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_index_put(self, [input, indices, values, accumulate]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, indices, values, accumulate)
return res
inplace_index_put_op=InplaceIndexPut()
class SqrtGrad(Primitive):
r"""
Performs grad of Sqrt operation.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, dy, y):
return super().__call__(dy, y)
sqrt_grad_op=SqrtGrad()
class FlattenExt(Primitive):
r"""
.. code-block::
prim = ops.FlattenExt()
out = prim(input, start_dim, end_dim)
is equivalent to
.. code-block::
ops.flatten_ext(input, start_dim, end_dim)
Refer to :func:`mindspore.ops.flatten_ext` for more details.
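Examples:
A shape-only sketch (illustrative; with the default ``start_dim=0`` and ``end_dim=-1`` the tensor is flattened to 1-D):
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.ones((2, 3, 4)).astype(np.float32))
>>> output = ops.FlattenExt()(x)
>>> print(output.shape)
(24,)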
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('start_dim', default=0),
sig.make_sig('end_dim', default=-1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, start_dim=0, end_dim=-1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_flatten_ext(self, [input, start_dim, end_dim]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, start_dim, end_dim)
return res
flatten_ext_op=FlattenExt()
class SoftShrinkGrad(Primitive):
r"""
.. code-block::
prim = ops.SoftShrinkGrad(lambd)
out = prim(input_grad, input_x)
is equivalent to
.. code-block::
ops.softshrink_grad(input_grad, input_x, lambd)
Refer to :func:`mindspore.ops.softshrink_grad` for more details.
"""
@prim_arg_register
def __init__(self, lambd=0.5):
self._set_prim_arg("lambd", type_it('SoftShrinkGrad', 'lambd', lambd, (OpDtype.DT_INT, OpDtype.DT_BOOL), OpDtype.DT_FLOAT))
def __call__(self, input_grad, input_x):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_softshrink_grad(self, [input_grad, input_x, self.lambd]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input_grad, input_x, self.lambd)
return res
class InplaceCopy(Primitive):
r"""
.. code-block::
prim = ops.InplaceCopy()
out = prim(variable, value)
is equivalent to
.. code-block::
ops.inplace_copy(variable, value)
Refer to :func:`mindspore.ops.inplace_copy` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('variable', sig.sig_rw.RW_WRITE),
sig.make_sig('value'),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, variable, value):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_copy(self, [variable, value]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, variable, value)
return res
inplace_copy_op=InplaceCopy()
class Maximum(Primitive):
r"""
.. code-block::
prim = ops.Maximum()
out = prim(input, other)
is equivalent to
.. code-block::
ops.maximum(input, other)
Refer to :func:`mindspore.ops.maximum` for more details.
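Examples:
A minimal illustrative sketch (the values are the element-wise maxima of the two inputs):
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input = Tensor([1, 5, 3], mindspore.int32)
>>> other = Tensor([4, 2, 6], mindspore.int32)
>>> output = ops.Maximum()(input, other)
>>> print(output)
[4 5 6]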
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_maximum(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
maximum_op=Maximum()
class InplaceTanh(Primitive):
r"""
.. code-block::
prim = ops.InplaceTanh()
out = prim(input)
is equivalent to
.. code-block::
ops.tanh_(input)
Refer to :func:`mindspore.ops.tanh_` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_tanh(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
inplace_tanh_op=InplaceTanh()
class Argmax(Primitive):
r"""
Returns the indices of the maximum value along a specified `axis` of a Tensor.
Refer to :func:`mindspore.ops.argmax` for more details.
Args:
axis (int): Axis where the Argmax operation applies to. Default: ``-1`` .
output_type (:class:`mindspore.dtype`): Output data type.
Supported types: ``mstype.int32`` , ``mstype.int64`` . Default: ``mstype.int32`` .
Inputs:
- **input_x** (Tensor) - The input tensor. :math:`(N, *)` where :math:`*` means, any number of additional
dimensions.
Outputs:
Tensor, indices of the max value of input tensor across the axis.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
>>> output = ops.Argmax(output_type=mindspore.int32)(input_x)
>>> print(output)
[1 0 0]
"""
@prim_arg_register
def __init__(self, axis=-1, output_type=mstype.int32):
self._set_prim_arg("axis", axis)
self._set_prim_arg_with_handler("output_type", output_type, dtype_to_type_id)
def __call__(self, input_x):
return super().__call__(input_x, self.axis, self.output_type)
class AvgPool3DGradExt(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('grad'),
sig.make_sig('input'),
sig.make_sig('kernel_size'),
sig.make_sig('stride', default=None),
sig.make_sig('padding', default=0),
sig.make_sig('ceil_mode', default=False),
sig.make_sig('count_include_pad', default=True),
sig.make_sig('divisor_override', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, grad, input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_avg_pool3d_grad_ext(self, [grad, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, grad, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override)
return res
avg_pool3d_grad_ext_op=AvgPool3DGradExt()
class Meshgrid(Primitive):
r"""
Generates coordinate matrices from given coordinate tensors.
Refer to :func:`mindspore.ops.meshgrid` for more details.
Args:
indexing (str, optional): Cartesian ``'xy'`` or
matrix ``'ij'`` indexing of output. Valid options: ``'xy'`` or ``'ij'``. In the 2-D case with
inputs of length `M` and `N`, for ``'xy'`` indexing, the shape of outputs is :math:`(N, M)`;
for ``'ij'`` indexing, the shape of outputs is :math:`(M, N)`. In the 3-D
case with inputs of length `M`, `N` and `P`, for ``'xy'`` indexing, the shape of outputs is
:math:`(N, M, P)` and for ``'ij'`` indexing, the shape of outputs is :math:`(M, N, P)`.
Default: ``'xy'`` .
Inputs:
- **inputs** (Union(tuple[Tensor], list[Tensor])) - In GRAPH_MODE, a tuple of N 1-D Tensor objects and
the length of input should be greater than 1. In PYNATIVE_MODE, a tuple of N 0-D or 1-D Tensor objects
and the length of input should be greater than 0. The data type is Number.
Outputs:
Tensors, A Tuple of N N-D Tensor objects. The data type is the same with the Inputs.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([1, 2, 3, 4]).astype(np.int32))
>>> y = Tensor(np.array([5, 6, 7]).astype(np.int32))
>>> z = Tensor(np.array([8, 9, 0, 1, 2]).astype(np.int32))
>>> inputs = (x, y, z)
>>> meshgrid = ops.Meshgrid(indexing='xy')
>>> output = meshgrid(inputs)
>>> print(output)
(Tensor(shape=[3, 4, 5], dtype=Int32, value=
[[[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2],
[3, 3, 3, 3, 3],
[4, 4, 4, 4, 4]],
[[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2],
[3, 3, 3, 3, 3],
[4, 4, 4, 4, 4]],
[[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2],
[3, 3, 3, 3, 3],
[4, 4, 4, 4, 4]]]),
Tensor(shape=[3, 4, 5], dtype=Int32, value=
[[[5, 5, 5, 5, 5],
[5, 5, 5, 5, 5],
[5, 5, 5, 5, 5],
[5, 5, 5, 5, 5]],
[[6, 6, 6, 6, 6],
[6, 6, 6, 6, 6],
[6, 6, 6, 6, 6],
[6, 6, 6, 6, 6]],
[[7, 7, 7, 7, 7],
[7, 7, 7, 7, 7],
[7, 7, 7, 7, 7],
[7, 7, 7, 7, 7]]]),
Tensor(shape=[3, 4, 5], dtype=Int32, value=
[[[8, 9, 0, 1, 2],
[8, 9, 0, 1, 2],
[8, 9, 0, 1, 2],
[8, 9, 0, 1, 2]],
[[8, 9, 0, 1, 2],
[8, 9, 0, 1, 2],
[8, 9, 0, 1, 2],
[8, 9, 0, 1, 2]],
[[8, 9, 0, 1, 2],
[8, 9, 0, 1, 2],
[8, 9, 0, 1, 2],
[8, 9, 0, 1, 2]]]))
"""
@prim_arg_register
def __init__(self, indexing='xy'):
self._set_prim_arg_with_handler("indexing", indexing, str_to_enum)
def __call__(self, inputs):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_meshgrid(self, [inputs, self.indexing]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, inputs, self.indexing)
return res
class NanToNum(Primitive):
r"""
.. code-block::
prim = ops.NanToNum(nan, posinf, neginf)
out = prim(input)
is equivalent to
.. code-block::
ops.nan_to_num(input, nan, posinf, neginf)
Refer to :func:`mindspore.ops.nan_to_num` for more details.
"""
@prim_arg_register
def __init__(self, nan=None, posinf=None, neginf=None):
self._set_prim_arg("nan", type_it('NanToNum', 'nan', nan, (OpDtype.DT_INT, OpDtype.DT_BOOL), OpDtype.DT_FLOAT))
self._set_prim_arg("posinf", type_it('NanToNum', 'posinf', posinf, (OpDtype.DT_INT, OpDtype.DT_BOOL), OpDtype.DT_FLOAT))
self._set_prim_arg("neginf", type_it('NanToNum', 'neginf', neginf, (OpDtype.DT_INT, OpDtype.DT_BOOL), OpDtype.DT_FLOAT))
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_nan_to_num(self, [input, self.nan, self.posinf, self.neginf]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, self.nan, self.posinf, self.neginf)
return res
class CumsumExt(Primitive):
r"""
.. code-block::
prim = ops.CumsumExt()
out = prim(input, dim, dtype)
is equivalent to
.. code-block::
ops.cumsum_ext(input, dim, dtype)
Refer to :func:`mindspore.ops.cumsum_ext` for more details.
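Examples:
A minimal illustrative sketch (a running sum along ``dim=0``):
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> x = Tensor([1, 2, 3], mindspore.int32)
>>> output = ops.CumsumExt()(x, 0)
>>> print(output)
[1 3 6]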
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('dim'),
sig.make_sig('dtype', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim, dtype=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_cumsum_ext(self, [input, dim, dtype if dtype is None else dtype_to_type_id('CumsumExt', 'dtype', dtype)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim, dtype if dtype is None else dtype_to_type_id('CumsumExt', 'dtype', dtype))
return res
cumsum_ext_op=CumsumExt()
class Sqrt(Primitive):
r"""
.. code-block::
prim = ops.Sqrt()
out = prim(x)
is equivalent to
.. code-block::
ops.sqrt(x)
Refer to :func:`mindspore.ops.sqrt` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_sqrt(self, [x]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x)
return res
sqrt_op=Sqrt()
class CholeskyInverse(Primitive):
r"""
Returns the inverse of a positive-definite matrix, given its Cholesky factor, using Cholesky matrix factorization.
Refer to :func:`mindspore.ops.cholesky_inverse` for more details.
Args:
upper(bool, optional): Whether to return a lower or upper triangular matrix. Default: ``False`` .
Inputs:
- **x** (Tensor) - The input tensor whose rank is 2. Supported dtypes: float32, float64.
Outputs:
Tensor, has the same shape and dtype as `x`.
Supported Platforms:
``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([[1, 1], [1, 2]]), mindspore.float32)
>>> y = ops.CholeskyInverse()(x)
>>> print(y)
[[ 5.0 -3.0 ]
[-3.0 2.0 ]]
"""
@prim_arg_register
def __init__(self, upper=False):
self._set_prim_arg("upper", upper)
def __call__(self, input_x):
return super().__call__(input_x, self.upper)
class XLogYScalarSelf(Primitive):
r"""
Computes the first input (a scalar) multiplied by the logarithm of the second input tensor element-wise.
Returns zero when `input` is zero.
.. math::
out_{i} = input \cdot \ln{(other_{i})}
Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
Inputs:
- **input** (number) - The first input is a number.
- **other** (Tensor) - The second input is a tensor.
Outputs:
- **y** (Tensor) - the shape is the same as the second input,
and the data type is the one with higher precision or higher digits among the two inputs.
Raises:
TypeError: If `input` is not a number.
TypeError: If `other` is not a Tensor.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore.ops.auto_generate import XLogYScalarSelf
>>> input = 3
>>> other = Tensor(np.array([2, 2, 2]), mindspore.float32)
>>> op = XLogYScalarSelf()
>>> output = op(input, other)
>>> print(output)
[2.07944155 2.07944155 2.07944155]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_xlogy_scalar_self(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
xlogy_scalar_self_op=XLogYScalarSelf()
class FloorDiv(Primitive):
r"""
.. code-block::
prim = ops.FloorDiv()
out = prim(input, other)
is equivalent to
.. code-block::
ops.floor_divide(input, other)
Refer to :func:`mindspore.ops.floor_divide` for more details.
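Examples:
A minimal illustrative sketch (element-wise floor division):
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input = Tensor([7, 9], mindspore.int32)
>>> other = Tensor([2, 2], mindspore.int32)
>>> output = ops.FloorDiv()(input, other)
>>> print(output)
[3 4]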
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_floor_div(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
floor_div_op=FloorDiv()
class MaskedSelectGrad(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, mask, grad):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_masked_select_grad(self, [input, mask, grad]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, mask, grad)
return res
masked_select_grad_op=MaskedSelectGrad()
class NewEmpty(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('size'),
sig.make_sig('dtype', default=None),
sig.make_sig('device', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, size, dtype=None, device=None):
return super().__call__(input, size, dtype if dtype is None else dtype_to_type_id('NewEmpty', 'dtype', dtype), device)
new_empty_op=NewEmpty()
class LeakyReLUGradExt(Primitive):
r"""
Computes gradient for the LeakyReLU activation.
Args:
dy (Tensor): Input gradients tensor, has the same dtype and shape as `input`.
input (Tensor): Origin input tensor.
negative_slope (Scalar): Origin negative_slope.
is_result (bool): Output `input` if ``True``.
Returns:
Tensor, has the same dtype and shape as `input`.
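Examples:
A shape-only sketch (illustrative; this is an internal gradient primitive, assumed here to be reachable via ``ops.auto_generate`` like the other generated primitives, so only the output shape is shown):
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> dy = Tensor(np.ones((2, 3)).astype(np.float32))
>>> x = Tensor(np.array([[-1., 0., 2.], [3., -4., 5.]]).astype(np.float32))
>>> output = ops.auto_generate.LeakyReLUGradExt()(dy, x)
>>> print(output.shape)
(2, 3)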
"""
__mindspore_signature__ = (
sig.make_sig('dy'),
sig.make_sig('input'),
sig.make_sig('negative_slope', default=0.01),
sig.make_sig('is_result', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, dy, input, negative_slope=0.01, is_result=False):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_leaky_relu_grad_ext(self, [dy, input, negative_slope, is_result]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, dy, input, negative_slope, is_result)
return res
leaky_relu_grad_ext_op=LeakyReLUGradExt()
class Exp2(Primitive):
r"""
.. code-block::
prim = ops.Exp2()
out = prim(input)
is equivalent to
.. code-block::
ops.exp2(input)
Refer to :func:`mindspore.ops.exp2` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_exp2(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
exp2_op=Exp2()
class InplaceIndexAddExt(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('dim'),
sig.make_sig('index'),
sig.make_sig('source'),
sig.make_sig('alpha', default=1),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, dim, index, source, alpha=1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_index_add(self, [input, dim, index, source, alpha]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim, index, source, alpha)
return res
inplace_index_add_op=InplaceIndexAddExt()
class ReciprocalGrad(Primitive):
r"""
Performs grad of Reciprocal operation.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, y, dy):
return super().__call__(y, dy)
reciprocal_grad_op=ReciprocalGrad()
class Cos(Primitive):
r"""
.. code-block::
prim = ops.Cos()
out = prim(input)
is equivalent to
.. code-block::
ops.cos(input)
Refer to :func:`mindspore.ops.cos` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_cos(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
cos_op=Cos()
class MeanExt(Primitive):
r"""
.. code-block::
prim = ops.MeanExt()
out = prim(input, dim, keepdim, dtype)
is equivalent to
.. code-block::
ops.mean_ext(input, dim, keepdim, dtype)
Refer to :func:`mindspore.ops.mean_ext` for more details.
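Examples:
A minimal illustrative sketch (with the default ``dim=None`` the mean is taken over all elements):
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> x = Tensor([1.0, 2.0, 3.0, 4.0], mindspore.float32)
>>> output = ops.MeanExt()(x)
>>> print(output)
2.5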
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('dim', default=None),
sig.make_sig('keepdim', default=False),
sig.make_sig('dtype', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim=None, keepdim=False, dtype=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_mean_ext(self, [input, dim, keepdim, dtype if dtype is None else dtype_to_type_id('MeanExt', 'dtype', dtype)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim, keepdim, dtype if dtype is None else dtype_to_type_id('MeanExt', 'dtype', dtype))
return res
mean_ext_op=MeanExt()
class SmoothL1LossGrad(Primitive):
r"""
Computes gradient for prediction on SmoothL1Loss.
"""
@prim_arg_register
def __init__(self, beta=1.0, reduction='none'):
self._set_prim_arg("beta", type_it('SmoothL1LossGrad', 'beta', beta, (OpDtype.DT_INT, OpDtype.DT_BOOL), OpDtype.DT_FLOAT))
self._set_prim_arg_with_handler("reduction", reduction, str_to_enum)
def __call__(self, prediction, target, dout):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_smooth_l1_loss_grad(self, [prediction, target, dout, self.beta, self.reduction]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, prediction, target, dout, self.beta, self.reduction)
return res
class Generator(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, cmd, inputs):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_generator(self, [cmd, inputs]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, cmd, inputs)
return res
generator_op=Generator()
class SeLUExt(Primitive):
r"""
.. code-block::
prim = ops.SeLUExt()
out = prim(input)
is equivalent to
.. code-block::
ops.selu_ext(input)
Refer to :func:`mindspore.ops.selu_ext` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_selu_ext(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
selu_ext_op=SeLUExt()
class CountNonZero(Primitive):
r"""
.. code-block::
prim = ops.CountNonZero()
out = prim(input, dim)
is equivalent to
.. code-block::
ops.count_nonzero(input, dim)
Refer to :func:`mindspore.ops.count_nonzero` for more details.
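Examples:
A minimal illustrative sketch (with the default ``dim=None`` all non-zero elements are counted):
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> x = Tensor([[0, 1], [2, 0]], mindspore.int32)
>>> output = ops.CountNonZero()(x)
>>> print(output)
2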
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('dim', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_count_nonzero(self, [input, dim]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim)
return res
count_nonzero_op=CountNonZero()
class DivMods(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('other'),
sig.make_sig('rounding_mode', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other, rounding_mode=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_divmods(self, [input, other, rounding_mode if rounding_mode is None else str_to_enum('DivMods', 'rounding_mode', rounding_mode)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other, rounding_mode if rounding_mode is None else str_to_enum('DivMods', 'rounding_mode', rounding_mode))
return res
divmods_op=DivMods()
class Var(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('dim', default=None),
sig.make_sig('correction', default=1),
sig.make_sig('keepdim', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim=None, correction=1, keepdim=False):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_var(self, [input, dim, correction, keepdim]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim, correction, keepdim)
return res
var_op=Var()
class GroupNorm(Primitive):
r"""
Group Normalization over a mini-batch of inputs.
Group Normalization is widely used in recurrent neural networks. It applies
normalization on a mini-batch of inputs for each single training case as described
in the paper `Group Normalization <https://arxiv.org/pdf/1803.08494.pdf>`_. Group Normalization
divides the channels into groups and computes within each group the mean and variance for normalization,
and it performs very stable over a wide range of batch size. :math:`\gamma` and :math:`\beta` are trainable scale
and shift.
It can be described using the following formula:
.. math::
y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
where :math:`\gamma` is `weight`, :math:`\beta` is `bias`, :math:`\epsilon` is `eps`.
Args:
input (Tensor): The input feature with shape :math:`(N, C, *)` where :math:`*` means, any number of
additional dimensions.
num_groups (int): The number of groups to be divided along the channel dimension.
weight (Tensor, optional): The shape :math:`(C,)`. Default: ``None``, has the same data type as `input`.
bias (Tensor, optional): The shape :math:`(C,)`. Default: ``None``, has the same data type as `input`.
eps (float, optional): A value added to the denominator for numerical stability. Default: ``1e-5`` .
Returns:
Tensor, the normalized and scaled offset tensor, has the same shape and data type as the `input`.
Raises:
TypeError: If `num_groups` is not an int.
TypeError: If `eps` is not a float.
ValueError: If `num_groups` is less than 1.
ValueError: If `C` (the channel dimension of `input`) is not divisible by `num_groups`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore as ms
>>> import numpy as np
>>> from mindspore import ops
>>> x = ms.Tensor(np.ones([1, 2, 4, 4], np.float32))
>>> group_norm_op = ops.GroupNorm()
>>> output = group_norm_op(x, 2)[0]
>>> print(output)
[[[[0. 0. 0. 0.]
[0. 0. 0. 0.]
[0. 0. 0. 0.]
[0. 0. 0. 0.]]
[[0. 0. 0. 0.]
[0. 0. 0. 0.]
[0. 0. 0. 0.]
[0. 0. 0. 0.]]]]
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('num_groups'),
sig.make_sig('weight', default=None),
sig.make_sig('bias', default=None),
sig.make_sig('eps', default=1e-5),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, num_groups, weight=None, bias=None, eps=1e-5):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_group_norm(self, [input, num_groups, weight, bias, eps]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, num_groups, weight, bias, eps)
return res
group_norm_op=GroupNorm()
class Cross(Primitive):
r"""
Returns the cross product of vectors in dimension `dim` of input and other.
.. warning::
This is an experimental API that is subject to change or deletion.
Refer to :func:`mindspore.ops.cross` for more details.
Args:
dim (int): Specified dim along which to compute cross product with. Default: ``-65530`` .
Inputs:
- **input** (Tensor) - Input Tensor.
- **other** (Tensor) - Another input Tensor, must have the same shape and
the same type as `input`, and the size of their `dim` dimension should be 3.
Outputs:
Tensor, has the same shape and type as inputs.
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore import dtype as mstype
>>> from mindspore import ops
>>> cross = ops.Cross(dim = 0)
>>> x1 = Tensor([1, 2, 3], mstype.int8)
>>> x2 = Tensor([1, 2, 3], mstype.int8)
>>> output = cross(x1, x2)
>>> print(output)
[0 0 0]
"""
@prim_arg_register
def __init__(self, dim=-65530):
self._set_prim_arg("dim", dim)
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_cross(self, [input, other, self.dim]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other, self.dim)
return res
class ConvolutionStrGrad(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('dout'),
sig.make_sig('input'),
sig.make_sig('weight'),
sig.make_sig('bias', default=None),
sig.make_sig('stride', default=1),
sig.make_sig('padding', default='valid'),
sig.make_sig('dilation', default=1),
sig.make_sig('transposed', default=False),
sig.make_sig('output_padding', default=0),
sig.make_sig('groups', default=1),
sig.make_sig('output_mask', default=()),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, dout, input, weight, bias=None, stride=1, padding='valid', dilation=1, transposed=False, output_padding=0, groups=1, output_mask=()):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_convolution_str_grad(self, [dout, input, weight, bias, stride, str_to_enum('ConvolutionStrGrad', 'padding', padding), dilation, transposed, output_padding, groups, output_mask]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, dout, input, weight, bias, stride, str_to_enum('ConvolutionStrGrad', 'padding', padding), dilation, transposed, output_padding, groups, output_mask)
return res
convolution_str_grad_op=ConvolutionStrGrad()
class MaxPoolGradWithIndices(Primitive):
r"""
Gradients of the MaxPoolWithIndices operation.
"""
@prim_arg_register
def __init__(self, kernel_size, strides=None, pads=0, dilation=(1, 1), ceil_mode=False, argmax_type=mstype.int64):
self._set_prim_arg_with_handler("kernel_size", kernel_size, to_kernel_size)
self._set_prim_arg_with_handler("strides", strides, to_strides)
self._set_prim_arg_with_handler("pads", pads, to_output_padding)
self._set_prim_arg_with_handler("dilation", dilation, to_dilations)
self._set_prim_arg("ceil_mode", ceil_mode)
self._set_prim_arg_with_handler("argmax_type", argmax_type, dtype_to_type_id)
def __call__(self, x, grad, argmax):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_max_pool_grad_with_indices(self, [x, grad, argmax, self.kernel_size, self.strides, self.pads, self.dilation, self.ceil_mode, self.argmax_type]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x, grad, argmax, self.kernel_size, self.strides, self.pads, self.dilation, self.ceil_mode, self.argmax_type)
return res
class UpsampleTrilinear3DGrad(Primitive):
r"""
Upsamples the 3-D gradient data with the trilinear interpolation algorithm.
Note:
Exactly one of 'scales' and 'output_size' must be specified; it is an error if both are specified.
Inputs:
- **dy** (Tensor) - Tensor of shape [N, C, D, H, W]. Must be one of the following types:
float16, float32, float64.
- **input_size** (Union[tuple[int], list[int]]): A required tuple[int] which contains 5 elements:
[batch, channels, depth, height, width]. Must:
input_size[0] == dy.shape[0]
input_size[1] == dy.shape[1].
- **output_size** (Union[tuple[int], list[int]]): An optional tuple[int]. Default: ``None``.
It contains 3 elements: depth, height, width, which should match the spatial sizes of `dy`. Must:
dy.shape[2] == output_size[0],
dy.shape[3] == output_size[1],
dy.shape[4] == output_size[2].
- **scales** (Union[tuple[float], list[float]]): An optional tuple[float]. Default: ``None``.
The scale array along each dimension, containing 3 elements: scale_depth, scale_height, scale_width. Must:
dy.shape[2] == floor(input_size[2] * scales[0]),
dy.shape[3] == floor(input_size[3] * scales[1]),
dy.shape[4] == floor(input_size[4] * scales[2]).
- **align_corners** (bool): An optional bool. Default: ``False``.
Outputs:
- **dx** (Tensor) - A Tensor with shape determined by `input_size`, and its dtype is the same as `dy`.
"""
__mindspore_signature__ = (
sig.make_sig('dy'),
sig.make_sig('input_size'),
sig.make_sig('output_size', default=None),
sig.make_sig('scales', default=None),
)
@prim_arg_register
def __init__(self, align_corners=False):
self._set_prim_arg("align_corners", align_corners)
def __call__(self, dy, input_size, output_size=None, scales=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_upsample_trilinear3d_grad(self, [dy, input_size, output_size, scales, self.align_corners]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, dy, input_size, output_size, scales, self.align_corners)
return res
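# Illustrative sketch only (shapes are assumptions, following the docstring above):
# it exercises the documented shape relations, where `dy` has shape
# [N, C, D_out, H_out, W_out] and `input_size` is the original [N, C, D, H, W].
def _example_upsample_trilinear3d_grad():
    import numpy as np
    from mindspore import Tensor
    grad_op = UpsampleTrilinear3DGrad(align_corners=False)
    dy = Tensor(np.ones([1, 3, 4, 8, 8], np.float32))  # gradient w.r.t. the upsampled output
    input_size = (1, 3, 2, 4, 4)   # original input shape; first two entries match dy
    output_size = (4, 8, 8)        # must equal dy.shape[2:]
    # Per the docstring, dx has shape input_size and the same dtype as dy.
    return grad_op(dy, input_size, output_size=output_size)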
class Logit(Primitive):
r"""
Calculates the logit of a tensor element-wise. Elements in `x` are clamped to [eps, 1-eps].
.. warning::
This is an experimental API that is subject to change or deletion.
Refer to :func:`mindspore.ops.logit` for more details.
Args:
eps (float, optional): The epsilon. The input clamp bound is defined as [eps, 1-eps]. Default: ``-1.0`` .
Inputs:
- **x** (Tensor) - The input tensor of type float16, float32 or float64.
Outputs:
Tensor, with the same shape and dtype as the `x`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([0.1, 0.2, 0.3]).astype(np.float32))
>>> op = ops.Logit(eps=1e-5)
>>> output = op(x)
>>> print(output)
[-2.1972246 -1.3862944 -0.8472978]
"""
@prim_arg_register
def __init__(self, eps=-1.0):
self._set_prim_arg("eps", eps)
def __call__(self, input):
return super().__call__(input, self.eps)
class Mul(Primitive):
r"""
.. code-block::
prim = ops.Mul()
out = prim(input, other)
is equivalent to
.. code-block::
ops.mul(input, other)
Refer to :func:`mindspore.ops.mul` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_READ, dtype=sig.sig_dtype.T),
sig.make_sig('other', sig.sig_rw.RW_READ, dtype=sig.sig_dtype.T),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_mul(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
mul_op=Mul()
class SubExt(Primitive):
r"""
.. code-block::
prim = ops.SubExt()
out = prim(input, other, alpha)
is equivalent to
.. code-block::
ops.sub_ext(input, other, alpha)
Refer to :func:`mindspore.ops.sub_ext` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input', dtype=sig.sig_dtype.T),
sig.make_sig('other', dtype=sig.sig_dtype.T),
sig.make_sig('alpha', dtype=sig.sig_dtype.T1, default=1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other, alpha=1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_sub_ext(self, [input, other, alpha]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other, alpha)
return res
sub_ext_op=SubExt()
class Geqrf(Primitive):
r"""
.. code-block::
prim = ops.Geqrf()
out = prim(input)
is equivalent to
.. code-block::
ops.geqrf(input)
Refer to :func:`mindspore.ops.geqrf` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
geqrf_op=Geqrf()
class DistCommGatherIntoTensor(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, other, input, rank_size, dst, rank_id, group):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_dist_comm_gather_into_tensor(self, [other, input, rank_size, dst, rank_id, group]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, other, input, rank_size, dst, rank_id, group)
return res
dist_comm_gather_into_tensor_op=DistCommGatherIntoTensor()
class ReduceAll(Primitive):
r"""
.. code-block::
prim = ops.ReduceAll(keep_dims)
out = prim(input, axis)
is equivalent to
.. code-block::
ops.all(input, axis, keep_dims)
Refer to :func:`mindspore.ops.all` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('axis', default=None),
)
@prim_arg_register
def __init__(self, keep_dims=False):
self._set_prim_arg("keep_dims", keep_dims)
def __call__(self, input, axis=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_reduce_all(self, [input, axis, self.keep_dims]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, axis, self.keep_dims)
return res
class ViewAs(Primitive):
r"""
.. code-block::
prim = ops.ViewAs()
out = prim(input, other)
is equivalent to
.. code-block::
ops.view_as(input, other)
Refer to :func:`mindspore.ops.view_as` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_view_as(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
view_as_op=ViewAs()
class MatrixInverseExt(Primitive):
r"""
.. code-block::
prim = ops.MatrixInverseExt()
out = prim(input)
is equivalent to
.. code-block::
ops.matrix_inverse_ext(input)
Refer to :func:`mindspore.ops.matrix_inverse_ext` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_matrix_inverse_ext(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
matrix_inverse_ext_op=MatrixInverseExt()
class Asin(Primitive):
r"""
.. code-block::
prim = ops.Asin()
out = prim(input)
is equivalent to
.. code-block::
ops.asin(input)
Refer to :func:`mindspore.ops.asin` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
asin_op=Asin()
class InplaceClampTensor(Primitive):
r"""
.. code-block::
prim = ops.InplaceClampTensor()
out = prim(input, min, max)
is equivalent to
.. code-block::
ops.inplace_clamp_tensor(input, min, max)
Refer to :func:`mindspore.ops.inplace_clamp_tensor` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('min', default=None),
sig.make_sig('max', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, min=None, max=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_clamp_tensor(self, [input, min, max]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, min, max)
return res
inplace_clamp_tensor_op=InplaceClampTensor()
class DropoutGenMaskExt(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_hidden", True)
def __call__(self, shape, p, seed, offset, dtype):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_dropout_gen_mask_ext(self, [shape, p, seed, offset, dtype_to_type_id('DropoutGenMaskExt', 'dtype', dtype)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, shape, p, seed, offset, dtype_to_type_id('DropoutGenMaskExt', 'dtype', dtype))
return res
dropout_gen_mask_ext_op=DropoutGenMaskExt()
class ReplicationPad2D(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, padding):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_replication_pad_2d(self, [input, padding]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, padding)
return res
replication_pad_2d_op=ReplicationPad2D()
class PReLU(Primitive):
r"""
.. code-block::
prim = ops.PReLU()
out = prim(input, weight)
is equivalent to
.. code-block::
ops.prelu(input, weight)
Refer to :func:`mindspore.ops.prelu` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, weight):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_prelu(self, [input, weight]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, weight)
return res
prelu_op=PReLU()
class LogicalOr(Primitive):
r"""
Computes the "logical OR" of two tensors element-wise.
Refer to :func:`mindspore.ops.logical_or` for more details.
Inputs:
- **x** (Union[Tensor, bool]) - The first input is a bool or a tensor whose data type can be implicitly
converted to bool.
- **y** (Union[Tensor, bool]) - The second input is a bool when the first input is a tensor or
a tensor whose data type can be implicitly converted to bool.
Outputs:
Tensor, the shape is the same as the `x` and `y` after broadcasting, and the data type is bool.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([True, False, True]), mindspore.bool_)
>>> y = Tensor(np.array([True, True, False]), mindspore.bool_)
>>> logical_or = ops.LogicalOr()
>>> output = logical_or(x, y)
>>> print(output)
[ True True True]
>>> x = Tensor(1, mindspore.bool_)
>>> y = Tensor(0, mindspore.bool_)
>>> output = ops.LogicalOr()(x, y)
>>> print(output)
True
>>> x = True
>>> y = Tensor(0, mindspore.bool_)
>>> output = ops.LogicalOr()(x, y)
>>> print(output)
True
>>> x = True
>>> y = Tensor(np.array([True, False]), mindspore.bool_)
>>> output = ops.LogicalOr()(x, y)
>>> print(output)
[True True]
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, y):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_logical_or(self, [x, y]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x, y)
return res
logical_or_op=LogicalOr()
class LogicalAnd(Primitive):
r"""
Computes the "logical AND" of two tensors element-wise.
Refer to :func:`mindspore.ops.logical_and` for more details.
Inputs:
- **x** (Union[Tensor, bool]) - The first input is a bool or a tensor whose data type can be implicitly
converted to bool.
- **y** (Union[Tensor, bool]) - The second input is a bool when the first input is a tensor or
a tensor whose data type can be implicitly converted to bool.
Outputs:
Tensor, the shape is the same as the `x` and `y` after broadcasting, and the data type is bool.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([True, False, True]), mindspore.bool_)
>>> y = Tensor(np.array([True, True, False]), mindspore.bool_)
>>> logical_and = ops.LogicalAnd()
>>> output = logical_and(x, y)
>>> print(output)
[ True False False]
>>> x = Tensor(1, mindspore.bool_)
>>> y = Tensor(0, mindspore.bool_)
>>> output = ops.LogicalAnd()(x, y)
>>> print(output)
False
>>> x = True
>>> y = Tensor(0, mindspore.bool_)
>>> output = ops.LogicalAnd()(x, y)
>>> print(output)
False
>>> x = True
>>> y = Tensor(np.array([True, False]), mindspore.bool_)
>>> output = ops.LogicalAnd()(x, y)
>>> print(output)
[True False]
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, y):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_logical_and(self, [x, y]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x, y)
return res
logical_and_op=LogicalAnd()
class MoeTokenUnpermuteGrad(Primitive):
r"""
.. code-block::
prim = ops.MoeTokenUnpermuteGrad()
out = prim(permuted_tokens, unpermuted_tokens_grad, sorted_indices, probs, padded_mode, restore_shape)
is equivalent to
.. code-block::
ops.moe_token_unpermute_grad(permuted_tokens, unpermuted_tokens_grad, sorted_indices, probs, padded_mode, restore_shape)
Refer to :func:`mindspore.ops.moe_token_unpermute_grad` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('permuted_tokens'),
sig.make_sig('unpermuted_tokens_grad'),
sig.make_sig('sorted_indices'),
sig.make_sig('probs', default=None),
sig.make_sig('padded_mode', default=False),
sig.make_sig('restore_shape', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, permuted_tokens, unpermuted_tokens_grad, sorted_indices, probs=None, padded_mode=False, restore_shape=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_moe_token_unpermute_grad(self, [permuted_tokens, unpermuted_tokens_grad, sorted_indices, probs, padded_mode, restore_shape]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, permuted_tokens, unpermuted_tokens_grad, sorted_indices, probs, padded_mode, restore_shape)
return res
moe_token_unpermute_grad_op=MoeTokenUnpermuteGrad()
class ReduceMin(Primitive):
r"""
By default, reduces all dimensions of a tensor by taking the minimum value. It can also reduce
a dimension of `x` along the specified `axis`. Whether the dimensions of the output and input are the
same is determined by `keep_dims`.
Note:
The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
Args:
keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
If ``False`` , don't keep these dimensions. Default: ``False`` .
Inputs:
- **x** (Tensor[Number]) - The input tensor.
- **axis** (Union[int, tuple(int), list(int), Tensor]) - The dimensions to reduce. Default: ``()`` ,
reduce all dimensions. Only constant value is allowed. Must be in the range [-r, r).
Outputs:
Tensor, has the same dtype as the `x`.
- If `axis` is ``()`` , and `keep_dims` is ``False`` ,
the output is a 0-D tensor representing the minimum of all elements in the input tensor.
- If `axis` is int, set as 1, and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_0, x_2, ..., x_R)`.
- If `axis` is tuple(int), set as (1, 2), and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_0, x_3, ..., x_R)`.
- If `axis` is 1-D Tensor, set as [1, 2], and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_0, x_3, ..., x_R)`.
Raises:
TypeError: If `keep_dims` is not a bool.
TypeError: If `x` is not a Tensor.
TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
ValueError: If `axis` is out of range.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> op = ops.ReduceMin(keep_dims=True)
>>> output = op(x, 1)
>>> result = output.shape
>>> print(result)
(3, 1, 5, 6)
>>> # case 1: Reduces a dimension by the minimum value of all elements in the dimension.
>>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
>>> output = op(x)
>>> print(output)
[[[1.]]]
>>> print(output.shape)
(1, 1, 1)
>>> # case 2: Reduces a dimension along axis 0.
>>> output = op(x, 0)
>>> print(output)
[[[1. 1. 1. 1. 1. 1.]
[2. 2. 2. 2. 2. 2.]
[3. 3. 3. 3. 3. 3.]]]
>>> # case 3: Reduces a dimension along axis 1.
>>> output = op(x, 1)
>>> print(output)
[[[1. 1. 1. 1. 1. 1.]]
[[4. 4. 4. 4. 4. 4.]]
[[7. 7. 7. 7. 7. 7.]]]
>>> # case 4: Reduces a dimension along axis 2.
>>> output = op(x, 2)
>>> print(output)
[[[1.]
[2.]
[3.]]
[[4.]
[5.]
[6.]]
[[7.]
[8.]
[9.]]]
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('axis', default=()),
)
@prim_arg_register
def __init__(self, keep_dims=False):
self._set_prim_arg("keep_dims", keep_dims)
def __call__(self, x, axis=()):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_reduce_min(self, [x, axis, self.keep_dims]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x, axis, self.keep_dims)
return res
class Conv2DPadding(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('weight'),
sig.make_sig('bias', default=None),
sig.make_sig('stride', default=1),
sig.make_sig('padding', default='valid'),
sig.make_sig('dilation', default=1),
sig.make_sig('groups', default=1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, weight, bias=None, stride=1, padding='valid', dilation=1, groups=1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_conv2d_padding(self, [input, weight, bias, stride, str_to_enum('Conv2DPadding', 'padding', padding), dilation, groups]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, weight, bias, stride, str_to_enum('Conv2DPadding', 'padding', padding), dilation, groups)
return res
conv2d_padding_op=Conv2DPadding()
class Ceil(Primitive):
r"""
.. code-block::
prim = ops.Ceil()
out = prim(input)
is equivalent to
.. code-block::
ops.ceil(input)
Refer to :func:`mindspore.ops.ceil` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_ceil(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
ceil_op=Ceil()
class Square(Primitive):
r"""
.. code-block::
prim = ops.Square()
out = prim(input)
is equivalent to
.. code-block::
ops.square(input)
Refer to :func:`mindspore.ops.square` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_square(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
square_op=Square()
class Index(Primitive):
r"""
.. code-block::
prim = ops.Index()
out = prim(input, indices)
is equivalent to
.. code-block::
ops.index(input, indices)
Refer to :func:`mindspore.ops.index` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, indices):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_index(self, [input, indices]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, indices)
return res
index_op=Index()
class EmbeddingFeatureMappingV2(Primitive):
r"""
.. code-block::
prim = ops.EmbeddingFeatureMappingV2()
out = prim(table_name, feature_id, table_total_size, table_actual_size)
is equivalent to
.. code-block::
ops.embedding_feature_mapping_v2(table_name, feature_id, table_total_size, table_actual_size)
Refer to :func:`mindspore.ops.embedding_feature_mapping_v2` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, table_name, feature_id, table_total_size, table_actual_size):
return super().__call__(table_name, feature_id, table_total_size, table_actual_size)
embedding_feature_mapping_v2_op=EmbeddingFeatureMappingV2()
class StridedSlice(Primitive):
r"""
.. code-block::
prim = ops.StridedSlice(begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask)
out = prim(input_x, begin, end, strides)
is equivalent to
.. code-block::
ops.strided_slice(input_x, begin, end, strides, begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask)
Refer to :func:`mindspore.ops.strided_slice` for more details.
"""
@prim_arg_register
def __init__(self, begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0):
self._set_prim_arg("begin_mask", begin_mask)
self._set_prim_arg("end_mask", end_mask)
self._set_prim_arg("ellipsis_mask", ellipsis_mask)
self._set_prim_arg("new_axis_mask", new_axis_mask)
self._set_prim_arg("shrink_axis_mask", shrink_axis_mask)
def __call__(self, input_x, begin, end, strides):
return super().__call__(input_x, begin, end, strides, self.begin_mask, self.end_mask, self.ellipsis_mask, self.new_axis_mask, self.shrink_axis_mask)
class ReplicationPad3D(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, padding):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_replication_pad_3d(self, [input, padding]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, padding)
return res
replication_pad_3d_op=ReplicationPad3D()
class BatchNormElemtGrad(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, dout, input, mean, invstd, weight, sumd_dy, sum_dy_xmu, count):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_batch_norm_elemt_grad(self, [dout, input, mean, invstd, weight, sumd_dy, sum_dy_xmu, count]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, dout, input, mean, invstd, weight, sumd_dy, sum_dy_xmu, count)
return res
batch_norm_elemt_grad_op=BatchNormElemtGrad()
class LayerNormGradExt(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('dy'),
sig.make_sig('x'),
sig.make_sig('normalized_shape'),
sig.make_sig('mean'),
sig.make_sig('variance'),
sig.make_sig('gamma'),
sig.make_sig('beta'),
sig.make_sig('output_mask', default=(1, 1, 1)),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, dy, x, normalized_shape, mean, variance, gamma, beta, output_mask=(1, 1, 1)):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_layer_norm_grad_ext(self, [dy, x, normalized_shape, mean, variance, gamma, beta, output_mask]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, dy, x, normalized_shape, mean, variance, gamma, beta, output_mask)
return res
layer_norm_grad_ext_op=LayerNormGradExt()
class UniqueDim(Primitive):
r"""
Returns the unique elements of the input tensor.
When `return_inverse=True`, also returns a tensor containing the index in the returned unique
tensor to which each value of the input tensor corresponds.
Inputs:
- **input** (Tensor) - The input tensor.
- **sorted** (bool) - Whether to sort the unique elements in ascending order before returning as output.
- **return_inverse** (bool) - Whether to also return the indices of where elements in the original input ended up in
the returned unique list.
- **dim** (int) - The dimension to operate upon.
Returns:
A tensor or a tuple of tensors containing some of the following tensor objects (`output`, `inverse_indices`, `counts`).
- **output** (Tensor) - The output list of unique scalar elements.
- **inverse_indices** (Tensor) - Returned when ``return_inverse`` is ``True``. It represents the indices of where
elements in the original input map to in the output; the shape is input.shape[dim].
- **counts** (Tensor) - The number of occurrences of each unique value or tensor. The shape is
output.shape[dim].
Raises:
TypeError: If `input` is not a Tensor.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, nn
>>> from mindspore import ops
>>> x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32)
>>> unique = ops.auto_generate.UniqueDim()
>>> output = unique(x, sorted=True, return_inverse=True, dim=0)
>>> print(output)
(Tensor(shape=[3], dtype=Int32, value= [1, 2, 5]), Tensor(shape=[4], dtype=Int32, value= [0, 1, 2, 1]))
>>> y = output[0]
>>> print(y)
[1 2 5]
>>> idx = output[1]
>>> print(idx)
[0 1 2 1]
>>> counts = output[1]
>>> print(counts)
[1 2 1]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, sorted, return_inverse, dim):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_unique_dim(self, [input, sorted, return_inverse, dim]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, sorted, return_inverse, dim)
return res
unique_dim_op=UniqueDim()
class Sinh(Primitive):
r"""
.. code-block::
prim = ops.Sinh()
out = prim(input)
is equivalent to
.. code-block::
ops.sinh(input)
Refer to :func:`mindspore.ops.sinh` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_sinh(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
sinh_op=Sinh()
class IHFFT(Primitive):
r"""
.. code-block::
prim = ops.IHFFT()
out = prim(input, n, dim, norm)
is equivalent to
.. code-block::
ops.ihfft(input, n, dim, norm)
Refer to :func:`mindspore.ops.ihfft` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('n', default=None),
sig.make_sig('dim', default=-1),
sig.make_sig('norm', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, n=None, dim=-1, norm=None):
return super().__call__(input, n, dim, norm if norm is None else str_to_enum('IHFFT', 'norm', norm))
ihfft_op=IHFFT()
class ApplyCamePart4(Primitive):
r"""
Computes Part 4 of the CAME Optimizer.
Args:
- **param** (Parameter) - The shape = 2D :math:`(..., n, m)`.
A Tensor of types: float16, float32, bfloat16.
- **m** (Parameter) - The shape = 2D :math:`(..., n, m)`.
The Tensor has the same data type as `param`.
- **r** (Tensor) - The shape = 1D :math:`(..., n)`.
The Tensor has the same data type as `param`.
- **c** (Tensor) - The shape = 1D :math:`(..., m)`.
The Tensor has the same data type as `param`.
- **weight_decay** (Tensor) - The shape = 1D :math:`(1)`.
A Tensor of types: float32.
- **lr** (Tensor) - The shape = 1D :math:`(1)`.
A Tensor of types: float32.
- **beta3** (float) - The data type must be float.
- **sum_r** (Tensor) - The shape = 1D :math:`(..., 1)`.
'None' is currently supported. A Tensor of types: float32.
- **sum_u_r** (Tensor) - The shape = 1D :math:`(..., n)`.
A Tensor of types: float32.
- **sum_u_c** (Tensor) - The shape = 1D :math:`(..., m)`.
A Tensor of types: float32.
- **sum_u_rc** (Tensor) - The shape = 1D :math:`(...)`.
A Tensor of types: float32.
- **global_shape** (Tensor) - the shape = 1D :math:`(2)`.
'None' is currently supported. A Tensor of types: int64.
Returns:
- **param** (Tensor) - A Tensor of shape :math:`(..., n, m)`
- **r** (Tensor) - A Tensor of shape :math:`(..., n)`
- **c** (Tensor) - A Tensor of shape :math:`(..., m)`
Raises:
TypeError: If `param` is not a Tensor.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore as ms
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore.ops.operations import _inner_ops as P
>>> apply_came_part4 = P.ApplyCamePart4()
>>> param = Tensor(np.ones([1024, 64]), dtype=ms.float32)
>>> m = Tensor(np.ones([1024, 64]), dtype=ms.float32)
>>> r = Tensor(np.ones([1024]), dtype=ms.float32)
>>> c = Tensor(np.ones([64]), dtype=ms.float32)
>>> weight_decay = Tensor([0.8])
>>> lr = Tensor([0.5])
>>> beta3 = 0.5
>>> sum_r = Tensor(np.array([128.]), dtype=ms.float32)
>>> sum_u_r = Tensor(np.ones([1024]), dtype=ms.float32)
>>> sum_u_c = Tensor(np.ones([64]), dtype=ms.float32)
>>> sum_u_rc = Tensor(np.array([128.]), dtype=ms.float32)
>>> global_shape = (1024, 64)
>>> output = apply_came_part4(param, m, r, c, weight_decay, lr, beta3, \
... sum_r, sum_u_r, sum_u_c, sum_u_rc, global_shape)
>>> print(output[0].shape)
(1024, 64)
"""
__mindspore_signature__ = (
sig.make_sig('param'),
sig.make_sig('m'),
sig.make_sig('r'),
sig.make_sig('c'),
sig.make_sig('weight_decay'),
sig.make_sig('lr'),
sig.make_sig('beta3'),
sig.make_sig('sum_r'),
sig.make_sig('sum_u_r'),
sig.make_sig('sum_u_c'),
sig.make_sig('sum_u_rc'),
sig.make_sig('global_shape', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, param, m, r, c, weight_decay, lr, beta3, sum_r, sum_u_r, sum_u_c, sum_u_rc, global_shape=None):
return super().__call__(param, m, r, c, weight_decay, lr, beta3, sum_r, sum_u_r, sum_u_c, sum_u_rc, global_shape)
apply_came_part4_op=ApplyCamePart4()
class Addbmm(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('batch1'),
sig.make_sig('batch2'),
sig.make_sig('beta', default=1),
sig.make_sig('alpha', default=1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, batch1, batch2, beta=1, alpha=1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_addbmm(self, [input, batch1, batch2, beta, alpha]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, batch1, batch2, beta, alpha)
return res
addbmm_op=Addbmm()
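# Hypothetical usage sketch (the class above carries no docstring): the shapes
# assume semantics analogous to the common addbmm, i.e.
# beta * input + alpha * (sum over the batch of batch1 @ batch2); treat this as
# an assumption rather than a statement of the operator's contract.
def _example_addbmm():
    import numpy as np
    from mindspore import Tensor
    input_ = Tensor(np.zeros([2, 4], np.float32))    # (n, p)
    batch1 = Tensor(np.ones([5, 2, 3], np.float32))  # (b, n, m)
    batch2 = Tensor(np.ones([5, 3, 4], np.float32))  # (b, m, p)
    return addbmm_op(input_, batch1, batch2, beta=1, alpha=1)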
class GreaterEqualScalar(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_greater_equal_scalar(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
greater_equal_scalar_op=GreaterEqualScalar()
class ArgSort(Primitive):
r"""
.. code-block::
prim = ops.ArgSort()
out = prim(input, dim, descending)
is equivalent to
.. code-block::
ops.argsort_ext(input, dim, descending)
Refer to :func:`mindspore.ops.argsort_ext` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('dim', default=-1),
sig.make_sig('descending', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim=-1, descending=False):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_argsort(self, [input, dim, descending]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim, descending)
return res
argsort_op=ArgSort()
class FmodTensor(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_fmod_tensor(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
fmod_tensor_op=FmodTensor()
class BitwiseAndTensor(Primitive):
r"""
Returns bitwise `and` of two tensors element-wise.
Inputs:
- **input** (Tensor) - The input tensor must be of integral or Boolean types.
- **other** (Tensor) - The second input tensor with same type as the `input`.
Outputs:
Tensor, has the same type as the `input`.
Supported Platforms:
``Ascend``
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_bitwise_and_tensor(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
bitwise_and_tensor_op=BitwiseAndTensor()
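# Minimal usage sketch for the docstring above; the concrete values are assumptions.
# Element-wise bitwise AND of two integer tensors with the same dtype.
def _example_bitwise_and_tensor():
    import numpy as np
    from mindspore import Tensor
    a = Tensor(np.array([0, 1, 3], np.int32))
    b = Tensor(np.array([1, 1, 2], np.int32))
    # Expected element-wise result: [0 & 1, 1 & 1, 3 & 2] == [0, 1, 2].
    return bitwise_and_tensor_op(a, b)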
class AddLayerNormV2(Primitive):
r"""
.. code-block::
prim = ops.AddLayerNormV2()
out = prim(x1, x2, gamma, beta, epsilon, additionalOut)
is equivalent to
.. code-block::
ops.add_layernorm_v2(x1, x2, gamma, beta, epsilon, additionalOut)
Refer to :func:`mindspore.ops.add_layernorm_v2` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('x1'),
sig.make_sig('x2'),
sig.make_sig('gamma'),
sig.make_sig('beta'),
sig.make_sig('epsilon', default=1e-5),
sig.make_sig('additionalOut', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, x1, x2, gamma, beta, epsilon=1e-5, additionalOut=False):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_add_layernorm_v2(self, [x1, x2, gamma, beta, epsilon, additionalOut]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x1, x2, gamma, beta, epsilon, additionalOut)
return res
add_layernorm_v2_op=AddLayerNormV2()
class Cdist(Primitive):
r"""
.. code-block::
prim = ops.Cdist(p)
out = prim(x1, x2)
is equivalent to
.. code-block::
ops.cdist(x1, x2, p)
Refer to :func:`mindspore.ops.cdist` for more details.
"""
@prim_arg_register
def __init__(self, p=2.0):
self._set_prim_arg("p", p)
def __call__(self, x1, x2):
return super().__call__(x1, x2, self.p)
class L1LossExt(Primitive):
r"""
.. code-block::
prim = ops.L1LossExt()
out = prim(input, target, reduction)
is equivalent to
.. code-block::
ops.l1_loss_ext(input, target, reduction)
Refer to :func:`mindspore.ops.l1_loss_ext` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('target'),
sig.make_sig('reduction', default='mean'),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, target, reduction='mean'):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_l1_loss_ext(self, [input, target, str_to_enum('L1LossExt', 'reduction', reduction)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, target, str_to_enum('L1LossExt', 'reduction', reduction))
return res
l1_loss_ext_op=L1LossExt()
class MinimumGrad(Primitive):
r"""
Grad for minimum.
"""
@prim_arg_register
def __init__(self, grad_x=True, grad_y=True):
self._set_prim_arg("grad_x", grad_x)
self._set_prim_arg("grad_y", grad_y)
def __call__(self, x1, x2, grads):
return super().__call__(x1, x2, grads, self.grad_x, self.grad_y)
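# Illustrative sketch only (values assumed): MinimumGrad is expected to route the
# incoming gradient to whichever of x1/x2 held the minimum at each position and
# return (dx1, dx2) when both grad_x and grad_y are enabled (the defaults).
def _example_minimum_grad():
    import numpy as np
    from mindspore import Tensor
    grad_op = MinimumGrad(grad_x=True, grad_y=True)
    x1 = Tensor(np.array([1.0, 5.0], np.float32))
    x2 = Tensor(np.array([3.0, 2.0], np.float32))
    grads = Tensor(np.array([1.0, 1.0], np.float32))
    return grad_op(x1, x2, grads)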
class DistCommAllGather(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, tensor_list, input, rank_size, group):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_dist_comm_all_gather(self, [tensor_list, input, rank_size, group]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, tensor_list, input, rank_size, group)
return res
dist_comm_all_gather_op=DistCommAllGather()
class AdaptiveAvgPool2DGradExt(Primitive):
r"""
.. code-block::
prim = ops.AdaptiveAvgPool2DGradExt()
out = prim(grad_output, x)
is equivalent to
.. code-block::
ops.adaptive_avg_pool2d_grad_ext(grad_output, x)
Refer to :func:`mindspore.ops.adaptive_avg_pool2d_grad_ext` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, grad_output, x):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_adaptive_avg_pool2d_grad_ext(self, [grad_output, x]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, grad_output, x)
return res
adaptive_avg_pool2d_grad_ext_op=AdaptiveAvgPool2DGradExt()
class SeluGrad(Primitive):
r"""
.. code-block::
prim = ops.SeluGrad()
out = prim(gradient, result)
is equivalent to
.. code-block::
ops.selu_grad(gradient, result)
Refer to :func:`mindspore.ops.selu_grad` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, gradient, result):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_selu_grad(self, [gradient, result]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, gradient, result)
return res
selu_grad_op=SeluGrad()
class Cosh(Primitive):
r"""
.. code-block::
prim = ops.Cosh()
out = prim(input)
is equivalent to
.. code-block::
ops.cosh(input)
Refer to :func:`mindspore.ops.cosh` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_cosh(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
cosh_op=Cosh()
class UniformExt(Primitive):
r"""
Generates random numbers according to the Uniform random number distribution.
Inputs:
- **tensor** (Tensor) - The tensor to be filled with the generated random numbers.
- **a** (number) - Lower bound of the random numbers. Default: 0.0.
- **b** (number) - Upper bound of the random numbers. Default: 0.0.
- **seed** (int) - Seed for random number generation. Default: 0.
- **offset** (int) - Positional offset in the tensor to start filling with random numbers. Default: 0.
Raises:
TypeError: If `a` or `b` is not a float.
TypeError: If `tensor` is not a Tensor.
ValueError: If `a` is larger than `b`.
Outputs:
- **output** (Tensor) - With the same type and shape as the 'tensor'.
Supported Platforms:
``Ascend``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore import dtype as mstype
>>> from mindspore.ops.operations.random_ops import UniformExt
>>> x = Tensor(np.random.randn(3,4), mstype.float64)
>>> uniform = UniformExt()
>>> y = uniform(x, a=1.0, b=2.0, seed=10, offset=5)
>>> print(y.shape)
(3, 4)
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, tensor, a, b, seed, offset):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_uniform_ext(self, [tensor, a, b, seed, offset]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, tensor, a, b, seed, offset)
return res
uniform_ext_op=UniformExt()
class InplaceExp(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_exp(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
inplace_exp_op=InplaceExp()
class TanhGrad(Primitive):
r"""
Computes TanhGrad of input element-wise.
Returns:
Tensor, has the same type as input.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, y, dy):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_tanh_grad(self, [y, dy]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, y, dy)
return res
tanh_grad_op=TanhGrad()
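# Minimal sketch (values assumed): TanhGrad takes the forward output y = tanh(x)
# and the upstream gradient dy; mathematically d tanh(x)/dx = 1 - y**2, so the
# result can be cross-checked against dy * (1 - y**2).
def _example_tanh_grad():
    import numpy as np
    from mindspore import Tensor
    y = Tensor(np.tanh(np.array([0.0, 0.5, 1.0], np.float32)))
    dy = Tensor(np.ones([3], np.float32))
    return tanh_grad_op(y, dy)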
class MedianExt(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_median_ext(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
median_ext_op=MedianExt()
class PReLUGrad(Primitive):
r"""
Gradients of PReLU operation.
Note:
1-dimensional input_x is not supported.
Inputs:
- **dy** (Tensor) - Representing the backprop of the next layer.
- **x** (Tensor) - Must be the input `x` of forward operator PRelu.
- **weight** (Tensor) - Float Tensor, w > 0, must be the input `weight` of forward operator PRelu.
Outputs:
- **dx** (Tensor), with the same type as `x`.
- **dw** (Tensor), with the same type as `weight`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, dy, x, weight):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_prelu_grad(self, [dy, x, weight]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, dy, x, weight)
return res
prelu_grad_op=PReLUGrad()
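# Illustrative sketch (shapes assumed, following the Inputs section above):
# PReLUGrad consumes the upstream gradient `dy`, the forward input `x`, and the
# per-channel `weight` of the forward PReLU, and returns (dx, dw).
def _example_prelu_grad():
    import numpy as np
    from mindspore import Tensor
    dy = Tensor(np.ones([2, 3, 4], np.float32))
    x = Tensor(np.random.randn(2, 3, 4).astype(np.float32))
    weight = Tensor(np.array([0.25, 0.25, 0.25], np.float32))
    return prelu_grad_op(dy, x, weight)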
class ReduceMean(Primitive):
r"""
By default, reduces all dimensions of a tensor by averaging all elements. It can also reduce
a dimension of `x` along the specified `axis`. Whether the dimensions of the output and input are the same is
determined by `keep_dims`.
Note:
The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
Args:
keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
If ``False`` , don't keep these dimensions. Default: ``False`` .
Inputs:
- **x** (Tensor[Number]) - The input tensor.
- **axis** (Union[int, tuple(int), list(int), Tensor]) - The dimensions to reduce. Default: ``()`` ,
reduce all dimensions. Only constant value is allowed. Must be in the range [-r, r).
Outputs:
Tensor, has the same dtype as the `x`.
- If `axis` is ``()`` , and `keep_dims` is ``False`` ,
the output is a 0-D tensor representing the mean of all elements in the input tensor.
- If `axis` is int, set as 1, and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_0, x_2, ..., x_R)`.
- If `axis` is tuple(int) or list(int), set as (1, 2), and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_0, x_3, ..., x_R)`.
- If `axis` is 1-D Tensor, set as [1, 2], and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_0, x_3, ..., x_R)`.
Raises:
TypeError: If `keep_dims` is not a bool.
TypeError: If `x` is not a Tensor.
TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
ValueError: If `axis` is out of range.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> op = ops.ReduceMean(keep_dims=True)
>>> output = op(x, 1)
>>> result = output.shape
>>> print(result)
(3, 1, 5, 6)
>>> # case 1: Reduces a dimension by averaging all elements in the dimension.
>>> x = Tensor(np.array([[[2, 2, 2, 2, 2, 2], [2, 2, 2, 2, 2, 2], [2, 2, 2, 2, 2, 2]],
... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
... [[6, 6, 6, 6, 6, 6], [8, 8, 8, 8, 8, 8], [10, 10, 10, 10, 10, 10]]]),
... mindspore.float32)
>>> output = op(x)
>>> print(output)
[[[5.]]]
>>> print(output.shape)
(1, 1, 1)
>>> # case 2: Reduces a dimension along the axis 0
>>> output = op(x, 0)
>>> print(output)
[[[4. 4. 4. 4. 4. 4.]
[5. 5. 5. 5. 5. 5.]
[6. 6. 6. 6. 6. 6.]]]
>>> # case 3: Reduces a dimension along the axis 1
>>> output = op(x, 1)
>>> print(output)
[[[2. 2. 2. 2. 2. 2.]]
[[5. 5. 5. 5. 5. 5.]]
[[8. 8. 8. 8. 8. 8.]]]
>>> # case 4: Reduces a dimension along the axis 2
>>> output = op(x, 2)
>>> print(output)
[[[ 2.]
[ 2.]
[ 2.]]
[[ 4.]
[ 5.]
[ 6.]]
[[ 6.]
[ 8.]
[10.]]]
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('axis', default=()),
)
@prim_arg_register
def __init__(self, keep_dims=False):
self._set_prim_arg("keep_dims", keep_dims)
def __call__(self, x, axis=()):
return super().__call__(x, axis, self.keep_dims)
class DistCommScatter(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, other, scatter_list, rank_size, src, rank_id, group):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_dist_comm_scatter(self, [other, scatter_list, rank_size, src, rank_id, group]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, other, scatter_list, rank_size, src, rank_id, group)
return res
dist_comm_scatter_op=DistCommScatter()
class RandExt(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('shape'),
sig.make_sig('seed'),
sig.make_sig('offset'),
sig.make_sig('dtype', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, shape, seed, offset, dtype=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_rand_ext(self, [shape, seed, offset, dtype if dtype is None else dtype_to_type_id('RandExt', 'dtype', dtype)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, shape, seed, offset, dtype if dtype is None else dtype_to_type_id('RandExt', 'dtype', dtype))
return res
rand_ext_op=RandExt()
class InplaceScatterAdd(Primitive):
r"""
.. code-block::
prim = ops.InplaceScatterAdd()
out = prim(input, dim, index, src)
is equivalent to
.. code-block::
ops.inplace_scatter_add(input, dim, index, src)
Refer to :func:`mindspore.ops.inplace_scatter_add` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('dim'),
sig.make_sig('index'),
sig.make_sig('src'),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, dim, index, src):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_scatter_add(self, [input, dim, index, src]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim, index, src)
return res
inplace_scatter_add_op=InplaceScatterAdd()
class BatchNormGrad(Primitive):
r"""
Performs grad of BatchNorm operation.
"""
@prim_arg_register
def __init__(self, is_training=False, epsilon=1e-5, data_format='NCHW'):
self._set_prim_arg("is_training", is_training)
self._set_prim_arg("epsilon", epsilon)
self._set_prim_arg_with_handler("data_format", data_format, str_to_enum)
def __call__(self, dout, x, scale, saved_mean, saved_variance, reserve):
return super().__call__(dout, x, scale, saved_mean, saved_variance, reserve, self.is_training, self.epsilon, self.data_format)
class AvgPool(Primitive):
r"""
Average pooling operation.
Refer to :func:`mindspore.ops.avg_pool2d` for more details.
Args:
kernel_size (Union[int, tuple[int]]): The size of kernel used to take the average value,
is an int number that represents height and width of the kernel, or a tuple
of two int numbers that represent height and width respectively. Default: ``1`` .
strides (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
the height and width of movement are both strides, or a tuple of two int numbers that
represent height and width of movement respectively. Default: ``1`` .
pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
``"SAME"`` or ``"VALID"`` . Default: ``"VALID"`` .
- ``"SAME"``: Pad the input around its edges so that the shape of input and output
are the same when `stride` is set to ``1``.
The amount of padding is calculated by the operator internally. If the amount is even, it is
uniformly distributed around the input; if it is odd, the excess amount goes to the right/bottom side.
- ``"VALID"``: No padding is applied to the input, and the output returns the maximum
possible height and width. Extra pixels that could not complete a full stride will
be discarded.
data_format (str, optional): The format of input and output data. It should be ``'NHWC'`` or ``'NCHW'`` .
Default: ``'NCHW'`` .
Inputs:
- **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
Supported dtypes: float16, float32, float64.
Outputs:
Tensor, with shape :math:`(N, C_{out}, H_{out}, W_{out})`.
Raises:
TypeError: If `kernel_size` or `strides` is neither int nor tuple.
TypeError: If dtype of `x` is not float16, float32 or float64.
ValueError: If `kernel_size` or `strides` is less than 1.
ValueError: If `pad_mode` is neither 'valid' nor 'same' (case insensitive).
ValueError: If `data_format` is neither 'NCHW' nor 'NHWC'.
ValueError: If length of shape of `x` is not equal to 4.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops, nn
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.avgpool_op = ops.AvgPool(pad_mode='VALID', kernel_size=2, strides=1)
...
... def construct(self, x):
... result = self.avgpool_op(x)
... return result
...
>>> x = Tensor(np.arange(1 * 3 * 3 * 4).reshape(1, 3, 3, 4), mindspore.float32)
>>> net = Net()
>>> output = net(x)
>>> print(output)
[[[[ 2.5 3.5 4.5]
[ 6.5 7.5 8.5]]
[[14.5 15.5 16.5]
[18.5 19.5 20.5]]
[[26.5 27.5 28.5]
[30.5 31.5 32.5]]]]
"""
@prim_arg_register
def __init__(self, kernel_size=1, strides=1, pad_mode='VALID', data_format='NCHW'):
self._set_prim_arg_with_handler("kernel_size", kernel_size, to_kernel_size)
self._set_prim_arg_with_handler("strides", strides, to_strides)
self._set_prim_arg_with_handler("pad_mode", pad_mode, str_to_enum)
self._set_prim_arg_with_handler("data_format", data_format, str_to_enum)
def __call__(self, x):
return super().__call__(x, self.kernel_size, self.strides, self.pad_mode, self.data_format)
class EluGradExt(Primitive):
r"""
Gradients of EluExt operation.
"""
__mindspore_signature__ = (
sig.make_sig('dout'),
sig.make_sig('x_or_out'),
sig.make_sig('alpha', default=1.0),
sig.make_sig('is_result', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, dout, x_or_out, alpha=1.0, is_result=False):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_elu_grad_ext(self, [dout, x_or_out, alpha, is_result]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, dout, x_or_out, alpha, is_result)
return res
elu_grad_ext_op=EluGradExt()
class RandpermExt(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('n'),
sig.make_sig('seed'),
sig.make_sig('offset'),
sig.make_sig('dtype', default=mstype.int64),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, n, seed, offset, dtype=mstype.int64):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_randperm_ext(self, [n, seed, offset, dtype_to_type_id('RandpermExt', 'dtype', dtype)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, n, seed, offset, dtype_to_type_id('RandpermExt', 'dtype', dtype))
return res
randperm_ext_op=RandpermExt()
class HSwish(Primitive):
r"""
.. code-block::
prim = ops.HSwish()
out = prim(input)
is equivalent to
.. code-block::
ops.hardswish(input)
Refer to :func:`mindspore.ops.hardswish` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_hswish(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
hswish_op=HSwish()
class AssignSub(Primitive):
r"""
.. code-block::
prim = ops.AssignSub()
out = prim(variable, value)
is equivalent to
.. code-block::
ops.assign_sub(variable, value)
Refer to :func:`mindspore.ops.assign_sub` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('variable', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('value', dtype=sig.sig_dtype.T),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, variable, value):
return super().__call__(variable, value)
assign_sub_op=AssignSub()
class TopKRouter(Primitive):
r"""
.. code-block::
prim = ops.TopKRouter()
out = prim(input, capacity, expert_num, drop_type)
is equivalent to
.. code-block::
ops.topkrouter(input, capacity, expert_num, drop_type)
Refer to :func:`mindspore.ops.topkrouter` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('capacity'),
sig.make_sig('expert_num'),
sig.make_sig('drop_type', default=0),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, capacity, expert_num, drop_type=0):
return super().__call__(input, capacity, expert_num, drop_type)
topkrouter_op=TopKRouter()
class DCTN(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('type', default=2),
sig.make_sig('s', default=None),
sig.make_sig('axes', default=None),
sig.make_sig('norm', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, type=2, s=None, axes=None, norm=None):
return super().__call__(x, type, s, axes, norm if norm is None else str_to_enum('DCTN', 'norm', norm))
dctn_op=DCTN()
class InplaceErfinv(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_erfinv(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
inplace_erfinv_op=InplaceErfinv()
class HFFTN(Primitive):
r"""
.. code-block::
prim = ops.HFFTN()
out = prim(input, s, dim, norm)
is equivalent to
.. code-block::
ops.hfftn(input, s, dim, norm)
Refer to :func:`mindspore.ops.hfftn` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('s', default=None),
sig.make_sig('dim', default=None),
sig.make_sig('norm', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, s=None, dim=None, norm=None):
return super().__call__(input, s, dim, norm if norm is None else str_to_enum('HFFTN', 'norm', norm))
hfftn_op=HFFTN()
class ApplyAdamW(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T1),
sig.make_sig('v', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T1),
sig.make_sig('beta1_power', dtype=sig.sig_dtype.T2),
sig.make_sig('beta2_power', dtype=sig.sig_dtype.T3),
sig.make_sig('lr', dtype=sig.sig_dtype.T4),
sig.make_sig('weight_decay', dtype=sig.sig_dtype.T4),
sig.make_sig('beta1', dtype=sig.sig_dtype.T4),
sig.make_sig('beta2', dtype=sig.sig_dtype.T4),
sig.make_sig('epsilon', dtype=sig.sig_dtype.T4),
sig.make_sig('grad', dtype=sig.sig_dtype.T),
sig.make_sig('max_grad_norm', dtype=sig.sig_dtype.T5, default=None),
sig.make_sig('amsgrad', dtype=sig.sig_dtype.T6, default=False),
sig.make_sig('maximize', dtype=sig.sig_dtype.T7, default=False),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, var, m, v, beta1_power, beta2_power, lr, weight_decay, beta1, beta2, epsilon, grad, max_grad_norm=None, amsgrad=False, maximize=False):
return super().__call__(var, m, v, beta1_power, beta2_power, lr, weight_decay, beta1, beta2, epsilon, grad, max_grad_norm, amsgrad, maximize)
apply_adamw_op=ApplyAdamW()
class BincountExt(Primitive):
r"""
.. code-block::
prim = ops.BincountExt()
out = prim(input, weights, minlength)
is equivalent to
.. code-block::
ops.bincount_ext(input, weights, minlength)
Refer to :func:`mindspore.ops.bincount_ext` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('weights', default=None),
sig.make_sig('minlength', default=0),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, weights=None, minlength=0):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_bincount_ext(self, [input, weights, minlength]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, weights, minlength)
return res
bincount_ext_op=BincountExt()
class RFFT(Primitive):
r"""
.. code-block::
prim = ops.RFFT()
out = prim(input, n, dim, norm)
is equivalent to
.. code-block::
ops.rfft(input, n, dim, norm)
Refer to :func:`mindspore.ops.rfft` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('n', default=None),
sig.make_sig('dim', default=-1),
sig.make_sig('norm', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, n=None, dim=-1, norm=None):
return super().__call__(input, n, dim, norm if norm is None else str_to_enum('RFFT', 'norm', norm))
rfft_op=RFFT()
class Transpose(Primitive):
r"""
.. code-block::
prim = ops.Transpose()
out = prim(input, input_perm)
is equivalent to
.. code-block::
ops.transpose(input, input_perm)
Refer to :func:`mindspore.ops.transpose` for more details.
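A minimal usage sketch (shape and permutation chosen arbitrarily for illustration):
Examples:
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.ones((2, 3)), mindspore.float32)
>>> # Permute the two axes of a (2, 3) tensor with perm (1, 0).
>>> output = ops.Transpose()(x, (1, 0))
>>> print(output.shape)
(3, 2)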
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, input_perm):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_transpose(self, [input, input_perm]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, input_perm)
return res
transpose_op=Transpose()
class Atanh(Primitive):
r"""
.. code-block::
prim = ops.Atanh()
out = prim(input)
is equivalent to
.. code-block::
ops.atanh(input)
Refer to :func:`mindspore.ops.atanh` for more details.
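A minimal usage sketch (input values chosen arbitrarily; atanh is defined on (-1, 1)):
Examples:
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([0.0, 0.5, -0.5]), mindspore.float32)
>>> # Elementwise op: the output shape matches the input shape.
>>> output = ops.Atanh()(x)
>>> print(output.shape)
(3,)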
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_atanh(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
atanh_op=Atanh()
class LayerNormGradGrad(Primitive):
r"""
Gets the gradient of LayerNormGrad operation.
Inputs:
- **x** (Tensor) - The input tensor to be normalized, float32 or float16.
- **dy** (Tensor) - The gradient of LayerNorm's output y, float32 or float16.
- **variance** (Tensor) - The variance of x, float32 or float16.
- **mean** (Tensor) - The mean of x, float32 or float16.
- **gamma** (Tensor) - The original value of weight gamma initialized in LayerNorm, float32 or float16.
Default: 'ones'.
- **d_dx** (Tensor) - The gradient of dx, where dx is the gradient of LayerNorm's input x, float32 or float16.
- **d_dg** (Tensor) - The gradient of dg, where dg is the gradient of LayerNorm's weight gamma,
float32 or float16.
- **d_db** (Tensor) - The gradient of db, where db is the gradient of LayerNorm's weight beta,
float32 or float16.
- **begin_norm_axis** (int) - The begin axis for the input to apply layernorm. Default: 1.
- **begin_params_axis** (int) - The begin axis for the parameter input to apply layernorm. Default: 1.
Outputs:
Tuple[Tensor], tuple of 3 Tensors (the gradients of layernormgrad x, dy, gamma).
Raises:
TypeError: If the 8 inputs don't have the same dtype.
ValueError: If x, dy, d_dx don't have the same shape.
ValueError: If variance, mean don't have the same shape.
ValueError: If gamma, d_dg, d_db don't have the same shape.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
"""
@prim_arg_register
def __init__(self, begin_norm_axis=1, begin_params_axis=1):
self._set_prim_arg("begin_norm_axis", begin_norm_axis)
self._set_prim_arg("begin_params_axis", begin_params_axis)
def __call__(self, x, dy, variance, mean, gamma, d_dx, d_dg, d_db):
return super().__call__(x, dy, variance, mean, gamma, d_dx, d_dg, d_db, self.begin_norm_axis, self.begin_params_axis)
class ReflectionPad3DGrad(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, grad_output, input, padding):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_reflection_pad_3d_grad(self, [grad_output, input, padding]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, grad_output, input, padding)
return res
reflection_pad_3d_grad_op=ReflectionPad3DGrad()
class IndexAddExt(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('index', dtype=sig.sig_dtype.T1),
sig.make_sig('source', dtype=sig.sig_dtype.T),
sig.make_sig('axis', dtype=sig.sig_dtype.T2),
sig.make_sig('alpha', dtype=sig.sig_dtype.T3, default=1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, index, source, axis, alpha=1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_index_add_ext(self, [input, index, source, axis, alpha]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, index, source, axis, alpha)
return res
index_add_ext_op=IndexAddExt()
class Less(Primitive):
r"""
.. code-block::
prim = ops.Less()
out = prim(input, other)
is equivalent to
.. code-block::
ops.less(input, other)
Refer to :func:`mindspore.ops.less` for more details.
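A minimal usage sketch (input values chosen arbitrarily for illustration):
Examples:
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> other = Tensor(np.array([1, 1, 4]), mindspore.int32)
>>> # Elementwise comparison: returns a boolean tensor of the broadcast shape.
>>> output = ops.Less()(input, other)
>>> print(output.shape)
(3,)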
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_less(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
less_op=Less()
class IsFinite(Primitive):
r"""
.. code-block::
prim = ops.IsFinite()
out = prim(input)
is equivalent to
.. code-block::
ops.isfinite(input)
Refer to :func:`mindspore.ops.isfinite` for more details.
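A minimal usage sketch (input values chosen arbitrarily for illustration):
Examples:
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([np.inf, 1.0, np.nan]), mindspore.float32)
>>> # Elementwise check: returns a boolean tensor with the same shape as the input.
>>> output = ops.IsFinite()(x)
>>> print(output.shape)
(3,)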
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_isfinite(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
isfinite_op=IsFinite()
class IndexSelect(Primitive):
r"""
.. code-block::
prim = ops.IndexSelect()
out = prim(input, dim, index)
is equivalent to
.. code-block::
ops.index_select_ext(input, dim, index)
Refer to :func:`mindspore.ops.index_select_ext` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim, index):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_index_select(self, [input, dim, index]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim, index)
return res
index_select_op=IndexSelect()
class ACosGrad(Primitive):
r"""
Computes ACosGrad of input element-wise.
Returns:
Tensor, has the same type as input.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, dout):
return super().__call__(x, dout)
a_cos_grad_op=ACosGrad()
class LogAddExp2(Primitive):
r"""
.. code-block::
prim = ops.LogAddExp2()
out = prim(input, other)
is equivalent to
.. code-block::
ops.logaddexp2(input, other)
Refer to :func:`mindspore.ops.logaddexp2` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_logaddexp2(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
logaddexp2_op=LogAddExp2()
class MaxUnpool2DExt(Primitive):
r"""
.. code-block::
prim = ops.MaxUnpool2DExt()
out = prim(input, indices, kernel_size, stride, padding, output_size)
is equivalent to
.. code-block::
ops.max_unpool2d_ext(input, indices, kernel_size, stride, padding, output_size)
Refer to :func:`mindspore.ops.max_unpool2d_ext` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('indices'),
sig.make_sig('kernel_size'),
sig.make_sig('stride', default=None),
sig.make_sig('padding', default=0),
sig.make_sig('output_size', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, indices, kernel_size, stride=None, padding=0, output_size=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_max_unpool2d_ext(self, [input, indices, kernel_size, stride, padding, output_size]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, indices, kernel_size, stride, padding, output_size)
return res
max_unpool2d_ext_op=MaxUnpool2DExt()
class GroupNormGrad(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('dy'),
sig.make_sig('x'),
sig.make_sig('mean'),
sig.make_sig('rstd'),
sig.make_sig('gamma_opt'),
sig.make_sig('num_groups'),
sig.make_sig('dx_is_require', default=True),
sig.make_sig('dgamma_is_require', default=True),
sig.make_sig('dbeta_is_require', default=True),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, dy, x, mean, rstd, gamma_opt, num_groups, dx_is_require=True, dgamma_is_require=True, dbeta_is_require=True):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_group_norm_grad(self, [dy, x, mean, rstd, gamma_opt, num_groups, dx_is_require, dgamma_is_require, dbeta_is_require]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, dy, x, mean, rstd, gamma_opt, num_groups, dx_is_require, dgamma_is_require, dbeta_is_require)
return res
group_norm_grad_op=GroupNormGrad()
class CumminExt(Primitive):
r"""
.. code-block::
prim = ops.CumminExt()
out = prim(input, dim)
is equivalent to
.. code-block::
ops.cummin_ext(input, dim)
Refer to :func:`mindspore.ops.cummin_ext` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_cummin_ext(self, [input, dim]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim)
return res
cummin_ext_op=CumminExt()
class AsStrided(Primitive):
r"""
.. code-block::
prim = ops.AsStrided()
out = prim(input, size, stride, storage_offset)
is equivalent to
.. code-block::
ops.as_strided(input, size, stride, storage_offset)
Refer to :func:`mindspore.ops.as_strided` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('size'),
sig.make_sig('stride'),
sig.make_sig('storage_offset', default=0),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, size, stride, storage_offset=0):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_as_strided(self, [input, size, stride, storage_offset]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, size, stride, storage_offset)
return res
as_strided_op=AsStrided()
class FFT2(Primitive):
r"""
.. code-block::
prim = ops.FFT2()
out = prim(input, s, dim, norm)
is equivalent to
.. code-block::
ops.fft2(input, s, dim, norm)
Refer to :func:`mindspore.ops.fft2` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('s', default=None),
sig.make_sig('dim', default=(-2, -1)),
sig.make_sig('norm', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, s=None, dim=(-2, -1), norm=None):
return super().__call__(input, s, dim, norm if norm is None else str_to_enum('FFT2', 'norm', norm))
fft2_op=FFT2()
class IDCTN(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('type', default=2),
sig.make_sig('s', default=None),
sig.make_sig('axes', default=None),
sig.make_sig('norm', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, type=2, s=None, axes=None, norm=None):
return super().__call__(x, type, s, axes, norm if norm is None else str_to_enum('IDCTN', 'norm', norm))
idctn_op=IDCTN()
class ReflectionPad3D(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, padding):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_reflection_pad_3d(self, [input, padding]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, padding)
return res
reflection_pad_3d_op=ReflectionPad3D()
class AsinExt(Primitive):
r"""
.. code-block::
prim = ops.AsinExt()
out = prim(input)
is equivalent to
.. code-block::
ops.asin_ext(input)
Refer to :func:`mindspore.ops.asin_ext` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_asin_ext(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
asin_ext_op=AsinExt()
class FFT(Primitive):
r"""
.. code-block::
prim = ops.FFT()
out = prim(input, n, dim, norm)
is equivalent to
.. code-block::
ops.fft(input, n, dim, norm)
Refer to :func:`mindspore.ops.fft` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('n', default=None),
sig.make_sig('dim', default=-1),
sig.make_sig('norm', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, n=None, dim=-1, norm=None):
return super().__call__(input, n, dim, norm if norm is None else str_to_enum('FFT', 'norm', norm))
fft_op=FFT()
class MaskedSelect(Primitive):
r"""
.. code-block::
prim = ops.MaskedSelect()
out = prim(input, mask)
is equivalent to
.. code-block::
ops.masked_select(input, mask)
Refer to :func:`mindspore.ops.masked_select` for more details.
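A minimal usage sketch (input and mask values chosen arbitrarily for illustration):
Examples:
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([1, 2, 3, 4]), mindspore.int32)
>>> mask = Tensor(np.array([True, False, True, False]))
>>> # Returns a 1-D tensor containing the elements of input where mask is True.
>>> output = ops.MaskedSelect()(input, mask)
>>> print(output.shape)
(2,)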
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, mask):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_masked_select(self, [input, mask]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, mask)
return res
masked_select_op=MaskedSelect()
class DistCommAllToAllV(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, other, input, group, send_numel_list, recv_numel_list, rank_size):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_dist_comm_all_to_all_v(self, [other, input, group, send_numel_list, recv_numel_list, rank_size]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, other, input, group, send_numel_list, recv_numel_list, rank_size)
return res
dist_comm_all_to_all_v_op=DistCommAllToAllV()
class BCEWithLogitsLoss(Primitive):
r"""
Adds sigmoid activation function to `input` as logits, and uses the given logits to compute binary cross entropy
between the logits and the target.
Sets input `input` as :math:`X`, input `target` as :math:`Y`, input weight as :math:`W`, output as :math:`L`.
Then,
.. math::
\begin{array}{ll} \\
p_{ij} = sigmoid(X_{ij}) = \frac{1}{1 + e^{-X_{ij}}} \\
L_{ij} = -[Y_{ij}log(p_{ij}) + (1 - Y_{ij})log(1 - p_{ij})]
\end{array}
:math:`i` indicates the :math:`i^{th}` sample, :math:`j` indicates the category. Then,
.. math::
\ell(x, y) = \begin{cases}
L, & \text{if reduction} = \text{'none';}\\
\operatorname{mean}(L), & \text{if reduction} = \text{'mean';}\\
\operatorname{sum}(L), & \text{if reduction} = \text{'sum'.}
\end{cases}
:math:`\ell` indicates the method of calculating the loss. There are three methods:
the first method is to provide the loss value directly,
the second method is to calculate the average value of all losses,
and the third method is to calculate the sum of all losses.
This operator will multiply the output by the corresponding weight.
The tensor `weight` assigns different weights to each piece of data in the batch,
and the tensor `pos_weight` adds corresponding weights to the positive examples of each category.
In addition, it can trade off recall and precision by adding weights to positive examples.
In the case of multi-label classification the loss can be described as:
.. math::
\begin{array}{ll} \\
p_{ij,c} = sigmoid(X_{ij,c}) = \frac{1}{1 + e^{-X_{ij,c}}} \\
L_{ij,c} = -[P_{c}Y_{ij,c} * log(p_{ij,c}) + (1 - Y_{ij,c})log(1 - p_{ij,c})]
\end{array}
where c is the class number (c>1 for multi-label binary classification, c=1 for single-label binary classification),
n is the number of samples in the batch and :math:`P_c` is the weight of the positive answer for the class c.
:math:`P_c>1` increases the recall, :math:`P_c<1` increases the precision.
Args:
reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
``'sum'`` . Default: ``'mean'`` .
- ``'none'``: no reduction will be applied.
- ``'mean'``: compute and return the weighted mean of elements in the output.
- ``'sum'``: the output elements will be summed.
Inputs:
- **input** (Tensor) - Input `input`. Tensor of shape :math:`(N, *)` where :math:`*` means, any number of
additional dimensions. Data type must be float16, float32 or bfloat16(only Atlas A2 series products are
supported).
- **target** (Tensor) - Ground truth label, has the same shape as `input`.
Data type must be float16, float32 or bfloat16(only Atlas A2 series products are supported).
- **weight** (Tensor) - A rescaling weight applied to the loss of each batch element. It can be
broadcast to a tensor with shape of `input`. Data type must be float16, float32 or bfloat16(only Atlas A2
series products are supported).
- **pos_weight** (Tensor) - A weight of positive examples. Must be a vector with length equal to the
number of classes. It can be broadcast to a tensor with shape of `input`.
Data type must be float16, float32 or bfloat16(only Atlas A2 series products are supported).
Outputs:
Tensor or Scalar, if `reduction` is ``'none'``, it's a tensor with the same shape and type as input `input`.
Otherwise, the output is a scalar.
Raises:
TypeError: If any input is not Tensor.
TypeError: If data type of any input is not float16, float32 or bfloat16.
TypeError: If data type of `reduction` is not string.
ValueError: If `weight` or `pos_weight` can not be broadcast to a tensor with shape of `input`.
ValueError: If `reduction` is not one of ``'none'``, ``'mean'`` or ``'sum'``.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([[-0.8, 1.2, 0.7], [-0.1, -0.4, 0.7]]), mindspore.float32)
>>> target = Tensor(np.array([[0.3, 0.8, 1.2], [-0.6, 0.1, 2.2]]), mindspore.float32)
>>> weight = Tensor(np.array([1.0, 1.0, 1.0]), mindspore.float32)
>>> pos_weight = Tensor(np.array([1.0, 1.0, 1.0]), mindspore.float32)
>>> loss = ops.BCEWithLogitsLoss()
>>> output = loss(input, target, weight, pos_weight)
>>> print(output)
0.3463612
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('target'),
sig.make_sig('weight', default=None),
sig.make_sig('posWeight', default=None),
)
@prim_arg_register
def __init__(self, reduction='mean'):
self._set_prim_arg_with_handler("reduction", reduction, str_to_enum)
def __call__(self, input, target, weight=None, posWeight=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_binary_cross_entropy_with_logits(self, [input, target, weight, posWeight, self.reduction]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, target, weight, posWeight, self.reduction)
return res
class RotaryPositionEmbedding(Primitive):
r"""
.. code-block::
prim = ops.RotaryPositionEmbedding()
out = prim(x, cos, sin, mode)
is equivalent to
.. code-block::
ops.rotary_position_embedding(x, cos, sin, mode)
Refer to :func:`mindspore.ops.rotary_position_embedding` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('cos'),
sig.make_sig('sin'),
sig.make_sig('mode', default=0),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, cos, sin, mode=0):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_rotary_position_embedding(self, [x, cos, sin, mode]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x, cos, sin, mode)
return res
rotary_position_embedding_op=RotaryPositionEmbedding()
class ReLU6Grad(Primitive):
r"""
Computes gradient for the ReLU6 activation.
Args:
y_backprop (Tensor): Input gradients tensor, has the same dtype and shape as `x`.
x (Tensor): Origin input tensor.
Returns:
Tensor, has the same dtype and shape as `x`.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, y_backprop, x):
return super().__call__(y_backprop, x)
relu6_grad_op=ReLU6Grad()
class SearchSorted(Primitive):
r"""
Return the position indices such that after inserting the values into the `sorted_sequence`, the order of innermost
dimension of the `sorted_sequence` remains unchanged.
.. warning::
This is an experimental API that is subject to change or deletion.
Refer to :func:`mindspore.ops.searchsorted` for more details.
Args:
dtype (mindspore.dtype, optional): The specified type of output tensor. Optional values are: ``mstype.int32`` and
``mstype.int64``. Default value: ``mstype.int64``.
right (bool, optional): Search Strategy. If ``True`` , return the last suitable index found;
if ``False`` , return the first such index. Default: ``False`` .
Inputs:
- **sorted_sequence** (Tensor) - The input tensor. It must contain a monotonically increasing sequence on the innermost dimension.
- **values** (Tensor) - The value that should be inserted.
- **sorter** (Tensor, optional) - If provided, a tensor matching the shape of the unsorted `sorted_sequence`,
containing a sequence of indices that sort it in ascending order along the innermost dimension; its type must be int64. Default: ``None`` . CPU and GPU support only the default value.
Outputs:
Tensor containing the indices from the innermost dimension of `sorted_sequence` such that,
if the corresponding values in the `values` Tensor were inserted, the order of `sorted_sequence` would be preserved.
Its data type is int32 if `dtype` is ``mstype.int32``, otherwise int64, and its shape is the same as the shape of
`values`.
Raises:
ValueError: If the dimension of `sorted_sequence` isn't 1 and all dimensions except the last dimension of `sorted_sequence` and `values` are different.
ValueError: If `sorted_sequence` value is a scalar.
ValueError: If `values` is a scalar when `sorted_sequence` dimension is not 1.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> searchsorted = ops.SearchSorted()
>>> sorted_sequence = Tensor(np.array([[0, 1, 3, 5, 7], [2, 4, 6, 8, 10]]), mindspore.float32)
>>> values = Tensor(np.array([[3, 6, 9], [3, 6, 9]]), mindspore.float32)
>>> output = searchsorted(sorted_sequence, values)
>>> print(output)
[[2 4 5]
[1 2 4]]
"""
__mindspore_signature__ = (
sig.make_sig('sorted_sequence'),
sig.make_sig('values'),
sig.make_sig('sorter', default=None),
)
@prim_arg_register
def __init__(self, dtype=mstype.int64, right=False):
self._set_prim_arg_with_handler("dtype", dtype, dtype_to_type_id)
self._set_prim_arg("right", right)
def __call__(self, sorted_sequence, values, sorter=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_searchsorted(self, [sorted_sequence, values, sorter, self.dtype, self.right]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, sorted_sequence, values, sorter, self.dtype, self.right)
return res
class AvgPool1D(Primitive):
r"""
.. code-block::
prim = ops.AvgPool1D()
out = prim(input, kernel_size, stride, padding, ceil_mode, count_include_pad)
is equivalent to
.. code-block::
ops.avg_pool1d_ext(input, kernel_size, stride, padding, ceil_mode, count_include_pad)
Refer to :func:`mindspore.ops.avg_pool1d_ext` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('kernel_size'),
sig.make_sig('stride', default=None),
sig.make_sig('padding', default=0),
sig.make_sig('ceil_mode', default=False),
sig.make_sig('count_include_pad', default=True),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_avg_pool1d(self, [input, kernel_size, stride, padding, ceil_mode, count_include_pad]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, kernel_size, stride, padding, ceil_mode, count_include_pad)
return res
avg_pool1d_op=AvgPool1D()
class InplaceDivs(Primitive):
r"""
.. code-block::
prim = ops.InplaceDivs()
out = prim(input, other)
is equivalent to
.. code-block::
ops.div_scalar_(input, other)
Refer to :func:`mindspore.ops.div_scalar_` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('other'),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_divs(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
inplace_divs_op=InplaceDivs()
class InplaceStopGradient(Primitive):
r"""
.. code-block::
prim = ops.InplaceStopGradient()
out = prim(input)
is equivalent to
.. code-block::
ops.inplace_stop_gradient(input)
Refer to :func:`mindspore.ops.inplace_stop_gradient` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_stop_gradient(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
inplace_stop_gradient_op=InplaceStopGradient()
class GluGrad(Primitive):
r"""
Computes grad for Glu operation.
"""
@prim_arg_register
def __init__(self, axis):
self._set_prim_arg("axis", axis)
def __call__(self, grads, x):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_glu_grad(self, [grads, x, self.axis]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, grads, x, self.axis)
return res
class AShardIdentity(Primitive):
r"""
.. code-block::
prim = ops.AShardIdentity()
out = prim(input)
is equivalent to
.. code-block::
ops.shard_identity(input)
Refer to :func:`mindspore.ops.shard_identity` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
shard_identity_op=AShardIdentity()
class BernoulliExt(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, seed, offset):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_bernoulli_ext(self, [input, seed, offset]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, seed, offset)
return res
bernoulli_ext_op=BernoulliExt()
class InplaceScatterValueReduce(Primitive):
r"""
InplaceScatterValueReduce is for scatter_ when using a scalar as the source element with reduce.
For details, please refer to :func:`mindspore.Tensor.scatter_`.
Examples:
>>> from mindspore import Tensor, int64, float32
>>> this_tensor = Tensor([[1, 2], [3, 4]], dtype=float32)
>>> index = Tensor([[0], [1]], dtype=int64)
>>> this_tensor.scatter_(0, index, 3, reduce="multiply")
>>> print(this_tensor)
[[3., 2.],
[9., 4.]]
Supported Platforms:
``Ascend``
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('dim'),
sig.make_sig('index'),
sig.make_sig('value'),
sig.make_sig('reduce'),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, dim, index, value, reduce):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_scatter_value_reduce(self, [input, dim, index, value, str_to_enum('InplaceScatterValueReduce', 'reduce', reduce)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim, index, value, str_to_enum('InplaceScatterValueReduce', 'reduce', reduce))
return res
inplace_scatter_value_reduce_op=InplaceScatterValueReduce()
class DistCommReduceScatter(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, other, input_list, rank_size, op_type, group):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_dist_comm_reduce_scatter(self, [other, input_list, rank_size, op_type, group]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, other, input_list, rank_size, op_type, group)
return res
dist_comm_reduce_scatter_op=DistCommReduceScatter()
class MedianDim(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('dim', default=-1),
sig.make_sig('keepdim', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim=-1, keepdim=False):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_median_dim(self, [input, dim, keepdim]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim, keepdim)
return res
median_dim_op=MedianDim()
class FlashAttentionScore(Primitive):
r"""
FlashAttentionScore.
.. math::
\begin{array}{ll} \\
y = Dropout(Softmax(Mask(scale\_value \cdot (real\_shift + query \cdot key), attn\_mask), -1), keep\_prob) \\
\cdot value \\
\end{array}
B -- Batch size. Value range 1 to 2k.
S1 -- Sequence length of query. Value range 1 to 512k.
S2 -- Sequence length of key and value. Value range 1 to 512k.
N1 -- Num heads of query. Value range 1 to 256.
N2 -- Num heads of key and value, and N2 must be a factor of N1.
D -- Head size. The value must be a multiple of 16, with a maximum value of 512.
H1 -- Hidden size of query, which equals to N1 * D.
H2 -- Hidden size of key and value, which equals to N2 * D.
.. warning::
This is an experimental API that is subject to change or deletion. Only support on Atlas A2 training series.
Args:
head_num (int): The head num of query, equal to N1.
keep_prob (float): The keep probability of dropout. Value range is (0.0, 1.0]. Default: 1.0. When keep_prob
is 1.0, drop_mask should be None.
scale_value (float): The scale factor of score. Generally, the value is 1.0 / (D ** 0.5). Default: 1.0.
pre_tokens (int): Parameter for sparse computation, represents how many tokens are counted forward.
When sparse_mode is set to 1, 2, 3, or 5, this parameter does not take effect. Default: 2147483647.
next_tokens (int): Parameter for sparse computation, represents how many tokens are counted backward.
When sparse_mode is set to 1, 2, 3, or 5, this parameter does not take effect. Default: 2147483647.
The value of pre_tokens corresponds to S1, and the value of next_tokens corresponds to S2. They define the
valid area on the attn_mask matrix. It must ensure that the band is not empty.
The following values are not allowed:
- pre_tokens < 0 and next_tokens < 0.
- (pre_tokens < 0 and next_tokens >= 0) and (next_tokens < abs(pre_tokens) or abs(pre_tokens) >= S2).
- (pre_tokens >= 0 and next_tokens < 0) and (abs(next_tokens) > pre_tokens or abs(next_tokens) >= S1).
inner_precise (int): The parameter is reserved and not implemented yet. Default: 0.
input_layout (str): Specifies the layout of input `query`, key and value. The value can be "BSH", "BNSD", "SBH",
"BSND", "TH" or "TND". "TH" and "TND" are experimental formats. Default: "BSH".
When input_layout is "TND", the following restrictions must be met.
There are two lists that represent the length of the input sequence: list_seq_q and list_seq_k. Each
value in the list indicates the length of the sequence in the batch. For example, list_seq_q = [4, 2, 6],
list_seq_k = [10, 3, 9]. The elements of the lists indicate S. T1 is sum(list_seq_q) = 12, T2 is
sum(list_seq_k) = 22.
max_seqlen_q = max(list_seq_q), max_seqlen_k = max(list_seq_k).
qk_pointer = sum(list_seq_q * list_seq_k), which is the sum of the element multiplication.
- The lengths of two lists are the same, and size of list is batch. batch is less than or equal to 1024.
- When input_layout is "TND", actual_seq_qlen and actual_seq_kvlen must not be None.
Otherwise, they are None.
- The actual_seq_qlen and actual_seq_kvlen are the cumulative sum of sequence of key/value, so they must
be non-decreasing.
- If real_shift is not none, list_seq_q and list_seq_k must be same. The maximum value of list_seq_q and
list_seq_k is greater than 1024. Real_shift should be `(B, N1, 1024, S2)` and `(1, N1, 1024, S2)`, and
S2 is equal to max_seqlen_k.
- Attn mask must be a lower triangular matrix, so sparse_mode should be 2 or 3. The shape of attn_mask
should be `(2048, 2048)`.
- The shape of drop_mask is (qk_pointer * N1 // 8,).
- Prefix is none.
- Next_tokens is 0, and pre_tokens is not less than max_seqlen_q.
- When sparse_mode is 3, S1 of each batch should be less than or equal to S2.
- 0 should not exist in list_seq_k.
sparse_mode (int): Indicates sparse mode. Default 0.
- 0: Indicates the defaultMask mode. If attn_mask is not passed, the mask operation is not performed,
and preTokens and nextTokens (internally assigned as INT_MAX) are ignored. If passed in, the full attn_mask
matrix (S1 * S2) needs to be passed in, indicating that the part between preTokens and nextTokens needs to
be calculated.
- 1: Represents allMask, that is, passing in the complete attn_mask matrix.
- 2: Representing the leftUpCausal mode corresponds to the lower triangle scenario divided by the left
vertex, and the optimized attn_mask matrix (2048*2048) is required.
- 3: Representing the rightDownCausal model corresponds to the lower triangle scene divided by the lower
right vertex, and the optimized attn_mask matrix (2048*2048) is required.
- 4: Represents the band scenario, that is, only the part between preTokens and nextTokens is calculated, and the
optimized attn_mask matrix (2048*2048) is required.
- 5: Represents the prefix scenario, that is, on the basis of rightDownCasual, a matrix with length S1 and
width N is added to the left side. The value of N is obtained by the new input prefix, and the N value of
each Batch axis is different, not implemented yet.
- 6: Represents the global scenario, not implemented yet.
- 7: Represents the dilated scenario, not implemented yet.
- 8: Represents the block_local scenario, not implemented yet.
Inputs:
- **query** (Tensor[float16, bfloat16]) - The query tensor.
Input tensor of shape :math:`(B, S1, H1)`, `(B, N1, S1, D)`, `(S1, B, H1)`, `(B, S1, N1, D)` or `(T1, N1, D)`.
- **key** (Tensor[float16, bfloat16]) - The key tensor.
Input tensor of shape :math:`(B, S2, H2)`, `(B, N2, S2, D)`, `(S2, B, H2)`, `(B, S2, N2, D)` or `(T2, N2, D)`.
- **value** (Tensor[float16, bfloat16]) - The value tensor.
Input tensor of shape :math:`(B, S2, H2)`, `(B, N2, S2, D)`, `(S2, B, H2)`, `(B, S2, N2, D)` or `(T2, N2, D)`.
The key and value have the same shape.
- **real_shift** (Union[Tensor[float16, bfloat16], None]) - Also known as pse. The position embedding code. If S
is greater than 1024 and the mask of the lower triangle is used, enter only the inverse 1024 lines of
the lower triangle for memory optimization. Input tensor of shape :math:`(B, N1, S1, S2)`,
`(1, N1, S1, S2)`, `(B, N1, 1024, S2)`, `(1, N1, 1024, S2)`.
- ALiBi scenario: real_shift must meet the ALiBi rule, and sparse_mode is 2 or 3 for the lower triangle.
In this scenario, real_shift is `(B, N1, 1024, S2)`, `(1, N1, 1024, S2)`.
- Non-ALiBi scenario: real_shift is `(B, N1, S1, S2)`, `(1, N1, S1, S2)`.
The shape of `real_shift` should be `(B, N1, 1024, S2)` and `(1, N1, 1024, S2)` when input_layout is `TND`.
- **drop_mask** (Union[Tensor[uint8], None]) - The dropout mask tensor.
Input tensor of shape :math:`(B, N1, S1, S2 // 8)` or None. S2 is a multiple of 8 when not None.
- **padding_mask** (None) - Reserved parameter. Not implemented yet.
- **attn_mask** (Union[Tensor[uint8], Tensor[bool], None]) - The attention mask tensor. For each element, 0
indicates retention and 1 indicates discard. Input tensor of shape :math:`(B, N1, S1, S2)`, `(B, 1, S1, S2)`,
`(S1, S2)` or (2048, 2048). In compression scenario, sparse_mode is 2, 3, or 4, attn_mask must be
`(2048, 2048)`. When sparse_mode is 5, attn_mask must be `(B, N1, S1, S2)`, `(B, 1, S1, S2)`. When sparse_mode
is 0 and 1, attn_mask should be `(B, N1, S1, S2)`, `(B, 1, S1, S2)`, `(S1, S2)`.
- **prefix** (Union[List[int64], Tuple[int64], None]) - N value of each Batch in the prefix sparse calculation
scenario. Input tensor of shape :math:`(B,)`. B max value 32. Not none only when sparse_mode is 5.
If S1 > S2, N ranges from 0 to S2. If S1 <= S2, N ranges from S2 - S1 to S2.
- **actual_seq_qlen** (Union[List[int64], Tuple[int64], None]) - Size of query corresponding to each batch, array
with increasing values and the last value equal to T1.
- **actual_seq_kvlen** (Union[List[int64], Tuple[int64], None]) - Size of key and value corresponding to each
batch, array with increasing values and the last value equal to T2.
Outputs:
- **softmax_max** (Tensor[float32]) - (B, N1, S1, 8) when input_layout is not `TND` else (T1, N1, D)
- **softmax_sum** (Tensor[float32]) - (B, N1, S1, 8) when input_layout is not `TND` else (T1, N1, D)
- **softmax_out** (Tensor[float16, bfloat16]) - Useless output, ignore it. Output tensor of shape : `()`
- **attention_out** (Tensor[float16, bfloat16]) - The output of attention, its shape, and data type
are the same as the query.
Supported Platforms:
``Ascend``
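A shape-only usage sketch (assumes an Atlas A2 device; batch and sequence sizes chosen arbitrarily):
Examples:
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> # Shape-only sketch: B=1, S1=S2=256, N1=8, D=16, H1=H2=128, layout "BSH".
>>> query = Tensor(np.random.randn(1, 256, 128), mindspore.float16)
>>> key = Tensor(np.random.randn(1, 256, 128), mindspore.float16)
>>> value = Tensor(np.random.randn(1, 256, 128), mindspore.float16)
>>> fa = ops.FlashAttentionScore(head_num=8, scale_value=1.0 / 16 ** 0.5, input_layout='BSH')
>>> # Outputs are (softmax_max, softmax_sum, softmax_out, attention_out).
>>> _, _, _, attention_out = fa(query, key, value)
>>> print(attention_out.shape)
(1, 256, 128)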
"""
__mindspore_signature__ = (
sig.make_sig('query'),
sig.make_sig('key'),
sig.make_sig('value'),
sig.make_sig('real_shift', default=None),
sig.make_sig('drop_mask', default=None),
sig.make_sig('padding_mask', default=None),
sig.make_sig('attn_mask', default=None),
sig.make_sig('prefix', default=None),
sig.make_sig('actual_seq_qlen', default=None),
sig.make_sig('actual_seq_kvlen', default=None),
)
@prim_arg_register
def __init__(self, head_num, keep_prob=1.0, scale_value=1.0, pre_tokens=2147483647, next_tokens=2147483647, inner_precise=0, input_layout='BSH', sparse_mode=0):
self._set_prim_arg("head_num", head_num)
self._set_prim_arg("keep_prob", keep_prob)
self._set_prim_arg("scale_value", scale_value)
self._set_prim_arg("pre_tokens", pre_tokens)
self._set_prim_arg("next_tokens", next_tokens)
self._set_prim_arg("inner_precise", inner_precise)
self._set_prim_arg_with_handler("input_layout", input_layout, str_to_enum)
self._set_prim_arg("sparse_mode", sparse_mode)
def __call__(self, query, key, value, real_shift=None, drop_mask=None, padding_mask=None, attn_mask=None, prefix=None, actual_seq_qlen=None, actual_seq_kvlen=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_flash_attention_score(self, [query, key, value, real_shift, drop_mask, padding_mask, attn_mask, prefix, actual_seq_qlen, actual_seq_kvlen, self.head_num, self.keep_prob, self.scale_value, self.pre_tokens, self.next_tokens, self.inner_precise, self.input_layout, self.sparse_mode]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, query, key, value, real_shift, drop_mask, padding_mask, attn_mask, prefix, actual_seq_qlen, actual_seq_kvlen, self.head_num, self.keep_prob, self.scale_value, self.pre_tokens, self.next_tokens, self.inner_precise, self.input_layout, self.sparse_mode)
return res
class AddRmsNorm(Primitive):
r"""
.. code-block::
prim = ops.AddRmsNorm()
out = prim(x1, x2, gamma, epsilon)
is equivalent to
.. code-block::
ops.add_rms_norm(x1, x2, gamma, epsilon)
Refer to :func:`mindspore.ops.add_rms_norm` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('x1'),
sig.make_sig('x2'),
sig.make_sig('gamma'),
sig.make_sig('epsilon', default=1e-6),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, x1, x2, gamma, epsilon=1e-6):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_add_rms_norm(self, [x1, x2, gamma, epsilon]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x1, x2, gamma, epsilon)
return res
add_rms_norm_op=AddRmsNorm()
class Sinc(Primitive):
r"""
.. code-block::
prim = ops.Sinc()
out = prim(input)
is equivalent to
.. code-block::
ops.sinc(input)
Refer to :func:`mindspore.ops.sinc` for more details.
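A minimal usage sketch (input values chosen arbitrarily for illustration):
Examples:
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([0.0, 0.5, 1.0]), mindspore.float32)
>>> # Elementwise op: the output shape matches the input shape.
>>> output = ops.Sinc()(x)
>>> print(output.shape)
(3,)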
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_sinc(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
sinc_op=Sinc()
class HardtanhGrad(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('dout'),
sig.make_sig('input'),
sig.make_sig('min_val', default=-1),
sig.make_sig('max_val', default=1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, dout, input, min_val=-1, max_val=1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_hardtanh_grad(self, [dout, input, min_val, max_val]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, dout, input, min_val, max_val)
return res
hardtanh_grad_op=HardtanhGrad()
class MaxPoolGradWithMask(Primitive):
r"""
Gradients of the MaxPoolWithMask operation.
"""
@prim_arg_register
def __init__(self, kernel_size, strides=None, pads=0, dilation=(1, 1), ceil_mode=False, argmax_type=mstype.int64):
self._set_prim_arg_with_handler("kernel_size", kernel_size, to_kernel_size)
self._set_prim_arg_with_handler("strides", strides, to_strides)
self._set_prim_arg_with_handler("pads", pads, to_output_padding)
self._set_prim_arg_with_handler("dilation", dilation, to_dilations)
self._set_prim_arg("ceil_mode", ceil_mode)
self._set_prim_arg_with_handler("argmax_type", argmax_type, dtype_to_type_id)
def __call__(self, x, grad, mask):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_max_pool_grad_with_mask(self, [x, grad, mask, self.kernel_size, self.strides, self.pads, self.dilation, self.ceil_mode, self.argmax_type]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x, grad, mask, self.kernel_size, self.strides, self.pads, self.dilation, self.ceil_mode, self.argmax_type)
return res
class LayerNormGrad(Primitive):
r"""
Applies the layer Normalization to the input array.
This operator will calculate the input gradients of layernorm.
Inputs:
x (Tensor): The inputs of layer norm op.
dy (Tensor): The gradient of outputs of layer norm op.
variance (Tensor): The variance of x.
mean (Tensor): The mean of x.
gamma (Tensor): The weights of normalized elements.
begin_norm_axis (int): The begin axis for the input to apply layernorm. Default: 1.
begin_params_axis (int): The begin axis for the parameter input to apply layernorm. Default: 1.
Outputs:
tuple[Tensor], tuple of 3 tensors (the gradients of layernorm input, gamma, beta).
pd_x (Tensor): the gradients of layernorm input x.
pd_gamma (Tensor): the gradients of gamma.
pd_beta (Tensor): the gradients of beta.
"""
@prim_arg_register
def __init__(self, begin_norm_axis=1, begin_params_axis=1):
self._set_prim_arg("begin_norm_axis", begin_norm_axis)
self._set_prim_arg("begin_params_axis", begin_params_axis)
def __call__(self, x, dy, variance, mean, gamma):
return super().__call__(x, dy, variance, mean, gamma, self.begin_norm_axis, self.begin_params_axis)
class Narrow(Primitive):
r"""
.. code-block::
prim = ops.Narrow()
out = prim(input, dim, start, length)
is equivalent to
.. code-block::
ops.narrow(input, dim, start, length)
Refer to :func:`mindspore.ops.narrow` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim, start, length):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_narrow(self, [input, dim, start, length]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim, start, length)
return res
narrow_op=Narrow()
class BiasAddGrad(Primitive):
r"""
Computes gradients of BiasAdd.
"""
@prim_arg_register
def __init__(self, data_format='NCHW'):
self._set_prim_arg_with_handler("data_format", data_format, str_to_enum)
def __call__(self, dout):
return super().__call__(dout, self.data_format)
class GmmBackward(Primitive):
r"""
.. code-block::
prim = ops.GmmBackward()
out = prim(grad, x, weight, group_list)
is equivalent to
.. code-block::
ops.gmm_backward(grad, x, weight, group_list)
Refer to :func:`mindspore.ops.gmm_backward` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('grad'),
sig.make_sig('x'),
sig.make_sig('weight'),
sig.make_sig('group_list', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, grad, x, weight, group_list=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_gmm_backward(self, [grad, x, weight, group_list]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, grad, x, weight, group_list)
return res
gmm_backward_op=GmmBackward()
class Erfc(Primitive):
r"""
.. code-block::
prim = ops.Erfc()
out = prim(input)
is equivalent to
.. code-block::
ops.erfc(input)
Refer to :func:`mindspore.ops.erfc` for more details.
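A minimal usage sketch (input values chosen arbitrarily for illustration):
Examples:
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([-1.0, 0.0, 1.0]), mindspore.float32)
>>> # Elementwise op: erfc(x) = 1 - erf(x); the output shape matches the input shape.
>>> output = ops.Erfc()(x)
>>> print(output.shape)
(3,)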
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_erfc(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
erfc_op=Erfc()
class TopkExt(Primitive):
r"""
.. code-block::
prim = ops.TopkExt()
out = prim(input, k, dim, largest, sorted)
is equivalent to
.. code-block::
ops.topk_ext(input, k, dim, largest, sorted)
Refer to :func:`mindspore.ops.topk_ext` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('k'),
sig.make_sig('dim', default=-1),
sig.make_sig('largest', default=True),
sig.make_sig('sorted', default=True),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, k, dim=-1, largest=True, sorted=True):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_topk_ext(self, [input, k, dim, largest, sorted]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, k, dim, largest, sorted)
return res
topk_ext_op=TopkExt()
class AsinGrad(Primitive):
r"""
Computes AsinGrad of input element-wise.
Returns:
Tensor, has the same type as input.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, dout):
return super().__call__(x, dout)
asin_grad_op=AsinGrad()
class BitwiseXorScalar(Primitive):
r"""
Returns bitwise `xor` of tensor and scalar element-wise.
Inputs:
- **input** (Tensor) - The input tensor must be of integral or Boolean types.
- **other** (number.Number) - The second input, a scalar with the same type as `input`.
Outputs:
Tensor, has the same type as the `input`.
Supported Platforms:
``Ascend``
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_bitwise_xor_scalar(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
bitwise_xor_scalar_op=BitwiseXorScalar()
class DropoutDoMaskExt(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, mask, p):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_dropout_do_mask_ext(self, [input, mask, p]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, mask, p)
return res
dropout_do_mask_ext_op=DropoutDoMaskExt()
class UpsampleTrilinear3D(Primitive):
r"""
Performs upsampling with trilinear interpolation across 3 dimensions for a 5-dimensional input Tensor.
This operator scales up the volumetric input with the specified `output_size` or `scales` factors,
using the trilinear upscaling algorithm.
Note:
One of `scales` and `output_size` must be specified. And it is an error if both are specified.
Args:
align_corners (bool, optional): An optional bool. Default: ``False``.
If ``True``, the input and output tensors are aligned by the center points of their corner pixels,
preserving the values at the corner pixels.
If ``False`` , the input and output tensors are aligned by the corner points of their corner pixels,
and the interpolation uses edge value padding for out-of-boundary values.
Inputs:
- **x** (Tensor) - 5D tensor of shape :math:`(N, C, D_{in}, H_{in}, W_{in})`. Supporting types:
[float16, float32, float64].
- **output_size** (Union[tuple[int], list[int]]): A tuple or list of 3 int elements
:math:`(output\_depth, output\_height, output\_width)`. Default: ``None``.
- **scales** (Union[tuple[float], list[float]]): A tuple or list of 3 float
elements :math:`(scale\_depth, scale\_height, scale\_width)`. Default: ``None``.
Outputs:
- **y** (Tensor) - Upsampled output with the same data type as `x`, whose shape is
:math:`(N, C, D_{out}, H_{out}, W_{out})`.
Raises:
TypeError: When `output_size` is not ``None`` and `output_size` is not list[int] or tuple[int].
TypeError: When `scales` is not ``None`` and `scales` is not list[float] or tuple[float].
TypeError: If dtype of `x` is not in [float16, float32, float64].
TypeError: If type of `align_corners` is not bool.
ValueError: If any value of `output_size` is negative or zero when `output_size` is not ``None``.
ValueError: If any value of `scales` is negative or zero when `scales` is not ``None``.
ValueError: If shape of `x` is not 5D.
ValueError: If none of `scales` and `output_size` is specified or both specified.
ValueError: If size of `scales` is not equal 3 when `scales` is specified.
ValueError: If size of `output_size` is not equal 3 when `output_size` is specified.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> net = ops.UpsampleTrilinear3D()
>>> in_x = Tensor(input_data=np.random.randn(2, 3, 4, 512, 256))
>>> output_size=[4, 64, 48]
>>> out = net(in_x, output_size, None)
>>> print(out.shape)
(2, 3, 4, 64, 48)
>>>
>>> net = ops.UpsampleTrilinear3D()
>>> in_x = Tensor(np.arange(1, 5, dtype=np.float32).reshape((1, 1, 1, 2, 2)))
>>> output_size=[2, 4, 4]
>>> out = net(in_x, output_size, None)
>>> print(out)
[[[[[1. 1.25 1.75 2. ]
[1.5 1.75 2.25 2.5 ]
[2.5 2.75 3.25 3.5 ]
[3. 3.25 3.75 4. ]]
[[1. 1.25 1.75 2. ]
[1.5 1.75 2.25 2.5 ]
[2.5 2.75 3.25 3.5 ]
[3. 3.25 3.75 4. ]]]]]
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('output_size', default=None),
sig.make_sig('scales', default=None),
)
@prim_arg_register
def __init__(self, align_corners=False):
self._set_prim_arg("align_corners", align_corners)
def __call__(self, x, output_size=None, scales=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_upsample_trilinear3d(self, [x, output_size, scales, self.align_corners]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x, output_size, scales, self.align_corners)
return res
class EmbeddingTableEvict(Primitive):
r"""
.. code-block::
prim = ops.EmbeddingTableEvict()
out = prim(var_handle, global_step, steps_to_live)
is equivalent to
.. code-block::
ops.embedding_table_evict(var_handle, global_step, steps_to_live)
Refer to :func:`mindspore.ops.embedding_table_evict` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('var_handle'),
sig.make_sig('global_step'),
sig.make_sig('steps_to_live', default=0),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("_process_node_engine_id", 'PS')
def __call__(self, var_handle, global_step, steps_to_live=0):
return super().__call__(var_handle, global_step, steps_to_live)
embedding_table_evict_op=EmbeddingTableEvict()
class Assign(Primitive):
r"""
.. code-block::
prim = ops.Assign()
out = prim(variable, value)
is equivalent to
.. code-block::
ops.assign(variable, value)
Refer to :func:`mindspore.ops.assign` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('variable', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('value', dtype=sig.sig_dtype.T),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, variable, value):
return super().__call__(variable, value)
assign_op=Assign()
class AvgPoolGrad(Primitive):
r"""
Gradients of the avg pool operation.
"""
@prim_arg_register
def __init__(self, kernel_size=1, strides=1, pad_mode='VALID', data_format='NCHW'):
self._set_prim_arg_with_handler("kernel_size", kernel_size, to_kernel_size)
self._set_prim_arg_with_handler("strides", strides, to_strides)
self._set_prim_arg_with_handler("pad_mode", pad_mode, str_to_enum)
self._set_prim_arg_with_handler("data_format", data_format, str_to_enum)
def __call__(self, x, out, dout):
return super().__call__(x, out, dout, self.kernel_size, self.strides, self.pad_mode, self.data_format)
class SoftShrink(Primitive):
r"""
.. code-block::
prim = ops.SoftShrink(lambd)
out = prim(input)
is equivalent to
.. code-block::
ops.softshrink(input, lambd)
Refer to :func:`mindspore.ops.softshrink` for more details.
"""
@prim_arg_register
def __init__(self, lambd=0.5):
self._set_prim_arg("lambd", type_it('SoftShrink', 'lambd', lambd, (OpDtype.DT_INT, OpDtype.DT_BOOL), OpDtype.DT_FLOAT))
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_softshrink(self, [input, self.lambd]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, self.lambd)
return res
class LerpScalar(Primitive):
r"""
.. code-block::
prim = ops.LerpScalar()
out = prim(input, end, weight)
is equivalent to
.. code-block::
ops.lerp_scalar(input, end, weight)
Refer to :func:`mindspore.ops.lerp_scalar` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, end, weight):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_lerp_scalar(self, [input, end, weight]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, end, weight)
return res
lerp_scalar_op=LerpScalar()
class AvgPool2D(Primitive):
r"""
Applies a 2D average pooling over an input Tensor which can be regarded as a composition of 2D input planes.
Typically the input is of shape :math:`(N, C, H_{in}, W_{in})` , outputs regional average in the
:math:`(H_{in}, W_{in})` -dimension. Given kernel size :math:`(kH, kW)` and `stride` , the operation
is as follows.
.. math::
\text{output}(N_i, C_j, h, w) = \frac{1}{kH * kW} \sum_{m=0}^{kH-1} \sum_{n=0}^{kW-1}
\text{input}(N_i, C_j, stride[0] \times h + m, stride[1] \times w + n)
Inputs:
input (Tensor): Tensor of shape :math:`(N, C, H_{in}, W_{in})` .
kernel_size (Union[int, tuple[int], list[int]]): The size of kernel used to take the average value. Can be
a single number or a tuple :math:`(kH, kW)` .
stride (Union[int, tuple[int], list[int]]): The distance of kernel moving. Can be a single number or
a tuple :math:`(sH, sW)` .
padding (Union(int, tuple[int], list[int])): Implicit zero padding to be added on both sides. Can be a single
number or a tuple :math:`(padH, padW)` . Default: 0.
ceil_mode (bool): If True, apply ceil instead of floor to compute the output shape. Default: ``False``.
count_include_pad (bool): If True, include the zero-padding in the averaging calculation. Default: ``True`` .
divisor_override (int): If specified, it will be used as divisor in the averaging calculation, otherwise
`kernel_size` will be used. Default: ``None``.
Outputs:
Tensor, with shape :math:`(N, C, H_{out}, W_{out})`.
.. math::
H_{out} = \left\lfloor\frac{H_{in} + 2 \times padding[0] - kernel\_size[0]}{stride[0]} + 1\right\rfloor
W_{out} = \left\lfloor\frac{W_{in} + 2 \times padding[1] - kernel\_size[1]}{stride[1]} + 1\right\rfloor
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If `kernel_size` or `stride` is neither int nor tuple.
TypeError: If `ceil_mode` or `count_include_pad` is not a bool.
TypeError: If `divisor_override` is not an int or None.
ValueError: If the dimension of `input` is not equal to `3` or `4`.
ValueError: If `kernel_size` or `stride` is less than 1.
ValueError: If value of `padding` is less than `0`.
ValueError: If `kernel_size`, `padding` or `stride` is a tuple whose length is not equal to `1` or `2`.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.arange(1 * 3 * 3 * 4).reshape(1, 3, 3, 4), mindspore.float32)
>>> output = ops.auto_generate.AvgPool2D()(x, 2, 1)
>>> print(output)
[[[[ 2.5 3.5 4.5]
[ 6.5 7.5 8.5]]
[[14.5 15.5 16.5]
[18.5 19.5 20.5]]
[[26.5 27.5 28.5]
[30.5 31.5 32.5]]]]
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('kernel_size'),
sig.make_sig('stride'),
sig.make_sig('padding', default=0),
sig.make_sig('ceil_mode', default=False),
sig.make_sig('count_include_pad', default=True),
sig.make_sig('divisor_override', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, kernel_size, stride, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_avg_pool2d(self, [input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override)
return res
avg_pool2d_op=AvgPool2D()
class View(Primitive):
r"""
.. code-block::
prim = ops.View()
out = prim(input, shape)
is equivalent to
.. code-block::
ops.view(input, shape)
Refer to :func:`mindspore.ops.view` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, shape):
return super().__call__(input, shape)
view_op=View()
class AddExt(Primitive):
r"""
.. code-block::
prim = ops.AddExt()
out = prim(input, other, alpha)
is equivalent to
.. code-block::
ops.add_ext(input, other, alpha)
Refer to :func:`mindspore.ops.add_ext` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input', dtype=sig.sig_dtype.T),
sig.make_sig('other', dtype=sig.sig_dtype.T),
sig.make_sig('alpha', dtype=sig.sig_dtype.T1, default=1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other, alpha=1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_add_ext(self, [input, other, alpha]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other, alpha)
return res
add_ext_op=AddExt()
class AdamWeightDecay(Primitive):
r"""
Updates gradients by the Adaptive Moment Estimation algorithm with weight decay (AdamWeightDecay).
The Adam algorithm is proposed in `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_.
The AdamWeightDecay variant was proposed in `Decoupled Weight Decay Regularization
<https://arxiv.org/abs/1711.05101>`_.
The updating formulas are as follows,
.. math::
\begin{array}{ll} \\
m = \beta_1 * m + (1 - \beta_1) * g \\
v = \beta_2 * v + (1 - \beta_2) * g * g \\
update = \frac{m}{\sqrt{v} + \epsilon} \\
update =
\begin{cases}
update + weight\_decay * w
& \text{ if } weight\_decay > 0 \\
update
& \text{ otherwise }
\end{cases} \\
w = w - lr * update
\end{array}
:math:`m` represents the 1st moment vector, :math:`v` represents the 2nd moment vector, :math:`g` represents
`gradient`, :math:`\beta_1, \beta_2` represent `beta1` and `beta2`,
:math:`lr` represents `learning_rate`, :math:`w` represents `var`, :math:`decay` represents `weight_decay`,
:math:`\epsilon` represents `epsilon`.
Args:
use_locking (bool): Whether to enable a lock to protect variable tensors from being updated.
If ``True`` , updates of the var, m, and v tensors will be protected by a lock.
If ``False`` , the result is unpredictable. Default: ``False`` .
Inputs:
- **var** (Union[Parameter, Tensor]) - Weights to be updated. The shape is :math:`(N, *)`
where :math:`*` means any number of additional dimensions. The data type can be float16 or float32.
- **m** (Union[Parameter, Tensor]) - The 1st moment vector in the updating formula,
it should have the same shape as `var`. The data type can be float16 or float32.
- **v** (Union[Parameter, Tensor]) - The 2nd moment vector in the updating formula,
it should have the same shape as `m`.
- **lr** (float) - :math:`lr` in the updating formula. The paper suggested value is :math:`10^{-3}`,
the data type should be float32.
- **beta1** (float) - The exponential decay rate for the 1st moment estimations,
the data type should be float32. The paper suggested value is :math:`0.9`
- **beta2** (float) - The exponential decay rate for the 2nd moment estimations,
the data type should be float32. The paper suggested value is :math:`0.999`
- **epsilon** (float) - Term added to the denominator to improve numerical stability,
the data type should be float32.
- **decay** (float) - The weight decay value, must be a scalar tensor with float32 data type.
Default: ``0.0`` .
- **gradient** (Tensor) - Gradient, has the same shape as `var`.
Outputs:
Tuple of 3 Tensor, the updated parameters.
- **var** (Tensor) - The same shape and data type as `var`.
- **m** (Tensor) - The same shape and data type as `m`.
- **v** (Tensor) - The same shape and data type as `v`.
Raises:
TypeError: If `use_locking` is not a bool.
TypeError: If `lr`, `beta1`, `beta2`, `epsilon` or `decay` is not a float32.
TypeError: If `var`, `m` or `v` is neither float16 nor float32.
TypeError: If `gradient` is not a Tensor.
ValueError: If `epsilon` <= 0.
ValueError: If `beta1` or `beta2` is not in range (0.0, 1.0).
ValueError: If `decay` < 0.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, Parameter, ops
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.adam_weight_decay = ops.AdamWeightDecay()
... self.var = Parameter(Tensor(np.ones([2, 2]).astype(np.float32)), name="var")
... self.m = Parameter(Tensor(np.ones([2, 2]).astype(np.float32)), name="m")
... self.v = Parameter(Tensor(np.ones([2, 2]).astype(np.float32)), name="v")
... def construct(self, lr, beta1, beta2, epsilon, decay, grad):
... out = self.adam_weight_decay(self.var, self.m, self.v, lr, beta1, beta2,
... epsilon, decay, grad)
... return out
>>> net = Net()
>>> gradient = Tensor(np.ones([2, 2]).astype(np.float32))
>>> output = net(0.001, 0.9, 0.999, 1e-8, 0.0, gradient)
>>> print(net.var.asnumpy())
[[0.999 0.999]
[0.999 0.999]]
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T1),
sig.make_sig('v', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T1),
sig.make_sig('lr', dtype=sig.sig_dtype.T2),
sig.make_sig('beta1', dtype=sig.sig_dtype.T2),
sig.make_sig('beta2', dtype=sig.sig_dtype.T2),
sig.make_sig('epsilon', dtype=sig.sig_dtype.T2),
sig.make_sig('decay', dtype=sig.sig_dtype.T2),
sig.make_sig('gradient', dtype=sig.sig_dtype.T),
)
@prim_arg_register
def __init__(self, use_locking=False):
self._set_prim_arg("use_locking", use_locking)
self.add_prim_attr("side_effect_mem", True)
def __call__(self, var, m, v, lr, beta1, beta2, epsilon, decay, gradient):
return super().__call__(var, m, v, lr, beta1, beta2, epsilon, decay, gradient, self.use_locking)
class Trunc(Primitive):
r"""
.. code-block::
prim = ops.Trunc()
out = prim(input)
is equivalent to
.. code-block::
ops.trunc(input)
Refer to :func:`mindspore.ops.trunc` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_trunc(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
trunc_op=Trunc()
class RemainderScalarTensor(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_remainder_scalar_tensor(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
remainder_scalar_tensor_op=RemainderScalarTensor()
class Std(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('dim', default=None),
sig.make_sig('correction', default=1),
sig.make_sig('keepdim', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim=None, correction=1, keepdim=False):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_std(self, [input, dim, correction, keepdim]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim, correction, keepdim)
return res
std_op=Std()
class RsqrtGrad(Primitive):
r"""
Computes gradients for the Rsqrt.
Args:
y_backprop (Tensor): Input gradients tensor, has the same dtype and shape as `x`.
x (Tensor): Origin input tensor.
Returns:
Tensor, has the same dtype and shape as `x`.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, y_backprop, x):
return super().__call__(y_backprop, x)
rsqrt_grad_op=RsqrtGrad()
class InplaceAddmm(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('mat1'),
sig.make_sig('mat2'),
sig.make_sig('beta', default=1),
sig.make_sig('alpha', default=1),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, mat1, mat2, beta=1, alpha=1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_addmm(self, [input, mat1, mat2, beta, alpha]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, mat1, mat2, beta, alpha)
return res
inplace_addmm_op=InplaceAddmm()
class DropoutGradExt(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, mask, p):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_dropout_grad_ext(self, [input, mask, p]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, mask, p)
return res
dropout_grad_ext_op=DropoutGradExt()
class Baddbmm(Primitive):
r"""
.. code-block::
prim = ops.Baddbmm()
out = prim(input, batch1, batch2, beta, alpha)
is equivalent to
.. code-block::
ops.baddbmm(input, batch1, batch2, beta, alpha)
Refer to :func:`mindspore.ops.baddbmm` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('batch1'),
sig.make_sig('batch2'),
sig.make_sig('beta', default=1),
sig.make_sig('alpha', default=1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, batch1, batch2, beta=1, alpha=1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_baddbmm(self, [input, batch1, batch2, beta, alpha]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, batch1, batch2, beta, alpha)
return res
baddbmm_op=Baddbmm()
class NLLLoss2dGrad(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, loss_grad, input, target, weight, reduction, ignore_index, total_weight):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_nllloss_2d_grad(self, [loss_grad, input, target, weight, str_to_enum('NLLLoss2dGrad', 'reduction', reduction), ignore_index, total_weight]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, loss_grad, input, target, weight, str_to_enum('NLLLoss2dGrad', 'reduction', reduction), ignore_index, total_weight)
return res
nllloss_2d_grad_op=NLLLoss2dGrad()
class MishExt(Primitive):
r"""
.. code-block::
prim = ops.MishExt()
out = prim(input)
is equivalent to
.. code-block::
ops.mish_ext(input)
Refer to :func:`mindspore.ops.mish_ext` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_mish_ext(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
mish_ext_op=MishExt()
class BinaryCrossEntropy(Primitive):
r"""
Computes the binary cross entropy between the logits and the labels.
Sets logits as :math:`x`, labels as :math:`y`, output as :math:`\ell(x, y)`.
Let,
.. math::
L = \{l_1,\dots,l_N\}^\top, \quad
l_n = - w_n \left[ y_n \cdot \log x_n + (1 - y_n) \cdot \log (1 - x_n) \right]
In which, :math:`L` indicates the loss of all batch_sizes, :math:`l` indicates the loss of one batch_size,
and n indicates one batch_size in the 1-N range, :math:`w_n` indicates the
weight of :math:`n`-th batch of binary cross entropy. Then,
.. math::
\ell(x, y) = \begin{cases}
L, & \text{if reduction} = \text{'none';}\\
\operatorname{mean}(L), & \text{if reduction} = \text{'mean';}\\
\operatorname{sum}(L), & \text{if reduction} = \text{'sum'.}
\end{cases}
.. warning::
- The value of :math:`x` must range from 0 to 1.
Args:
reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
``'sum'`` . Default: ``'mean'`` .
- ``'none'``: no reduction will be applied.
- ``'mean'``: compute and return the weighted mean of elements in the output.
- ``'sum'``: the output elements will be summed.
Inputs:
- **logits** (Tensor) - The predictive value whose data type must be float16 or float32,
The shape is :math:`(N, *)` where :math:`*` means any number of additional dimensions.
- **labels** (Tensor) - The target value, which has the same shape and data type as `logits`.
- **weight** (Tensor, optional) - A rescaling weight applied to the loss of each batch element.
And it must have the same shape and data type as `logits`. Default: ``None`` .
Outputs:
Tensor or Scalar. Returns Tensor that has the same dtype and shape as `logits` if `reduction` is 'none'.
Otherwise, returns a scalar Tensor.
Raises:
TypeError: If dtype of `logits`, `labels` or `weight` (if given) is neither float16 nor float32.
ValueError: If `reduction` is not one of ``'none'``, ``'mean'`` or ``'sum'``.
ValueError: If shape of `labels` is not the same as `logits` or `weight` (if given).
TypeError: If `logits`, `labels` or `weight` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, nn, ops
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.binary_cross_entropy = ops.BinaryCrossEntropy()
... def construct(self, logits, labels, weight):
... result = self.binary_cross_entropy(logits, labels, weight)
... return result
...
>>> net = Net()
>>> logits = Tensor(np.array([0.2, 0.7, 0.1]), mindspore.float32)
>>> labels = Tensor(np.array([0., 1., 0.]), mindspore.float32)
>>> weight = Tensor(np.array([1, 2, 2]), mindspore.float32)
>>> output = net(logits, labels, weight)
>>> print(output)
0.38240486
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('target'),
sig.make_sig('weight', default=None),
)
@prim_arg_register
def __init__(self, reduction='mean'):
self._set_prim_arg_with_handler("reduction", reduction, str_to_enum)
def __call__(self, input, target, weight=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_binary_cross_entropy(self, [input, target, weight, self.reduction]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, target, weight, self.reduction)
return res
class CumSum(Primitive):
r"""
Computes the cumulative sum of input tensor along axis.
.. math::
y_i = x_1 + x_2 + x_3 + ... + x_i
Args:
exclusive (bool): By default, this op performs an inclusive cumsum, which means that the first
element of the input is identical to the first element of the output. Default: ``False`` .
reverse (bool): If ``True`` , perform inverse cumulative sum. Default: ``False`` .
Inputs:
- **input** (Tensor) - The input Tensor with shape
:math:`(N, *)` where :math:`*` means any number of additional dimensions.
- **axis** (int) - The axis to accumulate the tensor's value. Only constant value is allowed.
Must be in the range [-rank(input), rank(input)).
Outputs:
Tensor, the shape of the output tensor is consistent with the input tensor's.
Raises:
TypeError: If `exclusive` or `reverse` is not a bool.
TypeError: If `axis` is not an int.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float32))
>>> cumsum = ops.CumSum()
>>> # case 1: along the axis 0
>>> y = cumsum(x, 0)
>>> print(y)
[[ 3. 4. 6. 10.]
[ 4. 10. 13. 19.]
[ 8. 13. 21. 26.]
[ 9. 16. 28. 35.]]
>>> # case 2: along the axis 1
>>> y = cumsum(x, 1)
>>> print(y)
[[ 3. 7. 13. 23.]
[ 1. 7. 14. 23.]
[ 4. 7. 15. 22.]
[ 1. 4. 11. 20.]]
>>> # Next demonstrate exclusive and reverse, along axis 1
>>> # case 3: exclusive = True
>>> cumsum = ops.CumSum(exclusive=True)
>>> y = cumsum(x, 1)
>>> print(y)
[[ 0. 3. 7. 13.]
[ 0. 1. 7. 14.]
[ 0. 4. 7. 15.]
[ 0. 1. 4. 11.]]
>>> # case 4: reverse = True
>>> cumsum = ops.CumSum(reverse=True)
>>> y = cumsum(x, 1)
>>> print(y)
[[23. 20. 16. 10.]
[23. 22. 16. 9.]
[22. 18. 15. 7.]
[20. 19. 16. 9.]]
"""
@prim_arg_register
def __init__(self, exclusive=False, reverse=False):
self._set_prim_arg("exclusive", exclusive)
self._set_prim_arg("reverse", reverse)
def __call__(self, input, axis):
return super().__call__(input, axis, self.exclusive, self.reverse)
class ReduceAny(Primitive):
r"""
Reduces a dimension of a tensor by the "logical OR" of all elements in the dimension, by default. And also can
reduce a dimension of `x` along the `axis`. Determine whether the dimensions of the output and input are the
same by controlling `keep_dims`.
Note:
The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
Args:
keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
If ``False`` , don't keep these dimensions. Default: ``False`` .
Inputs:
- **x** (Tensor[bool]) - The input tensor. The dtype of the tensor to be reduced is bool.
- **axis** (Union[int, tuple(int), list(int), Tensor]) - The dimensions to reduce. Default: ``()`` ,
reduce all dimensions. Only constant value is allowed. Must be in the range [-rank(x), rank(x)).
Outputs:
Tensor, the dtype is bool.
- If `axis` is ``()`` , and `keep_dims` is ``False`` ,
the output is a 0-D tensor representing the "logical or" of all elements in the input tensor.
- If `axis` is int, set as 2, and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_1, x_3, ..., x_R)`.
- If `axis` is tuple(int), set as (2, 3), and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_1, x_4, ..., x_R)`.
- If `axis` is 1-D Tensor, set as [2, 3], and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_1, x_4, ..., x_R)`.
Raises:
TypeError: If `keep_dims` is not a bool.
TypeError: If `x` is not a Tensor.
TypeError: If `axis` is not one of the following: int, tuple, list or Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([[True, False], [True, True]]))
>>> op = ops.ReduceAny(keep_dims=True)
>>> # case 1: Reduces a dimension by the "logical OR" of all elements in the dimension.
>>> output = op(x)
>>> print(output)
[[ True]]
>>> print(output.shape)
(1, 1)
>>> # case 2: Reduces a dimension along axis 0.
>>> output = op(x, 0)
>>> print(output)
[[ True True]]
>>> # case 3: Reduces a dimension along axis 1.
>>> output = op(x, 1)
>>> print(output)
[[True]
[ True]]
>>> # case 4: input is a scalar.
>>> x = Tensor(True)
>>> op = ops.ReduceAny()
>>> output = op(x)
>>> print(output)
True
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('axis', default=()),
)
@prim_arg_register
def __init__(self, keep_dims=False):
self._set_prim_arg("keep_dims", keep_dims)
def __call__(self, x, axis=()):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_reduce_any(self, [x, axis, self.keep_dims]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x, axis, self.keep_dims)
return res
class ReverseV2(Primitive):
r"""
.. code-block::
prim = ops.ReverseV2(axis)
out = prim(input)
is equivalent to
.. code-block::
ops.flip(input, axis)
Refer to :func:`mindspore.ops.flip` for more details.
"""
@prim_arg_register
def __init__(self, axis):
self._set_prim_arg("axis", type_it('ReverseV2', 'axis', axis, OpDtype.DT_LIST_INT, OpDtype.DT_TUPLE_INT))
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_reverse_v2(self, [input, self.axis]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, self.axis)
return res
class Sign(Primitive):
r"""
.. code-block::
prim = ops.Sign()
out = prim(input)
is equivalent to
.. code-block::
ops.sign(input)
Refer to :func:`mindspore.ops.sign` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_sign(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
sign_op=Sign()
class AcosExt(Primitive):
r"""
.. code-block::
prim = ops.AcosExt()
out = prim(input)
is equivalent to
.. code-block::
ops.acos_ext(input)
Refer to :func:`mindspore.ops.acos_ext` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_acos_ext(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
acos_ext_op=AcosExt()
class InplaceMuls(Primitive):
r"""
.. code-block::
prim = ops.InplaceMuls()
out = prim(input, other)
is equivalent to
.. code-block::
ops.inplace_muls(input, other)
Refer to :func:`mindspore.ops.inplace_muls` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('other'),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_muls(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
inplace_muls_op=InplaceMuls()
class DistCommReduce(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('op_type'),
sig.make_sig('dst'),
sig.make_sig('group'),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, op_type, dst, group):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_dist_comm_reduce(self, [input, op_type, dst, group]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, op_type, dst, group)
return res
dist_comm_reduce_op=DistCommReduce()
class InsertGemV2InBackward(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('ele_pos'),
sig.make_sig('cur_step'),
sig.make_sig('seed'),
sig.make_sig('offset'),
sig.make_sig('start', default=0),
sig.make_sig('steps', default=1),
sig.make_sig('error_mode', default='cycle'),
sig.make_sig('flip_mode', default='bitflip'),
sig.make_sig('multiply_factor', default=0.0),
sig.make_sig('bit_pos', default=0),
sig.make_sig('flip_probability', default=0.0),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, ele_pos, cur_step, seed, offset, start=0, steps=1, error_mode='cycle', flip_mode='bitflip', multiply_factor=0.0, bit_pos=0, flip_probability=0.0):
return super().__call__(input, ele_pos, cur_step, seed, offset, start, steps, str_to_enum('InsertGemV2InBackward', 'error_mode', error_mode), str_to_enum('InsertGemV2InBackward', 'flip_mode', flip_mode), multiply_factor, bit_pos, flip_probability)
insert_gem_v2_in_backward_op=InsertGemV2InBackward()
class NormalTensorTensor(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, mean, std, seed, offset):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_normal_tensor_tensor(self, [mean, std, seed, offset]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, mean, std, seed, offset)
return res
normal_tensor_tensor_op=NormalTensorTensor()
class UpsampleNearest2DGrad(Primitive):
r"""
Upsample the 2-D gradient data with the nearest neighbor interpolation algorithm.
Note:
Only one of 'scales' and 'output_size' can be specified, and it is an error if both are specified.
Inputs:
- **dy** (Tensor) - Tensor of shape [N, C, H, W]. Must be one of the following types:
float16, float32, float64.
- **input_size** (tuple[int]): A required tuple[int], which contains 4 elements:
[min_batch, channels, height, width].
Must: input_size[0] == dy.shape[0], input_size[1] == dy.shape[1].
- **output_size** (tuple[int]): An optional tuple[int]. Default: ``None``.
It contains 2 elements: height, width, which should match the spatial dimensions of `dy`. Must:
dy.shape[2] == output_size[0],
dy.shape[3] == output_size[1].
- **scales** (tuple[float]): An optional tuple[float]. Default: ``None``.
The scale array along each dimension, contain 2 elements: scale_height, scale_width. Must:
dy.shape[2] == floor(input_size[2] * scales[0]),
dy.shape[3] == floor(input_size[3] * scales[1]).
Outputs:
- **dx** (Tensor) - A 4-D tensor. Has the same type as `dy`, shape depends on `input_size`.
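Examples:
A minimal shape-only sketch, not taken from the generated docs; the sizes are illustrative
and it assumes a backend (e.g. Ascend) where this internal grad primitive is available:
>>> import numpy as np
>>> from mindspore import Tensor
>>> dy = Tensor(np.ones((1, 1, 4, 6)).astype(np.float32))
>>> # input_size describes the forward input; output_size must match dy's spatial dims.
>>> dx = upsample_nearest2d_grad_op(dy, (1, 1, 2, 3), (4, 6), None)
>>> # dx is expected to have the same dtype as dy and shape (1, 1, 2, 3), i.e. input_size.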
"""
__mindspore_signature__ = (
sig.make_sig('dy'),
sig.make_sig('input_size'),
sig.make_sig('output_size', default=None),
sig.make_sig('scales', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, dy, input_size, output_size=None, scales=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_upsample_nearest2d_grad(self, [dy, input_size, output_size, scales]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, dy, input_size, output_size, scales)
return res
upsample_nearest2d_grad_op=UpsampleNearest2DGrad()
class Gather(Primitive):
r"""
.. code-block::
prim = ops.Gather(batch_dims)
out = prim(input_params, input_indices, axis)
is equivalent to
.. code-block::
ops.gather(input_params, input_indices, axis, batch_dims)
Refer to :func:`mindspore.ops.gather` for more details.
"""
@prim_arg_register
def __init__(self, batch_dims=0):
self._set_prim_arg("batch_dims", batch_dims)
def __call__(self, input_params, input_indices, axis):
return super().__call__(input_params, input_indices, axis, self.batch_dims)
class BatchNormElemt(Primitive):
r"""
.. code-block::
prim = ops.BatchNormElemt()
out = prim(input, weight, bias, mean, invstd, eps)
is equivalent to
.. code-block::
ops.batch_norm_elemt(input, weight, bias, mean, invstd, eps)
Refer to :func:`mindspore.ops.batch_norm_elemt` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('weight', sig.sig_rw.RW_WRITE, default=None),
sig.make_sig('bias', sig.sig_rw.RW_WRITE, default=None),
sig.make_sig('mean', default=None),
sig.make_sig('invstd', default=None),
sig.make_sig('eps', default=1e-5),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, weight=None, bias=None, mean=None, invstd=None, eps=1e-5):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_batch_norm_elemt(self, [input, weight, bias, mean, invstd, eps]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, weight, bias, mean, invstd, eps)
return res
batch_norm_elemt_op=BatchNormElemt()
class ApplyCamePart1(Primitive):
r"""
Computes Part 1 of the CAME Optimizer.
Args:
- **grad** (Tensor) - A Tensor of shape :math:`(..., n, m)` with at least 2 dimensions.
Supported types: float16, float32, bfloat16.
- **eps** (float) - The data type must be float.
Returns:
- **sum_grad_r** (Tensor) - A Tensor of shape :math:`(..., n)`
- **sum_grad_c** (Tensor) - A Tensor of shape :math:`(..., m)`
- **sum_grad_rc** (Tensor) - A Tensor of shape :math:`(..., m)`
Raises:
TypeError: If `grad` is not a Tensor.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore as ms
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore.ops.operations import _inner_ops as P
>>> grad = Tensor(np.ones([1024, 64]), dtype=ms.float32)
>>> apply_came_part1 = P.ApplyCamePart1()
>>> output = apply_came_part1(grad, 1.1)
>>> print(output[0].shape)
(1024,)
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, grad, eps):
return super().__call__(grad, eps)
apply_came_part1_op=ApplyCamePart1()
class TensorCopySlices(Primitive):
r"""
Copy continues memory.
Inputs:
- **x** (Tensor) - The target Tensor.
- **value** (Tensor) - The tensor to update x.
- **begin** (tuple[int]) - A tuple which represents the location where to start. Only
constant value is allowed.
- **end** (tuple[int]) - A tuple which represents the maximum location where to end.
Only constant value is allowed.
- **strides** (tuple[int]) - A tuple which represents the stride is continuously added
before reaching the maximum location. Only constant value is allowed.
Outputs:
- **y** (Tensor) - Has the same shape and data type as `x`.
Examples:
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore.ops.operations import _inner_ops
>>> copy_slices = _inner_ops.TensorCopySlices()
>>> out = copy_slices(Tensor(np.zeros((5, 5))), Tensor(np.ones((2, 5))), (3, 0), (5, 5), (1, 1))
>>> print(out)
[[1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]]
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, value, begin, end, strides):
return super().__call__(x, value, begin, end, strides)
tensor_copy_slices_op=TensorCopySlices()
class NLLLoss2d(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('target'),
sig.make_sig('weight'),
sig.make_sig('reduction', default='mean'),
sig.make_sig('ignore_index', default=-100),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, target, weight, reduction='mean', ignore_index=-100):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_nllloss_2d(self, [input, target, weight, str_to_enum('NLLLoss2d', 'reduction', reduction), ignore_index]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, target, weight, str_to_enum('NLLLoss2d', 'reduction', reduction), ignore_index)
return res
nllloss_2d_op=NLLLoss2d()
class ClampScalar(Primitive):
r"""
.. code-block::
prim = ops.ClampScalar()
out = prim(input, min, max)
is equivalent to
.. code-block::
ops.clamp_scalar(input, min, max)
Refer to :func:`mindspore.ops.clamp_scalar` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('min', default=None),
sig.make_sig('max', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, min=None, max=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_clamp_scalar(self, [input, min, max]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, min, max)
return res
clamp_scalar_op=ClampScalar()
class Divs(Primitive):
r"""
.. code-block::
prim = ops.Divs()
out = prim(input, other)
is equivalent to
.. code-block::
ops.divs(input, other)
Refer to :func:`mindspore.ops.divs` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_divs(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
divs_op=Divs()
class IRFFT2(Primitive):
r"""
.. code-block::
prim = ops.IRFFT2()
out = prim(input, s, dim, norm)
is equivalent to
.. code-block::
ops.irfft2(input, s, dim, norm)
Refer to :func:`mindspore.ops.irfft2` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('s', default=None),
sig.make_sig('dim', default=(-2, -1)),
sig.make_sig('norm', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, s=None, dim=(-2, -1), norm=None):
return super().__call__(input, s, dim, norm if norm is None else str_to_enum('IRFFT2', 'norm', norm))
irfft2_op=IRFFT2()
class HSigmoid(Primitive):
r"""
.. code-block::
prim = ops.HSigmoid()
out = prim(input)
is equivalent to
.. code-block::
ops.hardsigmoid(input)
Refer to :func:`mindspore.ops.hardsigmoid` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_hsigmoid(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
hsigmoid_op=HSigmoid()
class LogitGrad(Primitive):
r"""
Computes the gradient of the Logit operation element-wise.
Returns:
Tensor, has the same type as input.
"""
@prim_arg_register
def __init__(self, eps=-1.0):
self._set_prim_arg("eps", eps)
def __call__(self, grad, input):
return super().__call__(grad, input, self.eps)
class BitwiseNot(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_bitwise_not(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
bitwise_not_op=BitwiseNot()
class ThresholdGrad(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, grad_output, input, threshold):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_threshold_grad(self, [grad_output, input, threshold]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, grad_output, input, threshold)
return res
threshold_grad_op=ThresholdGrad()
class SplitTensor(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('split_size'),
sig.make_sig('dim', default=0),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, split_size, dim=0):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_split_tensor(self, [input, split_size, dim]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, split_size, dim)
return res
split_tensor_op=SplitTensor()
class StackExt(Primitive):
r"""
.. code-block::
prim = ops.StackExt(dim)
out = prim(tensors)
is equivalent to
.. code-block::
ops.stack_ext(tensors, dim)
Refer to :func:`mindspore.ops.stack_ext` for more details.
"""
@prim_arg_register
def __init__(self, dim=0):
self._set_prim_arg("dim", dim)
def __call__(self, tensors):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_stack_ext(self, [tensors, self.dim]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, tensors, self.dim)
return res
class Randn(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('shape'),
sig.make_sig('seed'),
sig.make_sig('offset'),
sig.make_sig('dtype', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, shape, seed, offset, dtype=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_randn(self, [shape, seed, offset, dtype if dtype is None else dtype_to_type_id('Randn', 'dtype', dtype)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, shape, seed, offset, dtype if dtype is None else dtype_to_type_id('Randn', 'dtype', dtype))
return res
randn_op=Randn()
class RepeatInterleaveInt(Primitive):
r"""
Repeat elements of a tensor along an axis, like :func:`mindspore.numpy.repeat`.
Args:
input (Tensor): The tensor to repeat values for. Must be of type: float16,
float32, int8, uint8, int16, int32, or int64.
repeats (int): The number of times to repeat, must be positive.
dim (int, optional): The dim along which to repeat. Default: ``None``. If `dim` is None,
the input Tensor will be flattened and the output will also be flattened.
output_size (int, optional): Total output size for the given axis (e.g. sum of repeats).
Default: ``None``.
Returns:
One tensor with values repeated along the specified dim. If input has shape
:math:`(s1, s2, ..., sn)` and dim is i, the output will have shape :math:`(s1, s2, ...,
si * repeats, ..., sn)`. The output type will be the same as the type of `input`.
Supported Platforms:
``Ascend``
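Examples:
A minimal usage sketch, not taken from the generated docs; the expected result in the
comments follows the shape formula above and assumes an Ascend backend:
>>> import numpy as np
>>> from mindspore import Tensor
>>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6]]).astype(np.int32))
>>> out = repeat_interleave_int_op(x, 2, 1, None)
>>> # Each element along dim 1 is repeated twice, so out is expected to be
>>> # [[1, 1, 2, 2, 3, 3], [4, 4, 5, 5, 6, 6]] with shape (2, 6).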
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('repeats'),
sig.make_sig('dim', default=None),
sig.make_sig('output_size', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, repeats, dim=None, output_size=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_repeat_interleave_int(self, [input, repeats, dim, output_size]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, repeats, dim, output_size)
return res
repeat_interleave_int_op=RepeatInterleaveInt()
class ReplicationPad3DGrad(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, grad_output, input, padding):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_replication_pad_3d_grad(self, [grad_output, input, padding]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, grad_output, input, padding)
return res
replication_pad_3d_grad_op=ReplicationPad3DGrad()
class Qr(Primitive):
r"""
Returns the QR decomposition of one or more matrices.
If `full_matrices` is ``False`` (the default), compute the P columns of Q where P is the minimum of the 2 innermost
dimensions of the input. If `full_matrices` is ``True``, compute full-sized Q and R.
Args:
full_matrices (bool, optional): Whether compute full-sized QR decomposition. Default: ``False`` .
Inputs:
- **x** (Tensor) - A matrix to be calculated. The matrix must have at least two dimensions; the supported dtypes are
float16, float32, float64, complex64 and complex128.
Define the shape of input as :math:`(..., m, n)`, p as the
minimum values of m and n.
Outputs:
- **Q** (Tensor) - The orthonormal matrices of input. If `full_matrices` is ``True``, the shape is :math:`(m, m)`,
else the shape is :math:`(m, p)`. The dtype of `Q` is the same as `input`.
- **R** (Tensor) - The upper triangular matrices of input. If `full_matrices` is ``True``, the shape is :math:`(m, n)`,
else the shape is :math:`(p, n)`. The dtype of `R` is the same as `input`.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If `full_matrices` is not a bool.
ValueError: If the dimension of `input` is less than 2.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore as ms
>>> from mindspore import Tensor, ops
>>> import numpy as np
>>> x = Tensor(np.array([[20., -31, 7], [4, 270, -90], [-8, 17, -32]]), ms.float32)
>>> Q, R = ops.Qr()(x)
>>> print(Q)
[[-0.912871 0.16366126 0.37400758]
[-0.18257418 -0.9830709 -0.01544376]
[ 0.36514837 -0.08238228 0.92729706]]
>>> print(R)
[[ -21.908903 -14.788506 -1.6431675]
[ 0. -271.9031 92.25824 ]
[ 0. 0. -25.665514 ]]
"""
@prim_arg_register
def __init__(self, full_matrices=False):
self._set_prim_arg("full_matrices", full_matrices)
def __call__(self, x):
return super().__call__(x, self.full_matrices)
class PagedAttention(Primitive):
r"""
The PagedAttention is the fusion of block-wise KV Cache access and self-attention computing.
Args:
query (Tensor): The query tensor with data type of float16.
:math:`(num\_tokens, num\_head, head\_dim)`.
key_cache (Tensor): The cache tensor with data type of float16.
:math:`(num\_blocks, block\_size, num\_head, head\_dim)`.
value_cache (Tensor): The cache tensor with data type of float16.
:math:`(num\_blocks, block\_size, num\_head, head\_dim)`.
block_tables (Tensor): The block mapping table with data type of int32.
:math:`(num\_tokens, max\_num\_blocks\_per\_batch)`.
context_lens (Tensor): The context length of each sequence with data type of int32.
:math:`(num\_tokens,)`.
antiquant_scale (Tensor): The antiquant scale of key_cache and value_cache
with data type of float16 or int64. key_cache and value_cache will be of type int8.
:math:`(2, num\_head * head\_dim,)` for kv_cache_quant_mode `DEFAULT`;
:math:`(2, num\_tokens,)` for kv_cache_quant_mode `PERTOKEN`.
antiquant_offset (Tensor): The antiquant offset of key_cache and value_cache
with data type of float16 or int32. key_cache and value_cache will be of type int8.
:math:`(2, num\_head * head\_dim,)` for kv_cache_quant_mode `DEFAULT`;
:math:`(2, num\_tokens,)` for kv_cache_quant_mode `PERTOKEN`.
attn_mask (Tensor): The lookahead mask with data type of float16. Default is None.
:math:`(num\_tokens, max\_context\_lens)`.
q_seq_lens (Tensor): The query length of each sequence with data type of int32. Default is None.
:math:`(batch,)`.
kv_cache_quant_mode (String): The mode of kv cache quant, `DEFAULT` and `PERTOKEN` are supported.
mask_mode (String): The mode of mask, `MASK_DEFAULT` and `TRAPEZOIDAL` are supported. No need to pass
a real mask tensor if `TRAPEZOIDAL` is set.
Outputs:
attention output.
Note:
No backend implementation in MindSpore; only used to export MindIR and run in MindSpore Lite.
Examples:
>>> import math
>>> import numpy as np
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops.operations import _inner_ops
>>> num_tokens = 4
>>> num_head = 40
>>> num_kv_head = 40
>>> head_dim = 128
>>> block_size = 16
>>> num_blocks = 128
>>> max_seq = 1024
>>> max_num_blocks_per_batch = max_seq // block_size
>>> scale_value = 1.0 / math.sqrt(head_dim)
>>> query = Tensor(np.random.randn(num_tokens, num_head, head_dim).astype(np.float16))
>>> key_cache = Parameter(default_input=Tensor(np.random.randn(num_blocks, block_size, num_head, head_dim).astype(np.float16)))
>>> value_cache = Parameter(default_input=Tensor(np.random.randn(num_blocks, block_size, num_head, head_dim).astype(np.float16)))
>>> dummy_block_indice = np.random.permutation(np.arange(num_tokens * max_num_blocks_per_batch, dtype=np.int32))
>>> block_tables = Tensor(np.reshape(dummy_block_indice, (num_tokens, max_num_blocks_per_batch)))
>>> context_lens = Tensor(np.random.randint(max_seq, size=num_tokens).astype(np.int32))
>>> paged_attention = _inner_ops.PagedAttention(num_head, scale_value, num_kv_head)
>>> output = paged_attention(query, key_cache, value_cache, block_tables, context_lens)
>>> print(output)
"""
__mindspore_signature__ = (
sig.make_sig('query'),
sig.make_sig('key_cache'),
sig.make_sig('value_cache'),
sig.make_sig('block_tables'),
sig.make_sig('context_lens'),
sig.make_sig('antiquant_scale', default=None),
sig.make_sig('antiquant_offset', default=None),
sig.make_sig('attn_mask', default=None),
sig.make_sig('q_seq_lens', default=None),
)
@prim_arg_register
def __init__(self, head_num, scale_value, kv_head_num, kv_cache_quant_mode='DEFAULT', mask_mode='MASK_DEFAULT'):
self._set_prim_arg("head_num", head_num)
self._set_prim_arg("scale_value", scale_value)
self._set_prim_arg("kv_head_num", kv_head_num)
self._set_prim_arg_with_handler("kv_cache_quant_mode", kv_cache_quant_mode, str_to_enum)
self._set_prim_arg_with_handler("mask_mode", mask_mode, str_to_enum)
def __call__(self, query, key_cache, value_cache, block_tables, context_lens, antiquant_scale=None, antiquant_offset=None, attn_mask=None, q_seq_lens=None):
return super().__call__(query, key_cache, value_cache, block_tables, context_lens, antiquant_scale, antiquant_offset, attn_mask, q_seq_lens, self.head_num, self.scale_value, self.kv_head_num, self.kv_cache_quant_mode, self.mask_mode)
class TriangularSolve(Primitive):
r"""
.. code-block::
prim = ops.TriangularSolve()
out = prim(b, A, upper, transpose, unitriangular)
is equivalent to
.. code-block::
ops.triangular_solve(b, A, upper, transpose, unitriangular)
Refer to :func:`mindspore.ops.triangular_solve` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('b'),
sig.make_sig('A'),
sig.make_sig('upper', default=True),
sig.make_sig('transpose', default=False),
sig.make_sig('unitriangular', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, b, A, upper=True, transpose=False, unitriangular=False):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_triangular_solve(self, [b, A, upper, transpose, unitriangular]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, b, A, upper, transpose, unitriangular)
return res
triangular_solve_op=TriangularSolve()
class ReduceSum(Primitive):
r"""
Reduces a dimension of a tensor by summing all elements in the dimension, by default. And also can reduce a
dimension of `x` along the `axis`. Determine whether the dimensions of the output and input are the same by
controlling `keep_dims`.
Note:
The `axis` with tensor type is only used for compatibility with older versions and is not recommended.
Args:
keep_dims (bool): If ``True`` , keep these reduced dimensions and the length is 1.
If ``False`` , don't keep these dimensions. Default: ``False`` .
skip_mode (bool): If ``True`` and `axis` is empty tuple or empty list,
the ReduceSum operation isn't performed, skip it.
If ``True`` and `axis` is other values, the ReduceSum calculation is performed normally.
If ``False`` , do reduce. Default: ``False`` .
Inputs:
- **x** (Tensor[Number]) - The input tensor.
- **axis** (Union[int, tuple(int), list(int), Tensor]) - The dimensions to reduce. Default: ``()`` ,
reduce all dimensions when `skip_mode` is ``False`` . Only constant value is allowed. Must be in the range
[-rank(`x`), rank(`x`)).
Outputs:
Tensor, has the same dtype as the `x`.
- If `axis` is ``()`` , `keep_dims` is ``False`` , and `skip_mode` is ``False`` ,
the output is a 0-D tensor representing the sum of all elements in the input tensor.
- If `axis` is ``()`` , and `skip_mode` is ``True`` ,
the ReduceSum operation is not performed, output tensor is equal to the input tensor.
- If `axis` is int, set as 2, and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_1, x_3, ..., x_R)`.
- If `axis` is tuple(int) or list(int), set as (2, 3), and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_1, x_4, ..., x_R)`.
- If `axis` is 1-D Tensor, set as [2, 3], and `keep_dims` is ``False`` ,
the shape of output is :math:`(x_1, x_4, ..., x_R)`.
Raises:
TypeError: If `keep_dims` is not a bool.
TypeError: If `skip_mode` is not a bool.
TypeError: If `x` is not a Tensor.
ValueError: If `axis` is None.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
>>> op = ops.ReduceSum(keep_dims=True)
>>> output = op(x, 1)
>>> output.shape
(3, 1, 5, 6)
>>> # case 1: Reduces a dimension by summing all elements in the dimension.
>>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
>>> output = op(x)
>>> print(output)
[[[270.]]]
>>> print(output.shape)
(1, 1, 1)
>>> # case 2: Reduces a dimension along axis 0.
>>> output = op(x, 0)
>>> print(output)
[[[12. 12. 12. 12. 12. 12.]
[15. 15. 15. 15. 15. 15.]
[18. 18. 18. 18. 18. 18.]]]
>>> # case 3: Reduces a dimension along axis 1.
>>> output = op(x, 1)
>>> print(output)
[[[ 6. 6. 6. 6. 6. 6.]]
[[15. 15. 15. 15. 15. 15.]]
[[24. 24. 24. 24. 24. 24.]]]
>>> # case 4: Reduces a dimension along axis 2.
>>> output = op(x, 2)
>>> print(output)
[[[ 6.]
[12.]
[18.]]
[[24.]
[30.]
[36.]]
[[42.]
[48.]
[54.]]]
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('axis', default=()),
)
@prim_arg_register
def __init__(self, keep_dims=False, skip_mode=False):
self._set_prim_arg("keep_dims", keep_dims)
self._set_prim_arg("skip_mode", skip_mode)
def __call__(self, x, axis=()):
return super().__call__(x, axis, self.keep_dims, self.skip_mode)
class SiLU(Primitive):
r"""
.. code-block::
prim = ops.SiLU()
out = prim(input)
is equivalent to
.. code-block::
ops.silu(input)
Refer to :func:`mindspore.ops.silu` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_silu(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
silu_op=SiLU()
class Angle(Primitive):
r"""
.. code-block::
prim = ops.Angle()
out = prim(input)
is equivalent to
.. code-block::
ops.angle(input)
Refer to :func:`mindspore.ops.angle` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
angle_op=Angle()
class RandpermV2(Primitive):
r"""
.. code-block::
prim = ops.RandpermV2(seed, offset, dtype)
out = prim(n)
is equivalent to
.. code-block::
ops.randperm(n, seed, offset, dtype)
Refer to :func:`mindspore.ops.randperm` for more details.
"""
@prim_arg_register
def __init__(self, seed=0, offset=0, dtype=mstype.int64):
self._set_prim_arg("seed", type_it('RandpermV2', 'seed', seed, OpDtype.DT_TENSOR, OpDtype.DT_INT))
self._set_prim_arg("offset", type_it('RandpermV2', 'offset', offset, OpDtype.DT_TENSOR, OpDtype.DT_INT))
self._set_prim_arg_with_handler("dtype", dtype, dtype_to_type_id)
def __call__(self, n):
return super().__call__(n, self.seed, self.offset, self.dtype)
class Empty(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('size'),
sig.make_sig('dtype', default=None),
sig.make_sig('device', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, size, dtype=None, device=None):
return super().__call__(size, dtype if dtype is None else dtype_to_type_id('Empty', 'dtype', dtype), device)
empty_op=Empty()
class ArgMinExt(Primitive):
r"""
.. code-block::
prim = ops.ArgMinExt()
out = prim(input, dim, keepdim)
is equivalent to
.. code-block::
ops.argmin_ext(input, dim, keepdim)
Refer to :func:`mindspore.ops.argmin_ext` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('dim', default=None),
sig.make_sig('keepdim', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim=None, keepdim=False):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_argmin_ext(self, [input, dim, keepdim]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim, keepdim)
return res
argmin_ext_op=ArgMinExt()
class Scatter(Primitive):
r"""
Reverse operation of gather.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('dim'),
sig.make_sig('index'),
sig.make_sig('src'),
sig.make_sig('reduce', default='none'),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim, index, src, reduce='none'):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_scatter(self, [input, dim, index, src, str_to_enum('Scatter', 'reduce', reduce)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim, index, src, str_to_enum('Scatter', 'reduce', reduce))
return res
scatter_op=Scatter()
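# Illustrative sketch of the scatter semantics described above (the reverse of gather),
# assuming a dim-wise scatter: for a 2-D `input` with dim=0,
#     out[index[i][j]][j] = src[i][j]   # all other positions keep the values of `input`
# Hypothetical usage (not executed at import time; the values below are examples only):
#     import numpy as np
#     from mindspore import Tensor
#     x = Tensor(np.zeros((3, 3), np.float32))
#     index = Tensor(np.array([[0, 1, 2]], np.int64))
#     src = Tensor(np.array([[1., 2., 3.]], np.float32))
#     out = scatter_op(x, 0, index, src)   # writes 1., 2., 3. into rows 0, 1, 2 of columns 0, 1, 2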
class RFFTFreq(Primitive):
r"""
.. code-block::
prim = ops.RFFTFreq()
out = prim(n, d, dtype)
is equivalent to
.. code-block::
ops.rfftfreq(n, d, dtype)
Refer to :func:`mindspore.ops.rfftfreq` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('n'),
sig.make_sig('d', default=1.0),
sig.make_sig('dtype', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, n, d=1.0, dtype=None):
return super().__call__(n, d, dtype if dtype is None else dtype_to_type_id('RFFTFreq', 'dtype', dtype))
rfftfreq_op=RFFTFreq()
class UpsampleLinear1DGrad(Primitive):
r"""
Upsamples the 1-D gradient data with the linear interpolation algorithm.
Note:
One of 'scales' and 'output_size' must be specified, and it is an error if both are specified.
Inputs:
- **dy** (Tensor) - Tensor of shape [N, C, L]. Must be one of the following types:
float16, float32, float64.
- **input_size** (Union[tuple[int], list[int]]): A required tuple[int] which contains 3 elements:
[batch, channels, length]. Must:
input_size[0] == dy.shape[0]
input_size[1] == dy.shape[1].
- **output_size** (Union[tuple[int], list[int]]): An optional tuple[int]. Default: ``None``.
It contains 1 element: length, whose value should be consistent with `dy`. Must:
dy.shape[2] == output_size[0].
- **scales** (Union[tuple[float], list[float]]): An optional tuple[float]. Default: ``None``.
The scale array along each dimension, containing 1 element: length_depth. Must:
dy.shape[2] == floor(input_size[2] * scales[0]).
- **align_corners** (bool): An optional bool. Default: ``False``.
Outputs:
- **dx** (Tensor) - A Tensor with shape determined by `input_size`, and its dtype is the same as `dy`.
"""
__mindspore_signature__ = (
sig.make_sig('dy'),
sig.make_sig('input_size'),
sig.make_sig('output_size', default=None),
sig.make_sig('scales', default=None),
sig.make_sig('align_corners', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, dy, input_size, output_size=None, scales=None, align_corners=False):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_upsample_linear1d_grad(self, [dy, input_size, output_size, scales, align_corners]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, dy, input_size, output_size, scales, align_corners)
return res
upsample_linear1d_grad_op=UpsampleLinear1DGrad()
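# Hedged usage sketch for UpsampleLinear1DGrad, following the shape constraints in the
# docstring above (illustrative only, not executed at import time):
#     import numpy as np
#     from mindspore import Tensor
#     dy = Tensor(np.ones((1, 2, 8), np.float32))               # gradient of the upsampled output [N, C, L_out]
#     dx = upsample_linear1d_grad_op(dy, (1, 2, 4), output_size=(8,))
#     # dx is expected to have shape (1, 2, 4), i.e. `input_size`, with the same dtype as `dy`.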
class DistCommGather(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, gather_list, rank_size, dst, rank_id, group):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_dist_comm_gather(self, [input, gather_list, rank_size, dst, rank_id, group]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, gather_list, rank_size, dst, rank_id, group)
return res
dist_comm_gather_op=DistCommGather()
class Neg(Primitive):
r"""
.. code-block::
prim = ops.Neg()
out = prim(input)
is equivalent to
.. code-block::
ops.neg(input)
Refer to :func:`mindspore.ops.neg` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_neg(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
neg_op=Neg()
class SigmoidGrad(Primitive):
r"""
Gets the gradient of the Sigmoid operation.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, y, dy):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_sigmoid_grad(self, [y, dy]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, y, dy)
return res
sigmoid_grad_op=SigmoidGrad()
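# SigmoidGrad takes the forward output `y` and the upstream gradient `dy`. A minimal sketch,
# assuming the conventional sigmoid gradient relation dx = dy * y * (1 - y)
# (illustrative only, not executed at import time):
#     import numpy as np
#     from mindspore import Tensor
#     y = Tensor(np.array([0.5, 0.7311], np.float32))   # sigmoid(x) values
#     dy = Tensor(np.ones(2, np.float32))               # upstream gradient
#     dx = sigmoid_grad_op(y, dy)                       # approx. [0.25, 0.1966] under the assumption above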
class Conv1DExt(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('weight'),
sig.make_sig('bias', default=None),
sig.make_sig('stride', default=1),
sig.make_sig('padding', default=0),
sig.make_sig('dilation', default=1),
sig.make_sig('groups', default=1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_conv1d_ext(self, [input, weight, bias, stride, padding, dilation, groups]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, weight, bias, stride, padding, dilation, groups)
return res
conv1d_ext_op=Conv1DExt()
class DCT(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('type', default=2),
sig.make_sig('n', default=None),
sig.make_sig('axis', default=-1),
sig.make_sig('norm', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, type=2, n=None, axis=-1, norm=None):
return super().__call__(x, type, n, axis, norm if norm is None else str_to_enum('DCT', 'norm', norm))
dct_op=DCT()
class Embedding(Primitive):
r"""
Retrieve the word embeddings in `weight` using indices specified in `input`.
.. warning::
On Ascend, the behavior is unpredictable when the value of `input` is invalid.
Args:
input (Tensor): The indices used to look up in `weight`. The data type must be mindspore.int32 or mindspore.int64,
and the value should be in range `[0, weight.shape[0])`.
weight (Union[Parameter, Tensor]): The matrix to look up from. The shape must be 2-D.
padding_idx (int, optional): If the value is not None, the corresponding row of `weight` will not be updated in training.
The value should be in range `[-weight.shape[0], weight.shape[0])` if it's not ``None``. Default ``None``.
max_norm (float, optional): If not None, first compute the p-norm of the `weight` rows specified by `input`, where p is specified by `norm_type`;
if the result is larger than `max_norm`, update `weight` with :math:`\frac{max\_norm}{result+1e^{-7}}` in-place. Default ``None``.
norm_type (float, optional): Indicates the value of p in p-norm. Default ``2.0``.
scale_grad_by_freq (bool, optional): If ``True`` the gradients will be scaled by the inverse of frequency of the index in `input`. Default ``False``.
Returns:
Tensor, has the same data type as `weight`, the shape is :math:`(*input.shape, weight.shape[1])`.
Raises:
ValueError: If `padding_idx` is out of valid range.
ValueError: If the shape of `weight` is invalid.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, Parameter, ops
>>> input = Tensor([[1, 0, 1, 1], [0, 0, 1, 0]])
>>> weight = Parameter(np.random.randn(3, 3).astype(np.float32))
>>> output = ops.auto_generate.Embedding()(input, weight, max_norm=0.4)
>>> print(output)
[[[ 5.49015924e-02, 3.47811311e-01, -1.89771220e-01],
[ 2.09307984e-01, -2.24846993e-02, 3.40124398e-01],
[ 5.49015924e-02, 3.47811311e-01, -1.89771220e-01],
[ 5.49015924e-02, 3.47811311e-01, -1.89771220e-01]],
[[ 2.09307984e-01, -2.24846993e-02, 3.40124398e-01],
[ 2.09307984e-01, -2.24846993e-02, 3.40124398e-01],
[ 5.49015924e-02, 3.47811311e-01, -1.89771220e-01],
[ 2.09307984e-01, -2.24846993e-02, 3.40124398e-01]]]
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('weight', sig.sig_rw.RW_WRITE),
sig.make_sig('padding_idx', default=None),
sig.make_sig('max_norm', default=None),
sig.make_sig('norm_type', default=2.0),
sig.make_sig('scale_grad_by_freq', default=False),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, weight, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_embedding(self, [input, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq)
return res
embedding_op=Embedding()
class UnsortedSegmentSum(Primitive):
r"""
.. code-block::
prim = ops.UnsortedSegmentSum()
out = prim(input_x, segment_ids, num_segments)
is equivalent to
.. code-block::
ops.unsorted_segment_sum(input_x, segment_ids, num_segments)
Refer to :func:`mindspore.ops.unsorted_segment_sum` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input_x, segment_ids, num_segments):
return super().__call__(input_x, segment_ids, num_segments)
unsorted_segment_sum_op=UnsortedSegmentSum()
class FlashAttentionScoreGrad(Primitive):
r"""
Calculates the gradient of the FlashAttentionScore operation.
.. warning::
This is an experimental API that is subject to change or deletion.
Supported Platforms:
``Ascend``
"""
__mindspore_signature__ = (
sig.make_sig('query'),
sig.make_sig('key'),
sig.make_sig('value'),
sig.make_sig('dy'),
sig.make_sig('pse_shift', default=None),
sig.make_sig('drop_mask', default=None),
sig.make_sig('padding_mask', default=None),
sig.make_sig('atten_mask', default=None),
sig.make_sig('softmax_max', default=None),
sig.make_sig('softmax_sum', default=None),
sig.make_sig('softmax_in', default=None),
sig.make_sig('attention_in', default=None),
sig.make_sig('prefix', default=None),
sig.make_sig('actual_seq_qlen', default=None),
sig.make_sig('actual_seq_kvlen', default=None),
)
@prim_arg_register
def __init__(self, head_num, keep_prob=1.0, scale_value=1.0, pre_tokens=65536, next_tokens=65536, inner_precise=1, input_layout='BSH', sparse_mode=0):
self._set_prim_arg("head_num", head_num)
self._set_prim_arg("keep_prob", keep_prob)
self._set_prim_arg("scale_value", scale_value)
self._set_prim_arg("pre_tokens", pre_tokens)
self._set_prim_arg("next_tokens", next_tokens)
self._set_prim_arg("inner_precise", inner_precise)
self._set_prim_arg_with_handler("input_layout", input_layout, str_to_enum)
self._set_prim_arg("sparse_mode", sparse_mode)
def __call__(self, query, key, value, dy, pse_shift=None, drop_mask=None, padding_mask=None, atten_mask=None, softmax_max=None, softmax_sum=None, softmax_in=None, attention_in=None, prefix=None, actual_seq_qlen=None, actual_seq_kvlen=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_flash_attention_score_grad(self, [query, key, value, dy, pse_shift, drop_mask, padding_mask, atten_mask, softmax_max, softmax_sum, softmax_in, attention_in, prefix, actual_seq_qlen, actual_seq_kvlen, self.head_num, self.keep_prob, self.scale_value, self.pre_tokens, self.next_tokens, self.inner_precise, self.input_layout, self.sparse_mode]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, query, key, value, dy, pse_shift, drop_mask, padding_mask, atten_mask, softmax_max, softmax_sum, softmax_in, attention_in, prefix, actual_seq_qlen, actual_seq_kvlen, self.head_num, self.keep_prob, self.scale_value, self.pre_tokens, self.next_tokens, self.inner_precise, self.input_layout, self.sparse_mode)
return res
class MSELossExt(Primitive):
r"""
.. code-block::
prim = ops.MSELossExt()
out = prim(input, target, reduction)
is equivalent to
.. code-block::
ops.mse_loss_ext(input, target, reduction)
Refer to :func:`mindspore.ops.mse_loss_ext` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('target'),
sig.make_sig('reduction', default='mean'),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, target, reduction='mean'):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_mse_loss_ext(self, [input, target, str_to_enum('MSELossExt', 'reduction', reduction)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, target, str_to_enum('MSELossExt', 'reduction', reduction))
return res
mse_loss_ext_op=MSELossExt()
class LogSigmoid(Primitive):
r"""
Applies logsigmoid activation element-wise. The input is a Tensor with any valid shape.
Logsigmoid is defined as:
.. math::
\text{logsigmoid}(x_{i}) = \log(\frac{1}{1 + \exp(-x_i)}),
where :math:`x_{i}` is the element of the input.
LogSigmoid Activation Function Graph:
.. image:: ../images/LogSigmoid.png
:align: center
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
input (Tensor): The input of LogSigmoid with data type of bfloat16, float16 or float32.
The shape is :math:`(*)` where :math:`*` means, any number of additional dimensions.
Returns:
Tensors, with the same type and shape as the `input`.
Raises:
TypeError: If dtype of `input` is not bfloat16, float16 or float32.
TypeError: If `input` is not a Tensor.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input = Tensor([1.0, 2.0, 3.0], mindspore.float32)
>>> output = ops.auto_generate.LogSigmoid()(input)[0]
>>> print(output)
[-0.31326166 -0.12692806 -0.04858734]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_logsigmoid(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
logsigmoid_op=LogSigmoid()
class OnesLike(Primitive):
r"""
Returns a Tensor filled with the value 1, whose shape and data type are the same as the input.
Refer to :func:`mindspore.ops.ones_like` for more details.
Inputs:
- **input_x** (Tensor) - Tensor of any dimension.
Outputs:
Tensor, has the same shape and type as `input_x` but filled with ones.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
>>> output = ops.OnesLike()(input_x)
>>> print(output)
[[1 1]
[1 1]]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x):
return super().__call__(x)
ones_like_op=OnesLike()
class SplitWithSize(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('split_size'),
sig.make_sig('dim', default=0),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, split_size, dim=0):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_split_with_size(self, [input, split_size, dim]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, split_size, dim)
return res
split_with_size_op=SplitWithSize()
class Triu(Primitive):
r"""
.. code-block::
prim = ops.Triu(diagonal)
out = prim(input)
is equivalent to
.. code-block::
ops.triu(input, diagonal)
Refer to :func:`mindspore.ops.triu` for more details.
"""
@prim_arg_register
def __init__(self, diagonal=0):
self._set_prim_arg("diagonal", type_it('Triu', 'diagonal', diagonal, OpDtype.DT_TENSOR, OpDtype.DT_INT))
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_triu(self, [input, self.diagonal]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, self.diagonal)
return res
class NormalFloatFloat(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, mean, std, size, seed, offset):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_normal_float_float(self, [mean, std, size, seed, offset]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, mean, std, size, seed, offset)
return res
normal_float_float_op=NormalFloatFloat()
class RotatedIou(Primitive):
r"""
Calculate the overlap area between rotated rectangles.
.. warning::
This is an experimental API that is subject to change or deletion.
.. note::
The input data types supported by the Ascend platform include
bfloat16, float16, float32.
Args:
trans (bool): Distinguishes the rectangle representation
of `boxes` and `query_boxes`. If ``True``, the format of `boxes`
and `query_boxes` is ``'xyxyt'``, else the format is ``'xywht'``.
The default value is ``False``.
mode (int): Distinguishes the calculation mode. If the value
is ``1``, the calculation mode is ``'iof'``, else the
calculation mode is ``'iou'``. The default value is ``0``.
is_cross (bool): If ``True``, use cross-calculation, else use
one-to-one calculation. The default value is ``True``.
v_threshold (float): Provide condition relaxation for
intersection calculation. The default value is ``0.0``.
e_threshold (float): Provide condition relaxation for
intersection calculation. The default value is ``0.0``.
Inputs:
boxes (Tensor): The first set of rectangles which has a
shape of :math:`(B, N, 5)`.
query_boxes (Tensor): The second set of rectangles which
has a shape of :math:`(B, K, 5)`.
Outputs:
Tensor, the shape is :math:`(B, N, K)`.
Raises:
TypeError: If `boxes` is not a Tensor.
TypeError: If `query_boxes` is not a Tensor.
ValueError: If `boxes` and `query_boxes` do not have the same first dimension.
ValueError: If the third dimension of `boxes` or `query_boxes` is not ``5``.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> a = np.random.uniform(0,1,(2,2,5)).astype(np.float16)
>>> b = np.random.uniform(0,1,(2,3,5)).astype(np.float16)
>>> box1 = Tensor(a)
>>> box2 = Tensor(b)
>>> output = ops.rotated_iou(box1, box2, trans=False, mode=0, is_cross=True)
"""
__mindspore_signature__ = (
sig.make_sig('boxes'),
sig.make_sig('query_boxes'),
sig.make_sig('trans', default=False),
sig.make_sig('mode', default=0),
sig.make_sig('is_cross', default=True),
sig.make_sig('v_threshold', default=0.0),
sig.make_sig('e_threshold', default=0.0),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, boxes, query_boxes, trans=False, mode=0, is_cross=True, v_threshold=0.0, e_threshold=0.0):
return super().__call__(boxes, query_boxes, trans, mode, is_cross, v_threshold, e_threshold)
rotated_iou_op=RotatedIou()
class FFTN(Primitive):
r"""
.. code-block::
prim = ops.FFTN()
out = prim(input, s, dim, norm)
is equivalent to
.. code-block::
ops.fftn(input, s, dim, norm)
Refer to :func:`mindspore.ops.fftn` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('s', default=None),
sig.make_sig('dim', default=None),
sig.make_sig('norm', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, s=None, dim=None, norm=None):
return super().__call__(input, s, dim, norm if norm is None else str_to_enum('FFTN', 'norm', norm))
fftn_op=FFTN()
class ResizeBicubic(Primitive):
r"""
Resize images to size using bicubic interpolation.
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
align_corners (bool, optional): If ``True`` , the centers of the 4 corner pixels of the input
and output tensors are aligned, preserving the values at the corner pixels. Default: ``False`` .
half_pixel_centers (bool, optional): Whether to use half-pixel center alignment. If set to ``True`` ,
`align_corners` should be ``False`` . Default: ``False`` .
Inputs:
- **images** (Tensor) - The input image must be a 4-D tensor of shape :math:`(batch, channels, height, width)`.
The format must be NCHW. Types allowed: float16, float32, float64.
- **size** (Union[tuple[int], Tensor[int]]) - A 1-D tensor or tuple with 2 elements: new_height, new_width. Using tuple[int] is recommended.
Outputs:
A 4-D tensor with shape :math:`(batch, channels, new\_height, new\_width)` whose dtype is the same as `images` .
Raises:
TypeError: If the type of `images` is not allowed.
TypeError: If the type of `align_corners` is not bool.
TypeError: If the type of `half_pixel_centers` is not bool.
ValueError: If the dim of `images` is not 4.
ValueError: If the dim of `size` is not 1 when `size` is a tensor.
ValueError: If the number of elements in `size` is not 2.
ValueError: If any value of `size` is not positive.
ValueError: If the values of `align_corners` and `half_pixel_centers` are both ``True`` .
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops, nn
>>> class NetResizeBicubic(nn.Cell):
... def __init__(self):
... super(NetResizeBicubic, self).__init__()
... align_corners = False
... half_pixel_centers = False
... self.resize = ops.ResizeBicubic(align_corners, half_pixel_centers)
...
... def construct(self, images, size):
... return self.resize(images, size)
...
>>> images = Tensor(np.array([1, 2, 3, 4]).reshape(1, 1, 2, 2).astype(np.float32))
>>> size = Tensor([1, 4], mindspore.int32)
>>> resizebicubic = NetResizeBicubic()
>>> output = resizebicubic(images, size)
>>> print(output)
[[[[1. 1.5 2. 2.09375]]]]
"""
@prim_arg_register
def __init__(self, align_corners=False, half_pixel_centers=False):
self._set_prim_arg("align_corners", align_corners)
self._set_prim_arg("half_pixel_centers", half_pixel_centers)
def __call__(self, image, size):
return super().__call__(image, size, self.align_corners, self.half_pixel_centers)
class MultiScaleDeformableAttn(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, value, shape, offset, locations, weight):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_multi_scale_deformable_attn(self, [value, shape, offset, locations, weight]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, value, shape, offset, locations, weight)
return res
multi_scale_deformable_attn_op=MultiScaleDeformableAttn()
class Cholesky(Primitive):
r"""
Performs the Cholesky decomposition on a single or a batch of symmetric positive-definite matrices.
.. warning::
This is an experimental API that is subject to change or deletion.
Refer to :func:`mindspore.ops.cholesky` for more details.
Args:
upper (bool, optional): Flag that indicates whether to return a upper or lower triangular matrix.
Default: ``False`` .
Inputs:
- **input_x** (Tensor) - Tensor of shape :math:`(*, N, N)`, where :math:`*` is zero or more batch dimensions
consisting of symmetric positive-definite matrices, with float32 or float64 data type.
Outputs:
Tensor, has the same shape and data type as `input_x`.
Supported Platforms:
``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.array([[1.0, 1.0], [1.0, 2.0]]), mindspore.float32)
>>> output = ops.Cholesky()(input_x)
>>> print(output)
[[1. 0.]
[1. 1.]]
"""
@prim_arg_register
def __init__(self, upper=False):
self._set_prim_arg("upper", upper)
def __call__(self, input_x):
return super().__call__(input_x, self.upper)
class MishGradExt(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, dout, x):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_mish_grad_ext(self, [dout, x]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, dout, x)
return res
mish_grad_ext_op=MishGradExt()
class ReflectionPad1D(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, padding):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_reflection_pad_1d(self, [input, padding]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, padding)
return res
reflection_pad_1d_op=ReflectionPad1D()
class MatrixDeterminant(Primitive):
r"""
Calculates the value of the determinant for one or more square matrices.
Refer to :func:`mindspore.ops.det` for more details.
Inputs:
- **x** (Tensor) - A matrix to be calculated. The matrix must be at least two dimensions, and the last two
dimensions must be the same size.
Outputs:
Tensor, the shape is `x_shape[:-2]`, and the dtype is the same as `x`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.array([[[-4.5, -1.5], [7.0, 6.0]], [[2.5, 0.5], [3.0, 9.0]]]), mindspore.float32)
>>> op = ops.MatrixDeterminant()
>>> output = op(input_x)
>>> print(output)
[-16.5 21. ]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
matrix_determinant_op=MatrixDeterminant()
class AllGatherMatmul(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('x2'),
sig.make_sig('group'),
sig.make_sig('world_size'),
sig.make_sig('bias', default=None),
sig.make_sig('gather_index', default=0),
sig.make_sig('gather_output', default=True),
sig.make_sig('comm_turn', default=0),
sig.make_sig('trans_input', default=False),
sig.make_sig('trans_x2', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, x2, group, world_size, bias=None, gather_index=0, gather_output=True, comm_turn=0, trans_input=False, trans_x2=False):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_all_gather_matmul(self, [input, x2, group, world_size, bias, gather_index, gather_output, comm_turn, trans_input, trans_x2]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, x2, group, world_size, bias, gather_index, gather_output, comm_turn, trans_input, trans_x2)
return res
all_gather_matmul_op=AllGatherMatmul()
class SubScalar(Primitive):
r"""
.. code-block::
prim = ops.SubScalar()
out = prim(input, other, alpha)
is equivalent to
.. code-block::
ops.sub_scalar(input, other, alpha)
Refer to :func:`mindspore.ops.sub_scalar` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('other'),
sig.make_sig('alpha', default=1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other, alpha=1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_sub_scalar(self, [input, other, alpha]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other, alpha)
return res
sub_scalar_op=SubScalar()
class AdaptiveAvgPool1D(Primitive):
r"""
.. code-block::
prim = ops.AdaptiveAvgPool1D()
out = prim(input, output_size)
is equivalent to
.. code-block::
ops.adaptive_avg_pool1d(input, output_size)
Refer to :func:`mindspore.ops.adaptive_avg_pool1d` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, output_size):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_adaptive_avg_pool1d(self, [input, output_size]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, output_size)
return res
adaptive_avg_pool1d_op=AdaptiveAvgPool1D()
class EqualExt(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_equal_ext(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
equal_ext_op=EqualExt()
class ConvolutionStr(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('weight'),
sig.make_sig('bias', default=None),
sig.make_sig('stride', default=1),
sig.make_sig('padding', default='valid'),
sig.make_sig('dilation', default=1),
sig.make_sig('transposed', default=False),
sig.make_sig('output_padding', default=0),
sig.make_sig('groups', default=1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, weight, bias=None, stride=1, padding='valid', dilation=1, transposed=False, output_padding=0, groups=1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_convolution_str(self, [input, weight, bias, stride, str_to_enum('ConvolutionStr', 'padding', padding), dilation, transposed, output_padding, groups]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, weight, bias, stride, str_to_enum('ConvolutionStr', 'padding', padding), dilation, transposed, output_padding, groups)
return res
convolution_str_op=ConvolutionStr()
class LogSoftmaxExt(Primitive):
r"""
.. code-block::
prim = ops.LogSoftmaxExt()
out = prim(input, dim, dtype)
is equivalent to
.. code-block::
ops.log_softmax_ext(input, dim, dtype)
Refer to :func:`mindspore.ops.log_softmax_ext` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('dim', default=None),
sig.make_sig('dtype', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim=None, dtype=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_log_softmax_ext(self, [input, dim, dtype if dtype is None else dtype_to_type_id('LogSoftmaxExt', 'dtype', dtype)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim, dtype if dtype is None else dtype_to_type_id('LogSoftmaxExt', 'dtype', dtype))
return res
log_softmax_ext_op=LogSoftmaxExt()
class Atan(Primitive):
r"""
.. code-block::
prim = ops.Atan()
out = prim(input)
is equivalent to
.. code-block::
ops.atan(input)
Refer to :func:`mindspore.ops.atan` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
atan_op=Atan()
class InplaceFloorDivides(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('other'),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_floor_divides(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
inplace_floor_divides_op=InplaceFloorDivides()
class InnerCommIsend(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dst, group, tag):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inner_comm_isend(self, [input, dst, group, tag]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dst, group, tag)
return res
inner_comm_isend_op=InnerCommIsend()
class InplaceFillDiagonal(Primitive):
r"""
.. code-block::
prim = ops.InplaceFillDiagonal()
out = prim(input, fill_value, wrap)
is equivalent to
.. code-block::
ops.fill_diagonal_(input, fill_value, wrap)
Refer to :func:`mindspore.ops.fill_diagonal_` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('fill_value'),
sig.make_sig('wrap', default=False),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, fill_value, wrap=False):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_fill_diagonal(self, [input, fill_value, wrap]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, fill_value, wrap)
return res
inplace_fill_diagonal_op=InplaceFillDiagonal()
class LogMatrixDeterminant(Primitive):
r"""
Computes the sign and the log of the absolute value of the determinant of one or more square matrices.
Note:
The output type is always real-valued, even if `input` is complex.
Args:
input (Tensor): A matrix to be calculated, its shape is :math:`(..., M, M)`.
The matrix must be at least two dimensions, and the last two
dimensions must be the same size. Data type must be float32, float64, complex64 or complex128.
Returns:
Tensor. The signs of the log determinants. The shape is :math:`input.shape[:-2]`.
Tensor. The absolute values of the log determinants. The shape is :math:`input.shape[:-2]`.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If dtype of `input` is not float32, float64, complex64 or complex128.
ValueError: If the last two dimensions of `input` are not the same size.
ValueError: If the dimension of `input` is less than 2.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.array([[[-4.5, -1.5], [7.0, 6.0]], [[2.5, 0.5], [3.0, 9.0]]]), mindspore.float32)
>>> sign, output = ops.LogMatrixDeterminant()(input_x)
>>> print(sign)
[-1. 1.]
>>> print(output)
[2.80336046e+00 3.04452229e+00]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
log_matrix_determinant_op=LogMatrixDeterminant()
class LeakyReLUExt(Primitive):
r"""
.. code-block::
prim = ops.LeakyReLUExt()
out = prim(input, negative_slope)
is equivalent to
.. code-block::
ops.leaky_relu_ext(input, negative_slope)
Refer to :func:`mindspore.ops.leaky_relu_ext` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('negative_slope', default=0.01),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, negative_slope=0.01):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_leaky_relu_ext(self, [input, negative_slope]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, negative_slope)
return res
leaky_relu_ext_op=LeakyReLUExt()
class HShrink(Primitive):
r"""
.. code-block::
prim = ops.HShrink(lambd)
out = prim(input)
is equivalent to
.. code-block::
ops.hardshrink(input, lambd)
Refer to :func:`mindspore.ops.hardshrink` for more details.
"""
@prim_arg_register
def __init__(self, lambd=0.5):
self._set_prim_arg("lambd", type_it('HShrink', 'lambd', lambd, (OpDtype.DT_INT, OpDtype.DT_BOOL), OpDtype.DT_FLOAT))
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_hshrink(self, [input, self.lambd]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, self.lambd)
return res
class BoolNot(Primitive):
r"""
Returns the logical `not` of a bool input.
.. note::
The input can be a constant or variable value. Usage is the same as 'not' in Python.
This primitive only has a 'CPU' implementation; on other platforms, it runs heterogeneously.
Inputs:
- **x** (Scalar) - A constant or variable scalar, the type can be bool.
Outputs:
Scalar, the type is bool.
Raises:
TypeError: If `x` is not a bool scalar.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x):
return super().__call__(x)
bool_not_op=BoolNot()
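# Minimal usage sketch for BoolNot, matching the docstring above (illustrative only, not executed):
#     out = bool_not_op(True)    # -> False
#     out = bool_not_op(False)   # -> True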
class SwigluGrad(Primitive):
r"""
.. code-block::
prim = ops.SwigluGrad()
out = prim(grad_output, input, dim)
is equivalent to
.. code-block::
ops.swiglu_grad(grad_output, input, dim)
Refer to :func:`mindspore.ops.swiglu_grad` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('grad_output'),
sig.make_sig('input'),
sig.make_sig('dim', default=-1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, grad_output, input, dim=-1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_swiglu_grad(self, [grad_output, input, dim]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, grad_output, input, dim)
return res
swiglu_grad_op=SwigluGrad()
class DiagExt(Primitive):
r"""
.. code-block::
prim = ops.DiagExt()
out = prim(input, diagonal)
is equivalent to
.. code-block::
ops.diag_ext(input, diagonal)
Refer to :func:`mindspore.ops.diag_ext` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('diagonal', default=0),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, diagonal=0):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_diag_ext(self, [input, diagonal]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, diagonal)
return res
diag_ext_op=DiagExt()
class Argmin(Primitive):
r"""
Returns the indices of the minimum value along a specified `axis` of a Tensor.
If the shape of input tensor is :math:`(x_1, ..., x_N)`, the shape of the output tensor is
:math:`(x_1, ..., x_{axis-1}, x_{axis+1}, ..., x_N)`.
Args:
axis (int): Axis where the Argmin operation applies to. Default: ``-1`` .
output_type (:class:`mindspore.dtype`): Output data type.
Supported types: ``mstype.int32`` , ``mstype.int64`` . Default: ``mstype.int32`` .
Inputs:
- **input_x** (Tensor) - Input tensor.
The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
Outputs:
Tensor, the index of the minimum value along the specified axis of the input Tensor.
Raises:
TypeError: If `axis` is not an int.
TypeError: If `output_type` is neither int32 nor int64.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.array([2.0, 3.1, 1.2]), mindspore.float32)
>>> index = ops.Argmin()(input_x)
>>> print(index)
2
"""
@prim_arg_register
def __init__(self, axis=-1, output_type=mstype.int32):
self._set_prim_arg("axis", axis)
self._set_prim_arg_with_handler("output_type", output_type, dtype_to_type_id)
def __call__(self, x):
return super().__call__(x, self.axis, self.output_type)
class InnerCommAllToAllV(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, group, send_numel_list, recv_numel_list, rank_size, split_sizes_empty):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inner_comm_all_to_all_v(self, [input, group, send_numel_list, recv_numel_list, rank_size, split_sizes_empty]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, group, send_numel_list, recv_numel_list, rank_size, split_sizes_empty)
return res
inner_comm_all_to_all_v_op=InnerCommAllToAllV()
class LogSigmoidGrad(Primitive):
r"""
.. code-block::
prim = ops.LogSigmoidGrad()
out = prim(dy, input, buffer)
is equivalent to
.. code-block::
ops.logsigmoid_grad(dy, input, buffer)
Refer to :func:`mindspore.ops.logsigmoid_grad` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, dy, input, buffer):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_logsigmoid_grad(self, [dy, input, buffer]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, dy, input, buffer)
return res
logsigmoid_grad_op=LogSigmoidGrad()
class Expm1(Primitive):
r"""
.. code-block::
prim = ops.Expm1()
out = prim(input)
is equivalent to
.. code-block::
ops.expm1(input)
Refer to :func:`mindspore.ops.expm1` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_expm1(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
expm1_op=Expm1()
class Swiglu(Primitive):
r"""
.. code-block::
prim = ops.Swiglu()
out = prim(input, dim)
is equivalent to
.. code-block::
ops.swiglu(input, dim)
Refer to :func:`mindspore.ops.swiglu` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('dim', default=-1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim=-1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_swiglu(self, [input, dim]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim)
return res
swiglu_op=Swiglu()
class Round(Primitive):
r"""
Rounds a tensor element-wise, with halfway cases rounded to even (half to even).
.. math::
out_i \approx input_i
.. note::
The input data types supported by the Ascend platform include
bfloat16 (Atlas training series products are not supported), float16, float32, float64, int32, and int64.
Inputs:
- **input** (Tensor) - The input tensor.
- **decimals** (int, optional) - Number of decimal places to round to (default: 0). If decimals is
negative, it specifies the number of positions to the left of the decimal point. It supports
converting the single-element tensor to an int. When `input` type is int32 or int64, the `decimals`
should be 0.
Outputs:
Tensor, has the same shape and type as the `input`.
Raises:
TypeError: If `input` is not a Tensor.
RuntimeError: If `input` type is int32 or int64 and `decimals` is not 0.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([0.8, 1.5, 2.3, 2.5, -4.5]), mindspore.float32)
>>> round = ops.Round()
>>> output = round(input)
>>> print(output)
[ 1. 2. 2. 2. -4.]
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('decimals', default=0),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, decimals=0):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_round(self, [input, decimals]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, decimals)
return res
round_op=Round()
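# Hedged sketch of the `decimals` argument described above (illustrative only, not executed):
#     import numpy as np
#     from mindspore import Tensor
#     x = Tensor(np.array([1.234, 5.678], np.float32))
#     out = round_op(x, 2)    # rounds to 2 decimal places -> approx. [1.23, 5.68]
#     out = round_op(x, -1)   # a negative `decimals` rounds to the left of the decimal point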
class ReflectionPad2DGrad(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, grad_output, input, padding):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_reflection_pad_2d_grad(self, [grad_output, input, padding]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, grad_output, input, padding)
return res
reflection_pad_2d_grad_op=ReflectionPad2DGrad()
class Erfinv(Primitive):
r"""
.. code-block::
prim = ops.Erfinv()
out = prim(input)
is equivalent to
.. code-block::
ops.erfinv(input)
Refer to :func:`mindspore.ops.erfinv` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_erfinv(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
erfinv_op=Erfinv()
class Tanh(Primitive):
r"""
.. code-block::
prim = ops.Tanh()
out = prim(input)
is equivalent to
.. code-block::
ops.tanh(input)
Refer to :func:`mindspore.ops.tanh` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_tanh(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
tanh_op=Tanh()
class Addmv(Primitive):
r"""
Performs a matrix-vector product of `mat` and `vec`, and add the input vector `input` to the final result.
If `mat` is a tensor of size :math:`(N, M)` , `vec` is a 1-D tensor of size :math:`M` , then `input` must be
broadcastable with a 1-D tensor of size :math:`N` . In this case, `output` is a 1-D Tensor of size :math:`N` .
.. math::
output = \beta input + \alpha (mat @ vec)
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
input (Tensor): Vector to be added.
mat (Tensor): The first tensor needs to be multiplied.
vec (Tensor): The second tensor needs to be multiplied.
Keyword Args:
beta (Union[float, int], optional): Coefficient of `input`. Default: ``1``.
alpha (Union[float, int], optional): Coefficient of :math:`mat @ vec` . Default: ``1``.
Returns:
Tensor, with a shape of :math:`(N,)` , and its dtype is the same as `input`.
Raises:
TypeError: If `input`, `mat` or `vec` is not a Tensor.
TypeError: If dtypes of `mat` and `vec` are not the same.
ValueError: If `mat` is not a 2-D tensor.
ValueError: If `vec` is not a 1-D tensor.
Supported Platforms:
``Ascend``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, mint
>>> input = Tensor(np.array([2., 3.]).astype(np.float32))
>>> mat = Tensor(np.array([[2., 5., 3.], [4., 2., 2.]]).astype(np.float32))
>>> vec = Tensor(np.array([3., 2., 4.]).astype(np.float32))
>>> output = mint.addmv(input, mat, vec)
>>> print(output)
[30. 27.]
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('mat'),
sig.make_sig('vec'),
sig.make_sig('beta', default=1),
sig.make_sig('alpha', default=1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, mat, vec, beta=1, alpha=1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_addmv(self, [input, mat, vec, beta, alpha]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, mat, vec, beta, alpha)
return res
addmv_op=Addmv()
class LogSumExp(Primitive):
r"""
.. code-block::
prim = ops.LogSumExp()
out = prim(input, dim, keepdim)
is equivalent to
.. code-block::
ops.logsumexp_ext(input, dim, keepdim)
Refer to :func:`mindspore.ops.logsumexp_ext` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('dim'),
sig.make_sig('keepdim', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim, keepdim=False):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_logsumexp(self, [input, dim, keepdim]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim, keepdim)
return res
logsumexp_op=LogSumExp()
class ScalarCast(Primitive):
r"""
.. code-block::
prim = ops.ScalarCast()
out = prim(input_x, input_y)
is equivalent to
.. code-block::
ops.scalar_cast(input_x, input_y)
Refer to :func:`mindspore.ops.scalar_cast` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input_x, input_y):
return super().__call__(input_x, dtype_to_type_id('ScalarCast', 'input_y', input_y))
scalar_cast_op=ScalarCast()
class FFTOrtho(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('axes', default=None),
sig.make_sig('forward', default=True),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, axes=None, forward=True):
return super().__call__(input, axes, forward)
fft_ortho_op=FFTOrtho()
class CeLU(Primitive):
r"""
.. code-block::
prim = ops.CeLU(alpha)
out = prim(x)
is equivalent to
.. code-block::
ops.celu(x, alpha)
Refer to :func:`mindspore.ops.celu` for more details.
"""
@prim_arg_register
def __init__(self, alpha=1.0):
self._set_prim_arg("alpha", alpha)
def __call__(self, x):
return super().__call__(x, self.alpha)
class Unique2(Primitive):
r"""
Returns the unique elements of the input tensor.
When `return_inverse=True`, also returns a tensor containing the index of each value of the input
tensor corresponding to the output unique tensor.
When `return_counts=True`, also returns a tensor containing the number of occurrences of each
unique value.
Inputs:
- **input**(Tensor) - The input tensor.
- **sorted**(bool) - Whether to sort the unique elements in ascending order before returning as output.
Default: ``True`` .
- **return_inverse**(bool) - Whether to also return the indices for where elements in the original input ended up in
the returned unique list. Default: ``False`` .
- **return_counts**(bool) - Whether to also return the counts for each unique element. Default: ``False`` .
Returns:
A tensor or a tuple of tensors containing some of the tensor objects (`output`, `inverse_indices`, `counts`).
- **output**(Tensor) - the output list of unique scalar elements.
- **inverse_indices**(Tensor) - Returned when ``return_inverse`` is True. It represents the indices of where
elements in the original input map to in the output. The shape is input.shape[dim].
- **counts**(Tensor) - Returned when ``return_counts`` is True. It represents the number of occurrences of each
unique value or tensor. The shape is output.shape[dim].
Raises:
TypeError: If `input` is not a Tensor.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, nn
>>> from mindspore import ops
>>> x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32)
>>> unique = ops.auto_generate.Unique2()
>>> output = unique(x, return_inverse=True)
>>> print(output)
(Tensor(shape=[3], dtype=Int32, value= [1, 2, 5]), Tensor(shape=[4], dtype=Int32, value= [0, 1, 2, 1]))
>>> y = output[0]
>>> print(y)
[1 2 5]
>>> idx = output[1]
>>> print(idx)
[0 1 2 1]
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('sorted', default=True),
sig.make_sig('return_inverse', default=False),
sig.make_sig('return_counts', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, sorted=True, return_inverse=False, return_counts=False):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_unique2(self, [input, sorted, return_inverse, return_counts]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, sorted, return_inverse, return_counts)
return res
unique2_op=Unique2()
class GenerateEodMaskV2(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('ele_pos'),
sig.make_sig('cur_step'),
sig.make_sig('seed'),
sig.make_sig('offset'),
sig.make_sig('start', default=0),
sig.make_sig('steps', default=1),
sig.make_sig('error_mode', default='cycle'),
sig.make_sig('flip_mode', default='bitflip'),
sig.make_sig('multiply_factor', default=0.0),
sig.make_sig('bit_pos', default=0),
sig.make_sig('flip_probability', default=0.0),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, ele_pos, cur_step, seed, offset, start=0, steps=1, error_mode='cycle', flip_mode='bitflip', multiply_factor=0.0, bit_pos=0, flip_probability=0.0):
return super().__call__(input, ele_pos, cur_step, seed, offset, start, steps, str_to_enum('GenerateEodMaskV2', 'error_mode', error_mode), str_to_enum('GenerateEodMaskV2', 'flip_mode', flip_mode), multiply_factor, bit_pos, flip_probability)
generate_eod_mask_v2_op=GenerateEodMaskV2()
class Log2(Primitive):
r"""
.. code-block::
prim = ops.Log2()
out = prim(input)
is equivalent to
.. code-block::
ops.log2_ext(input)
Refer to :func:`mindspore.ops.log2_ext` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_log2(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
log2_op=Log2()
class EmbeddingApplyAdaGrad(Primitive):
r"""
.. code-block::
prim = ops.EmbeddingApplyAdaGrad()
out = prim(var_handle, lr, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num)
is equivalent to
.. code-block::
ops.embedding_apply_ada_grad(var_handle, lr, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num)
Refer to :func:`mindspore.ops.embedding_apply_ada_grad` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('var_handle'),
sig.make_sig('lr'),
sig.make_sig('grad'),
sig.make_sig('keys'),
sig.make_sig('global_step'),
sig.make_sig('embedding_dim'),
sig.make_sig('mask_zero', default=(0,)),
sig.make_sig('padding_key', default=(0,)),
sig.make_sig('padding_key_mask', default=(1,)),
sig.make_sig('completion_key', default=(0,)),
sig.make_sig('completion_key_mask', default=(1,)),
sig.make_sig('_embedding_dim', default=1),
sig.make_sig('_max_key_num', default=1),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("_process_node_engine_id", 'PS')
def __call__(self, var_handle, lr, grad, keys, global_step, embedding_dim, mask_zero=(0,), padding_key=(0,), padding_key_mask=(1,), completion_key=(0,), completion_key_mask=(1,), _embedding_dim=1, _max_key_num=1):
return super().__call__(var_handle, lr, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num)
embedding_apply_ada_grad_op=EmbeddingApplyAdaGrad()
class Squeeze(Primitive):
r"""
Return the Tensor after deleting the dimension of size 1 in the specified `axis`.
Refer to :func:`mindspore.ops.squeeze` for more details.
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
axis (Union[int, tuple(int)]): Specifies the dimension indexes of shape to be removed, which will remove
all the dimensions of size 1 in the given axis parameter. If specified, it must be int32 or int64.
Default: ``()`` .
Inputs:
- **input** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
Outputs:
Tensor, the shape of tensor is :math:`(x_1, x_2, ..., x_S)`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
>>> squeeze = ops.Squeeze(2)
>>> output = squeeze(input)
>>> print(output)
[[1. 1.]
[1. 1.]
[1. 1.]]
"""
@prim_arg_register
def __init__(self, axis=None):
self._set_prim_arg("axis", type_it('Squeeze', 'axis', axis, (OpDtype.DT_INT, OpDtype.DT_LIST_INT), OpDtype.DT_TUPLE_INT))
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_squeeze(self, [input, self.axis]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, self.axis)
return res
class BinaryCrossEntropyGrad(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('target'),
sig.make_sig('grad_output'),
sig.make_sig('weight', default=None),
)
@prim_arg_register
def __init__(self, reduction='mean'):
self._set_prim_arg_with_handler("reduction", reduction, str_to_enum)
def __call__(self, input, target, grad_output, weight=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_binary_cross_entropy_grad(self, [input, target, grad_output, weight, self.reduction]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, target, grad_output, weight, self.reduction)
return res
class Real(Primitive):
r"""
.. code-block::
prim = ops.Real()
out = prim(input)
is equivalent to
.. code-block::
ops.real(input)
Refer to :func:`mindspore.ops.real` for more details.
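An illustrative sketch (returns the real part of a complex tensor):
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([1+2j, 3-1j]), mindspore.complex64)
>>> output = ops.Real()(input)
>>> print(output)
[1. 3.]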
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
real_op=Real()
class UnstackExt(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('dim', default=0),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim=0):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_unstack_ext(self, [input, dim]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim)
return res
unstack_ext_op=UnstackExt()
class LessEqual(Primitive):
r"""
.. code-block::
prim = ops.LessEqual()
out = prim(input, other)
is equivalent to
.. code-block::
ops.less_equal(input, other)
Refer to :func:`mindspore.ops.less_equal` for more details.
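An illustrative example of the element-wise ``<=`` comparison:
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> other = Tensor(np.array([1, 1, 4]), mindspore.int32)
>>> output = ops.LessEqual()(input, other)
>>> print(output)
[ True False  True]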
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_less_equal(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
less_equal_op=LessEqual()
class DistCommAllToAllVSingle(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, other, input, group, send_numel_list, recv_numel_list, rank_size, split_sizes_empty):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_dist_comm_all_to_all_v_single(self, [other, input, group, send_numel_list, recv_numel_list, rank_size, split_sizes_empty]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, other, input, group, send_numel_list, recv_numel_list, rank_size, split_sizes_empty)
return res
dist_comm_all_to_all_v_single_op=DistCommAllToAllVSingle()
class DistCommBatchIsendIrecv(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, group, op_types, remotes_ranks):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_dist_comm_batch_isend_irecv(self, [input, group, op_types, remotes_ranks]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, group, op_types, remotes_ranks)
return res
dist_comm_batch_isend_irecv_op=DistCommBatchIsendIrecv()
class GatherD(Primitive):
r"""
.. code-block::
prim = ops.GatherD()
out = prim(x, dim, index)
is equivalent to
.. code-block::
ops.gather_d(x, dim, index)
Refer to :func:`mindspore.ops.gather_d` for more details.
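An illustrative sketch (`index` selects elements of `x` along `dim`):
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.int32)
>>> index = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int32)
>>> output = ops.GatherD()(x, 1, index)
>>> print(output)
[[1 1]
[4 3]]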
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, dim, index):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_gather_d(self, [x, dim, index]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x, dim, index)
return res
gather_d_op=GatherD()
class Div(Primitive):
r"""
Computes the quotient of dividing the first input tensor by the second input tensor element-wise.
Refer to :func:`mindspore.ops.div` for more details.
Note:
- One of the two inputs must be a Tensor, when the two inputs have different shapes,
they must be able to broadcast to a common shape.
- The two inputs can not be bool type at the same time,
[True, Tensor(True, bool\_), Tensor(np.array([True]), bool\_)] are all considered bool type.
- The two inputs comply with the implicit type conversion rules to make the data types
consistent.
Inputs:
- **x** (Union[Tensor, number.Number, bool]) - The first input is a number.Number or
a bool or a tensor whose data type is
`number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
`bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
- **y** (Union[Tensor, number.Number, bool]) - The second input is a number.Number or
a bool or a tensor whose data type is
`number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
`bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
Outputs:
Tensor, the shape is the same as the one of the input `x` , `y` after broadcasting,
and the data type is the one with higher precision or higher digits among the two inputs.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> # case 1 :has same data type and shape of the two inputs
>>> x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.float32)
>>> y = Tensor(np.array([3.0, 2.0, 3.0]), mindspore.float32)
>>> div = ops.Div()
>>> output = div(x, y)
>>> print(output)
[-1.3333334 2.5 2. ]
>>> # case 2 : different data type and shape of the two inputs
>>> x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.float32)
>>> y = Tensor(2, mindspore.int32)
>>> output = div(x, y)
>>> print(output)
[-2. 2.5 3.]
>>> print(output.dtype)
Float32
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_div(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
div_op=Div()
class FFTFreq(Primitive):
r"""
.. code-block::
prim = ops.FFTFreq()
out = prim(n, d, dtype)
is equivalent to
.. code-block::
ops.fftfreq(n, d, dtype)
Refer to :func:`mindspore.ops.fftfreq` for more details.
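An illustrative sketch (sample frequencies for window length `n` and sample spacing `d`):
Examples:
>>> from mindspore import ops
>>> output = ops.FFTFreq()(4, 0.25)
>>> print(output)
[ 0.  1. -2. -1.]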
"""
__mindspore_signature__ = (
sig.make_sig('n'),
sig.make_sig('d', default=1.0),
sig.make_sig('dtype', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, n, d=1.0, dtype=None):
return super().__call__(n, d, dtype if dtype is None else dtype_to_type_id('FFTFreq', 'dtype', dtype))
fftfreq_op=FFTFreq()
class AddScalar(Primitive):
r"""
.. code-block::
prim = ops.AddScalar()
out = prim(input, other, alpha)
is equivalent to
.. code-block::
ops.add_scalar(input, other, alpha)
Refer to :func:`mindspore.ops.add_scalar` for more details.
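An illustrative sketch, assuming the common scaled-add semantics ``input + alpha * other``:
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
>>> output = ops.AddScalar()(input, 2, 3)
>>> print(output)
[7. 8. 9.]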
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('other'),
sig.make_sig('alpha', default=1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other, alpha=1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_add_scalar(self, [input, other, alpha]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other, alpha)
return res
add_scalar_op=AddScalar()
class SpeedFusionAttention(Primitive):
r"""
The interface is used for self-attention fusion computing.
If `pse_type` is ``1`` , calculation formula is:
.. math::
attention\_out = Dropout(Softmax(Mask(scale * (pse + query * key^{T}), atten\_mask)), keep\_prob) * value
If `pse_type` is other valid value, calculation formula is:
.. math::
attention\_out = Dropout(Softmax(Mask(scale * (query * key^{T}) + pse, atten\_mask)), keep\_prob) * value
- B: Batch size. Value range 1 to 2k.
- S1: Sequence length of query. Value range 1 to 512k.
- S2: Sequence length of key and value. Value range 1 to 512k.
- N1: Num heads of query. Value range 1 to 256.
- N2: Num heads of key and value, and N2 must be a factor of N1.
- D: Head size. The value must be a multiple of 16, with a maximum value of 512.
- H1: Hidden size of query, which equals to N1 * D.
- H2: Hidden size of key and value, which equals to N2 * D.
.. warning::
- This is an experimental API that is subject to change or deletion.
- Only support on Atlas A2 training series.
Note:
This interface is not supported in `graph mode (mode=mindspore.GRAPH_MODE)
<https://www.mindspore.cn/docs/en/master/model_train/program_form/static_graph.html>`_.
Args:
query (Tensor): The query tensor. Input tensor of shape :math:`(B, S1, H1)`,
:math:`(B, N1, S1, D)`, :math:`(S1, B, H1)`, :math:`(B, S1, N1, D)` or :math:`(T1, N1, D)`.
key (Tensor): The key tensor. Input tensor of shape :math:`(B, S2, H2)`,
:math:`(B, N2, S2, D)`, :math:`(S2, B, H2)`, :math:`(B, S2, N2, D)` or :math:`(T2, N2, D)`.
value (Tensor): The value tensor. Input tensor of shape :math:`(B, S2, H2)`,
:math:`(B, N2, S2, D)`, :math:`(S2, B, H2)`, :math:`(B, S2, N2, D)` or :math:`(T2, N2, D)`.
The `key` and `value` should have the same shape.
head_num (int): The head num of query, equal to N1.
input_layout (str): Specifies the layout of input `query`, `key` and `value`. The value can be ``"BSH"`` ,
``"BNSD"`` , ``"SBH"`` , ``"BSND"`` or ``"TND"`` . ``"TND"`` is an experimental format.
When `input_layout` is ``"TND"`` , the following restrictions must be met.
There are two lists that represent the length of the input sequence: list_seq_q and list_seq_k. Each
value in the list indicates the length of the sequence in the batch. For example, list_seq_q = [4, 2, 6],
list_seq_k = [10, 3, 9]. Each element of the lists indicates S. T1 is sum(list_seq_q) = 12, T2 is
sum(list_seq_k) = 22.
max_seqlen_q = max(list_seq_q), max_seqlen_k = max(list_seq_k).
qk_pointer = sum(list_seq_q * list_seq_k), which is the sum of the element multiplication.
- The lengths of two lists are the same, and size of list is batch. batch is less than or equal to 1024.
- When `input_layout` is ``"TND"`` , `actual_seq_qlen` and `actual_seq_kvlen` must be not ``None`` .
Otherwise, they are ``None`` .
- The `actual_seq_qlen` and `actual_seq_kvlen` are the cumulative sum of sequence of key/value, so they must
be non-decreasing.
- If `pse` is not ``None`` , list_seq_q and list_seq_k must be same. The maximum value of list_seq_q and
list_seq_k is greater than 1024. `pse` should be :math:`(B, N1, 1024, S2)` and
:math:`(1, N1, 1024, S2)`, and S2 is equal to max_seqlen_k.
- `atten_mask` must be a lower triangular matrix, so `sparse_mode` should be 2 or 3. The shape of
`atten_mask` should be :math:`(2048, 2048)`.
- Prefix is ``None`` .
- `next_tokens` is 0, and `pre_tokens` is not less than max_seqlen_q.
- When `sparse_mode` is 3, S1 of each batch should be less than or equal to S2.
- 0 should not exist in list_seq_k.
Keyword Args:
pse (Tensor, optional): The position embedding code, dtype is same as `query`. Default: ``None`` .
If S is greater than 1024 and the lower-triangle mask is used, pass in only the last 1024 rows of
the lower triangle for memory optimization. Input tensor of shape :math:`(B, N1, S1, S2)`,
:math:`(1, N1, S1, S2)`, :math:`(B, N1, 1024, S2)`, :math:`(1, N1, 1024, S2)`.
- ALiBi scenario: `pse` must meet the ALiBi rule, and `sparse_mode` is 2 or 3 for the lower triangle.
In this scenario, `pse` is :math:`(B, N1, 1024, S2)`, :math:`(1, N1, 1024, S2)`.
- Non-ALiBi scenario: `pse` is :math:`(B, N1, S1, S2)`, :math:`(1, N1, S1, S2)`.
- The shape of `pse` should be :math:`(B, N1, 1024, S2)` and :math:`(1, N1, 1024, S2)` when `input_layout`
is ``"TND"`` .
- If `pse_type` is 2 or 3, dtype of `pse` must be float32, and shape of `pse` should be :math:`(B, N1)` or
:math:`(N1,)`.
padding_mask (Tensor, optional): Reserved parameter. Not implemented yet. Default: ``None`` .
atten_mask (Tensor, optional): The attention mask tensor. For each element, 0/False indicates retention and 1/True
indicates discard. Input tensor of shape :math:`(B, N1, S1, S2)`, :math:`(B, 1, S1, S2)`, :math:`(S1, S2)`
or :math:`(2048, 2048)`. Default: ``None`` .
- In compression scenario, `sparse_mode` is 2, 3, or 4, `atten_mask` must be :math:`(2048, 2048)`.
- When `sparse_mode` is 5, `atten_mask` must be :math:`(B, N1, S1, S2)`, :math:`(B, 1, S1, S2)`.
- When `sparse_mode` is 0 and 1, `atten_mask` should be :math:`(B, N1, S1, S2)`, :math:`(B, 1, S1, S2)`,
:math:`(S1, S2)`.
scale (float, optional): The scale factor of score. Generally, the value is 1.0 / (D ** 0.5). Default: ``1.0`` .
keep_prob (float, optional): The keep probability of dropout. Value range is (0.0, 1.0]. Default: ``1.0`` .
pre_tokens (int, optional): Parameter for sparse computation, represents how many tokens are counted forward.
When `sparse_mode` is set to 1, 2, 3, or 5, this parameter does not take effect. Default: ``2147483647`` .
next_tokens (int, optional): Parameter for sparse computation, represents how many tokens are counted backward.
When `sparse_mode` is set to 1, 2, 3, or 5, this parameter does not take effect. Default: ``2147483647`` .
The value of pre_tokens corresponds to S1, and the value of next_tokens corresponds to S2. They define the
valid area on the `atten_mask` matrix. It must ensure that the band is not empty.
The following values are not allowed:
- pre_tokens < 0 and next_tokens < 0.
- (pre_tokens < 0 and next_tokens >= 0) and (next_tokens < abs(pre_tokens) or abs(pre_tokens) >= S2).
- (pre_tokens >= 0 and next_tokens < 0) and (abs(next_tokens) > pre_tokens or abs(next_tokens) >= S1).
inner_precise (int, optional): The parameter is reserved and not implemented yet. Default: ``0`` .
prefix (Union[tuple[int], list[int]], optional): N value of each Batch in the prefix sparse calculation
scenario. Input tensor of shape :math:`(B,)`. B max value 32. Not none only when sparse_mode is 5.
If S1 > S2, N ranges from 0 to S2. If S1 <= S2, N ranges from S2 - S1 to S2. Default: ``None`` .
actual_seq_qlen (Union[tuple[int], list[int]], optional): Size of query corresponding to each batch, array
with increasing values and the last value equal to T1. Default: ``None`` .
actual_seq_kvlen (Union[tuple[int], list[int]], optional): Size of key and value corresponding to each batch,
array with increasing values and the last value equal to T2. Default: ``None`` .
sparse_mode (int, optional): Indicates sparse mode. Default ``0`` .
- 0: Indicates the defaultMask mode. If `atten_mask` is not passed, the mask operation is not performed,
and preTokens and nextTokens (internally assigned as INT_MAX) are ignored. If passed in, the full
`atten_mask` matrix (S1 * S2) needs to be passed in, indicating that the part between preTokens and
nextTokens needs to be calculated.
- 1: Represents allMask, that is, passing in the complete `atten_mask` matrix.
- 2: Representing the leftUpCausal mode corresponds to the lower triangle scenario divided by the left
vertex, and the optimized `atten_mask` matrix (2048*2048) is required.
- 3: Representing the rightDownCausal model corresponds to the lower triangle scene divided by the lower
right vertex, and the optimized `atten_mask` matrix (2048*2048) is required.
- 4: Represents the band scenario, that is, the part between counting preTokens and nextTokens, and the
optimized `atten_mask` matrix (2048*2048) is required.
- 5: Represents the prefix scenario, that is, on the basis of rightDownCasual, a matrix with length S1 and
width N is added to the left side. The value of N is obtained by the new input prefix, and the N value
of each Batch axis is different. Currently not enabled.
- 6: Represents the global scenario. Currently not enabled.
- 7: Represents the dilated scenario. Currently not enabled.
- 8: Represents the block_local scenario. Currently not enabled.
gen_mask_parallel (bool, optional): Debug parameter, a switch to control dropout_gen_mask execution method.
If ``True`` , dropout_gen_mask is executed in parallel. If ``False`` , execution is serial.
Not implemented yet. Default: ``True`` .
sync (bool, optional): Debug parameter, a switch to control dropout_gen_mask execution method.
If ``True`` , dropout_gen_mask is executed synchronously. If ``False`` , execution is asynchronous.
Not implemented yet. Default: ``False`` .
pse_type (int, optional): Indicates how to use `pse`. Default ``1`` .
- 0: `pse` is passed from outside, and the calculation process is to first mul `scale` and then add `pse`.
- 1: `pse` is passed from outside, and the calculation process is to add `pse` first and then mul `scale`.
- 2: `pse` is generated internally and generates standard alibi position information. The internally
generated alibi matrix 0 line is aligned with the upper left corner of :math:`query * key^{T}`.
- 3: `pse` is generated internally, and the generated alibi position information is based on the standard
and then the square root of sqrt is done. The internally generated alibi matrix 0 line is aligned with
the upper left corner of :math:`query * key^{T}`.
q_start_idx (Union[tuple[int], list[int]], optional): Int array with length 1. Default: ``None`` .
When pse_type is configured as ``2`` or ``3`` , it indicates the number of cells that the internally
generated alibi code is offset in the S1 direction. A positive number indicates that 0 moves diagonally
upward.
kv_start_idx (Union[tuple[int], list[int]], optional): Int array with length 1. Default: ``None`` .
When pse_type is configured as ``2`` or ``3`` , it indicates the number of cells that the internally
generated alibi code is offset in the S2 direction. A positive number indicates that 0 moves diagonally
upward.
Returns:
A tuple of tensors containing `attention_out`, `softmax_max`, `softmax_sum`, `softmax_out`, `seed`, `offset`
and `numels` .
- `attention_out` is the output of attention, it's shape, and data type are the same as the query.
- `softmax_max` is the max intermediate result calculated by Softmax, used for grad calculation.
- `softmax_sum` is the sum intermediate result calculated by Softmax, used for grad calculation.
- `softmax_out` is a reserved parameter.
- `seed` is generated seed, used for Dropout.
- `offset` is generated offset, used for Dropout.
- `numels` is the length of generated dropout_mask.
Raises:
TypeError: `query`, `key` and `value` don't have the same dtype.
TypeError: Dtype of `atten_mask` is not bool or uint8.
TypeError: `scale` or `keep_prob` is not a float number.
TypeError: `input_layout` is not a string.
TypeError: `head_num` is not an int.
TypeError: `sparse_mode` is not an int.
TypeError: `pse` is not Tensor type.
TypeError: `padding_mask` is not Tensor type.
TypeError: `atten_mask` is not Tensor type.
TypeError: `pse_type` is not an int.
ValueError: `input_layout` is a string but not valid.
ValueError: The specified value of `sparse_mode` is invalid.
ValueError: The specified value of `pse_type` is invalid.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import mindspore.common.dtype as mstype
>>> import numpy as np
>>> from mindspore import ops, Tensor
>>> query = Tensor(np.ones([2, 4, 64]), dtype=mstype.float16)
>>> key = Tensor(np.ones([2, 4, 64]), dtype=mstype.float16)
>>> value = Tensor(np.ones([2, 4, 64]), dtype=mstype.float16)
>>> head_num = 4
>>> input_layout = "BSH"
>>> output = ops.speed_fusion_attention(query, key, value, head_num, input_layout)
>>> print(output[0].shape)
(2, 4, 64)
"""
__mindspore_signature__ = (
sig.make_sig('query'),
sig.make_sig('key'),
sig.make_sig('value'),
sig.make_sig('head_num'),
sig.make_sig('input_layout'),
sig.make_sig('seed'),
sig.make_sig('offset'),
sig.make_sig('pse', default=None),
sig.make_sig('padding_mask', default=None),
sig.make_sig('atten_mask', default=None),
sig.make_sig('scale', default=1.0),
sig.make_sig('keep_prob', default=1.0),
sig.make_sig('pre_tokens', default=2147483647),
sig.make_sig('next_tokens', default=2147483647),
sig.make_sig('inner_precise', default=0),
sig.make_sig('prefix', default=None),
sig.make_sig('actual_seq_qlen', default=None),
sig.make_sig('actual_seq_kvlen', default=None),
sig.make_sig('sparse_mode', default=0),
sig.make_sig('gen_mask_parallel', default=True),
sig.make_sig('sync', default=False),
sig.make_sig('pse_type', default=1),
sig.make_sig('q_start_idx', default=None),
sig.make_sig('kv_start_idx', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, query, key, value, head_num, input_layout, seed, offset, pse=None, padding_mask=None, atten_mask=None, scale=1.0, keep_prob=1.0, pre_tokens=2147483647, next_tokens=2147483647, inner_precise=0, prefix=None, actual_seq_qlen=None, actual_seq_kvlen=None, sparse_mode=0, gen_mask_parallel=True, sync=False, pse_type=1, q_start_idx=None, kv_start_idx=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_speed_fusion_attention(self, [query, key, value, head_num, str_to_enum('SpeedFusionAttention', 'input_layout', input_layout), seed, offset, pse, padding_mask, atten_mask, scale, keep_prob, pre_tokens, next_tokens, inner_precise, prefix, actual_seq_qlen, actual_seq_kvlen, sparse_mode, gen_mask_parallel, sync, pse_type, q_start_idx, kv_start_idx]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, query, key, value, head_num, str_to_enum('SpeedFusionAttention', 'input_layout', input_layout), seed, offset, pse, padding_mask, atten_mask, scale, keep_prob, pre_tokens, next_tokens, inner_precise, prefix, actual_seq_qlen, actual_seq_kvlen, sparse_mode, gen_mask_parallel, sync, pse_type, q_start_idx, kv_start_idx)
return res
speed_fusion_attention_op=SpeedFusionAttention()
class ArgMaxWithValue(Primitive):
r"""
Calculates the maximum value along with the given axis for the input tensor, and returns the maximum values and
indices.
Note:
In auto_parallel and semi_auto_parallel mode, the first output index can not be used.
.. warning::
- If there are multiple maximum values, the index of the first maximum value is used.
- The value range of `axis` is [-dims, dims - 1]. "dims" is the dimension length of `input`.
Also see :func:`mindspore.ops.max`.
Args:
axis (int): The dimension to reduce. Default: ``0`` .
keep_dims (bool): Whether to keep the reduced dimension. If ``True`` , the output keeps the same number of
dimensions as the input; if ``False`` , the reduced dimension is removed. Default: ``False`` .
Inputs:
- **input** (Tensor) - The input tensor, can be any dimension. Set the shape of input tensor as
:math:`(input_1, input_2, ..., input_N)`.
Outputs:
tuple (Tensor), tuple of 2 tensors, containing the corresponding index and the maximum value of the input
tensor.
- **index** (Tensor) - The index for the maximum value of the input tensor, with dtype int64. If `keep_dims`
is ``True`` , the shape of output tensors is :math:`(input_1, input_2, ..., input_{axis-1}, 1, input_{axis+1}, ..., input_N)`.
Otherwise, the shape is :math:`(input_1, input_2, ..., input_{axis-1}, input_{axis+1}, ..., input_N)` .
- **values** (Tensor) - The maximum value of input tensor, with the same shape as `index`, and same dtype as `input`.
Raises:
TypeError: If `keep_dims` is not a bool.
TypeError: If `axis` is not an int.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
>>> index, output = ops.ArgMaxWithValue()(input_x)
>>> print(index, output)
3 0.7
>>> index, output = ops.ArgMaxWithValue(keep_dims=True)(input_x)
>>> print(index, output)
[3] [0.7]
"""
@prim_arg_register
def __init__(self, axis=0, keep_dims=False):
self._set_prim_arg("axis", axis)
self._set_prim_arg("keep_dims", keep_dims)
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_argmax_with_value(self, [input, self.axis, self.keep_dims]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, self.axis, self.keep_dims)
return res
class SoftmaxBackward(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('dout'),
sig.make_sig('out'),
sig.make_sig('dim', default=-1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, dout, out, dim=-1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_softmax_backward(self, [dout, out, dim]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, dout, out, dim)
return res
softmax_backward_op=SoftmaxBackward()
class FullLike(Primitive):
r"""
Return a Tensor of the same shape as `input` and filled with `fill_value`.
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
input (Tensor): Input Tensor and the output Tensor have the same shape as `input`.
fill_value (Number): Value to fill the returned tensor. Complex numbers are not supported for now.
Keyword Args:
dtype (mindspore.dtype, optional): The specified type of output tensor. `bool_` and `number` are supported,
for details, please refer to :class:`mindspore.dtype` . Default: ``None`` .
Returns:
Tensor.
Raises:
TypeError: If `input` is not a Tensor.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, mint
>>> input = Tensor([[0, 1], [2, 1]], dtype=mindspore.int32)
>>> output = mint.full_like(input, 1)
>>> print(output)
[[1 1]
[1 1]]
>>> input = Tensor([[0, 1, 1], [2, 1, 2], [1, 3, 4]], dtype=mindspore.int32)
>>> output = mint.full_like(input, 0, dtype=mindspore.float32)
>>> print(output)
[[0. 0. 0.]
[0. 0. 0.]
[0. 0. 0.]]
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('fill_value'),
sig.make_sig('dtype', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, fill_value, dtype=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_full_like(self, [input, fill_value, dtype if dtype is None else dtype_to_type_id('FullLike', 'dtype', dtype)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, fill_value, dtype if dtype is None else dtype_to_type_id('FullLike', 'dtype', dtype))
return res
full_like_op=FullLike()
class ReLU(Primitive):
r"""
.. code-block::
prim = ops.ReLU()
out = prim(input)
is equivalent to
.. code-block::
ops.relu(input)
Refer to :func:`mindspore.ops.relu` for more details.
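A minimal illustrative example (negative entries are clamped to zero):
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([-1.0, 2.0, -3.0]), mindspore.float32)
>>> output = ops.ReLU()(input)
>>> print(output)
[0. 2. 0.]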
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_relu(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
relu_op=ReLU()
class InnerCommReduceScatter(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, rank_size, op_type, group):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inner_comm_reduce_scatter(self, [input, rank_size, op_type, group]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, rank_size, op_type, group)
return res
inner_comm_reduce_scatter_op=InnerCommReduceScatter()
class XLogYScalarOther(Primitive):
r"""
Computes the first input tensor multiplied by the natural logarithm of the second input (a scalar) element-wise.
Returns zero when `input` is zero.
.. math::
out_{i} = input_{i}\ln{(other)}
Inputs of `input` and `other` comply with the implicit type conversion rules to make the data types consistent.
Inputs:
- **input** (Tensor) - The first input is a tensor.
- **other** (number) - The second input is a number.
Outputs:
- **y** (Tensor) - the shape is the same as the first input,
and the data type is the one with higher precision or higher digits among the two inputs.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If `other` is not a number.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> from mindspore.ops.auto_generate import XLogYScalarOther
>>> input = Tensor(np.array([-5, 0, 4]), mindspore.float32)
>>> other = 2
>>> op = XLogYScalarOther()
>>> output = op(input, other)
>>> print(output)
[-3.465736 0. 2.7725887]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_xlogy_scalar_other(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
xlogy_scalar_other_op=XLogYScalarOther()
class Sub(Primitive):
r"""
.. code-block::
prim = ops.Sub()
out = prim(input, other)
is equivalent to
.. code-block::
ops.sub(input, other)
Refer to :func:`mindspore.ops.sub` for more details.
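A small illustrative example of the element-wise subtraction:
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
>>> other = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
>>> output = ops.Sub()(input, other)
>>> print(output)
[3. 3. 3.]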
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_sub(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
sub_op=Sub()
class Flatten(Primitive):
r"""
Flattens a tensor without changing its batch size on the 0-th axis.
Refer to :func:`mindspore.ops.flatten` for more details.
Inputs:
- **input_x** (Tensor) - Tensor of shape :math:`(N, \ldots)` to be flattened, where :math:`N` is batch size.
Outputs:
Tensor, the shape of the output tensor is :math:`(N, X)`, where :math:`X` is
the product of the remaining dimensions.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.ones(shape=[1, 2, 3, 4]), mindspore.float32)
>>> flatten = ops.Flatten()
>>> output = flatten(input_x)
>>> print(output.shape)
(1, 24)
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input_x):
return super().__call__(input_x)
flatten_op=Flatten()
class MaximumGradGrad(Primitive):
r"""
Grad for maximum grad.
"""
@prim_arg_register
def __init__(self, grad_x=True, grad_y=True):
self._set_prim_arg("grad_x", grad_x)
self._set_prim_arg("grad_y", grad_y)
def __call__(self, x, y, dx, dy):
return super().__call__(x, y, dx, dy, self.grad_x, self.grad_y)
class Complex(Primitive):
r"""
Returns a complex Tensor from the real part and the imag part.
.. warning::
This is an experimental API that is subject to change or deletion.
Inputs:
- **real** (Tensor) - The real input tensor. types: float32, float64.
- **imag** (Tensor) - The imag input tensor. types: float32, float64.
Outputs:
Tensor, has the complex type.
Raises:
TypeError: If the dtype of input is not one of: float32, float64.
TypeError: If the dtypes of two inputs are not same.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> real = Tensor(np.array([1]), mindspore.float32)
>>> imag = Tensor(np.array([2]), mindspore.float32)
>>> complex = ops.Complex()
>>> output = complex(real, imag)
>>> print(output)
[1.+2.j]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, real, imag):
return super().__call__(real, imag)
complex_op=Complex()
class Chunk(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('chunks'),
sig.make_sig('dim', default=0),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, chunks, dim=0):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_chunk(self, [input, chunks, dim]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, chunks, dim)
return res
chunk_op=Chunk()
class Floor(Primitive):
r"""
.. code-block::
prim = ops.Floor()
out = prim(input)
is equivalent to
.. code-block::
ops.floor(input)
Refer to :func:`mindspore.ops.floor` for more details.
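A minimal illustrative example (each element is rounded down to the nearest integer):
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([1.1, 2.5, -1.5]), mindspore.float32)
>>> output = ops.Floor()(input)
>>> print(output)
[ 1.  2. -2.]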
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_floor(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
floor_op=Floor()
class ReflectionPad2D(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, padding):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_reflection_pad_2d(self, [input, padding]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, padding)
return res
reflection_pad_2d_op=ReflectionPad2D()
class SilentCheckV2(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('val'),
sig.make_sig('input_grad'),
sig.make_sig('sfda'),
sig.make_sig('step'),
sig.make_sig('c_min_steps', default=7),
sig.make_sig('c_thresh_l1', default=1000000.0),
sig.make_sig('c_coeff_l1', default=100000.0),
sig.make_sig('c_thresh_l2', default=10000.0),
sig.make_sig('c_coeff_l2', default=5000.0),
sig.make_sig('npu_asd_detect', default=1),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, val, input_grad, sfda, step, c_min_steps=7, c_thresh_l1=1000000.0, c_coeff_l1=100000.0, c_thresh_l2=10000.0, c_coeff_l2=5000.0, npu_asd_detect=1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_silent_check_v2(self, [val, input_grad, sfda, step, c_min_steps, c_thresh_l1, c_coeff_l1, c_thresh_l2, c_coeff_l2, npu_asd_detect]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, val, input_grad, sfda, step, c_min_steps, c_thresh_l1, c_coeff_l1, c_thresh_l2, c_coeff_l2, npu_asd_detect)
return res
silent_check_v2_op=SilentCheckV2()
class GreaterEqual(Primitive):
r"""
.. code-block::
prim = ops.GreaterEqual()
out = prim(input, other)
is equivalent to
.. code-block::
ops.greater_equal(input, other)
Refer to :func:`mindspore.ops.greater_equal` for more details.
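An illustrative example of the element-wise ``>=`` comparison:
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> other = Tensor(np.array([1, 1, 4]), mindspore.int32)
>>> output = ops.GreaterEqual()(input, other)
>>> print(output)
[ True  True False]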
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_greater_equal(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
greater_equal_op=GreaterEqual()
class VarMean(Primitive):
r"""
By default, return the variance and mean of each dimension in Tensor.
If dim is a dimension list, calculate the variance and mean of the corresponding dimension.
The variance (:math:`\sigma ^2`) is calculated as:
.. math::
\sigma ^2 = \frac{1}{N - \delta N} \sum_{j=0}^{N-1} \left(x_{ij} - \overline{x_{i}}\right)^{2}
where :math:`x` is the sample set of elements, :math:`\bar{x}` is the sample mean,
:math:`N` is the number of samples and :math:`\delta N` is the `correction` .
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
input (Tensor): The input tensor. Supported dtypes: float16, float32.
dim (Union[int, tuple(int), list(int)], optional):
Specify the dimensions for calculating variance and mean. Default value: ``None``.
Keyword Args:
correction (int, optional): Difference between the sample size and sample degrees of freedom.
Defaults to Bessel's correction. Default: ``1``.
keepdim (bool, optional): Whether to preserve the dimensions of the output Tensor.
If True, retain the reduced dimension with a size of 1. Otherwise, remove the dimensions.
Default value: ``False``.
Returns:
A tuple of variance and mean.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If `dim` is not one of the following data types: int, tuple, list, or Tensor.
TypeError: If `keepdim` is not a bool.
ValueError: If `dim` is out of range.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore as ms
>>> input = ms.Tensor([[1, 2, 3, 4], [-1, 1, 4, -10]], ms.float32)
>>> output_var, output_mean = ms.mint.var_mean(input, 1, correction=2, keepdim=True)
>>> print(output_var)
[[ 2.5]
[54.5]]
>>> print(output_mean)
[[ 2.5]
[-1.5]]
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('dim', default=None),
sig.make_sig('correction', default=1),
sig.make_sig('keepdim', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim=None, correction=1, keepdim=False):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_var_mean(self, [input, dim, correction, keepdim]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim, correction, keepdim)
return res
var_mean_op=VarMean()
class IsNegInf(Primitive):
r"""
.. code-block::
prim = ops.IsNegInf()
out = prim(input)
is equivalent to
.. code-block::
ops.isneginf_ext(input)
Refer to :func:`mindspore.ops.isneginf_ext` for more details.
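An illustrative sketch (only negative infinity maps to ``True``):
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([-np.inf, 1.0, np.inf]), mindspore.float32)
>>> output = ops.IsNegInf()(input)
>>> print(output)
[ True False False]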
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_isneginf(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
isneginf_op=IsNegInf()
class DistCommIrecv(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, tag, src, group):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_dist_comm_irecv(self, [input, tag, src, group]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, tag, src, group)
return res
dist_comm_irecv_op=DistCommIrecv()
class Mm(Primitive):
r"""
.. code-block::
prim = ops.Mm()
out = prim(input, mat2)
is equivalent to
.. code-block::
ops.mm_ext(input, mat2)
Refer to :func:`mindspore.ops.mm_ext` for more details.
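An illustrative sketch of a plain 2-D matrix multiplication:
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.ones((2, 3)), mindspore.float32)
>>> mat2 = Tensor(np.ones((3, 2)), mindspore.float32)
>>> output = ops.Mm()(input, mat2)
>>> print(output)
[[3. 3.]
[3. 3.]]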
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, mat2):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_mm_ext(self, [input, mat2]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, mat2)
return res
mm_ext_op=Mm()
class PagedAttentionMask(Primitive):
r"""
The PagedAttentionMask is the fusion of block-wise KV Cache access and self-attention (with alibi-mask) computing.
Args:
query (Tensor): The query tensor with data type of float16.
:math:`(num\_tokens, num\_head, head\_dim)`.
key_cache (Tensor): The cache tensor with data type of float16.
:math:`(num\_blocks, block\_size, num\_head, head\_dim)`.
value_cache (Tensor): The cache tensor with data type of float16.
:math:`(num\_blocks, block\_size, num\_head, head\_dim)`.
block_tables (Tensor): The block mapping table with data type of int32.
:math:`(num\_tokens, max\_num\_blocks\_per\_batch)`.
context_lens (Tensor): The context length of each sequence with data type of int32.
:math:`(num\_tokens,)`.
antiquant_scale (Tensor): The antiquant scale of key_cache and value_cache
with data type of float16. key_cache and value_cache will be the type of int8.
:math:`(2, num\_head * head\_dim,)`.
antiquant_offset (Tensor): The antiquant offset of key_cache and value_cache
with data type of float16. key_cache and value_cache will be the type of int8.
:math:`(2, num\_head * head\_dim,)`.
alibi_mask (Tensor): The bias after q @ k_t / (head_dim) ** 0.5 with data type of query.
:math:`(num\_tokens, num\_head, q\_len, kv\_len)`.
attn_mask (Tensor): The mask after alibi_mask with data type of float16.
:math:`(num\_tokens, q\_len, kv\_len)`.
Outputs:
attention output.
Note:
There is no backend implementation in MindSpore; this operator is only used to export MindIR and run in MindSpore Lite.
Examples:
>>> import math
>>> import numpy as np
>>> from mindspore import Tensor, Parameter
>>> from mindspore.ops.operations import _inner_ops
>>> num_tokens = 4
>>> num_head = 40
>>> num_kv_head = 40
>>> head_dim = 128
>>> block_size = 16
>>> num_blocks = 128
>>> max_seq = 1024
>>> max_num_blocks_per_batch = max_seq // block_size
>>> scale_value = 1.0 / math.sqrt(head_dim)
>>> query = Tensor(np.random.randn(num_tokens, num_head, head_dim).astype(np.float16))
>>> key_cache = Parameter(default_input=Tensor(np.random.randn(num_blocks, block_size, num_head, head_dim).astype(np.float16)))
>>> value_cache = Parameter(default_input=Tensor(np.random.randn(num_blocks, block_size, num_head, head_dim).astype(np.float16)))
>>> dummy_block_indice = np.arange(num_tokens * max_num_blocks_per_batch, dtype=np.int32)
>>> np.random.shuffle(dummy_block_indice)
>>> block_tables = Tensor(np.reshape(dummy_block_indice, (num_tokens, max_num_blocks_per_batch)))
>>> context_lens = Tensor(np.random.randint(max_seq, size=num_tokens).astype(np.int32))
>>> alibi_mask = Tensor(np.random.randn(num_tokens, num_head, 1, max_seq).astype(np.float16))
>>> paged_attention_mask = _inner_ops.PagedAttentionMask(num_head, scale_value, num_kv_head)
>>> output = paged_attention_mask(query, key_cache, value_cache, block_tables, context_lens, alibi_mask=alibi_mask)
>>> print(output)
"""
__mindspore_signature__ = (
sig.make_sig('query'),
sig.make_sig('key_cache'),
sig.make_sig('value_cache'),
sig.make_sig('block_tables'),
sig.make_sig('context_lens'),
sig.make_sig('antiquant_scale', default=None),
sig.make_sig('antiquant_offset', default=None),
sig.make_sig('alibi_mask', default=None),
)
@prim_arg_register
def __init__(self, head_num, scale_value, kv_head_num, kv_cache_quant_mode='DEFAULT'):
self._set_prim_arg("head_num", head_num)
self._set_prim_arg("scale_value", scale_value)
self._set_prim_arg("kv_head_num", kv_head_num)
self._set_prim_arg_with_handler("kv_cache_quant_mode", kv_cache_quant_mode, str_to_enum)
def __call__(self, query, key_cache, value_cache, block_tables, context_lens, antiquant_scale=None, antiquant_offset=None, alibi_mask=None):
return super().__call__(query, key_cache, value_cache, block_tables, context_lens, antiquant_scale, antiquant_offset, alibi_mask, self.head_num, self.scale_value, self.kv_head_num, self.kv_cache_quant_mode)
class Conv2DExt(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('weight'),
sig.make_sig('bias', default=None),
sig.make_sig('stride', default=1),
sig.make_sig('padding', default=0),
sig.make_sig('dilation', default=1),
sig.make_sig('groups', default=1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_conv2d_ext(self, [input, weight, bias, stride, padding, dilation, groups]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, weight, bias, stride, padding, dilation, groups)
return res
conv2d_ext_op=Conv2DExt()
class Sin(Primitive):
r"""
.. code-block::
prim = ops.Sin()
out = prim(input)
is equivalent to
.. code-block::
ops.sin(input)
Refer to :func:`mindspore.ops.sin` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_sin(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
sin_op=Sin()
class MultiScaleDeformableAttnGrad(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, value, shape, offset, locations_trans, weight, grad_output):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_multi_scale_deformable_attn_grad(self, [value, shape, offset, locations_trans, weight, grad_output]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, value, shape, offset, locations_trans, weight, grad_output)
return res
multi_scale_deformable_attn_grad_op=MultiScaleDeformableAttnGrad()
class Eig(Primitive):
r"""
Computes the eigenvalues and eigenvectors of a square matrix (or a batch of square matrices).
Args:
compute_v (bool, optional): If ``True`` , compute both eigenvalues and eigenvectors;
If `False`, just eigenvalues will be computed. Default: ``False`` .
Inputs:
- **x** (Tensor) - Square matrices of shape :math:`(*, N, N)`, with float32, float64, complex64 or
complex128 data type.
Outputs:
- **eigen_values** (Tensor) - Shape :math:`(*, N)`. Each inner most vector represents eigenvalues of
the corresponding matrix. The eigenvalues may not have an order.
- **eigen_vectors** (Tensor) - If `compute_v` is `False`, it's an empty tensor. Otherwise, this tensor has
shape :math:`(*, N, N)`, whose columns represent normalized (unit length) eigenvectors of corresponding
eigenvalues.
Raises:
TypeError: If `compute_v` is not a bool.
TypeError: If dtype of `x` is not one of: float64, float32, complex64 or complex128.
TypeError: If `x` is not a Tensor.
ValueError: If `x` is not a square matrix (or a batch of square matrices).
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([[1.0, 0.0], [0.0, 2.0]]), mindspore.float32)
>>> u, v = ops.Eig(True)(x)
>>> print(u)
[1.+0.j 2.+0.j]
>>> print(v)
[[1.+0.j 0.+0.j]
[0.+0.j 1.+0.j]]
"""
@prim_arg_register
def __init__(self, compute_v=False):
self._set_prim_arg("compute_v", compute_v)
def __call__(self, x):
return super().__call__(x, self.compute_v)
class InplaceRandom(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('from_', default=0),
sig.make_sig('to', default=None),
sig.make_sig('seed', default=0),
sig.make_sig('offset', default=0),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, from_=0, to=None, seed=0, offset=0):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_random(self, [input, from_, to, seed, offset]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, from_, to, seed, offset)
return res
inplace_random_op=InplaceRandom()
class NonZero(Primitive):
r"""
Return a Tensor of the positions of all non-zero values.
Inputs:
- **input** (Tensor) - The input Tensor.
- Ascend: its rank can be equal to 0, except in O2 mode.
- CPU/GPU: its rank should be greater than or equal to 1.
Outputs:
Tensor, a 2-D Tensor whose data type is int64, containing the positions of all non-zero values of the input.
If the dimension of `input` is `D` and the number of non-zero elements in `input` is `N` , then the shape of output is :math:`(N, D)` .
Raises:
TypeError: If `input` is not Tensor.
RuntimeError: On GPU or CPU or Ascend O2 mode, if dim of `input` is equal to 0.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([1, 0, 2, 0, 3]), mindspore.int32)
>>> output = ops.NonZero()(input)
>>> print(output)
[[0]
[2]
[4]]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_non_zero(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
non_zero_op=NonZero()
class InplaceZero(Primitive):
r"""
.. code-block::
prim = ops.InplaceZero()
out = prim(input)
is equivalent to
.. code-block::
ops.zero_(input)
Refer to :func:`mindspore.ops.zero_` for more details.
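An illustrative sketch (the input is zeroed in place and the zeroed tensor is returned):
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
>>> output = ops.InplaceZero()(input)
>>> print(output)
[0. 0. 0.]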
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_zero(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
inplace_zero_op=InplaceZero()
class IFFT(Primitive):
r"""
.. code-block::
prim = ops.IFFT()
out = prim(input, n, dim, norm)
is equivalent to
.. code-block::
ops.ifft(input, n, dim, norm)
Refer to :func:`mindspore.ops.ifft` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('n', default=None),
sig.make_sig('dim', default=-1),
sig.make_sig('norm', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, n=None, dim=-1, norm=None):
return super().__call__(input, n, dim, norm if norm is None else str_to_enum('IFFT', 'norm', norm))
ifft_op=IFFT()
class ReluGrad(Primitive):
r"""
Computes gradient for the ReLU activation.
Args:
y_backprop (Tensor): Input gradients tensor, has the same dtype and shape as `x`.
x (Tensor): Origin input tensor.
Returns:
Tensor, has the same dtype and shape as `x`.
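An illustrative sketch, assuming the standard ReLU gradient (the incoming gradient passes through only where `x` is positive) and the import style used by other examples in this file:
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore.ops.auto_generate import ReluGrad
>>> x = Tensor(np.array([-1.0, 2.0, -3.0]), mindspore.float32)
>>> y_backprop = Tensor(np.array([1.0, 1.0, 1.0]), mindspore.float32)
>>> output = ReluGrad()(y_backprop, x)
>>> print(output)
[0. 1. 0.]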
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, y_backprop, x):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_relu_grad(self, [y_backprop, x]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, y_backprop, x)
return res
relu_grad_op=ReluGrad()
class BatchNormGatherStatsWithCounts(Primitive):
r"""
.. code-block::
prim = ops.BatchNormGatherStatsWithCounts()
out = prim(input, mean, invstd, running_mean, running_var, momentum, eps, counts)
is equivalent to
.. code-block::
ops.batch_norm_gather_stats_with_counts(input, mean, invstd, running_mean, running_var, momentum, eps, counts)
Refer to :func:`mindspore.ops.batch_norm_gather_stats_with_counts` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('mean'),
sig.make_sig('invstd'),
sig.make_sig('running_mean', sig.sig_rw.RW_WRITE, default=None),
sig.make_sig('running_var', sig.sig_rw.RW_WRITE, default=None),
sig.make_sig('momentum', default=1e-1),
sig.make_sig('eps', default=1e-5),
sig.make_sig('counts', default=None),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, mean, invstd, running_mean=None, running_var=None, momentum=1e-1, eps=1e-5, counts=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_batch_norm_gather_stats_with_counts(self, [input, mean, invstd, running_mean, running_var, momentum, eps, counts]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, mean, invstd, running_mean, running_var, momentum, eps, counts)
return res
batch_norm_gather_stats_with_counts_op=BatchNormGatherStatsWithCounts()
class Hardtanh(Primitive):
r"""
.. code-block::
prim = ops.Hardtanh()
out = prim(input, min_val, max_val)
is equivalent to
.. code-block::
ops.hardtanh(input, min_val, max_val)
Refer to :func:`mindspore.ops.hardtanh` for more details.
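A minimal illustrative example (values are clamped to ``[min_val, max_val]``):
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([-2.0, 0.0, 2.0]), mindspore.float32)
>>> output = ops.Hardtanh()(input, -1.0, 1.0)
>>> print(output)
[-1.  0.  1.]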
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('min_val', default=-1),
sig.make_sig('max_val', default=1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, min_val=-1, max_val=1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_hardtanh(self, [input, min_val, max_val]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, min_val, max_val)
return res
hardtanh_op=Hardtanh()
class InplaceDivMods(Primitive):
r"""
.. code-block::
prim = ops.InplaceDivMods()
out = prim(input, other, rounding_mode)
is equivalent to
.. code-block::
ops.divmod_scalar_(input, other, rounding_mode)
Refer to :func:`mindspore.ops.divmod_scalar_` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('other'),
sig.make_sig('rounding_mode', default=None),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, other, rounding_mode=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_divmods(self, [input, other, rounding_mode if rounding_mode is None else str_to_enum('InplaceDivMods', 'rounding_mode', rounding_mode)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other, rounding_mode if rounding_mode is None else str_to_enum('InplaceDivMods', 'rounding_mode', rounding_mode))
return res
inplace_divmods_op=InplaceDivMods()
class EmbeddingFeatureMappingFind(Primitive):
r"""
.. code-block::
prim = ops.EmbeddingFeatureMappingFind()
out = prim(table_name, feature_size, num)
is equivalent to
.. code-block::
ops.embedding_feature_mapping_find(table_name, feature_size, num)
Refer to :func:`mindspore.ops.embedding_feature_mapping_find` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('table_name'),
sig.make_sig('feature_size'),
sig.make_sig('num', default=1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, table_name, feature_size, num=1):
return super().__call__(table_name, feature_size, num)
embedding_feature_mapping_find_op=EmbeddingFeatureMappingFind()
class SequenceConcat(Primitive):
r"""
.. code-block::
prim = ops.SequenceConcat(axis)
out = prim(x)
is equivalent to
.. code-block::
ops.sequence_concat(x, axis)
Refer to :func:`mindspore.ops.sequence_concat` for more details.
"""
@prim_arg_register
def __init__(self, axis=0):
self._set_prim_arg("axis", axis)
def __call__(self, x):
return super().__call__(x, self.axis)
class ZerosLike(Primitive):
r"""
Returns a Tensor filled with the value 0, whose shape and data type are the same as the input.
Inputs:
- **input_x** (Tensor) - Input Tensor of any dimension.
Outputs:
Tensor, has the same shape and data type as `input_x` but filled with zeros.
Raises:
TypeError: If `input_x` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> zeroslike = ops.ZerosLike()
>>> input_x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
>>> output = zeroslike(input_x)
>>> print(output)
[[0. 0.]
[0. 0.]]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x):
return super().__call__(x)
zeros_like_op=ZerosLike()
class OneHot(Primitive):
r"""
Computes a one-hot tensor.
The locations represented by indices in `indices` take value `on_value`, while all
other locations take value `off_value`.
Note:
If the input indices is rank `N`, the output will have rank `N+1`. The new axis is created at dimension `axis`.
On Ascend, if `on_value` is Int64 dtype, `indices` must be Int64 dtype, and the value for `on_value` and
`off_value` can only be 1 and 0.
Args:
axis (int): Position to insert the value. e.g. If shape of `indices` is :math:`(N, C)`, and `axis` is -1,
the output shape will be :math:`(N, C, D)`, If `axis` is 0, the output shape will be :math:`(D, N, C)`.
Default: ``-1`` .
Inputs:
- **indices** (Tensor) - A tensor of indices. Tensor of shape :math:`(X_0, \ldots, X_n)`.
Data type must be int32 or int64.
- **depth** (Union[int, Tensor]) - A scalar defining the depth of the one-hot dimension.
- **on_value** (Tensor) - A value to fill in output when `indices[j] = i`.
- **off_value** (Tensor) - A value to fill in output when `indices[j] != i`.
It has the same data type as `on_value`.
Outputs:
Tensor, one-hot tensor. Tensor of shape :math:`(X_0, \ldots, X_{axis}, \text{depth} ,X_{axis+1}, \ldots, X_n)`.
Raises:
TypeError: If `axis` or `depth` is not an int.
TypeError: If dtype of `on_value` is not int32, int64, float16 or float32.
TypeError: If dtype of `indices` is not int32 or int64.
TypeError: If `indices`, `on_value` or `off_value` is not a Tensor.
ValueError: If `axis` is not in range [-1, len(indices_shape)].
ValueError: If `depth` is less than 0.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> indices = Tensor(np.array([0, 1, 2]), mindspore.int32)
>>> depth, on_value, off_value = 3, Tensor(1.0, mindspore.float32), Tensor(0.0, mindspore.float32)
>>> onehot = ops.OneHot()
>>> output = onehot(indices, depth, on_value, off_value)
>>> print(output)
[[1. 0. 0.]
[0. 1. 0.]
[0. 0. 1.]]
"""
@prim_arg_register
def __init__(self, axis=-1):
self._set_prim_arg("axis", axis)
def __call__(self, indices, depth, on_value, off_value):
return super().__call__(indices, depth, on_value, off_value, self.axis)
class Sigmoid(Primitive):
r"""
.. code-block::
prim = ops.Sigmoid()
out = prim(input)
is equivalent to
.. code-block::
ops.sigmoid(input)
Refer to :func:`mindspore.ops.sigmoid` for more details.
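A minimal usage sketch (assuming the standard logistic function :math:`1 / (1 + e^{-x})`, so sigmoid(0) = 0.5):
.. code-block::
import numpy as np
from mindspore import Tensor, ops
x = Tensor(np.array([-1.0, 0.0, 1.0], dtype=np.float32))
out = ops.Sigmoid()(x)
# expected result: approximately [0.269 0.5   0.731]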
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_sigmoid(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
sigmoid_op=Sigmoid()
class LinSpace(Primitive):
r"""
Returns a Tensor of `num` evenly spaced values in the interval from `start` to `stop` (including both `start`
and `stop`); the length of the output Tensor is `num`.
Refer to :func:`mindspore.ops.linspace` for more details.
Inputs:
- **start** (Tensor) - Start value of interval, 0-D Tensor with dtype float32 or float64.
- **stop** (Tensor) - Last value of interval, 0-D Tensor with dtype float32 or float64.
- **num** (Union[int, Tensor]) - Number of ticks in the interval, inclusive of `start` and `stop`.
Must be a positive integer. When the input is Tensor, it must be a 0-D Tensor with dtype int32 or int64.
Outputs:
Tensor, has the same shape and dtype as `start`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> start = Tensor(1, mindspore.float32)
>>> stop = Tensor(10, mindspore.float32)
>>> num = 5
>>> output = ops.LinSpace()(start, stop, num)
>>> print(output)
[ 1. 3.25 5.5 7.75 10. ]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, start, stop, num):
return super().__call__(start, stop, num)
lin_space_op=LinSpace()
class IsClose(Primitive):
r"""
Returns a tensor of Boolean values indicating whether each element of `input`
is "close" to the corresponding element of `other`. Closeness is defined as:
.. math::
|input-other| <= atol + rtol * |other|
Refer to :func:`mindspore.ops.isclose` for more details.
Args:
rtol(float, optional): Relative tolerance. Default: ``1e-05`` .
atol(float, optional): Absolute tolerance. Default: ``1e-08`` .
equal_nan(bool, optional): If ``True`` , then two NaNs will be considered equal. Default: ``False`` .
Inputs:
- **input** (Tensor) - First tensor to compare.
- **other** (Tensor) - Second tensor to compare.
Outputs:
Tensor, with the same shape as `input` and `other` after broadcasting, its dtype is bool.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore.ops import IsClose
>>> input = Tensor(np.array([1.3, 2.1, 3.2, 4.1, 5.1]), mindspore.float16)
>>> other = Tensor(np.array([1.3, 3.3, 2.3, 3.1, 5.1]), mindspore.float16)
>>> isclose = IsClose()
>>> output = isclose(input, other)
>>> print(output)
[ True False False False True]
"""
@prim_arg_register
def __init__(self, rtol=1e-05, atol=1e-08, equal_nan=False):
self._set_prim_arg("rtol", type_it('IsClose', 'rtol', rtol, (OpDtype.DT_BOOL, OpDtype.DT_INT), OpDtype.DT_FLOAT))
self._set_prim_arg("atol", type_it('IsClose', 'atol', atol, (OpDtype.DT_BOOL, OpDtype.DT_INT), OpDtype.DT_FLOAT))
self._set_prim_arg("equal_nan", equal_nan)
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_isclose(self, [input, other, self.rtol, self.atol, self.equal_nan]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other, self.rtol, self.atol, self.equal_nan)
return res
class DistCommAllReduce(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('op_type'),
sig.make_sig('group'),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, op_type, group):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_dist_comm_all_reduce(self, [input, op_type, group]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, op_type, group)
return res
dist_comm_all_reduce_op=DistCommAllReduce()
class ReshapeAndCache(Primitive):
r"""
.. code-block::
prim = ops.ReshapeAndCache()
out = prim(key, value, key_cache, value_cache, slot_mapping)
is equivalent to
.. code-block::
ops.reshape_and_cache(key, value, key_cache, value_cache, slot_mapping)
Refer to :func:`mindspore.ops.reshape_and_cache` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('key', dtype=sig.sig_dtype.T),
sig.make_sig('value', dtype=sig.sig_dtype.T),
sig.make_sig('key_cache', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('value_cache', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('slot_mapping', dtype=sig.sig_dtype.T1),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, key, value, key_cache, value_cache, slot_mapping):
return super().__call__(key, value, key_cache, value_cache, slot_mapping)
reshape_and_cache_op=ReshapeAndCache()
class RmsNorm(Primitive):
r"""
.. code-block::
prim = ops.RmsNorm(epsilon)
out = prim(x, gamma)
is equivalent to
.. code-block::
ops.rms_norm(x, gamma, epsilon)
Refer to :func:`mindspore.ops.rms_norm` for more details.
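A numpy reference sketch of the normalization this primitive performs (assuming the standard RMSNorm definition :math:`y = \frac{x}{\sqrt{mean(x^2) + \epsilon}} \cdot \gamma`; the primitive may also return auxiliary statistics, see :func:`mindspore.ops.rms_norm`):
.. code-block::
import numpy as np
x = np.array([[1.0, 2.0, 3.0]], dtype=np.float32)
gamma = np.ones(3, dtype=np.float32)
eps = 1e-6
rms = np.sqrt(np.mean(x * x, axis=-1, keepdims=True) + eps)
y = x / rms * gamma
# y is approximately [[0.4629 0.9258 1.3887]]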
"""
@prim_arg_register
def __init__(self, epsilon=1e-6):
self._set_prim_arg("epsilon", epsilon)
def __call__(self, x, gamma):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_rms_norm(self, [x, gamma, self.epsilon]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x, gamma, self.epsilon)
return res
class GridSampler3D(Primitive):
r"""
Given an input and a grid, the output is calculated using the input values
and pixel positions in the grid. Only volume (5-D) input is supported.
.. warning::
This is an experimental API that is subject to change or deletion.
Refer to :func:`mindspore.ops.grid_sample` for more details.
Args:
interpolation_mode (str, optional): An optional string specifying the interpolation method.
The optional values are ``"bilinear"`` or ``"nearest"`` . Default: ``"bilinear"`` .
- ``"nearest"``: Nearest neighbor interpolation. Each output pixel is assigned the value of the
nearest input pixel. This method is simple and fast but can result in blocky or pixelated outputs.
- ``"bilinear"``: Bilinear interpolation. Each output pixel is a weighted average of the four nearest input
pixels, computed using bilinear interpolation. This method produces smoother results compared
to nearest neighbor interpolation.
padding_mode (str, optional): An optional string specifying the pad method.
The optional values are ``"zeros"`` , ``"border"`` or ``"reflection"`` . Default: ``"zeros"`` .
When the sampling grid is outside input's bounds, effects of various padding modes are as follows:
- ``"zeros"``: Pads the input tensor with zeros.
- ``"border"``: Pads the input tensor with the values of the pixels on the border of the tensor.
- ``"reflection"``: Pads the input tensor by reflecting the values of the pixels at the
boundary of the tensor.
align_corners (bool, optional): An optional bool specifying alignment method. If set to ``True`` ,
the extrema (-1 and 1) are considered as referring to
the center points of the input's corner pixels. If set to ``False`` , they are instead considered as
referring to the corner points of the input's corner pixels, making the sampling more resolution agnostic.
Default: ``False`` .
Inputs:
- **input_x** (Tensor) - A 5-D tensor with dtype of float16, float32 or float64
and shape of :math:`(N, C, D_{in}, H_{in}, W_{in})`.
- **grid** (Tensor) - A 5-D tensor whose dtype is the same as `input_x` and whose shape is :math:`(N, D_{out},
H_{out}, W_{out}, 3)`.
Outputs:
A 5-D Tensor whose dtype is the same as `input_x` and whose shape is :math:`(N, C, D_{out}, H_{out}, W_{out})`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> gridsampler = ops.GridSampler3D(interpolation_mode='bilinear', padding_mode='zeros', align_corners=True)
>>> input_x = Tensor(np.arange(32).reshape((2, 2, 2, 2, 2)).astype(np.float32))
>>> grid = Tensor(np.arange(-0.2, 1, 0.1).reshape((2, 2, 1, 1, 3)).astype(np.float32))
>>> output = gridsampler(input_x, grid)
>>> print(output)
[[[[[ 3.3 ]]
[[ 4.35 ]]]
[[[11.300001]]
[[12.349999]]]]
[[[[21.4 ]]
[[22.449999]]]
[[[29.4 ]]
[[30.449999]]]]]
"""
@prim_arg_register
def __init__(self, interpolation_mode='bilinear', padding_mode='zeros', align_corners=False):
self._set_prim_arg_with_handler("interpolation_mode", interpolation_mode, str_to_enum)
self._set_prim_arg_with_handler("padding_mode", padding_mode, str_to_enum)
self._set_prim_arg("align_corners", align_corners)
def __call__(self, input_x, grid):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_grid_sampler_3d(self, [input_x, grid, self.interpolation_mode, self.padding_mode, self.align_corners]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input_x, grid, self.interpolation_mode, self.padding_mode, self.align_corners)
return res
class InnerStridedSlice(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input_x, begin, end, strides):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inner_strided_slice(self, [input_x, begin, end, strides]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input_x, begin, end, strides)
return res
inner_strided_slice_op=InnerStridedSlice()
class InplacePut(Primitive):
r"""
.. code-block::
prim = ops.InplacePut()
out = prim(input, index, source, accumulate)
is equivalent to
.. code-block::
ops.put_(input, index, source, accumulate)
Refer to :func:`mindspore.ops.put_` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('index'),
sig.make_sig('source'),
sig.make_sig('accumulate', default=False),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, index, source, accumulate=False):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_put(self, [input, index, source, accumulate]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, index, source, accumulate)
return res
inplace_put_op=InplacePut()
class NLLLoss(Primitive):
r"""
Gets the negative log likelihood loss between logits and labels.
The nll loss with :math:`reduction = none` can be described as:
.. math::
\ell(x, t)=L=\left\{l_{1}, \ldots, l_{N}\right\}^{\top},
\quad l_{n}=-w_{t_{n}} x_{n, t_{n}},
\quad w_{c}=\text { weight }[c] \cdot 1
where :math:`x` is the logits, :math:`t` is the labels, :math:`w` is the weight,
:math:`N` is the batch size, :math:`c` belonging to [0, C-1] is class index,
where :math:`C` is the number of classes.
If :math:`reduction \neq none` (default ``'mean'`` ), then
.. math::
\ell(x, t)=\left\{\begin{array}{ll}
\sum_{n=1}^{N} \frac{1}{\sum_{n=1}^{N} w_{t n}} l_{n}, & \text { if reduction }=\text { 'mean'; } \\
\sum_{n=1}^{N} l_{n}, & \text { if reduction }=\text { 'sum' }
\end{array}\right.
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
``'sum'`` . Default: ``'mean'`` .
- ``'none'``: no reduction will be applied.
- ``'mean'``: compute and return the weighted mean of elements in the output.
- ``'sum'``: the output elements will be summed.
ignore_index (int, optional): Specifies a target value that is ignored
and does not contribute to the input gradient. Default: ``-100`` .
Inputs:
- **logits** (Tensor) - Input logits, with shape :math:`(N, C)`. Data type only supports float32 or float16
or bfloat16 (only supported by Atlas A2 training series products).
- **labels** (Tensor) - Ground truth labels, with shape :math:`(N,)`, where each value belong to
:math:`[0, C-1]`. Data type only supports uint8 or int32 or int64.
- **weight** (Tensor) - The rescaling weight to each class, with shape :math:`(C,)` and data type only
supports float32 or float16 or bfloat16 (only supported by Atlas A2 training series products). It should
have the same data type as `logits` .
Returns:
Tuple of 2 tensors composed with `loss` and `total_weight`.
- **loss** (Tensor) - When `reduction` is ``'none'`` and `logits` is a 2D tensor,
the `loss` shape is :math:`(N,)`. Otherwise, the `loss` is a scalar.
The data type is the same with that of `logits`.
- **total_weight** (Tensor) - The `total_weight` is a scalar. The data type is the same with that of `weight`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> logits = Tensor(np.array([[0.5488135, 0.71518934],
... [0.60276335, 0.5448832],
... [0.4236548, 0.6458941]]).astype(np.float32))
>>> labels = Tensor(np.array([0, 0, 0]).astype(np.int32))
>>> weight = Tensor(np.array([0.3834415, 0.79172504]).astype(np.float32))
>>> nll_loss = ops.NLLLoss(reduction="mean")
>>> loss, weight = nll_loss(logits, labels, weight)
>>> print(loss)
-0.52507716
>>> print(weight)
1.1503246
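A numpy cross-check of the ``'mean'`` reduction above (a sketch that only restates the weighted-mean formula from this docstring):
.. code-block::
import numpy as np
logits = np.array([[0.5488135, 0.71518934], [0.60276335, 0.5448832], [0.4236548, 0.6458941]], dtype=np.float32)
labels = np.array([0, 0, 0])
weight = np.array([0.3834415, 0.79172504], dtype=np.float32)
w = weight[labels]
losses = -w * logits[np.arange(3), labels]
# losses.sum() / w.sum() is approximately -0.525077 and w.sum() is approximately 1.150325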
"""
@prim_arg_register
def __init__(self, reduction='mean', ignore_index=-100):
self._set_prim_arg_with_handler("reduction", reduction, str_to_enum)
self._set_prim_arg("ignore_index", ignore_index)
def __call__(self, logits, labels, weight):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_nllloss(self, [logits, labels, weight, self.reduction, self.ignore_index]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, logits, labels, weight, self.reduction, self.ignore_index)
return res
class TExt(Primitive):
r"""
.. code-block::
prim = ops.TExt()
out = prim(input)
is equivalent to
.. code-block::
ops.t_ext(input)
Refer to :func:`mindspore.ops.t_ext` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_t_ext(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
t_ext_op=TExt()
class FFTShapeCopy(Primitive):
r"""
Truncate or zero-fill the gradient of an fft operation.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, dout, shape):
return super().__call__(dout, shape)
fft_shapecopy_op=FFTShapeCopy()
class BitwiseOrTensor(Primitive):
r"""
Returns bitwise `or` of two tensors element-wise.
Inputs:
- **input** (Tensor) - The input tensor, which must be of an integral or Boolean type.
- **other** (Tensor) - The second input tensor, with the same type as `input`.
Outputs:
Tensor, has the same type as the `input`.
Supported Platforms:
``Ascend``
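A minimal usage sketch (element-wise integer OR, e.g. 1 | 4 = 5; importing the class from ``mindspore.ops.auto_generate`` is assumed, following the other examples in this file):
.. code-block::
import numpy as np
import mindspore
from mindspore import Tensor
from mindspore.ops.auto_generate import BitwiseOrTensor
input = Tensor(np.array([1, 2, 3]), mindspore.int32)
other = Tensor(np.array([4, 5, 6]), mindspore.int32)
out = BitwiseOrTensor()(input, other)
# expected result: [5 7 7]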
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_bitwise_or_tensor(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
bitwise_or_tensor_op=BitwiseOrTensor()
class Betainc(Primitive):
r"""
Calculates the regularized incomplete beta function
:math:`I_{x}(a, b)`. It is defined as the ratio of the incomplete beta function
to the complete beta function:
.. math::
I_{x}(a, b)=\frac{B(x ; a, b)}{B(a, b)}
where
.. math::
B(x ; a, b)=\int_{0}^{x} t^{a-1}(1-t)^{b-1} dt
is the incomplete beta function and
.. math::
B(a, b) = \int_0^1 t^{a-1} (1-t)^{b-1} dt
is the complete beta function.
Inputs:
- **a** (Tensor) - Peak location of beta distribution.
A Tensor of types: float32, float64.
- **b** (Tensor) - Spread of the beta distribution.
A Tensor, must have the same dtype and shape as `a` .
- **x** (Tensor) - Upper limit of integration of the incomplete beta function.
A Tensor, must have the same dtype and shape as `a` .
Outputs:
A Tensor, has the same dtype and shape as `a` .
Raises:
TypeError: If dtype of `a` is neither float32 nor float64.
TypeError: If the dtype of `b` or `x` is not the same as that of `a`.
ValueError: If the shape of `b` or `x` is not the same as that of `a`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> a = Tensor(np.array([0.3, 0.1, 0.4]), mindspore.float32)
>>> b = Tensor(np.array([0.4, 0.5, 0.9]), mindspore.float32)
>>> x = Tensor(np.array([0.2, 0.6, 0.5]), mindspore.float32)
>>> betainc = ops.Betainc()
>>> print(betainc(a, b, x))
[0.41462693 0.8706035 0.7298298 ]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, a, b, x):
return super().__call__(a, b, x)
betainc_op=Betainc()
class NewZeros(Primitive):
r"""
Return a tensor of shape `size` filled with zeros.
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
size (Union[int, tuple(int), list(int)]): An int, list or tuple of integers defining the output shape.
dtype (:class:`mindspore.dtype`, optional): The desired dtype of the output tensor. If None, the returned
tensor has the same dtype as `input`. Default: ``None``.
Inputs:
- **input** (Tensor) - Tensor of any dimension.
Outputs:
Tensor, the shape and dtype is defined above and filled with zeros.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If `size` is neither an int nor a tuple/list of int.
TypeError: If `dtype` is not a MindSpore dtype.
ValueError: If `size` contains negative values.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> x = Tensor((), mindspore.int32)
>>> ops.auto_generate.NewZeros()(x, (2, 3))
Tensor(shape=[2, 3], dtype=Int32, value=
[[0, 0, 0],
[0, 0, 0]])
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('size'),
sig.make_sig('dtype', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, size, dtype=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_new_zeros(self, [input, size, dtype if dtype is None else dtype_to_type_id('NewZeros', 'dtype', dtype)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, size, dtype if dtype is None else dtype_to_type_id('NewZeros', 'dtype', dtype))
return res
new_zeros_op=NewZeros()
class ResizeNearestNeighbor(Primitive):
r"""
Resizes the input tensor to a given size by using the nearest neighbor algorithm. The nearest
neighbor algorithm selects the value of the nearest point and does not consider the
values of neighboring points at all, yielding a piecewise-constant interpolant.
Args:
size (Union[tuple, list]): The target size. The dimension of size must be 2.
align_corners (bool): Whether the centers of the 4 corner pixels of the input and output tensors are aligned.
Default: ``False`` .
half_pixel_centers (bool, optional): Whether half pixel center. If set to ``True`` ,
`align_corners` should be False. Default: ``False`` .
Inputs:
- **input_x** (Tensor) - The input tensor. The shape of the tensor is :math:`(N, C, H, W)`.
Outputs:
Tensor, the shape of the output tensor is :math:`(N, C, NEW\_H, NEW\_W)`.
The data type is the same as the `input_x`.
Raises:
TypeError: If `size` is neither tuple nor list.
TypeError: If `align_corners` is not a bool.
ValueError: If length of `size` is not equal to 2.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input_tensor = Tensor(np.array([[[[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]]]), mindspore.float32)
>>> size = (2, 2)
>>> output = ops.ResizeNearestNeighbor(size=size)(input_tensor)
>>> print(output)
[[[[-0.1 0.3]
[ 0.4 0.5]]]]
"""
@prim_arg_register
def __init__(self, size, align_corners=False, half_pixel_centers=False):
self._set_prim_arg("size", type_it('ResizeNearestNeighbor', 'size', size, OpDtype.DT_LIST_INT, OpDtype.DT_TUPLE_INT))
self._set_prim_arg("align_corners", align_corners)
self._set_prim_arg("half_pixel_centers", half_pixel_centers)
def __call__(self, input_x):
return super().__call__(input_x, self.size, self.align_corners, self.half_pixel_centers)
class UpsampleNearest1D(Primitive):
r"""
Performs nearest neighbor upsampling operation.
This operator scales up the input with the specified `output_size` or `scales` factors, using the nearest
neighbor algorithm.
Exactly one of `output_size` or `scales` must be given; they cannot both be specified at the same time.
Inputs:
- **x** (Tensor) - 3D tensor of shape :math:`(N, C, L_{in})`.
Supporting types: [uint8, float16, float32, float64].
- **output_size** (Union[tuple[int], list[int]]): A tuple or list of int specifying the output volumetric size.
Default: ``None``.
- **scales** (Union[tuple[float], list[float]]): A tuple or list of float specifying the upsampling factors.
Default: ``None``.
Outputs:
- **y** (Tensor) - Upsampled output with the same type as `x` , whose shape is
:math:`(N, C, L_{out})`.
Raises:
TypeError: When `output_size` is not ``None`` and `output_size` is not list[int] or tuple[int].
TypeError: When `scales` is not ``None`` and `scales` is not list[float] or tuple[float].
TypeError: If dtype of `x` is not one of [uint8, float16, float32, float64].
ValueError: If any value of `output_size` is negative or zero when `output_size` is not ``None``.
ValueError: If any value of `scales` is negative or zero when `scales` is not ``None``.
ValueError: If shape of `x` is not 3D.
ValueError: If neither `scales` nor `output_size` is specified, or both are specified.
ValueError: If size of `scales` is not equal to 1 when `scales` is specified.
ValueError: If size of `output_size` is not equal to 1 when `output_size` is specified.
Supported Platforms:
``Ascend``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> from mindspore import dtype as mstype
>>> x = Tensor(np.arange(10).reshape(1, 2, 5), mstype.float32)
>>> output_size = [8,]
>>> net = ops.auto_generate.UpsampleNearest1D()
>>> output = net(x, output_size, None)
>>> print(output)
[[[0., 0., 1., 1., 2., 3., 3., 4.],
[5., 5., 6., 6., 7., 8., 8., 9.]]]
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('output_size', default=None),
sig.make_sig('scales', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, output_size=None, scales=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_upsample_nearest1d(self, [x, output_size, scales]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x, output_size, scales)
return res
upsample_nearest1d_op=UpsampleNearest1D()
class BiasAdd(Primitive):
r"""
Returns the sum of the input Tensor and the bias Tensor. Before adding, the bias Tensor will be broadcasted to be
consistent with the shape of the input Tensor.
Args:
data_format (str, optional): The format of input and output data.
It should be ``"NHWC"`` , ``"NCHW"`` or ``"NCDHW"`` .
Default is ``"NCHW"`` .
Inputs:
- **input_x** (Tensor) - The input tensor. The shape can be 2-5 dimensions. Supported dtypes:
- Ascend/CPU: all Number type.
- GPU: float16, float32, int8.
- **bias** (Tensor) - The bias tensor, with shape :math:`(C)`. C must be the same as channel dimension C of
`input_x`. It has the same type as `input_x`.
Outputs:
Tensor, with the same shape and data type as `input_x`.
Raises:
TypeError: If `data_format` is not a str.
ValueError: If value of `data_format` is not in the range of ['NHWC','NCHW','NCDHW'].
TypeError: If `input_x` or `bias` is not a Tensor.
TypeError: If dtype of `input_x` or `bias` is inconsistent.
TypeError: If dimension of `input_x` is not in the range [2, 5].
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.arange(6).reshape((2, 3)), mindspore.float32)
>>> bias = Tensor(np.random.random(3).reshape((3,)), mindspore.float32)
>>> bias_add = ops.BiasAdd()
>>> output = bias_add(input_x, bias)
>>> print(output.shape)
(2, 3)
"""
@prim_arg_register
def __init__(self, data_format='NCHW'):
self._set_prim_arg_with_handler("data_format", data_format, str_to_enum)
def __call__(self, input_x, bias):
return super().__call__(input_x, bias, self.data_format)
class MatmulReduceScatter(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('x2'),
sig.make_sig('group'),
sig.make_sig('world_size'),
sig.make_sig('reduce_op', default='sum'),
sig.make_sig('bias', default=None),
sig.make_sig('comm_turn', default=0),
sig.make_sig('trans_input', default=False),
sig.make_sig('trans_x2', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, x2, group, world_size, reduce_op='sum', bias=None, comm_turn=0, trans_input=False, trans_x2=False):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_matmul_reduce_scatter(self, [input, x2, group, world_size, str_to_enum('MatmulReduceScatter', 'reduce_op', reduce_op), bias, comm_turn, trans_input, trans_x2]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, x2, group, world_size, str_to_enum('MatmulReduceScatter', 'reduce_op', reduce_op), bias, comm_turn, trans_input, trans_x2)
return res
matmul_reduce_scatter_op=MatmulReduceScatter()
class InplaceAddExt(Primitive):
r"""
.. code-block::
prim = ops.InplaceAddExt()
out = prim(input, other, alpha)
is equivalent to
.. code-block::
ops.inplace_add_ext(input, other, alpha)
Refer to :func:`mindspore.ops.inplace_add_ext` for more details.
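A minimal usage sketch (assuming the usual in-place semantics ``input <- input + alpha * other``):
.. code-block::
import numpy as np
from mindspore import Tensor, ops
x = Tensor(np.array([1.0, 2.0], dtype=np.float32))
y = Tensor(np.array([10.0, 20.0], dtype=np.float32))
ops.InplaceAddExt()(x, y, 2)
# x is expected to become [21. 42.]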
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('other'),
sig.make_sig('alpha', default=1),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, other, alpha=1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_add_ext(self, [input, other, alpha]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other, alpha)
return res
inplace_add_ext_op=InplaceAddExt()
class EmbeddingApplyFtrl(Primitive):
r"""
.. code-block::
prim = ops.EmbeddingApplyFtrl()
out = prim(var_handle, lr, lr_power, lambda1, lambda2, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num)
is equivalent to
.. code-block::
ops.embedding_apply_ftrl(var_handle, lr, lr_power, lambda1, lambda2, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num)
Refer to :func:`mindspore.ops.embedding_apply_ftrl` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('var_handle'),
sig.make_sig('lr'),
sig.make_sig('lr_power'),
sig.make_sig('lambda1'),
sig.make_sig('lambda2'),
sig.make_sig('grad'),
sig.make_sig('keys'),
sig.make_sig('global_step'),
sig.make_sig('embedding_dim'),
sig.make_sig('mask_zero', default=(0,)),
sig.make_sig('padding_key', default=(0,)),
sig.make_sig('padding_key_mask', default=(1,)),
sig.make_sig('completion_key', default=(0,)),
sig.make_sig('completion_key_mask', default=(1,)),
sig.make_sig('_embedding_dim', default=1),
sig.make_sig('_max_key_num', default=1),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("_process_node_engine_id", 'PS')
def __call__(self, var_handle, lr, lr_power, lambda1, lambda2, grad, keys, global_step, embedding_dim, mask_zero=(0,), padding_key=(0,), padding_key_mask=(1,), completion_key=(0,), completion_key_mask=(1,), _embedding_dim=1, _max_key_num=1):
return super().__call__(var_handle, lr, lr_power, lambda1, lambda2, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num)
embedding_apply_ftrl_op=EmbeddingApplyFtrl()
class HFFT(Primitive):
r"""
.. code-block::
prim = ops.HFFT()
out = prim(input, n, dim, norm)
is equivalent to
.. code-block::
ops.hfft(input, n, dim, norm)
Refer to :func:`mindspore.ops.hfft` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('n', default=None),
sig.make_sig('dim', default=-1),
sig.make_sig('norm', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, n=None, dim=-1, norm=None):
return super().__call__(input, n, dim, norm if norm is None else str_to_enum('HFFT', 'norm', norm))
hfft_op=HFFT()
class TraceV2Grad(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('dout'),
sig.make_sig('shape'),
sig.make_sig('offset', default=0),
sig.make_sig('axis1', default=1),
sig.make_sig('axis2', default=0),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, dout, shape, offset=0, axis1=1, axis2=0):
return super().__call__(dout, shape, offset, axis1, axis2)
tracev2_grad_op=TraceV2Grad()
class TupleToTensor(Primitive):
r"""
.. code-block::
prim = ops.TupleToTensor()
out = prim(input_tuple, dtype)
is equivalent to
.. code-block::
ops.tuple_to_tensor(input_tuple, dtype)
Refer to :func:`mindspore.ops.tuple_to_tensor` for more details.
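A minimal usage sketch (assuming the primitive converts a tuple of numbers into a Tensor, with an optional target dtype):
.. code-block::
import mindspore
from mindspore import ops
out = ops.TupleToTensor()((1, 2, 3), mindspore.float32)
# expected: a 1-D Tensor with values [1. 2. 3.] and dtype Float32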
"""
__mindspore_signature__ = (
sig.make_sig('input_tuple'),
sig.make_sig('dtype', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input_tuple, dtype=None):
return super().__call__(input_tuple, dtype if dtype is None else dtype_to_type_id('TupleToTensor', 'dtype', dtype))
tuple_to_tensor_op=TupleToTensor()
class Softmax(Primitive):
r"""
Applies the Softmax operation to the input tensor on the specified axis.
Refer to :func:`mindspore.ops.softmax` for more details.
Args:
axis (Union[int, tuple], optional): The axis to perform the Softmax operation. Default: ``-1`` .
Inputs:
- **input** (Tensor) - Tensor of shape :math:`(N, *)`, where :math:`*` means, any number of
additional dimensions.
Outputs:
Tensor, with the same type and shape as the input.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
>>> softmax = ops.Softmax()
>>> output = softmax(input)
>>> print(output)
[0.01165623 0.03168492 0.08612854 0.23412167 0.6364086 ]
"""
@prim_arg_register
def __init__(self, axis=-1):
self._set_prim_arg("axis", type_it('Softmax', 'axis', axis, OpDtype.DT_INT, OpDtype.DT_TUPLE_INT))
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_softmax(self, [input, self.axis]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, self.axis)
return res
class UpsampleBicubic2D(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('output_size', default=None),
sig.make_sig('scales', default=None),
sig.make_sig('align_corners', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, output_size=None, scales=None, align_corners=False):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_upsample_bicubic2d(self, [x, output_size, scales, align_corners]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x, output_size, scales, align_corners)
return res
upsample_bicubic2d_op=UpsampleBicubic2D()
class InplaceUniform(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('from_'),
sig.make_sig('to'),
sig.make_sig('seed'),
sig.make_sig('offset'),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, from_, to, seed, offset):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_uniform(self, [input, from_, to, seed, offset]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, from_, to, seed, offset)
return res
inplace_uniform_op=InplaceUniform()
class InnerIndex(Primitive):
r"""
Index the Tensor using an `indices`.
.. warning::
This is an internal and non-standard interface. The target interface is aclnn.
Inputs:
- **input** (Tensor) - The input Tensor.
- **indices** (tuple[Tensor], list[Tensor]) - the indices, of type int64, used to index into the `input`.
The size of `indices` should be <= the rank of `input`, and the tensors in `indices` should be broadcastable.
Outputs:
Tensor, has the same dtype as input Tensor.
Raises:
TypeError: If `input` is not a Tensor.
TypeError: If `indices` is not a tuple[Tensor] or list[Tensor].
TypeError: If the dtype of the tensors in `indices` is not int64.
ValueError: If the tensors in `indices` are not broadcastable.
ValueError: If size(`indices`) > rank(`input`).
ValueError: If rank of `input` = 0.
Supported Platforms:
``Ascend``
Examples:
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> input1 = Tensor(np.array([[1, 2, 3], [4, 5, 6]]), mindspore.int64)
>>> indices1 = Tensor(np.array([0, 1, 1]), mindspore.int64)
>>> indices2 = Tensor(np.array([1, 2, 1]), mindspore.int64)
>>> output = ops.auto_generate.InnerIndex()(input1, [indices1, indices2])
>>> print(output)
[2 6 5]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, indices):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inner_index(self, [input, indices]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, indices)
return res
inner_index_op=InnerIndex()
class Col2ImGrad(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('kernel_size'),
sig.make_sig('dilation', default=1),
sig.make_sig('padding', default=0),
sig.make_sig('stride', default=1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, kernel_size, dilation=1, padding=0, stride=1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_col2im_grad(self, [input, to_pair('Col2ImGrad', 'kernel_size', kernel_size), to_pair('Col2ImGrad', 'dilation', dilation), to_pair('Col2ImGrad', 'padding', padding), to_pair('Col2ImGrad', 'stride', stride)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, to_pair('Col2ImGrad', 'kernel_size', kernel_size), to_pair('Col2ImGrad', 'dilation', dilation), to_pair('Col2ImGrad', 'padding', padding), to_pair('Col2ImGrad', 'stride', stride))
return res
col2im_grad_op=Col2ImGrad()
class IRFFT(Primitive):
r"""
.. code-block::
prim = ops.IRFFT()
out = prim(input, n, dim, norm)
is equivalent to
.. code-block::
ops.irfft(input, n, dim, norm)
Refer to :func:`mindspore.ops.irfft` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('n', default=None),
sig.make_sig('dim', default=-1),
sig.make_sig('norm', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, n=None, dim=-1, norm=None):
return super().__call__(input, n, dim, norm if norm is None else str_to_enum('IRFFT', 'norm', norm))
irfft_op=IRFFT()
class AdaptiveMaxPool1D(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, output_size):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_adaptive_max_pool1d(self, [input, output_size]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, output_size)
return res
adaptive_max_pool1d_op=AdaptiveMaxPool1D()
class ClampTensor(Primitive):
r"""
.. code-block::
prim = ops.ClampTensor()
out = prim(input, min, max)
is equivalent to
.. code-block::
ops.clamp_tensor(input, min, max)
Refer to :func:`mindspore.ops.clamp_tensor` for more details.
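A minimal usage sketch (assuming the usual clamp semantics: every element is limited to the range [`min`, `max`], and a bound left as ``None`` is not applied):
.. code-block::
import numpy as np
from mindspore import Tensor, ops
x = Tensor(np.array([-5.0, 0.0, 5.0], dtype=np.float32))
min_t = Tensor(np.array([-1.0], dtype=np.float32))
max_t = Tensor(np.array([1.0], dtype=np.float32))
out = ops.ClampTensor()(x, min_t, max_t)
# expected result: [-1.  0.  1.]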
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('min', default=None),
sig.make_sig('max', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, min=None, max=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_clamp_tensor(self, [input, min, max]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, min, max)
return res
clamp_tensor_op=ClampTensor()
class TrilExt(Primitive):
r"""
.. code-block::
prim = ops.TrilExt(diagonal)
out = prim(input)
is equivalent to
.. code-block::
ops.tril_ext(input, diagonal)
Refer to :func:`mindspore.ops.tril_ext` for more details.
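A minimal usage sketch (assuming the standard tril semantics: elements above the `diagonal`-th diagonal are zeroed):
.. code-block::
import numpy as np
from mindspore import Tensor, ops
x = Tensor(np.ones((3, 3), dtype=np.float32))
out = ops.TrilExt(diagonal=0)(x)
# expected result:
# [[1. 0. 0.]
#  [1. 1. 0.]
#  [1. 1. 1.]]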
"""
@prim_arg_register
def __init__(self, diagonal=0):
self._set_prim_arg("diagonal", type_it('TrilExt', 'diagonal', diagonal, OpDtype.DT_TENSOR, OpDtype.DT_INT))
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_tril_ext(self, [input, self.diagonal]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, self.diagonal)
return res
class IsInf(Primitive):
r"""
.. code-block::
prim = ops.IsInf()
out = prim(input)
is equivalent to
.. code-block::
ops.isinf(input)
Refer to :func:`mindspore.ops.isinf` for more details.
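A minimal usage sketch (element-wise test for positive or negative infinity):
.. code-block::
import numpy as np
from mindspore import Tensor, ops
x = Tensor(np.array([np.inf, 1.0, -np.inf], dtype=np.float32))
out = ops.IsInf()(x)
# expected result: [ True False  True]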
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_isinf(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
isinf_op=IsInf()
class NormalFloatTensor(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, mean, std, seed, offset):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_normal_float_tensor(self, [mean, std, seed, offset]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, mean, std, seed, offset)
return res
normal_float_tensor_op=NormalFloatTensor()
class BatchMatMulExt(Primitive):
r"""
.. code-block::
prim = ops.BatchMatMulExt()
out = prim(input, mat2)
is equivalent to
.. code-block::
ops.bmm_ext(input, mat2)
Refer to :func:`mindspore.ops.bmm_ext` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, mat2):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_bmm_ext(self, [input, mat2]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, mat2)
return res
bmm_ext_op=BatchMatMulExt()
class IncreFlashAttention(Primitive):
r"""
The interface for incremental inference.
B -- Batch size
N -- Num heads
kvN -- Num key value heads
S -- Sequence length
D -- Head dim
H -- Hidden size
kvH -- Hidden size of key value
where :math:`H=N\times D`, :math:`kvH=kvN\times D`
Self attention constructs an attention model based on the relationship between input samples themselves. The
principle is to assume an input sample sequence :math:`x` of length :math:`n`, where each element of :math:`x` is
a :math:`d`-dimensional vector that can be viewed as a token embedding. This sequence can be transformed through
3 weight matrices to obtain 3 matrices with dimensions of :math:`n\times d`. The self attention calculation
formula is defined as:
.. math::
Attention(Q,K,V)=Softmax(\frac{QK^{T} }{\sqrt{d} } )V
where the product of :math:`Q` and :math:`K^{T}` represents the attention of input :math:`x`. To avoid the value
becoming too large, it is scaled by dividing by the square root of :math:`d`; softmax normalization is then
performed on each row, and multiplying by :math:`V` yields an :math:`n\times d` matrix.
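A numpy sketch of the scaled dot-product attention formula above (an illustration only; the operator itself fuses this computation for the incremental-decoding case and adds the quantization and paging features described below):
.. code-block::
import numpy as np
n, d = 4, 8
rng = np.random.default_rng(0)
q = rng.standard_normal((n, d)).astype(np.float32)
k = rng.standard_normal((n, d)).astype(np.float32)
v = rng.standard_normal((n, d)).astype(np.float32)
scores = q @ k.T / np.sqrt(d)  # QK^T / sqrt(d)
weights = np.exp(scores - scores.max(axis=-1, keepdims=True))
weights = weights / weights.sum(axis=-1, keepdims=True)  # row-wise softmax
attention = weights @ v  # (n, d) result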
.. warning::
This is an experimental API that is subject to change or deletion.
Note:
- If there is no input parameter and no default value, None needs to be passed.
- The shape of the tensor corresponding to the key and value parameters needs to be completely consistent.
- :math:`N` of parameter query is equal with num_heads. :math:`N` of parameter key and parameter value is equal
with num_key_value_heads. num_heads is a multiple of num_key_value_heads.
- Quantization
- When the data type of query, key, and value is float16 and the data type of output is int8, the input
parameter quant_scale2 is required and quant_offset2 is optional.
- When antiquant_scale exists, key and value need to be passed by int8. antiquant_offset is optional.
- The data type of antiquant_scale and antiquant_offset should be consistent with that of query.
- pse_shift
- The pse_shift data type needs to be consistent with the query data type, and only supports D-axis alignment,
which means that the size of the D-axis must be divisible by 16.
- Page attention:
- The necessary condition for enabling page attention is that the block_table exists, and the key
and value are arranged in a contiguous memory according to the index in the block_table. The support for key
and value dtypes is float16/bfloat16/int8.
- In the enabling scenario of page attention, 16 alignment is required when input types of key and value are
float16/bfloat16, and 32 alignment is required when input types of key and value are int8. It is
recommended to use 128.
- The maximum max_block_num_per_seq currently supported by block_table is 16k, and exceeding 16k will result
in interception and error messages; If you encounter :math:`S` being too large and causing
max_block_num_per_seq to exceed 16k, you can increase the block_size to solve the problem.
- The multiplication of all dimensions of the shape of the parameters key and value in the page attention
scenario cannot exceed the representation range of int32.
- When performing per-channel post quantization, page attention cannot be enabled simultaneously.
- kv_padding_size:
- The calculation formula for the starting point of KV cache transfer is
:math:`S-kv\_padding\_size-actual\_seq\_lengths`. The calculation formula for the transfer endpoint of KV
cache is :math:`S-kv\_padding\_size`. When the starting or ending point of the KV cache transfer is less
than 0, the returned data result is all 0.
- When kv_padding_size is less than 0, it will be set to 0.
- kv_padding_size needs to be enabled together with the actual_seq_lengths parameter, otherwise it is
considered as the KV right padding scene.
- It needs to be enabled together with the atten_mask parameter and ensure that the meaning of atten_mask is
correct, that is, it can correctly hide invalid data. Otherwise, it will introduce accuracy issues.
- kv_padding_size does not support page attention scenarios.
Args:
num_heads (int): The number of heads.
input_layout (str): the data layout of the input qkv, support 'BSH' and 'BNSD'. Default: ``'BSH'``.
scale_value (double): The scale value indicating the scale coefficient, which is used as the scalar of
Muls in the calculation. Default: ``1.0``.
num_key_value_heads (int): Head numbers of key/value which are used in GQA algorithm.
The value 0 indicates if the key and value have the same head nums, use num_heads. Default: ``0``.
block_size (int): The maximum number of tokens stored in each block of KV in page attention. Default: ``0``.
inner_precise (int): Default: ``1``.
Inputs:
- **query** (Tensor) - The query tensor with data type of float16 or bfloat16.
The shape is :math:`(B, 1, H)` / :math:`(B, N, 1, D)`.
- **key** (TensorList) - The key tensor with data type of float16 or bfloat16 or int8.
The shape is :math:`(B, S, kvH)` / :math:`(B, kvN, S, D)`.
- **value** (TensorList) - The value tensor with data type of float16 or bfloat16 or int8.
The shape is :math:`(B, S, kvH)` / :math:`(B, kvN, S, D)`.
- **attn_mask** (Tensor, optional) - The attention mask tensor with data type of bool or int8 or uint8.
The shape is :math:`(B, S)` / :math:`(B, 1, S)` / :math:`(B, 1, 1, S)`. Default: ``None``.
- **actual_seq_lengths** (Union[Tensor, tuple[int], list[int]], optional) - Describe actual sequence length of
each input with data type of int32 or int64. The shape is :math:`(B, )`. Default: ``None``.
- **pse_shift** (Tensor, optional) - The position encoding tensor with data type of float16 or bfloat16. Input
tensor of shape :math:`(1, N, 1, S)` / :math:`(B, N, 1, S)`. Default: ``None``.
- **dequant_scale1** (Tensor, optional) - Quantization parameter, a tensor with data type of uint64 or
float32. It is disabled now. Default: ``None``.
- **quant_scale1** (Tensor, optional) - Quantization parameter, a tensor with data type of float32. It is
disabled now. Default: ``None``.
- **dequant_scale2** (Tensor, optional) - Quantization parameter, a tensor with data type of uint64 or
float32. It is disabled now. Default: ``None``.
- **quant_scale2** (Tensor, optional) - Post-quantization parameter, a tensor with data type of float32.
The shape is :math:`(1,)`. Default: ``None``.
- **quant_offset2** (Tensor, optional) - Post-quantization parameter, a tensor with data type of float32.
The shape is :math:`(1,)`. Default: ``None``.
- **antiquant_scale** (Tensor, optional) - Pseudo-quantization parameter, a tensor with data type of float16
or bfloat16. The shape is :math:`(2, kvN, 1, D)` when input_layout is 'BNSD' or :math:`(2, kvH)` when
input_layout is 'BSH'. Default: ``None``.
- **antiquant_offset** (Tensor, optional) - Pseudo-quantization parameter, a tensor with data type of
float16 or bfloat16. The shape is :math:`(2, kvN, 1, D)` when input_layout is 'BNSD' or :math:`(2, kvH)`
when input_layout is 'BSH'. Default: ``None``.
- **block_table** (Tensor, optional) - The tensor with data type of int32. The shape is
:math:`(B, max\_block\_num\_per\_seq)`, where
:math:`max\_block\_num\_per\_seq = ceil(\frac{max(actual\_seq\_length)}{block\_size} )`. Default: ``None``.
- **kv_padding_size** (Tensor, optional) - The tensor with data type of int64. The range of values is
:math:`0\le kv\_padding\_size \le S-max(actual\_seq\_length)`. The shape is :math:`()` or :math:`(1,)`.
Default: ``None``.
Outputs:
attention_out (Tensor), the shape is :math:`(B, 1, H)` / :math:`(B, N, 1, D)`.
Supported Platforms:
``Ascend``
Examples:
>>> from mindspore import ops
>>> from mindspore.common import Tensor
>>> from mindspore.common import dtype as mstype
>>> import numpy as np
>>> from mindspore.ops.auto_generate import IncreFlashAttention
>>> B, N, S, D, kvN = 1, 4, 10, 128, 1
>>> query = Tensor(np.random.randn(B, 1, N * D), mstype.float16)
>>> key = [Tensor(np.random.randn(B, S, kvN * D), mstype.float16)]
>>> value = [Tensor(np.random.randn(B, S, kvN * D), mstype.float16)]
>>> ifa_ms = IncreFlashAttention(num_heads=N, num_key_value_heads=kvN)
>>> attn_out = ifa_ms(query, key, value)
>>> attn_out
Tensor(shape=[1, 1, 512], dtype=Float16, value=
[[[-1.5161e-01, -2.1814e-01, -1.6284e-01 ... 1.0283e+00, -1.1143e+00, -1.7607e+00]]])
"""
__mindspore_signature__ = (
sig.make_sig('query'),
sig.make_sig('key'),
sig.make_sig('value'),
sig.make_sig('attn_mask', default=None),
sig.make_sig('actual_seq_lengths', default=None),
sig.make_sig('pse_shift', default=None),
sig.make_sig('dequant_scale1', default=None),
sig.make_sig('quant_scale1', default=None),
sig.make_sig('dequant_scale2', default=None),
sig.make_sig('quant_scale2', default=None),
sig.make_sig('quant_offset2', default=None),
sig.make_sig('antiquant_scale', default=None),
sig.make_sig('antiquant_offset', default=None),
sig.make_sig('block_table', default=None),
sig.make_sig('kv_padding_size', default=None),
)
@prim_arg_register
def __init__(self, num_heads=1, input_layout='BSH', scale_value=1.0, num_key_value_heads=0, block_size=0, inner_precise=1):
self._set_prim_arg("num_heads", num_heads)
self._set_prim_arg_with_handler("input_layout", input_layout, str_to_enum)
self._set_prim_arg("scale_value", scale_value)
self._set_prim_arg("num_key_value_heads", num_key_value_heads)
self._set_prim_arg("block_size", block_size)
self._set_prim_arg("inner_precise", inner_precise)
def __call__(self, query, key, value, attn_mask=None, actual_seq_lengths=None, pse_shift=None, dequant_scale1=None, quant_scale1=None, dequant_scale2=None, quant_scale2=None, quant_offset2=None, antiquant_scale=None, antiquant_offset=None, block_table=None, kv_padding_size=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_incre_flash_attention(self, [query, key, value, attn_mask, actual_seq_lengths, pse_shift, dequant_scale1, quant_scale1, dequant_scale2, quant_scale2, quant_offset2, antiquant_scale, antiquant_offset, block_table, kv_padding_size, self.num_heads, self.input_layout, self.scale_value, self.num_key_value_heads, self.block_size, self.inner_precise]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, query, key, value, attn_mask, actual_seq_lengths, pse_shift, dequant_scale1, quant_scale1, dequant_scale2, quant_scale2, quant_offset2, antiquant_scale, antiquant_offset, block_table, kv_padding_size, self.num_heads, self.input_layout, self.scale_value, self.num_key_value_heads, self.block_size, self.inner_precise)
return res
class InplaceMaskedFillScalar(Primitive):
r"""
.. code-block::
prim = ops.InplaceMaskedFillScalar()
out = prim(input, mask, value)
is equivalent to
.. code-block::
ops.masked_fill_scalar_(input, mask, value)
Refer to :func:`mindspore.ops.masked_fill_scalar_` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('mask'),
sig.make_sig('value'),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, mask, value):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_masked_fill_scalar(self, [input, mask, value]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, mask, value)
return res
inplace_masked_fill_scalar_op=InplaceMaskedFillScalar()
class ResizeBilinearGrad(Primitive):
r"""
Performs grad of ResizeBilinear operation.
Args:
grads (Tensor): A 4-D Tensor with shape [batch, channel, height, width].
image (Tensor): A 4-D Tensor with shape [batch, channel, height, width], the original image tensor that was resized.
align_corners (bool): If true, the centers of the 4 corner pixels of the input and output tensors are
aligned, preserving the values at the corner pixels. Default: ``False``.
half_pixel_centers (bool): An optional bool. Default: ``False``.
Outputs:
A 4-D Tensor , with the same shape and data type as `image`.
"""
@prim_arg_register
def __init__(self, align_corners=False, half_pixel_centers=False):
self._set_prim_arg("align_corners", align_corners)
self._set_prim_arg("half_pixel_centers", half_pixel_centers)
def __call__(self, grads, image):
return super().__call__(grads, image, self.align_corners, self.half_pixel_centers)
class ReplicationPad1DGrad(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, grad_output, input, padding):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_replication_pad_1d_grad(self, [grad_output, input, padding]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, grad_output, input, padding)
return res
replication_pad_1d_grad_op=ReplicationPad1DGrad()
class DecoderKVCache(Primitive):
r"""
.. code-block::
prim = ops.DecoderKVCache()
out = prim(cache, update, valid_seq_len, batch_index, seq_len_axis, new_max_seq_len, cur_max_seq_len)
is equivalent to
.. code-block::
ops.decoder_k_v_cache(cache, update, valid_seq_len, batch_index, seq_len_axis, new_max_seq_len, cur_max_seq_len)
Refer to :func:`mindspore.ops.decoder_k_v_cache` for more details.
"""
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, cache, update, valid_seq_len, batch_index, seq_len_axis, new_max_seq_len, cur_max_seq_len):
return super().__call__(cache, update, valid_seq_len, batch_index, seq_len_axis, new_max_seq_len, cur_max_seq_len)
decoder_k_v_cache_op=DecoderKVCache()
class Reciprocal(Primitive):
r"""
Returns reciprocal of a tensor element-wise.
.. math::
out_{i} = \frac{1}{x_{i}}
Inputs:
- **x** (Tensor) - The input tensor.
Outputs:
Tensor, has the same shape as the `x`.
Raises:
TypeError: If `x` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
>>> reciprocal = ops.Reciprocal()
>>> output = reciprocal(x)
>>> print(output)
[1. 0.5 0.25]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_reciprocal(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
reciprocal_op=Reciprocal()
class NonZeroExt(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_non_zero_ext(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
non_zero_ext_op=NonZeroExt()
class GridSampler2D(Primitive):
r"""
This operation samples 2d `input_x` by using interpolation based on flow field grid,
which is usually generated by :func:`mindspore.ops.affine_grid`.
.. warning::
This is an experimental API that is subject to change or deletion.
Refer to :func:`mindspore.ops.grid_sample` for more details.
Args:
interpolation_mode (str, optional): An optional string specifying the interpolation method.
The optional values are
``"bilinear"`` or ``"nearest"`` . Default: ``"bilinear"`` .
- ``"nearest"``: Nearest neighbor interpolation. Each output pixel is assigned the value of the
nearest input pixel. This method is simple and fast but can result in blocky or pixelated outputs.
- ``"bilinear"``: Bilinear interpolation. Each output pixel is a weighted average of the four nearest input
pixels, computed using bilinear interpolation. This method produces smoother results compared
to nearest neighbor interpolation.
padding_mode (str, optional): An optional string specifying the pad method.
The optional values are ``"zeros"`` , ``"border"`` or ``"reflection"`` . Default: ``"zeros"`` .
When the sampling grid is outside input's bounds, effects of various padding modes are as follows:
- ``"zeros"``: Pads the input tensor with zeros.
- ``"border"``: Pads the input tensor with the values of the pixels on the border of the tensor.
- ``"reflection"``: Pads the input tensor by reflecting the values of the pixels at the
boundary of the tensor.
align_corners (bool, optional): An optional bool. When set to ``True`` ,
the centers of the corner pixels of the input
and output tensors are aligned. When set to ``False`` , it is not aligned. Default: ``False`` .
Inputs:
- **input_x** (Tensor) - A 4-D tensor with shape
:math:`(N, C, H_{in}, W_{in})`. Supported dtypes:
- Ascend: float16, float32.
- GPU/CPU: float16, float32, float64.
- **grid** (Tensor) - A 4-D tensor whose dtype is the same as `input_x` and whose shape is
:math:`(N, H_{out}, W_{out}, 2)`.
Used to specify the sampling pixel locations normalized by the input spatial
dimensions.
Outputs:
A 4-D Tensor whose dtype is the same as `input_x` and whose shape is :math:`(N, C, H_{out}, W_{out})`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> gridsampler = ops.GridSampler2D(interpolation_mode='bilinear', padding_mode='zeros', align_corners=True)
>>> input_x = Tensor(np.arange(16).reshape((2, 2, 2, 2)).astype(np.float32))
>>> grid = Tensor(np.arange(-9, 9, 0.5).reshape((2, 3, 3, 2)).astype(np.float32))
>>> output = gridsampler(input_x, grid)
>>> print(output)
[[[[ 0. 0. 0. ]
[ 0. 0. 0. ]
[ 0. 0. 0.5 ]]
[[ 0. 0. 0. ]
[ 0. 0. 0. ]
[ 0. 1.5 4.5 ]]]
[[[10. 8.25 1.375]
[ 0. 0. 0. ]
[ 0. 0. 0. ]]
[[14. 11.25 1.875]
[ 0. 0. 0. ]
[ 0. 0. 0. ]]]]
"""
@prim_arg_register
def __init__(self, interpolation_mode='bilinear', padding_mode='zeros', align_corners=False):
self._set_prim_arg_with_handler("interpolation_mode", interpolation_mode, str_to_enum)
self._set_prim_arg_with_handler("padding_mode", padding_mode, str_to_enum)
self._set_prim_arg("align_corners", align_corners)
def __call__(self, input_x, grid):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_grid_sampler_2d(self, [input_x, grid, self.interpolation_mode, self.padding_mode, self.align_corners]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input_x, grid, self.interpolation_mode, self.padding_mode, self.align_corners)
return res
class Identity(Primitive):
r"""
.. code-block::
prim = ops.Identity()
out = prim(input_x)
is equivalent to
.. code-block::
ops.deepcopy(input_x)
Refer to :func:`mindspore.ops.deepcopy` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input_x):
return super().__call__(input_x)
identity_op=Identity()
class IDCT(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('type', default=2),
sig.make_sig('n', default=None),
sig.make_sig('axis', default=-1),
sig.make_sig('norm', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, type=2, n=None, axis=-1, norm=None):
return super().__call__(x, type, n, axis, norm if norm is None else str_to_enum('IDCT', 'norm', norm))
idct_op=IDCT()
class ExpandAs(Primitive):
r"""
.. code-block::
prim = ops.ExpandAs()
out = prim(input, other)
is equivalent to
.. code-block::
ops.expand_as(input, other)
Refer to :func:`mindspore.ops.expand_as` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_expand_as(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
expand_as_op=ExpandAs()
class InplaceSubScalar(Primitive):
r"""
.. code-block::
prim = ops.InplaceSubScalar()
out = prim(input, other, alpha)
is equivalent to
.. code-block::
ops.sub_scalar_(input, other, alpha)
Refer to :func:`mindspore.ops.sub_scalar_` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('other'),
sig.make_sig('alpha', default=1),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, other, alpha=1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_sub_scalar(self, [input, other, alpha]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other, alpha)
return res
inplace_sub_scalar_op=InplaceSubScalar()
class RepeatInterleaveTensor(Primitive):
r"""
Repeat elements of a tensor along an axis, like :func:`mindspore.numpy.repeat`.
Args:
input (Tensor): The tensor to repeat values for. Must be of type: float16,
float32, int8, uint8, int16, int32, or int64.
repeats (Union[tuple, list, Tensor]): The number of times to repeat, must be positive.
dim (int, optional): The dim along which to repeat. Default: ``None``. If `dim` is None,
the input Tensor will be flattened and the output will also be flattened.
output_size (int, optional): Total output size for the given axis (e.g. sum of repeats),
Default: ``None``.
Returns:
One tensor with values repeated along the specified dim. If input has shape
:math:`(s1, s2, ..., sn)` and dim is i, the output will have shape :math:`(s1, s2, ...,
si * repeats, ..., sn)`. The output type will be the same as the type of `input`.
Supported Platforms:
``Ascend``
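Examples:
A minimal illustrative sketch (it assumes this primitive is reachable via ``ops.auto_generate``
and runs in PyNative mode on Ascend; only the output shape is checked):
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([[0, 1, 2], [3, 4, 5]]), mindspore.float32)
>>> repeats = Tensor(np.array([1, 2]), mindspore.int64)
>>> out = ops.auto_generate.RepeatInterleaveTensor()(x, repeats, dim=0)
>>> # row 0 is kept once and row 1 is repeated twice, giving 3 rows of length 3
>>> print(out.shape)
(3, 3)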
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('repeats'),
sig.make_sig('dim', default=None),
sig.make_sig('output_size', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, repeats, dim=None, output_size=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_repeat_interleave_tensor(self, [input, repeats, dim, output_size]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, repeats, dim, output_size)
return res
repeat_interleave_tensor_op=RepeatInterleaveTensor()
class InplaceSubExt(Primitive):
r"""
.. code-block::
prim = ops.InplaceSubExt()
out = prim(input, other, alpha)
is equivalent to
.. code-block::
ops.sub_tensor_(input, other, alpha)
Refer to :func:`mindspore.ops.sub_tensor_` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('other'),
sig.make_sig('alpha', default=1),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, other, alpha=1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_sub_ext(self, [input, other, alpha]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other, alpha)
return res
inplace_sub_ext_op=InplaceSubExt()
class UpsampleNearest2D(Primitive):
r"""
Performs nearest neighbor upsampling operation.
This operator scales up the volumetric input with the specified `output_size` or `scales` factors, using the
nearest neighbor algorithm.
Exactly one of `output_size` or `scales` must be given; they cannot both be specified at the same time.
Inputs:
- **x** (Tensor) - 4D tensor of shape :math:`(N, C, H_{in}, W_{in})`.
Supporting types: [uint8, float16, float32, float64].
- **output_size** (Union[tuple[int], list[int]]): A tuple or list of int specifying the output volumetric size.
Default: ``None``.
- **scales** (Union[tuple[float], list[float]]): A tuple or list of float specifying the upsampling factors.
Default: ``None``.
Outputs:
- **y** (Tensor) - Upsampled output with the same type as `x` , whose shape is
:math:`(N, C, H_{out}, W_{out})`.
Raises:
TypeError: When `output_size` is not ``None`` and `output_size` is not list[int] or tuple[int].
TypeError: When `scales` is not ``None`` and `scales` is not list[float] or tuple[float].
TypeError: If dtype of `x` is not one of [uint8, float16, float32, float64].
ValueError: If any value of `output_size` is negative or zero when `output_size` is not ``None``.
ValueError: If any value of `scales` is negative or zero when `scales` is not ``None``.
ValueError: If shape of `x` is not 4D.
ValueError: If none of `scales` and `output_size` is specified or both specified.
ValueError: If size of `scales` is not equal to 2 when `scales` is specified.
ValueError: If size of `output_size` is not equal to 2 when `output_size` is specified.
Supported Platforms:
``Ascend``
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> from mindspore import dtype as mstype
>>> x = Tensor(np.arange(12).astype(np.float32).reshape(1, 2, 2, 3))
>>> output_size = [4, 4]
>>> net = ops.auto_generate.UpsampleNearest2D()
>>> output = net(x, output_size, None)
>>> print(output)
[[[[0., 0., 1., 2.],
[0., 0., 1., 2.],
[3., 3., 4., 5.],
[3., 3., 4., 5.]],
[[6., 6., 7., 8.],
[6., 6., 7., 8.],
[9., 9., 10., 10.],
[9., 9., 10., 10.]]]]
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('output_size', default=None),
sig.make_sig('scales', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, output_size=None, scales=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_upsample_nearest2d(self, [x, output_size, scales]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x, output_size, scales)
return res
upsample_nearest2d_op=UpsampleNearest2D()
class BatchNormStats(Primitive):
r"""
.. code-block::
prim = ops.BatchNormStats()
out = prim(input, eps)
is equivalent to
.. code-block::
ops.batch_norm_stats(input, eps)
Refer to :func:`mindspore.ops.batch_norm_stats` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, eps):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_batch_norm_stats(self, [input, eps]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, eps)
return res
batch_norm_stats_op=BatchNormStats()
class RealDiv(Primitive):
r"""
Divides the first input tensor by the second input tensor in floating-point type element-wise.
Refer to :func:`mindspore.ops.div` for more details.
Inputs:
- **x** (Union[Tensor, Number, bool]) - The first input is a number or
a bool or a tensor whose data type is number or bool.
- **y** (Union[Tensor, Number, bool]) - The second input is a number or
a bool when the first input is a tensor or a tensor whose data type is number or bool.
Outputs:
Tensor, the shape is the same as the one after broadcasting,
and the data type is the one with higher precision or higher digits among the two inputs.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
>>> y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
>>> realdiv = ops.RealDiv()
>>> output = realdiv(x, y)
>>> print(output)
[0.25 0.4 0.5 ]
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, y):
return super().__call__(x, y)
real_div_op=RealDiv()
class TraceExt(Primitive):
r"""
.. code-block::
prim = ops.TraceExt()
out = prim(input)
is equivalent to
.. code-block::
ops.trace_ext(input)
Refer to :func:`mindspore.ops.trace_ext` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_trace_ext(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
trace_ext_op=TraceExt()
class InplaceThreshold(Primitive):
r"""
.. code-block::
prim = ops.InplaceThreshold()
out = prim(input, threshold, value)
is equivalent to
.. code-block::
ops.inplace_threshold(input, threshold, value)
Refer to :func:`mindspore.ops.inplace_threshold` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('threshold'),
sig.make_sig('value'),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, threshold, value):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_threshold(self, [input, threshold, value]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, threshold, value)
return res
inplace_threshold_op=InplaceThreshold()
class GridSampler2DGrad(Primitive):
r"""
Computes gradients for GridSampler2D operation.
Args:
- **grad** (Tensor) - A 4-D tensor whose dtype is float16 or float32 and whose shape is :math:`(N, C,
H_{out}, W_{out})`. The shape must be consistent with the shape of the output result of the forward calculation.
- **input_x** (Tensor) - A 4-D tensor whose dtype is the same as `grad` and whose shape is :math:`(N, C,
H_{in}, W_{in})`.
- **grid** (Tensor) - A 4-D tensor whose dtype is the same as `grad` and whose
shape is :math:`(N, H_{out}, W_{out}, 2)`.
interpolation_mode (str): An optional string specifying the interpolation method. The optional values are
"bilinear" or "nearest". Default: "bilinear".
padding_mode (str): An optional string specifying the pad method. The optional values are "zeros", "border" or
"reflection". Default: "zeros".
align_corners (bool): An optional bool. If ``True``, the centers of the corner pixels of the input and output
tensors are aligned. Default: ``False``.
Returns:
- **dx** (Tensor) - A 4-D tensor whose dtype and shape are the same as `input_x`.
- **dgrid** (Tensor) - A 4-D tensor whose dtype and shape are the same as `grid`.
Raises:
TypeError: If `grad`, `input_x` or `grid` is not a Tensor.
TypeError: If the dtypes of `grad`, `input_x` and `grid` are inconsistent.
TypeError: If the dtype of `grad`, `input_x` or `grid` is not a valid type.
TypeError: If `align_corners` is not a boolean value.
ValueError: If the rank of `grad`, `input_x` or `grid` is not equal to 4.
ValueError: If the first dimension of `grad`, `input_x` and `grid` are inconsistent.
ValueError: If the last dimension of `grid` is not equal to 2.
ValueError: If `interpolation_mode` is not "bilinear", "nearest" or a string value.
ValueError: If `padding_mode` is not "zeros", "border", "reflection" or a string value.
ValueError: If the shape of `grad` is inconsistent with the shape of the output result of forward calculation.
Supported Platforms:
``GPU`` ``CPU``
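Examples:
A minimal illustrative sketch (it assumes this primitive is reachable via ``ops.auto_generate``
and runs in PyNative mode on a supported device; only the output shapes are checked):
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> grad = Tensor(np.ones((1, 1, 3, 3)), mindspore.float32)     # gradient w.r.t. the forward output
>>> input_x = Tensor(np.ones((1, 1, 2, 2)), mindspore.float32)  # forward input
>>> grid = Tensor(np.zeros((1, 3, 3, 2)), mindspore.float32)    # sampling grid used in the forward pass
>>> dx, dgrid = ops.auto_generate.GridSampler2DGrad()(grad, input_x, grid)
>>> print(dx.shape, dgrid.shape)  # dx matches `input_x`, dgrid matches `grid`
(1, 1, 2, 2) (1, 3, 3, 2)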
"""
@prim_arg_register
def __init__(self, interpolation_mode='bilinear', padding_mode='zeros', align_corners=False, output_mask=(1, 1)):
self._set_prim_arg_with_handler("interpolation_mode", interpolation_mode, str_to_enum)
self._set_prim_arg_with_handler("padding_mode", padding_mode, str_to_enum)
self._set_prim_arg("align_corners", align_corners)
self._set_prim_arg("output_mask", output_mask)
def __call__(self, grad, input_x, grid):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_grid_sampler_2d_grad(self, [grad, input_x, grid, self.interpolation_mode, self.padding_mode, self.align_corners, self.output_mask]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, grad, input_x, grid, self.interpolation_mode, self.padding_mode, self.align_corners, self.output_mask)
return res
class ConvTranspose2D(Primitive):
r"""
.. code-block::
prim = ops.ConvTranspose2D()
out = prim(input, weight, bias, stride, padding, output_padding, groups, dilation)
is equivalent to
.. code-block::
ops.conv_transpose2d(input, weight, bias, stride, padding, output_padding, groups, dilation)
Refer to :func:`mindspore.ops.conv_transpose2d` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('weight'),
sig.make_sig('bias', default=None),
sig.make_sig('stride', default=1),
sig.make_sig('padding', default=0),
sig.make_sig('output_padding', default=0),
sig.make_sig('groups', default=1),
sig.make_sig('dilation', default=1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_conv_transpose2d(self, [input, weight, bias, stride, padding, output_padding, groups, dilation]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, weight, bias, stride, padding, output_padding, groups, dilation)
return res
conv_transpose2d_op=ConvTranspose2D()
class Norm(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('p', default=2.0),
sig.make_sig('dim', default=None),
sig.make_sig('keepdim', default=False),
sig.make_sig('dtype', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, p=2.0, dim=None, keepdim=False, dtype=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_norm(self, [input, p, dim, keepdim, dtype if dtype is None else dtype_to_type_id('Norm', 'dtype', dtype)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, p, dim, keepdim, dtype if dtype is None else dtype_to_type_id('Norm', 'dtype', dtype))
return res
norm_op=Norm()
class Conj(Primitive):
r"""
.. code-block::
prim = ops.Conj()
out = prim(input)
is equivalent to
.. code-block::
ops.conj(input)
Refer to :func:`mindspore.ops.conj` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
conj_op=Conj()
class Muls(Primitive):
r"""
.. code-block::
prim = ops.Muls()
out = prim(input, other)
is equivalent to
.. code-block::
ops.muls(input, other)
Refer to :func:`mindspore.ops.muls` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_muls(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
muls_op=Muls()
class Dot(Primitive):
r"""
.. code-block::
prim = ops.Dot()
out = prim(input, other)
is equivalent to
.. code-block::
ops.dot(input, other)
Refer to :func:`mindspore.ops.dot` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_dot(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
dot_op=Dot()
class ReLU6(Primitive):
r"""
.. code-block::
prim = ops.ReLU6()
out = prim(x)
is equivalent to
.. code-block::
ops.relu6(x)
Refer to :func:`mindspore.ops.relu6` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x):
return super().__call__(x)
relu6_op=ReLU6()
class HSigmoidGrad(Primitive):
r"""
Gets the gradient of HSigmoid operation.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, grads, input_x):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_hsigmoid_grad(self, [grads, input_x]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, grads, input_x)
return res
hsigmoid_grad_op=HSigmoidGrad()
class RemainderTensorTensor(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_remainder_tensor_tensor(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
remainder_tensor_tensor_op=RemainderTensorTensor()
class Correlate(Primitive):
r"""
.. code-block::
prim = ops.Correlate(mode)
out = prim(a, v)
is equivalent to
.. code-block::
ops.correlate(a, v, mode)
Refer to :func:`mindspore.ops.correlate` for more details.
"""
@prim_arg_register
def __init__(self, mode='valid'):
self._set_prim_arg_with_handler("mode", mode, str_to_enum)
def __call__(self, a, v):
return super().__call__(a, v, self.mode)
class PromptKVCache(Primitive):
r"""
.. code-block::
prim = ops.PromptKVCache(align_mode)
out = prim(cache, update, valid_seq_len, batch_index, seq_len_axis, new_max_seq_len, cur_max_seq_len)
is equivalent to
.. code-block::
ops.prompt_k_v_cache(cache, update, valid_seq_len, batch_index, seq_len_axis, new_max_seq_len, cur_max_seq_len, align_mode)
Refer to :func:`mindspore.ops.prompt_k_v_cache` for more details.
"""
@prim_arg_register
def __init__(self, align_mode='LEFT'):
self._set_prim_arg_with_handler("align_mode", align_mode, str_to_enum)
self.add_prim_attr("side_effect_mem", True)
def __call__(self, cache, update, valid_seq_len, batch_index, seq_len_axis, new_max_seq_len, cur_max_seq_len):
return super().__call__(cache, update, valid_seq_len, batch_index, seq_len_axis, new_max_seq_len, cur_max_seq_len, self.align_mode)
class Copy(Primitive):
r"""
.. code-block::
prim = ops.Copy()
out = prim(input)
is equivalent to
.. code-block::
ops.copy(input)
Refer to :func:`mindspore.ops.copy` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_copy(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
copy_op=Copy()
class IndexFillTensor(Primitive):
r"""
.. code-block::
prim = ops.IndexFillTensor()
out = prim(input, dim, index, value)
is equivalent to
.. code-block::
ops.index_fill_tensor(input, dim, index, value)
Refer to :func:`mindspore.ops.index_fill_tensor` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim, index, value):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_index_fill_tensor(self, [input, dim, index, value]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim, index, value)
return res
index_fill_tensor_op=IndexFillTensor()
class InnerCommAllGather(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, rank_size, group):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inner_comm_all_gather(self, [input, rank_size, group]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, rank_size, group)
return res
inner_comm_all_gather_op=InnerCommAllGather()
class SumExt(Primitive):
r"""
.. code-block::
prim = ops.SumExt()
out = prim(input, dim, keepdim, dtype)
is equivalent to
.. code-block::
ops.sum_ext(input, dim, keepdim, dtype)
Refer to :func:`mindspore.ops.sum_ext` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('dim', default=None),
sig.make_sig('keepdim', default=False),
sig.make_sig('dtype', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim=None, keepdim=False, dtype=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_sum_ext(self, [input, dim, keepdim, dtype if dtype is None else dtype_to_type_id('SumExt', 'dtype', dtype)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim, keepdim, dtype if dtype is None else dtype_to_type_id('SumExt', 'dtype', dtype))
return res
sum_ext_op=SumExt()
class ResizeBicubicGrad(Primitive):
r"""
Computes gradients for the ResizeBicubic operation.
Args:
grads (Tensor): A Tensor of type float. 4-D with shape [batch, height, width, channels]. The format must be NHWC.
image (Tensor): A Tensor. Must be one of the following types: float, double.
4-D with shape [batch, orig_height, orig_width, channels], the origin image tensor that was resized.
The format must be NHWC.
align_corners (bool): If true, the centers of the 4 corner pixels of the input and output tensors are
aligned, preserving the values at the corner pixels. Default: ``False``.
half_pixel_centers (bool): An optional bool. Default: ``False``.
Outputs:
A 4-D Tensor, with the same shape and data type as `image`.
Raises:
TypeError: If `grads` is not allowed.
TypeError: If `image` is not allowed.
ValueError: If `image` dim is not 4.
ValueError: If `size` dim is not 4.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
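Examples:
A minimal illustrative sketch (it assumes this primitive is reachable via ``ops.auto_generate``
and runs in PyNative mode on a supported device; only the output shape is checked; note the NHWC layout):
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> grads = Tensor(np.ones((1, 4, 4, 3)), mindspore.float32)  # gradient w.r.t. the resized output, NHWC
>>> image = Tensor(np.ones((1, 2, 2, 3)), mindspore.float32)  # original image before resizing, NHWC
>>> dx = ops.auto_generate.ResizeBicubicGrad()(grads, image)
>>> print(dx.shape)  # same shape as `image`
(1, 2, 2, 3)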
"""
@prim_arg_register
def __init__(self, align_corners=False, half_pixel_centers=False):
self._set_prim_arg("align_corners", align_corners)
self._set_prim_arg("half_pixel_centers", half_pixel_centers)
def __call__(self, grads, image):
return super().__call__(grads, image, self.align_corners, self.half_pixel_centers)
class NLLLossGrad(Primitive):
r"""
Computes the gradients of `NLLLoss`.
"""
@prim_arg_register
def __init__(self, reduction='mean', ignore_index=-100):
self._set_prim_arg_with_handler("reduction", reduction, str_to_enum)
self._set_prim_arg("ignore_index", ignore_index)
def __call__(self, logits, loss_grad, labels, weight, total_weight):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_nllloss_grad(self, [logits, loss_grad, labels, weight, total_weight, self.reduction, self.ignore_index]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, logits, loss_grad, labels, weight, total_weight, self.reduction, self.ignore_index)
return res
class EmbeddingFeatureMappingTableSize(Primitive):
r"""
.. code-block::
prim = ops.EmbeddingFeatureMappingTableSize()
out = prim(table_name)
is equivalent to
.. code-block::
ops.embedding_feature_mapping_table_size(table_name)
Refer to :func:`mindspore.ops.embedding_feature_mapping_table_size` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, table_name):
return super().__call__(table_name)
embedding_feature_mapping_table_size_op=EmbeddingFeatureMappingTableSize()
class InplaceClampScalar(Primitive):
r"""
.. code-block::
prim = ops.InplaceClampScalar()
out = prim(input, min, max)
is equivalent to
.. code-block::
ops.inplace_clamp_scalar(input, min, max)
Refer to :func:`mindspore.ops.inplace_clamp_scalar` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('min', default=None),
sig.make_sig('max', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, min=None, max=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_clamp_scalar(self, [input, min, max]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, min, max)
return res
inplace_clamp_scalar_op=InplaceClampScalar()
class HShrinkGrad(Primitive):
r"""
Computes gradients for the HShrink operation.
Args:
gradients (Tensor) - The gradients of the loss with respect to the output of the HShrink function.
Currently the gradients data type only supports float16 and float32.
features (Tensor) - Must be the input `input_x` of the forward operator HShrink.
Currently the features data type only supports float16 and float32.
lambd (float): the lambda value for the Hardshrink formulation. Default: ``0.5``.
Returns:
backprops - Tensor, with the same shape and data type as `features`.
Raises:
ValueError: If `lambd` is not a float.
ValueError: If shape of `gradients` is not the same as `features`.
TypeError: If dtype of `gradients` is not the same as `features`.
TypeError: If dtype of `gradients` or `features` is neither float16 nor float32.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
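Examples:
A minimal illustrative sketch (it assumes this primitive is reachable via ``ops.auto_generate``
and runs in PyNative mode; the expected value is stated in a comment instead of printed output):
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> gradients = Tensor(np.array([1.0, 1.0, 1.0]), mindspore.float32)
>>> features = Tensor(np.array([-1.0, 0.3, 2.0]), mindspore.float32)
>>> backprops = ops.auto_generate.HShrinkGrad(lambd=0.5)(gradients, features)
>>> # the gradient passes through where |features| > lambd and is zeroed elsewhere,
>>> # so backprops is expected to be [1., 0., 1.]
>>> print(backprops.shape)
(3,)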
"""
@prim_arg_register
def __init__(self, lambd=0.5):
self._set_prim_arg("lambd", type_it('HShrinkGrad', 'lambd', lambd, (OpDtype.DT_INT, OpDtype.DT_BOOL), OpDtype.DT_FLOAT))
def __call__(self, gradients, features):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_hshrink_grad(self, [gradients, features, self.lambd]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, gradients, features, self.lambd)
return res
class DistCommReduceScatterTensor(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, other, input, rank_size, op_type, group):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_dist_comm_reduce_scatter_tensor(self, [other, input, rank_size, op_type, group]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, other, input, rank_size, op_type, group)
return res
dist_comm_reduce_scatter_tensor_op=DistCommReduceScatterTensor()
class AdaptiveAvgPool3DGradExt(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input_grad, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_adaptive_avg_pool3d_grad_ext(self, [input_grad, input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input_grad, input)
return res
adaptive_avg_pool3d_grad_ext_op=AdaptiveAvgPool3DGradExt()
class InplaceAddsExt(Primitive):
r"""
.. code-block::
prim = ops.InplaceAddsExt()
out = prim(input, other, alpha)
is equivalent to
.. code-block::
ops.inplace_adds_ext(input, other, alpha)
Refer to :func:`mindspore.ops.inplace_adds_ext` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('other'),
sig.make_sig('alpha', default=1),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, other, alpha=1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_adds_ext(self, [input, other, alpha]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other, alpha)
return res
inplace_adds_ext_op=InplaceAddsExt()
class LinalgVectorNorm(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('ord', default=2),
sig.make_sig('dim', default=None),
sig.make_sig('keepdim', default=False),
sig.make_sig('dtype', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, ord=2, dim=None, keepdim=False, dtype=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_linalg_vector_norm(self, [x, ord, dim, keepdim, dtype if dtype is None else dtype_to_type_id('LinalgVectorNorm', 'dtype', dtype)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x, ord, dim, keepdim, dtype if dtype is None else dtype_to_type_id('LinalgVectorNorm', 'dtype', dtype))
return res
linalg_vector_norm_op=LinalgVectorNorm()
class Lerp(Primitive):
r"""
.. code-block::
prim = ops.Lerp()
out = prim(input, end, weight)
is equivalent to
.. code-block::
ops.lerp(input, end, weight)
Refer to :func:`mindspore.ops.lerp` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, end, weight):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_lerp(self, [input, end, weight]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, end, weight)
return res
lerp_op=Lerp()
class Nansum(Primitive):
r"""
Computes sum of `input` over a given dimension, treating NaNs as zero.
.. warning::
It is only supported on Atlas A2 Training Series Products.
This is an experimental API that is subject to change or deletion.
Args:
input (Tensor): The input Tensor.
dim (Union[int, tuple(int)], optional): The dimensions to sum.
Dim must be in the range [-rank(input), rank(input)). Default: ``None``, which indicates the sum of all elements in a tensor.
keepdim (bool, optional): Whether the output Tensor keeps dimensions or not. Default: ``False``.
Keyword Args:
dtype (:class:`mindspore.dtype`, optional): The dtype of output Tensor. Default: ``None``.
Returns:
Tensor, the sum of input `input` in the given dimension dim, treating NaNs as zero.
- If dim is None, keepdim is False,
the output is a 0-D Tensor representing the sum of all elements in the input Tensor.
- If dim is int, set as 2, and keepdim is False,
the shape of output is :math:`(input_1, input_3, ..., input_R)`.
- If dim is tuple(int) or list(int), set as (2, 3), and keepdim is False,
the shape of output is :math:`(input_1, input_4, ..., input_R)`.
Raises:
TypeError: If `input` is not Tensor.
TypeError: If `keepdim` is not a bool.
TypeError: If the dtype of `input` or `dtype` is complex type.
ValueError: If `dim` not in [-rank(`input`), rank(`input`)).
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([[float("nan"), 2, 3], [1, 2, float("nan")]]), mindspore.float32)
>>> output1 = ops.nansum(x, dim=0, keepdim=False, dtype=mindspore.float32)
>>> output2 = ops.nansum(x, dim=0, keepdim=True, dtype=mindspore.float32)
>>> print(output1)
[1. 4. 3.]
>>> print(output2)
[[1. 4. 3.]]
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('dim', default=None),
sig.make_sig('keepdim', default=False),
sig.make_sig('dtype', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim=None, keepdim=False, dtype=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_nansum(self, [input, dim, keepdim, dtype if dtype is None else dtype_to_type_id('Nansum', 'dtype', dtype)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim, keepdim, dtype if dtype is None else dtype_to_type_id('Nansum', 'dtype', dtype))
return res
nansum_op=Nansum()
class FFTWithSize(Primitive):
r"""
Fourier transform, can be adjusted by parameters to achieve FFT/IFFT/RFFT/IRFFT.
For fft, it computes the following expression:
.. math::
X[\omega_1, \dots, \omega_d] =
\sum_{n_1=0}^{N_1-1} \dots \sum_{n_d=0}^{N_d-1} x[n_1, \dots, n_d]
e^{-j\ 2 \pi \sum_{i=0}^d \frac{\omega_i n_i}{N_i}},
where :math:`d` = `signal_ndim` is number of dimensions for the
signal, and :math:`N_i` is the size of signal dimension :math:`i`.
For ifft, it computes the following expression:
.. math::
X[\omega_1, \dots, \omega_d] =
\frac{1}{\prod_{i=1}^d N_i} \sum_{n_1=0}^{N_1-1} \dots \sum_{n_d=0}^{N_d-1} x[n_1, \dots, n_d]
e^{\ j\ 2 \pi \sum_{i=0}^d \frac{\omega_i n_i}{N_i}},
where :math:`d` = `signal_ndim` is number of dimensions for the
signal, and :math:`N_i` is the size of signal dimension :math:`i`.
Note:
- FFT/IFFT requires complex64 or complex128 inputs, return complex64 or complex128 outputs.
- RFFT requires bool, uint8, int8, int16, int32, int64, float32 and float64 inputs,
return complex64 or complex128 outputs.
- IRFFT requires complex64 or complex128 inputs, return float32 or float64 outputs.
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
signal_ndim (int): The number of dimensions in each signal, this controls how many dimensions
of the fourier transform are realized, can only be 1, 2 or 3.
inverse (bool): Whether it is the inverse transformation, used to select from FFT and RFFT or IFFT and IRFFT.
- when set to ``True``: IFFT and IRFFT.
- when set to ``False``: FFT and RFFT.
real (bool): Whether it is the real transformation, combines with `inverse` to select a specific
transformation mode:
- `inverse` is ``False`` , `real` is ``False`` : corresponds to FFT.
- `inverse` is ``True`` , `real` is ``False`` : corresponds to IFFT.
- `inverse` is ``False`` , `real` is ``True`` : corresponds to RFFT.
- `inverse` is ``True`` , `real` is ``True`` : corresponds to IRFFT.
norm (str, optional): The normalization, optional values: [ ``"backward"`` , ``"forward"`` , ``"ortho"`` ].
Default value: ``"backward"`` .
- ``"backward"`` has the direct transforms unscaled and the inverse transforms scaled by :math:`1/n`,
where n is the input x's element numbers.
- ``"ortho"`` has both direct and inverse transforms are scaled by :math:`1/\sqrt n`.
- ``"forward"`` has the direct transforms scaled by :math:`1/n` and the inverse transforms unscaled.
onesided (bool, optional): Controls whether the input is halved to avoid redundancy. Default: ``True`` .
signal_sizes (tuple, optional): Size of the original signal (the signal before rfft, no batch dimension),
this parameter is required only in IRFFT mode with `onesided` set to ``True``, and the following conditions
must be satisfied. Default: ``()`` .
- The length of `signal_sizes` is equal to the signal_ndim of the IRFFT:
:math:`len(signal\_sizes)=signal\_ndim`.
- The last dimension of `signal_sizes` divided by 2 is equal to
the last dimension of the IRFFT input: :math:`signal\_size[-1]/2+1=x.shape[-1]`.
- `signal_sizes` has exactly the same dimensions as the input shape
except for the last dimension: :math:`signal\_sizes[:-1]=x.shape[:-1]`.
Inputs:
- **x** (Tensor) - The dimension of the input tensor must be greater than or equal to signal_ndim.
Outputs:
A tensor containing the complex-to-complex, real-to-complex or complex-to-real Fourier transform result.
Raises:
TypeError: If the input type of FFT/IFFT/IRFFT is not one of: complex64, complex128.
TypeError: If the input type is not Tensor.
ValueError: If `x` dimension is less than signal_ndim.
ValueError: If signal_ndim is greater than 3 or less than 1.
ValueError: If norm is none of "backward", "forward" or "ortho".
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> # case FFT: signal_ndim: 1, inverse: False, real: False.
>>> fft_in = Tensor(np.array([2, 1, 2]), mindspore.complex64)
>>> fft_net = ops.FFTWithSize(signal_ndim=1, inverse=False, real=False)
>>> fft_output = fft_net(fft_in)
>>> print(fft_output)
[5. +0.j 0.5 +0.86602545j 0.50000006-0.8660255j ]
>>> # case IFFT: signal_ndim: 1, inverse: True, real: False.
>>> ifft_in = fft_output
>>> ifft_net = ops.FFTWithSize(signal_ndim=1, inverse=True, real=False)
>>> ifft_output = ifft_net(ifft_in)
>>> print(ifft_output)
[2. -1.9868216e-08j 0.99999994+0.0000000e+00j
1.9999999 +7.9472862e-08j]
>>> # case RFFT2D: signal_ndim: 2, inverse: False, real: True.
>>> rfft_in = Tensor(np.array([[2, 1, 2], [3, 1, 6]]), mindspore.float32)
>>> rfft_net = ops.FFTWithSize(signal_ndim=2, inverse=False, real=True)
>>> rfft_output = rfft_net(rfft_in)
>>> print(rfft_output)
[[ 1.5000000e+01+1.1920929e-07j -2.3841858e-07+5.1961522e+00j]
[-5.0000000e+00-2.9802322e-08j 9.9999988e-01-3.4641016e+00j]]
>>> # case IRFFT2D: signal_ndim: 2, inverse: True, real: True.
>>> irfft_in = rfft_output
>>> irfft_net = ops.FFTWithSize(signal_ndim=2, inverse=True, real=True, signal_sizes=rfft_in.shape)
>>> irfft_output = irfft_net(irfft_in)
>>> print(irfft_output)
[[2. 1. 2. ]
[3. 0.99999994 5.9999995 ]]
"""
@prim_arg_register
def __init__(self, signal_ndim, inverse, real, norm='backward', onesided=True, signal_sizes=()):
self._set_prim_arg("signal_ndim", signal_ndim)
self._set_prim_arg("inverse", inverse)
self._set_prim_arg("real", real)
self._set_prim_arg_with_handler("norm", norm, str_to_enum)
self._set_prim_arg("onesided", onesided)
self._set_prim_arg("signal_sizes", signal_sizes)
def __call__(self, x):
return super().__call__(x, self.signal_ndim, self.inverse, self.real, self.norm, self.onesided, self.signal_sizes)
class InplaceFloorDivide(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('other'),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_floor_divide(self, [input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, other)
return res
inplace_floor_divide_op=InplaceFloorDivide()
class UpsampleNearest3DGrad(Primitive):
r"""
Upsample the 3-D gradient data with the nearest neighbor interpolation algorithm.
Note:
Only one of 'scales' and 'output_size' can be specified, and it is an error if both are specified.
Inputs:
- **dy** (Tensor) - Tensor of shape [N, C, D, H, W], Must be one of the following types:
float16, float32, float64.
- **input_size** (tuple[int]): A required tuple[int], which contains 5 elements:
[min_batch, channels, depth, height, width].
Must: input_size[0] == dy.shape[0], input_size[1] == dy.shape[1].
- **output_size** (tuple[int]): An optional tuple[int]. Default: ``None``.
It contains 3 elements: depth, height, width, whose elements should be the same as `dy`. Must:
dy.shape[2] == output_size[0],
dy.shape[3] == output_size[1],
dy.shape[4] == output_size[2].
- **scales** (tuple[float]): An optional tuple[float]. Default: ``None``.
The scale array along each dimension, contain 3 elements: scale_depth, scale_height, scale_width. Must:
dy.shape[2] == floor(input_size[2] * scales[0]),
dy.shape[3] == floor(input_size[3] * scales[1]),
dy.shape[4] == floor(input_size[4] * scales[2]).
Outputs:
- **dx**- (Tensor) - A 5-D tensor. Has the same type as `dy`, shape depends on `input_size`.
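Examples:
A minimal illustrative sketch (it assumes this primitive is reachable via ``ops.auto_generate``
and runs in PyNative mode on a supported device; only the output shape is checked):
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> dy = Tensor(np.ones((1, 1, 4, 4, 4)), mindspore.float32)  # gradient w.r.t. the upsampled output
>>> input_size = (1, 1, 2, 2, 2)                              # shape of the forward input
>>> dx = ops.auto_generate.UpsampleNearest3DGrad()(dy, input_size, (4, 4, 4), None)
>>> print(dx.shape)  # dx takes its shape from `input_size`
(1, 1, 2, 2, 2)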
"""
__mindspore_signature__ = (
sig.make_sig('dy'),
sig.make_sig('input_size'),
sig.make_sig('output_size', default=None),
sig.make_sig('scales', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, dy, input_size, output_size=None, scales=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_upsample_nearest3d_grad(self, [dy, input_size, output_size, scales]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, dy, input_size, output_size, scales)
return res
upsample_nearest3d_grad_op=UpsampleNearest3DGrad()
class BatchMatMul(Primitive):
r"""
Computes matrix multiplication between two tensors by batch.
.. math::
\text{output}[..., :, :] = \text{matrix}(x[..., :, :]) * \text{matrix}(y[..., :, :])
The rank of the two input tensors must be at least `2`, and the two input tensors must have the same rank
if the environment is GPU or CPU.
Args:
transpose_a (bool): If ``True`` , the last two dimensions of `x` is transposed before multiplication.
Default: ``False`` .
transpose_b (bool): If ``True`` , the last two dimensions of `y` is transposed before multiplication.
Default: ``False`` .
Inputs:
- **x** (Tensor) - The first tensor to be multiplied. The shape of the tensor is :math:`(*B, N, C)`,
where :math:`*B` represents the batch size which can be multidimensional, :math:`N` and :math:`C` are the
size of the last two dimensions. If `transpose_a` is ``True`` , its shape must be :math:`(*B, C, N)`.
- **y** (Tensor) - The second tensor to be multiplied. The shape of the tensor is :math:`(*B, C, M)`. If
`transpose_b` is ``True`` , its shape must be :math:`(*B, M, C)`.
Outputs:
Tensor, the shape of the output tensor is :math:`(*B, N, M)`.
Raises:
TypeError: If `transpose_a` or `transpose_b` is not a bool.
ValueError: If length of shape of `x` is not equal to length of shape of `y` or
length of shape of inputs is less than 2.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.ones(shape=[2, 4, 1, 3]), mindspore.float32)
>>> y = Tensor(np.ones(shape=[2, 4, 3, 4]), mindspore.float32)
>>> batmatmul = ops.BatchMatMul()
>>> output = batmatmul(x, y)
>>> print(output.shape)
(2, 4, 1, 4)
>>> x = Tensor(np.ones(shape=[2, 4, 3, 1]), mindspore.float32)
>>> y = Tensor(np.ones(shape=[2, 4, 3, 4]), mindspore.float32)
>>> batmatmul = ops.BatchMatMul(transpose_a=True)
>>> output = batmatmul(x, y)
>>> print(output.shape)
(2, 4, 1, 4)
"""
@prim_arg_register
def __init__(self, transpose_a=False, transpose_b=False):
self._set_prim_arg("transpose_a", transpose_a)
self._set_prim_arg("transpose_b", transpose_b)
def __call__(self, x, y):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_batch_mat_mul(self, [x, y, self.transpose_a, self.transpose_b]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x, y, self.transpose_a, self.transpose_b)
return res
class EmbeddingApplyRmsprop(Primitive):
r"""
.. code-block::
prim = ops.EmbeddingApplyRmsprop()
out = prim(var_handle, lr, rho, momentum, epsilon, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num)
is equivalent to
.. code-block::
ops.embedding_apply_rmsprop(var_handle, lr, rho, momentum, epsilon, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num)
Refer to :func:`mindspore.ops.embedding_apply_rmsprop` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('var_handle'),
sig.make_sig('lr'),
sig.make_sig('rho'),
sig.make_sig('momentum'),
sig.make_sig('epsilon'),
sig.make_sig('grad'),
sig.make_sig('keys'),
sig.make_sig('global_step'),
sig.make_sig('embedding_dim'),
sig.make_sig('mask_zero', default=(0,)),
sig.make_sig('padding_key', default=(0,)),
sig.make_sig('padding_key_mask', default=(1,)),
sig.make_sig('completion_key', default=(0,)),
sig.make_sig('completion_key_mask', default=(1,)),
sig.make_sig('_embedding_dim', default=1),
sig.make_sig('_max_key_num', default=1),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("_process_node_engine_id", 'PS')
def __call__(self, var_handle, lr, rho, momentum, epsilon, grad, keys, global_step, embedding_dim, mask_zero=(0,), padding_key=(0,), padding_key_mask=(1,), completion_key=(0,), completion_key_mask=(1,), _embedding_dim=1, _max_key_num=1):
return super().__call__(var_handle, lr, rho, momentum, epsilon, grad, keys, global_step, embedding_dim, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num)
embedding_apply_rmsprop_op=EmbeddingApplyRmsprop()
class LayerNormGradV3(Primitive):
r"""
Applies the layer Normalization to the input array.
This operator will calculate the input gradients of LayerNormV3.
Inputs:
x (Tensor): The inputs of layer norm operator.
dy (Tensor): The gradient of outputs of layer norm operator.
rstd (Tensor): The rstd of x.
mean (Tensor): The mean of x.
gamma (Tensor): The weights of normalized elements.
begin_norm_axis (int): The begin axis for the input to apply LayerNormV3. Default: 1.
begin_params_axis (int): The begin axis for the parameter input to apply LayerNormV3. Default: 1.
Outputs:
tuple[Tensor], tuple of 3 tensors (the gradients of the LayerNormV3 input, gamma and beta).
pd_x (Tensor): the gradients of LayerNormV3 input x.
pd_gamma (Tensor): the gradients of gamma.
pd_beta (Tensor): the gradients of beta.
"""
@prim_arg_register
def __init__(self, begin_norm_axis=1, begin_params_axis=1):
self._set_prim_arg("begin_norm_axis", begin_norm_axis)
self._set_prim_arg("begin_params_axis", begin_params_axis)
def __call__(self, x, dy, variance, mean, gamma):
return super().__call__(x, dy, variance, mean, gamma, self.begin_norm_axis, self.begin_params_axis)
class EluGrad(Primitive):
r"""
Gradients of Elu operation.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, dout, out):
return super().__call__(dout, out)
elu_grad_op=EluGrad()
class ResizeNearestNeighborGrad(Primitive):
r"""
Computes the gradient of the `ResizeNearestNeighbor` operator.
Note:
The shape of input parameter `size` must be (height, width).
Inputs:
- **align_corners** (bool) - Whether the centers of the 4 corner pixels of the input
and output tensors are aligned. Default: ``False``.
- **half_pixel_centers** (bool, optional) - Whether half pixel center. If set to ``True``,
`align_corners` should be False. Default: ``False``.
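Examples:
A minimal illustrative sketch (it assumes this primitive is reachable via ``ops.auto_generate``,
that `size` is the (height, width) of the forward input, and that it runs in PyNative mode;
only the output shape is checked):
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> grads = Tensor(np.ones((1, 1, 4, 4)), mindspore.float32)  # gradient w.r.t. the resized output
>>> size = (2, 2)                                             # (height, width) of the forward input
>>> dx = ops.auto_generate.ResizeNearestNeighborGrad()(grads, size)
>>> print(dx.shape)  # the gradient is mapped back to the original spatial size
(1, 1, 2, 2)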
"""
@prim_arg_register
def __init__(self, align_corners=False, half_pixel_centers=False):
self._set_prim_arg("align_corners", align_corners)
self._set_prim_arg("half_pixel_centers", half_pixel_centers)
def __call__(self, grads, size):
return super().__call__(grads, size, self.align_corners, self.half_pixel_centers)
class AddN(Primitive):
r"""
.. code-block::
prim = ops.AddN()
out = prim(x)
is equivalent to
.. code-block::
ops.addn(x)
Refer to :func:`mindspore.ops.addn` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x):
return super().__call__(x)
addn_op=AddN()
class Elu(Primitive):
r"""
.. code-block::
prim = ops.Elu(alpha)
out = prim(input_x)
is equivalent to
.. code-block::
ops.elu(input_x, alpha)
Refer to :func:`mindspore.ops.elu` for more details.
"""
@prim_arg_register
def __init__(self, alpha=1.0):
self._set_prim_arg("alpha", alpha)
def __call__(self, input_x):
return super().__call__(input_x, self.alpha)
class TensorShape(Primitive):
r"""
Returns the shape of the input tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input_x = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
>>> output = ops.TensorShape()(input_x)
>>> print(output)
[3 2 1]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input_x):
return super().__call__(input_x)
tensor_shape_op=TensorShape()
class UpsampleBicubic2DGrad(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('dy'),
sig.make_sig('input_size'),
sig.make_sig('output_size', default=None),
sig.make_sig('scales', default=None),
sig.make_sig('align_corners', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, dy, input_size, output_size=None, scales=None, align_corners=False):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_upsample_bicubic2d_grad(self, [dy, input_size, output_size, scales, align_corners]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, dy, input_size, output_size, scales, align_corners)
return res
upsample_bicubic2d_grad_op=UpsampleBicubic2DGrad()
class OneHotExt(Primitive):
r"""
Computes a one-hot tensor.
The locations represented by the indices in `tensor` take value `1`, while all
other locations take value `0`.
Args:
- **tensor** (Tensor) - A tensor of indices. Tensor of shape :math:`(X_0, \ldots, X_n)`.
Data type must be int32 or int64.
- **num_classes** (int) - A scalar defining the depth of the one-hot dimension.
Returns:
Tensor, one-hot tensor.
Raises:
TypeError: If `num_classes` is not an int.
TypeError: If dtype of `tensor` is not int32 or int64.
ValueError: If `num_classes` is less than 0.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> import mindspore.ops as ops
>>> from mindspore import Tensor
>>> tensor = Tensor(np.array([0, 1, 2]), mindspore.int32)
>>> num_classes = 3
>>> output = ops.extend.one_hot(tensor, num_classes)
>>> print(output)
[[1 0 0]
[0 1 0]
[0 0 1]]
"""
@prim_arg_register
def __init__(self, axis=-1):
self._set_prim_arg("axis", axis)
def __call__(self, tensor, num_classes, on_value, off_value):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_one_hot_ext(self, [tensor, num_classes, on_value, off_value, self.axis]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, tensor, num_classes, on_value, off_value, self.axis)
return res
class UpsampleBilinear2DGrad(Primitive):
r"""
Upsample the 2-D gradient data with bilinear interpolation algorithm.
Note:
One of 'scales' and 'output_size' must be specified, and it is an error if both are specified.
Inputs:
- **dy** (Tensor) - Tensor of shape [N, C, H, W]. Must be one of the following types:
float16, float32, float64.
- **input_size** (Union[tuple[int], list[int]]): A required tuple[int] which contains 4 elements:
[batch, channels, height, width]. Must:
input_size[0] == dy.shape[0]
input_size[1] == dy.shape[1].
- **output_size** (Union[tuple[int], list[int]]): An optional tuple[int]. Default: ``None``.
It contains 2 elements: height, width, whose elements should be the same as `dy`. Must:
dy.shape[2] == output_size[0],
dy.shape[3] == output_size[1].
- **scales** (Union[tuple[float], list[float]]): An optional tuple[float]. Default: ``None``.
The scale array along each dimension, contain 2 elements: scale_height, scale_width. Must:
dy.shape[2] == floor(input_size[2] * scales[0]),
dy.shape[3] == floor(input_size[3] * scales[1]).
- **align_corners** (bool): An optional bool. Default: ``False``.
Outputs:
- **dx** (Tensor) - A Tensor whose shape is determined by `input_size`, and its dtype is the same as `dy`.
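Examples:
A minimal shape-only sketch (the shapes below are illustrative assumptions; this grad operator is normally
invoked internally by the backward pass of bilinear upsampling):
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> dy = Tensor(np.ones((1, 3, 8, 8)), mindspore.float32)
>>> dx = ops.auto_generate.UpsampleBilinear2DGrad()(dy, input_size=(1, 3, 4, 4), output_size=(8, 8))
>>> print(dx.shape)
(1, 3, 4, 4)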
"""
__mindspore_signature__ = (
sig.make_sig('dy'),
sig.make_sig('input_size'),
sig.make_sig('output_size', default=None),
sig.make_sig('scales', default=None),
sig.make_sig('align_corners', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, dy, input_size, output_size=None, scales=None, align_corners=False):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_upsample_bilinear2d_grad(self, [dy, input_size, output_size, scales, align_corners]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, dy, input_size, output_size, scales, align_corners)
return res
upsample_bilinear2d_grad_op=UpsampleBilinear2DGrad()
class Dense(Primitive):
r"""
.. code-block::
prim = ops.Dense()
out = prim(input, weight, bias)
is equivalent to
.. code-block::
ops.dense(input, weight, bias)
Refer to :func:`mindspore.ops.dense` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('weight'),
sig.make_sig('bias', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, weight, bias=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_dense(self, [input, weight, bias]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, weight, bias)
return res
dense_op=Dense()
class ZerosLikeExt(Primitive):
r"""
Returns a Tensor filled with the value 0, whose shape and data type are the same as the input.
Refer to :func:`mindspore.ops.zeros_like` for more details.
Args:
- **input** (Tensor) - Tensor of any dimension.
- **dtype** (mindspore.dtype, optional) - The desired data type of the output tensor. If ``None``, the output has the same dtype as `input`. Default: ``None``.
Returns:
Return a tensor filled with the value 0, with the same size as input.
Supported Platforms:
``Ascend``
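Examples:
A minimal sketch (assumes an Ascend backend, per the platforms listed above):
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.float32)
>>> out = ops.auto_generate.ZerosLikeExt()(x)
>>> print(out)
[[0. 0.]
 [0. 0.]]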
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('dtype', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dtype=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_zeros_like_ext(self, [input, dtype if dtype is None else dtype_to_type_id('ZerosLikeExt', 'dtype', dtype)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dtype if dtype is None else dtype_to_type_id('ZerosLikeExt', 'dtype', dtype))
return res
zeros_like_ext_op=ZerosLikeExt()
class ConstantPadND(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('padding'),
sig.make_sig('value', default=0.0),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, padding, value=0.0):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_constant_pad_nd(self, [input, padding, value]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, padding, value)
return res
constant_pad_nd_op=ConstantPadND()
class Arange(Primitive):
r"""
Creates a sequence of numbers that begins at `start` and extends by increments of
`step` up to but not including `end`.
Inputs:
start (number): The first number in the sequence.
Must have type: int32, int64, float32, or float64.
end (number): Upper end of the sequence, exclusive.
Must have type: int32, int64, float32, or float64.
step (number): Number that increments `start`.
Must have type: int32, int64, float32, or float64.
dtype (mindspore.dtype, optional): Specified dtype of the result tensor. Default: ``None`` .
Supported values are: int32, int64, float32, float64, and bfloat16.
Outputs:
A 1-D Tensor with the required dtype. When dtype is ``None``, then:
If `start`, `end` and `step` are all integers, the Tensor dtype is int64.
If at least one of `start`, `end` and `step` is floating-point numbers, the Tensor dtype is float32.
Raises:
TypeError: If the datatype of `start`, `end` or `step` is not supported.
ValueError: If `step` = 0.
ValueError: If `start` >= `end` when `step` > 0.
ValueError: If `start` <= `end` when `step` < 0.
Supported Platforms:
``Ascend``
Examples:
>>> from mindspore import ops
>>> start = 0
>>> end = 10
>>> step = 4
>>> net = ops.Arange()
>>> output = net(start, end, step)
>>> print(output)
[0 4 8]
"""
__mindspore_signature__ = (
sig.make_sig('start', dtype=sig.sig_dtype.T),
sig.make_sig('end', dtype=sig.sig_dtype.T),
sig.make_sig('step', dtype=sig.sig_dtype.T),
sig.make_sig('dtype', dtype=sig.sig_dtype.T1, default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, start, end, step, dtype=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_arange(self, [start, end, step, dtype if dtype is None else dtype_to_type_id('Arange', 'dtype', dtype)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, start, end, step, dtype if dtype is None else dtype_to_type_id('Arange', 'dtype', dtype))
return res
arange_op=Arange()
class ACos(Primitive):
r"""
.. code-block::
prim = ops.ACos()
out = prim(input)
is equivalent to
.. code-block::
ops.acos(input)
Refer to :func:`mindspore.ops.acos` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
return super().__call__(input)
acos_op=ACos()
class GmmV2Backward(Primitive):
r"""
.. code-block::
prim = ops.GmmV2Backward()
out = prim(grad, x, weight, group_list, group_list_type)
is equivalent to
.. code-block::
ops.gmm_v2_backward(grad, x, weight, group_list, group_list_type)
Refer to :func:`mindspore.ops.gmm_v2_backward` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('grad'),
sig.make_sig('x'),
sig.make_sig('weight'),
sig.make_sig('group_list', default=None),
sig.make_sig('group_list_type', default=0),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, grad, x, weight, group_list=None, group_list_type=0):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_gmm_v2_backward(self, [grad, x, weight, group_list, group_list_type]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, grad, x, weight, group_list, group_list_type)
return res
gmm_v2_backward_op=GmmV2Backward()
class GLU(Primitive):
r"""
Computes GLU (Gated Linear Unit activation function) of the input tensor.
.. math::
{GLU}(a, b)= a \otimes \sigma(b)
where :math:`a` is the first half of the `x` Tensor after `x` is split and :math:`b` is the second half.
Here :math:`\sigma` is the sigmoid function, and :math:`\otimes` is the Hadamard product.
See `Language Modeling with Gated Convolutional Networks <https://arxiv.org/abs/1612.08083>`_ .
.. warning::
This is an experimental API that is subject to change or deletion.
Args:
axis (int, optional): Axis to split the input `x`. The value range is `[-r, r)` where `r` is
the number of dimensions of `x`. Default: ``-1`` , the last dimension in `x`.
Inputs:
- **x** (Tensor) - Tensor to be calculated. Dtype is floating point and the shape
is :math:`(\ast_1, N, \ast_2)` where `*` means, any number of additional
dimensions. :math:`N` is required to be an even number, where :math:`N` is the
size of `x` on the dimension selected by `axis`.
Outputs:
Tensor, the same dtype as `x`, with the shape :math:`(\ast_1, M, \ast_2)` where :math:`M=N/2`.
Raises:
TypeError: If `x` is not a Tensor or `axis` is not an int.
IndexError: If the value of `axis` is out of the range of `[-r, r)`, where `r` is the number
of dimensions of `x`.
RuntimeError: If dtype of `x` is not supported.
RuntimeError: If the length of `x` in the dimension selected by `axis` is not even.
Supported Platforms:
``Ascend`` ``CPU``
Examples:
>>> from mindspore import ops, Tensor
>>> from mindspore import dtype as mstype
>>> import numpy as np
>>> axis = 0
>>> x = Tensor(np.array([0.3220, 0.9545, 0.7879, 0.0975, 0.3698,
... 0.5135, 0.5740, 0.3435, 0.1895, 0.8764,
... 0.4980, 0.9673, 0.9879, 0.6988, 0.9022,
... 0.9304, 0.1558, 0.0153, 0.1559, 0.9852]).reshape([2, 2, 5]), mstype.float32)
>>> glu = ops.GLU(axis=axis)
>>> y = glu(x)
>>> print(y)
[[[0.20028052 0.6916126 0.57412136 0.06512236 0.26307625]
[0.3682598 0.3093122 0.17306386 0.10212085 0.63814086]]]
"""
@prim_arg_register
def __init__(self, axis=-1):
self._set_prim_arg("axis", axis)
def __call__(self, x):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_glu(self, [x, self.axis]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x, self.axis)
return res
class TransposeExt(Primitive):
r"""
.. code-block::
prim = ops.TransposeExt()
out = prim(input, dim0, dim1)
is equivalent to
.. code-block::
ops.transpose_ext(input, dim0, dim1)
Refer to :func:`mindspore.ops.transpose_ext` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim0, dim1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_transpose_ext(self, [input, dim0, dim1]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim0, dim1)
return res
transpose_ext_op=TransposeExt()
class MaxPoolWithMask(Primitive):
r"""
Performs max pooling on the input Tensor and returns both max values and mask.
Typically the input is of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})`, MaxPool outputs
regional maximum in the :math:`(H_{in}, W_{in})`-dimension. Given kernel size
:math:`(h_{ker}, w_{ker})` and stride :math:`(s_0, s_1)`, the operation is as follows:
.. math::
\text{output}(N_i, C_j, h, w) = \max_{m=0, \ldots, h_{ker}-1} \max_{n=0, \ldots, w_{ker}-1}
\text{input}(N_i, C_j, s_0 \times h + m, s_1 \times w + n)
.. warning::
This is an experimental API that is subject to change or deletion. Only supported on Atlas training series.
Args:
kernel_size (Union[int, tuple[int]]): The size of kernel used to take the maximum value and argmax
value, is an int number that represents height and width of the kernel, or a tuple of
two int numbers that represent height and width respectively.
strides (Union[int, tuple[int]], optional): The distance of kernel moving, an int number that represents
not only the height of movement but also the width of movement, or a tuple of two int numbers that
represent height and width of movement respectively. Default: ``1``.
pads (Union[int, tuple[int]], optional): An int number that represents the padding applied to both
the height and width of the input, or a tuple of two int numbers that represent the height and
width padding respectively.
Default: ``0``.
dilation (Union[int, tuple[int]], optional): Control the stride of elements in the kernel.
Default: ``(1, 1)`` .
ceil_mode (bool, optional): Whether to use ceil instead of floor to calculate output shape.
Default: ``False`` .
argmax_type (mindspore.dtype, optional): The dtype for argmax.
Default: ``mstype.int64``. (This parameter is disabled on Ascend.)
Inputs:
- **x** (Tensor) - Tensor of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})` with data type of float16
or float32 on Ascend.
Outputs:
Tuple of 2 Tensors, representing the maxpool result and the generated mask.
- **output** (Tensor) - Maxpooling result, with shape :math:`(N_{out}, C_{out}, H_{out}, W_{out})`.
It has the same data type as `x`.
.. math::
H_{out} = \left\lfloor\frac{H_{in} + 2 * \text{pads[0]} - \text{dilation[0]}
\times (\text{kernel_size[0]} - 1) - 1}{\text{strides[0]}} + 1\right\rfloor
.. math::
W_{out} = \left\lfloor\frac{W_{in} + 2 * \text{pads[1]} - \text{dilation[1]}
\times (\text{kernel_size[1]} - 1) - 1}{\text{strides[1]}} + 1\right\rfloor
- **mask** (Tensor) - Maxpooling mask. Data type is int8 in Ascend.
Raises:
TypeError: If `x` is not a Tensor.
ValueError: If length of shape of `x` is not equal to 4.
TypeError: If `kernel_size` , `strides` , `pads` or `dilation` is not int or tuple.
ValueError: If `kernel_size`, `strides` or `dilation` is less than 1.
ValueError: If `pads` is less than 0.
ValueError: If `pads` is more than half of `kernel_size`.
TypeError: If `ceil_mode` is not bool.
Supported Platforms:
``Ascend``
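Examples:
A minimal shape-only sketch (the shapes and arguments below are illustrative assumptions; requires an
Atlas training series device):
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.random.rand(1, 3, 6, 6), mindspore.float16)
>>> maxpool = ops.auto_generate.MaxPoolWithMask(kernel_size=2, strides=2)
>>> output, mask = maxpool(x)
>>> # Per the formula above: H_out = floor((6 + 2*0 - 1*(2 - 1) - 1)/2 + 1) = 3, and likewise W_out = 3.
>>> print(output.shape)
(1, 3, 3, 3)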
"""
@prim_arg_register
def __init__(self, kernel_size, strides=None, pads=0, dilation=(1, 1), ceil_mode=False, argmax_type=mstype.int64):
self._set_prim_arg_with_handler("kernel_size", kernel_size, to_kernel_size)
self._set_prim_arg_with_handler("strides", strides, to_strides)
self._set_prim_arg_with_handler("pads", pads, to_output_padding)
self._set_prim_arg_with_handler("dilation", dilation, to_dilations)
self._set_prim_arg("ceil_mode", ceil_mode)
self._set_prim_arg_with_handler("argmax_type", argmax_type, dtype_to_type_id)
def __call__(self, x):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_max_pool_with_mask(self, [x, self.kernel_size, self.strides, self.pads, self.dilation, self.ceil_mode, self.argmax_type]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x, self.kernel_size, self.strides, self.pads, self.dilation, self.ceil_mode, self.argmax_type)
return res
class EmbeddingApplyAdamW(Primitive):
r"""
.. code-block::
prim = ops.EmbeddingApplyAdamW()
out = prim(var_handle, beta1_power, beta2_power, lr, weight_decay, beta1, beta2, epsilon, grad, keys, max_grad_norm, global_step, embedding_dim, ams_grad, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num)
is equivalent to
.. code-block::
ops.embedding_apply_adam_w(var_handle, beta1_power, beta2_power, lr, weight_decay, beta1, beta2, epsilon, grad, keys, max_grad_norm, global_step, embedding_dim, ams_grad, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num)
Refer to :func:`mindspore.ops.embedding_apply_adam_w` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('var_handle'),
sig.make_sig('beta1_power'),
sig.make_sig('beta2_power'),
sig.make_sig('lr'),
sig.make_sig('weight_decay'),
sig.make_sig('beta1'),
sig.make_sig('beta2'),
sig.make_sig('epsilon'),
sig.make_sig('grad'),
sig.make_sig('keys'),
sig.make_sig('max_grad_norm'),
sig.make_sig('global_step'),
sig.make_sig('embedding_dim'),
sig.make_sig('ams_grad', default=(0,)),
sig.make_sig('mask_zero', default=(0,)),
sig.make_sig('padding_key', default=(0,)),
sig.make_sig('padding_key_mask', default=(1,)),
sig.make_sig('completion_key', default=(0,)),
sig.make_sig('completion_key_mask', default=(1,)),
sig.make_sig('_embedding_dim', default=1),
sig.make_sig('_max_key_num', default=1),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("_process_node_engine_id", 'PS')
def __call__(self, var_handle, beta1_power, beta2_power, lr, weight_decay, beta1, beta2, epsilon, grad, keys, max_grad_norm, global_step, embedding_dim, ams_grad=(0,), mask_zero=(0,), padding_key=(0,), padding_key_mask=(1,), completion_key=(0,), completion_key_mask=(1,), _embedding_dim=1, _max_key_num=1):
return super().__call__(var_handle, beta1_power, beta2_power, lr, weight_decay, beta1, beta2, epsilon, grad, keys, max_grad_norm, global_step, embedding_dim, ams_grad, mask_zero, padding_key, padding_key_mask, completion_key, completion_key_mask, _embedding_dim, _max_key_num)
embedding_apply_adam_w_op=EmbeddingApplyAdamW()
class Clone(Primitive):
r"""
.. code-block::
prim = ops.Clone()
out = prim(input)
is equivalent to
.. code-block::
ops.clone(input)
Refer to :func:`mindspore.ops.clone` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_clone(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
clone_op=Clone()
class MinDim(Primitive):
r"""
Calculates the minimum value along with the given dim for the input tensor, and returns the minimum values and
indices.
Args:
input (Tensor) - The input tensor, which can have any number of dimensions. Set the shape of the input tensor as
:math:`(input_1, input_2, ..., input_N)`. Complex tensors are not supported.
dim (int): The dimension to reduce.
keepdim (bool): Whether to keep the reduced dimension. If ``True``, the output keeps the same number of
dimensions as the input; if ``False``, the reduced dimension is removed. Default: ``False``.
Returns:
tuple (Tensor), tuple of 2 tensors, containing the minimum value of the input tensor along the given
dimension `dim` and the corresponding index.
- **values** (Tensor) - The minimum value of input tensor, with the same shape as `index`, and same dtype as `input`.
- **index** (Tensor) - The index for the minimum value of the input tensor, with dtype int64. If `keepdim`
is ``True`` , the shape of output tensors is :math:`(input_1, input_2, ..., input_{dim-1}, 1, input_{dim+1}, ..., input_N)`.
Otherwise, the shape is :math:`(input_1, input_2, ..., input_{dim-1}, input_{dim+1}, ..., input_N)` .
Raises:
TypeError: If `input` is not Tensor.
TypeError: If `keepdim` is not a bool.
TypeError: If `dim` is not an int.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
>>> output, index = ops.auto_generate.MinDim()(x, 0, False)
>>> print(output, index)
0.0 0
>>> output, index = ops.auto_generate.MinDim()(x, 0, True)
>>> print(output, index)
[0.0] [0]
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('dim'),
sig.make_sig('keepdim', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim, keepdim=False):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_min_dim(self, [input, dim, keepdim]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim, keepdim)
return res
min_dim_op=MinDim()
class Select(Primitive):
r"""
.. code-block::
prim = ops.Select()
out = prim(condition, input, other)
is equivalent to
.. code-block::
ops.select(condition, input, other)
Refer to :func:`mindspore.ops.select` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('condition', dtype=sig.sig_dtype.T),
sig.make_sig('input', dtype=sig.sig_dtype.T1),
sig.make_sig('other', dtype=sig.sig_dtype.T1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, condition, input, other):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_select(self, [condition, input, other]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, condition, input, other)
return res
select_op=Select()
class EmptyLike(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('dtype', default=None),
sig.make_sig('device', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dtype=None, device=None):
return super().__call__(input, dtype, device)
empty_like_op=EmptyLike()
class FastGeLU(Primitive):
r"""
.. code-block::
prim = ops.FastGeLU()
out = prim(x)
is equivalent to
.. code-block::
ops.fast_gelu(x)
Refer to :func:`mindspore.ops.fast_gelu` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x):
return super().__call__(x)
fast_gelu_op=FastGeLU()
class InplaceScatterSrcReduce(Primitive):
r"""
InplaceScatterSrcReduce is for scatter_ when using a Tensor as the source element with reduce.
For details, please refer to :func:`mindspore.Tensor.scatter_`.
Examples:
>>> from mindspore import Tensor, int64, float32
>>> this_tensor = Tensor([[1, 2], [3, 4]], dtype=float32)
>>> index = Tensor([[1, 0], [1, 0]], dtype=int64)
>>> src = Tensor([[4, 3], [2, 1]], dtype=float32)
>>> this_tensor.scatter_(1, index, src, reduce='add')
>>> print(this_tensor)
[[4., 6.],
[4., 6.]]
Supported Platforms:
``Ascend``
"""
__mindspore_signature__ = (
sig.make_sig('input', sig.sig_rw.RW_WRITE),
sig.make_sig('dim'),
sig.make_sig('index'),
sig.make_sig('src'),
sig.make_sig('reduce'),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, input, dim, index, src, reduce):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_inplace_scatter_src_reduce(self, [input, dim, index, src, str_to_enum('InplaceScatterSrcReduce', 'reduce', reduce)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim, index, src, str_to_enum('InplaceScatterSrcReduce', 'reduce', reduce))
return res
inplace_scatter_src_reduce_op=InplaceScatterSrcReduce()
class GeLU(Primitive):
r"""
Gaussian Error Linear Units activation function.
GeLU is described in the paper `Gaussian Error Linear Units (GELUs) <https://arxiv.org/abs/1606.08415>`_.
And also please refer to `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding
<https://arxiv.org/abs/1810.04805>`_.
GeLU is defined as follows:
.. math::
GELU(x_i) = x_i*P(X < x_i)
where :math:`P` is the cumulative distribution function of the standard Gaussian distribution,
:math:`x_i` is the input element.
Note:
When calculating the input gradient of GELU with an input value of infinity, there are differences
in the output of the backward between 'Ascend' and 'GPU'.
When x is -inf, the computation result of 'Ascend' is 0, and the computation result of 'GPU' is NaN.
When x is inf, the computation result of 'Ascend' is dy, and the computation result of 'GPU' is NaN.
In mathematical terms, Ascend's result has higher precision.
Inputs:
- **x** (Tensor) - The input of the activation function GeLU, the data type is float16, float32 or float64.
Outputs:
Tensor, with the same type and shape as `x`.
Raises:
TypeError: If `x` is not a Tensor.
TypeError: If dtype of `x` is not float16, float32 or float64.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
>>> result = ops.GeLU()(x)
>>> print(result)
[0.841192 1.9545976 2.9963627]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_gelu(self, [input]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input)
return res
gelu_op=GeLU()
class Outer(Primitive):
r"""
.. code-block::
prim = ops.Outer()
out = prim(input, vec2)
is equivalent to
.. code-block::
ops.outer_ext(input, vec2)
Refer to :func:`mindspore.ops.outer_ext` for more details.
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, vec2):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_outer(self, [input, vec2]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, vec2)
return res
outer_op=Outer()
class AbsGrad(Primitive):
r"""
Computes gradients for abs operation.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, dout):
return super().__call__(x, dout)
abs_grad_op=AbsGrad()
class ReplicationPad2DGrad(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, grad_output, input, padding):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_replication_pad_2d_grad(self, [grad_output, input, padding]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, grad_output, input, padding)
return res
replication_pad_2d_grad_op=ReplicationPad2DGrad()
class ScatterAddExt(Primitive):
r"""
Adds each element in `src` to `input` at the index specified by `index`, along the dimension specified by `dim`.
It takes three inputs `input`, `src` and `index` of the same rank r >= 1.
For a 3-D tensor, the operation updates input as follows:
.. code-block::
input[index[i][j][k]][j][k] += src[i][j][k] # if dim == 0
input[i][index[i][j][k]][k] += src[i][j][k] # if dim == 1
input[i][j][index[i][j][k]] += src[i][j][k] # if dim == 2
Inputs:
- **input** (Tensor) - The target tensor. The rank must be at least 1.
- **dim** (int) - Which dim to scatter. Accepted range is [-r, r) where r = rank(`input`).
- **index** (Tensor) - The index of `input` to do scatter operation whose data type must be mindspore.int32 or
mindspore.int64. Same rank as `input`. Except for the dimension specified by `dim`,
the size of each dimension of `index` must be less than or equal to the size of
the corresponding dimension of `input`.
- **src** (Tensor) - The tensor doing the scatter operation with `input`, has the same type as `input` and
the size of each dimension must be greater than or equal to that of `index`.
Outputs:
Tensor, has the same shape and type as `input`.
Raises:
TypeError: If `index` is neither int32 nor int64.
ValueError: If the rank of any of `input`, `index` and `src` is less than 1.
ValueError: If the ranks of `input`, `index` and `src` are not the same.
ValueError: If, outside dimension `dim`, the size of any dimension of `index` is greater than the size of
the corresponding dimension of `input`.
ValueError: If the size of any dimension of `src` is less than that of `index`.
Supported Platforms:
``Ascend``
Examples:
>>> import numpy as np
>>> import mindspore as ms
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.array([[1, 2, 3, 4, 5]]), dtype=ms.float32)
>>> src = Tensor(np.array([[8, 8]]), dtype=ms.float32)
>>> index = Tensor(np.array([[2, 4]]), dtype=ms.int64)
>>> out = ops.auto_generate.ScatterAddExt()(input=input, dim=1, index=index, src=src)
>>> print(out)
[[1. 2. 11. 4. 13.]]
>>> input = Tensor(np.zeros((5, 5)), dtype=ms.float32)
>>> src = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=ms.float32)
>>> index = Tensor(np.array([[0, 0, 0], [2, 2, 2], [4, 4, 4]]), dtype=ms.int64)
>>> out = ops.auto_generate.ScatterAddExt()(input=input, dim=0, index=index, src=src)
>>> print(out)
[[1. 2. 3. 0. 0.]
[0. 0. 0. 0. 0.]
[4. 5. 6. 0. 0.]
[0. 0. 0. 0. 0.]
[7. 8. 9. 0. 0.]]
>>> input = Tensor(np.zeros((5, 5)), dtype=ms.float32)
>>> src = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=ms.float32)
>>> index = Tensor(np.array([[0, 2, 4], [0, 2, 4], [0, 2, 4]]), dtype=ms.int64)
>>> out = ops.auto_generate.ScatterAddExt()(input=input, dim=1, index=index, src=src)
>>> print(out)
[[1. 0. 2. 0. 3.]
[4. 0. 5. 0. 6.]
[7. 0. 8. 0. 9.]
[0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0.]]
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim, index, src):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_scatter_add_ext(self, [input, dim, index, src]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim, index, src)
return res
scatter_add_ext_op=ScatterAddExt()
class MaskedFill(Primitive):
r"""
.. code-block::
prim = ops.MaskedFill()
out = prim(input_x, mask, value)
is equivalent to
.. code-block::
ops.masked_fill(input_x, mask, value)
Refer to :func:`mindspore.ops.masked_fill` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input_x, mask, value):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_masked_fill(self, [input_x, mask, value]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input_x, mask, value)
return res
masked_fill_op=MaskedFill()
class RotaryPositionEmbeddingGrad(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('dy'),
sig.make_sig('cos'),
sig.make_sig('sin'),
sig.make_sig('dx', default=None),
sig.make_sig('mode', default=0),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, dy, cos, sin, dx=None, mode=0):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_rotary_position_embedding_grad(self, [dy, cos, sin, dx, mode]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, dy, cos, sin, dx, mode)
return res
rotary_position_embedding_grad_op=RotaryPositionEmbeddingGrad()
class SoftMarginLossGrad(Primitive):
r"""
"""
@prim_arg_register
def __init__(self, reduction='mean'):
self._set_prim_arg_with_handler("reduction", reduction, str_to_enum)
def __call__(self, predict, label, dout):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_soft_margin_loss_grad(self, [predict, label, dout, self.reduction]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, predict, label, dout, self.reduction)
return res
class PromptFlashAttention(Primitive):
r"""
The interface for fully inference.
B -- Batch size
N -- Num heads
S -- Sequence length
D -- Head dim
H -- Hidden size
Self attention constructs an attention model based on the relationship between input samples themselves. The
principle is to assume that the input sample sequence :math:`x` has length :math:`n`, and that each
element of :math:`x` is a :math:`d`-dimensional vector, which can be viewed as a token embedding. This sequence
can be transformed through 3 weight matrices to obtain 3 matrices with dimensions of :math:`n\times d`. The self
attention calculation formula is defined as:
.. math::
Attention(Q,K,V)=Softmax(\frac{QK^{T} }{\sqrt{d} } )V
where the product of :math:`Q` and :math:`K^{T}` represents the attention of input :math:`x`. To avoid the value
becoming too large, it is usually scaled by dividing it by the square root of :math:`d`; softmax normalization is
then performed on each row, and multiplying by :math:`V` yields an :math:`n\times d` matrix.
.. warning::
- This is an experimental API that is subject to change or deletion.
- `attn_mask` of type float16 will be deprecated in the future.
Note:
- Maximum Support for each axis
- Supports B-axis values less than or equal to 65536 (64k).
When the input type includes int8 with D-axis not aligned to 32, or the input type is
float16 or bfloat16 with D-axis not aligned to 16, the B-axis supports up to 128 only.
- Supports N-axis values less than or equal to 256.
- Supports S-axis values less than or equal to 20971520 (20M).
- Supports D-axis values less than or equal to 512.
- Quantization
- int8 Input, int8 Output: Parameters `deq_scale1`, `quant_scale1`, `deq_scale2`, and `quant_scale2`
must all be provided. `quant_offset2` is optional (default is 0 if not provided).
- int8 Input, float16 Output: Parameters `deq_scale1`, `quant_scale1`, and `deq_scale2` must all be provided.
If `quant_offset2` or `quant_scale2` is provided (i.e., not null), it will result in an error.
- float16 or bfloat16 Input, int8 Output: Parameter `quant_scale2` must be provided. `quant_offset2` is
optional (default is 0 if not provided). If `deq_scale1`, `quant_scale1`, or `deq_scale2` is
provided (i.e., not null), it will result in an error.
- int8 Output: `quant_scale2` and `quant_offset2` in per-channel format do not support scenarios with
left padding, Ring Attention, or non-32-byte aligned D-axis.
Does not support sparse as band and `pre_tokens`/`next_tokens` being negative.
- Other Usage Caveats:
- :math:`N` of parameter `query` must be equal to `num_heads`. :math:`N` of parameter `key` and parameter
`value` must be equal to `num_key_value_heads`.
- `num_heads` must be divisible by `num_key_value_heads`.
- When `query` dtype is bfloat16, D axis should align with 16.
Inputs:
query (Tensor): The query tensor with data type of int8, float16 or bfloat16.
The shape is :math:`(B, q_S, q_H)` / `(B, q_N, q_S, q_D)`.
key (Tensor): The key tensor with the same dtype as `query`.
The shape is :math:`(B, kv_S, kv_H)` / `(B, kv_N, kv_S, kv_D)`.
value (Tensor): The value tensor with the same dtype as `query`.
The shape is :math:`(B, kv_S, kv_H)` / `(B, kv_N, kv_S, kv_D)`.
attn_mask (Tensor, optional) - The attention mask tensor with data type of bool, int8, uint8 or float16.
For each element, 0/False indicates retention and 1/True indicates discard.
The shape is :math:`(q_S, kv_S)` / :math:`(B, q_S, kv_S)` / :math:`(1, q_S, kv_S)` /
:math:`(B, 1, q_S, kv_S)` / :math:`(1, 1, q_S, kv_S)`.
Default: ``None``.
actual_seq_lengths (Union[Tensor, tuple[int], list[int]], optional): Describe actual sequence length of each
batch of `query` with data type of int64. The shape is :math:`(B, )`.
Default: ``None``.
actual_seq_lengths_kv (Union[Tensor, tuple[int], list[int]], optional): Describe actual sequence length of each
batch of `key` or `value` with data type of int64. The shape is :math:`(B, )`.
Default: ``None``.
pse_shift (Tensor, optional): The position encoding tensor with data type of float16 or bfloat16.
Input tensor of shape :math:`(B, N, q_S, kv_S)` / :math:`(1, N, q_S, kv_S)`.
Default: ``None``.
- q_S must be greater than or equal to the query's S length, and kv_S must be greater than or
equal to the key's S length.
- If `pse_shift` has dtype float16, `query` should have dtype float16 or int8, in which case high
precision mode is enabled automatically.
- If `pse_shift` has dtype bfloat16, `query` should have dtype bfloat16.
deq_scale1 (Tensor, optional): Quantization parameter, a tensor with data type of uint64 or float32.
Input Tensor of shape :math:`(1,)`.
Default: ``None``.
quant_scale1 (Tensor, optional): Quantization parameter, a tensor with data type of float32.
Input Tensor of shape :math:`(1,)`.
Default: ``None``.
deq_scale2 (Tensor, optional): Quantization parameter, a tensor with data type of uint64 or float32.
Input Tensor of shape :math:`(1,)`.
Default: ``None``.
quant_scale2 (Tensor, optional): Quantization parameter, a tensor with data type of float32.
The suggested shape is :math:`(1,)` / :math:`(1, 1, H)` / :math:`(H, )` when output layout is BSH,
:math:`(1,)` / :math:`(1, N, 1, D)` / :math:`(N, D)` when layout is BNSD.
Default: ``None``.
quant_offset2 (Tensor, optional): Quantization parameter, a tensor with data type of float32.
It has the same dtype and shape as `quant_scale2`.
Default: ``None``.
num_heads (int, optional): The number of heads.
Default: ``1``.
scale_value (double, optional): The scale value indicating the scale coefficient, which is used as the scalar of
Muls in the calculation.
Default: ``1.0``.
pre_tokens (int, optional): For sparse computing, indicates the number of previous tokens the attention needs
to be associated with.
Default: ``2147483647``.
next_tokens (int, optional): For sparse computing, indicates the number of next tokens the attention needs
to be associated with.
Default: ``0``.
input_layout (str, optional): The data layout of the input qkv, supports ``BSH`` and ``BNSD``.
Default: ``BSH``.
num_key_value_heads (int, optional): An int that indicates the head number of ``key``/``value``, which is used
in the GQA algorithm. The value ``0`` means the key and value have the same head number as `query`, so
`num_heads` is used. If it is specified (not 0), it must be a factor of `num_heads` and must be equal to kv_n.
Default: ``0``.
sparse_mode (int, optional): An int specifies sparse mode, can be int from {0, 1, 2, 3, 4}.
Default: ``0``.
- sparseMode = 0: If `attn_mask` is a null pointer, `pre_tokens` and `next_tokens` inputs are ignored
(internally set to INT_MAX).
- sparseMode = 2, 3, 4: `attn_mask` shape must be :math:`(S, S)` or :math:`(1, S, S)` or
:math:`(1, 1, S, S)`, with S fixed at 2048. User must ensure that `attn_mask` is lower triangular.
If not provided or incorrect shape, it will result in an error.
- sparseMode = 1, 2, 3: Ignores `pre_tokens`, `next_tokens` inputs and sets values according
to specific rules.
- sparseMode = 4: `pre_tokens` and `next_tokens` must be non-negative.
inner_precise (int, optional): An int number from {0, 1} indicates computing mode.
``0`` for high precision mode for float16 dtype. ``1`` for high performance mode.
Default: ``1``.
Outputs:
attention_out (Tensor) - Output tensor, which has the same shape as `query`:
:math:`(B, q_S, q_H)` / :math:`(B, q_N, q_S, q_D)`.
Output dtype is determined by multiple factors, please refer to Note above for details.
Supported Platforms:
``Ascend``
Raises:
TypeError: Dtype of `query` is not int8, float16 or bfloat16.
TypeError: `query`, `key` and `value` don't have the same dtype.
TypeError: Dtype of `attn_mask` is not bool, int8 or uint8.
TypeError: Dtype of `pse_shift` is not bfloat16 or float16.
TypeError: `scale_value` is not a double number.
TypeError: `input_layout` is not a string.
TypeError: `num_key_value_heads` is not an int.
TypeError: `sparse_mode` is not an int.
TypeError: `inner_precise` is not an int.
TypeError: `quant_scale1` is not Tensor of type float32.
TypeError: `deq_scale1` is not Tensor of type uint64 or float32.
TypeError: `quant_scale2` is not Tensor of type float32.
TypeError: `deq_scale2` is not Tensor of type uint64 or float32.
TypeError: `quant_offset2` is not Tensor of type float32.
ValueError: `input_layout` is a string but is neither ``BSH`` nor ``BNSD``.
RuntimeError: `num_heads` is not divisible by `num_key_value_heads`.
RuntimeError: `num_heads` is not greater than 0.
RuntimeError: `num_key_value_heads` is not greater than or equal to 0.
RuntimeError: kv_n is not equal to `num_key_value_heads`.
RuntimeError: `attn_mask` shape is not valid.
RuntimeError: `sparse_mode` is specified but is not 0, 1, 2, 3 or 4.
RuntimeError: `query` dtype is bfloat16 and D axis is not aligned with 16.
RuntimeError: `input_layout` is BSH and kv_h is not divisible by `num_key_value_heads`.
RuntimeError: D-axis of `query`, `key` and `value` is not the same.
RuntimeError: In post quant per-channel scenario, D-axis is not 32 Byte aligned.
Examples:
>>> import mindspore
>>> import mindspore.ops as ops
>>> from mindspore import Tensor
>>> import numpy as np
>>> B = 1
>>> N = 16
>>> S = 256
>>> D = 16
>>> query = Tensor(np.ones((B, N, S, D), dtype=np.float16))
>>> key = Tensor(np.ones((B, N, S, D), dtype=np.float16))
>>> value = Tensor(np.ones((B, N, S, D), dtype=np.float16))
>>> out = ops.auto_generate.PromptFlashAttention(num_heads=N, input_layout='BNSD')(query, key, value)
>>> print(out.shape)
(1, 16, 256, 16)
"""
__mindspore_signature__ = (
sig.make_sig('query'),
sig.make_sig('key'),
sig.make_sig('value'),
sig.make_sig('attn_mask', default=None),
sig.make_sig('actual_seq_lengths', default=None),
sig.make_sig('actual_seq_lengths_kv', default=None),
sig.make_sig('pse_shift', default=None),
sig.make_sig('deq_scale1', default=None),
sig.make_sig('quant_scale1', default=None),
sig.make_sig('deq_scale2', default=None),
sig.make_sig('quant_scale2', default=None),
sig.make_sig('quant_offset2', default=None),
)
@prim_arg_register
def __init__(self, num_heads=1, scale_value=1.0, pre_tokens=2147483647, next_tokens=0, input_layout='BSH', num_key_value_heads=0, sparse_mode=0, inner_precise=1):
self._set_prim_arg("num_heads", num_heads)
self._set_prim_arg("scale_value", scale_value)
self._set_prim_arg("pre_tokens", pre_tokens)
self._set_prim_arg("next_tokens", next_tokens)
self._set_prim_arg_with_handler("input_layout", input_layout, str_to_enum)
self._set_prim_arg("num_key_value_heads", num_key_value_heads)
self._set_prim_arg("sparse_mode", sparse_mode)
self._set_prim_arg("inner_precise", inner_precise)
def __call__(self, query, key, value, attn_mask=None, actual_seq_lengths=None, actual_seq_lengths_kv=None, pse_shift=None, deq_scale1=None, quant_scale1=None, deq_scale2=None, quant_scale2=None, quant_offset2=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_prompt_flash_attention(self, [query, key, value, attn_mask, actual_seq_lengths, actual_seq_lengths_kv, pse_shift, deq_scale1, quant_scale1, deq_scale2, quant_scale2, quant_offset2, self.num_heads, self.scale_value, self.pre_tokens, self.next_tokens, self.input_layout, self.num_key_value_heads, self.sparse_mode, self.inner_precise]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, query, key, value, attn_mask, actual_seq_lengths, actual_seq_lengths_kv, pse_shift, deq_scale1, quant_scale1, deq_scale2, quant_scale2, quant_offset2, self.num_heads, self.scale_value, self.pre_tokens, self.next_tokens, self.input_layout, self.num_key_value_heads, self.sparse_mode, self.inner_precise)
return res
class Take(Primitive):
r"""
.. code-block::
prim = ops.Take()
out = prim(input, index)
is equivalent to
.. code-block::
ops.take(input, index)
Refer to :func:`mindspore.ops.take` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, index):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_take(self, [input, index]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, index)
return res
take_op=Take()
class KLDiv(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('target'),
sig.make_sig('reduction', default='mean'),
sig.make_sig('log_target', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, target, reduction='mean', log_target=False):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_kl_div(self, [input, target, str_to_enum('KLDiv', 'reduction', reduction), log_target]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, target, str_to_enum('KLDiv', 'reduction', reduction), log_target)
return res
kl_div_op=KLDiv()
class RepeatInterleaveGrad(Primitive):
r"""
Gradients of RepeatInterleave operation.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, repeats, dim):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_repeat_interleave_grad(self, [input, repeats, dim]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, repeats, dim)
return res
repeat_interleave_grad_op=RepeatInterleaveGrad()
class Addmm(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('mat1'),
sig.make_sig('mat2'),
sig.make_sig('beta', default=1),
sig.make_sig('alpha', default=1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, mat1, mat2, beta=1, alpha=1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_addmm(self, [input, mat1, mat2, beta, alpha]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, mat1, mat2, beta, alpha)
return res
addmm_op=Addmm()
class DistCommScatterTensor(Primitive):
r"""
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, other, input, rank_size, src, rank_id, group):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_dist_comm_scatter_tensor(self, [other, input, rank_size, src, rank_id, group]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, other, input, rank_size, src, rank_id, group)
return res
dist_comm_scatter_tensor_op=DistCommScatterTensor()
class ArgMaxExt(Primitive):
r"""
.. code-block::
prim = ops.ArgMaxExt()
out = prim(input, dim, keepdim)
is equivalent to
.. code-block::
ops.argmax_ext(input, dim, keepdim)
Refer to :func:`mindspore.ops.argmax_ext` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('input'),
sig.make_sig('dim', default=None),
sig.make_sig('keepdim', default=False),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, input, dim=None, keepdim=False):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_argmax_ext(self, [input, dim, keepdim]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, input, dim, keepdim)
return res
argmax_ext_op=ArgMaxExt()
class GroupedMatmul(Primitive):
r"""
.. code-block::
prim = ops.GroupedMatmul(split_item, group_type, transpose_a, transpose_b)
out = prim(x, weight, bias, scale, offset, antiquant_scale, antiquant_offset, group_list)
is equivalent to
.. code-block::
ops.grouped_matmul(x, weight, bias, scale, offset, antiquant_scale, antiquant_offset, group_list, split_item, group_type, transpose_a, transpose_b)
Refer to :func:`mindspore.ops.grouped_matmul` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('weight'),
sig.make_sig('bias', default=None),
sig.make_sig('scale', default=None),
sig.make_sig('offset', default=None),
sig.make_sig('antiquant_scale', default=None),
sig.make_sig('antiquant_offset', default=None),
sig.make_sig('group_list', default=None),
)
@prim_arg_register
def __init__(self, split_item=0, group_type=-1, transpose_a=False, transpose_b=False):
self._set_prim_arg("split_item", split_item)
self._set_prim_arg("group_type", group_type)
self._set_prim_arg("transpose_a", transpose_a)
self._set_prim_arg("transpose_b", transpose_b)
def __call__(self, x, weight, bias=None, scale=None, offset=None, antiquant_scale=None, antiquant_offset=None, group_list=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_grouped_matmul(self, [x, weight, bias, scale, offset, antiquant_scale, antiquant_offset, group_list, self.split_item, self.group_type, self.transpose_a, self.transpose_b]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x, weight, bias, scale, offset, antiquant_scale, antiquant_offset, group_list, self.split_item, self.group_type, self.transpose_a, self.transpose_b)
return res
class QuantBatchMatmul(Primitive):
r"""
.. code-block::
prim = ops.QuantBatchMatmul(transpose_x1, transpose_x2, dtype)
out = prim(x1, x2, scale, offset, bias, pertokenScaleOptional)
is equivalent to
.. code-block::
ops.quant_batch_matmul(x1, x2, scale, offset, bias, pertokenScaleOptional, transpose_x1, transpose_x2, dtype)
Refer to :func:`mindspore.ops.quant_batch_matmul` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('x1', dtype=sig.sig_dtype.T),
sig.make_sig('x2', dtype=sig.sig_dtype.T),
sig.make_sig('scale', dtype=sig.sig_dtype.T1),
sig.make_sig('offset', dtype=sig.sig_dtype.T2, default=None),
sig.make_sig('bias', dtype=sig.sig_dtype.T3, default=None),
sig.make_sig('pertokenScaleOptional', dtype=sig.sig_dtype.T4, default=None),
)
@prim_arg_register
def __init__(self, transpose_x1=False, transpose_x2=False, dtype=mstype.float16):
self._set_prim_arg("transpose_x1", transpose_x1)
self._set_prim_arg("transpose_x2", transpose_x2)
self._set_prim_arg_with_handler("dtype", dtype, dtype_to_type_id)
def __call__(self, x1, x2, scale, offset=None, bias=None, pertokenScaleOptional=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_quant_batch_matmul(self, [x1, x2, scale, offset, bias, pertokenScaleOptional, self.transpose_x1, self.transpose_x2, self.dtype]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x1, x2, scale, offset, bias, pertokenScaleOptional, self.transpose_x1, self.transpose_x2, self.dtype)
return res
class GroupedMatmulV4(Primitive):
r"""
.. code-block::
prim = ops.GroupedMatmulV4()
out = prim(x, weight, bias, scale, offset, antiquant_scale, antiquant_offset, pre_token_scale, group_list, activation_input, activation_quant_scale, activation_quant_offset, split_item, group_type, group_list_type, act_type)
is equivalent to
.. code-block::
ops.grouped_matmul_v4(x, weight, bias, scale, offset, antiquant_scale, antiquant_offset, pre_token_scale, group_list, activation_input, activation_quant_scale, activation_quant_offset, split_item, group_type, group_list_type, act_type)
Refer to :func:`mindspore.ops.grouped_matmul_v4` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('weight'),
sig.make_sig('bias', default=None),
sig.make_sig('scale', default=None),
sig.make_sig('offset', default=None),
sig.make_sig('antiquant_scale', default=None),
sig.make_sig('antiquant_offset', default=None),
sig.make_sig('pre_token_scale', default=None),
sig.make_sig('group_list', default=None),
sig.make_sig('activation_input', default=None),
sig.make_sig('activation_quant_scale', default=None),
sig.make_sig('activation_quant_offset', default=None),
sig.make_sig('split_item', default=0),
sig.make_sig('group_type', default=-1),
sig.make_sig('group_list_type', default=0),
sig.make_sig('act_type', default=0),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, weight, bias=None, scale=None, offset=None, antiquant_scale=None, antiquant_offset=None, pre_token_scale=None, group_list=None, activation_input=None, activation_quant_scale=None, activation_quant_offset=None, split_item=0, group_type=-1, group_list_type=0, act_type=0):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_grouped_matmul_v4(self, [x, weight, bias, scale, offset, antiquant_scale, antiquant_offset, pre_token_scale, group_list, activation_input, activation_quant_scale, activation_quant_offset, split_item, group_type, group_list_type, act_type]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x, weight, bias, scale, offset, antiquant_scale, antiquant_offset, pre_token_scale, group_list, activation_input, activation_quant_scale, activation_quant_offset, split_item, group_type, group_list_type, act_type)
return res
grouped_matmul_v4_op=GroupedMatmulV4()
class QuantV2(Primitive):
r"""
Returns the quantized value of input x.
If `sqrt_mode` is False:
.. math::
y = round(scale * x + offset)
If `sqrt_mode` is True:
.. math::
y = round(scale * x * scale + offset)
Inputs:
x (Tensor) : Input tensor.
Its data type must be mindspore.float16, mindspore.float32 or mindspore.bfloat16.
scale (Tensor): Scaling ratio tensor in quantization. Its type is the same as x.
Its size in the last axis must be equal to that of x in the last axis,
and the sizes of the other dimensions must be 1.
offset (Tensor): Offset tensor in quantization. Its type is the same as x.
Its size in the last axis must be equal to that of x in the last axis,
and the sizes of the other dimensions must be 1.
sqrt_mode (bool): Specifies whether to perform square root on `scale`. Only supports ``False``.
rounding_mode (str): Specifies the way to round. Only supports ``"ROUND"``.
dst_type (Type): Specifies the output type. Only supports ``int8``.
Returns:
Tensor, the quantized output tensor of type mindspore.int8. Its shape is the same as x.
Raises:
TypeError: If input, scale or offset is not a Tensor.
ValueError: The shape of scale or offset in the last axis is different from the shape of x in the last axis.
Supported Platforms:
``Ascend``
Examples:
>>> from mindspore import Tensor
>>> from mindspore import dtype as mstype
>>> from mindspore.ops.operations import _infer_ops as infer_ops
>>> x = Tensor([100.0, 150.0], mstype.float32)
>>> scale = Tensor([80.0, 40.0], mstype.float32)
>>> offset = Tensor([0.0, 2.0], mstype.float32)
>>> quant = infer_ops.QuantV2()
>>> y = quant(x, scale, offset, False, "ROUND", mstype.int8)
>>> print(y)
[127 127]
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('scale'),
sig.make_sig('offset'),
sig.make_sig('sqrt_mode', default=False),
sig.make_sig('rounding_mode', default='ROUND'),
sig.make_sig('dst_type', default=mstype.int8),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, scale, offset, sqrt_mode=False, rounding_mode='ROUND', dst_type=mstype.int8):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_quant_v2(self, [x, scale, offset, sqrt_mode, str_to_enum('QuantV2', 'rounding_mode', rounding_mode), dtype_to_type_id('QuantV2', 'dst_type', dst_type)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x, scale, offset, sqrt_mode, str_to_enum('QuantV2', 'rounding_mode', rounding_mode), dtype_to_type_id('QuantV2', 'dst_type', dst_type))
return res
quant_v2_op=QuantV2()
class GroupedMatmulV2(Primitive):
r"""
.. code-block::
prim = ops.GroupedMatmulV2()
out = prim(x, weight, bias, scale, offset, antiquant_scale, antiquant_offset, group_list, split_item, group_type)
is equivalent to
.. code-block::
ops.grouped_matmul_v2(x, weight, bias, scale, offset, antiquant_scale, antiquant_offset, group_list, split_item, group_type)
Refer to :func:`mindspore.ops.grouped_matmul_v2` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('weight'),
sig.make_sig('bias', default=None),
sig.make_sig('scale', default=None),
sig.make_sig('offset', default=None),
sig.make_sig('antiquant_scale', default=None),
sig.make_sig('antiquant_offset', default=None),
sig.make_sig('group_list', default=None),
sig.make_sig('split_item', default=0),
sig.make_sig('group_type', default=-1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, weight, bias=None, scale=None, offset=None, antiquant_scale=None, antiquant_offset=None, group_list=None, split_item=0, group_type=-1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_grouped_matmul_v2(self, [x, weight, bias, scale, offset, antiquant_scale, antiquant_offset, group_list, split_item, group_type]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x, weight, bias, scale, offset, antiquant_scale, antiquant_offset, group_list, split_item, group_type)
return res
grouped_matmul_v2_op=GroupedMatmulV2()
class WeightQuantBatchMatmul(Primitive):
r"""
.. code-block::
prim = ops.WeightQuantBatchMatmul(transpose_x, transpose_weight, antiquant_group_size)
out = prim(x, weight, antiquant_scale, antiquant_offset, quant_scale, quant_offset, bias)
is equivalent to
.. code-block::
ops.weight_quant_batch_matmul(x, weight, antiquant_scale, antiquant_offset, quant_scale, quant_offset, bias, transpose_x, transpose_weight, antiquant_group_size)
Refer to :func:`mindspore.ops.weight_quant_batch_matmul` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('weight'),
sig.make_sig('antiquant_scale'),
sig.make_sig('antiquant_offset', default=None),
sig.make_sig('quant_scale', default=None),
sig.make_sig('quant_offset', default=None),
sig.make_sig('bias', default=None),
)
@prim_arg_register
def __init__(self, transpose_x=False, transpose_weight=False, antiquant_group_size=0):
self._set_prim_arg("transpose_x", transpose_x)
self._set_prim_arg("transpose_weight", transpose_weight)
self._set_prim_arg("antiquant_group_size", antiquant_group_size)
def __call__(self, x, weight, antiquant_scale, antiquant_offset=None, quant_scale=None, quant_offset=None, bias=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_weight_quant_batch_matmul(self, [x, weight, antiquant_scale, antiquant_offset, quant_scale, quant_offset, bias, self.transpose_x, self.transpose_weight, self.antiquant_group_size]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x, weight, antiquant_scale, antiquant_offset, quant_scale, quant_offset, bias, self.transpose_x, self.transpose_weight, self.antiquant_group_size)
return res
class MatmulAllReduceAddRmsNorm(Primitive):
r"""
"""
__mindspore_signature__ = (
sig.make_sig('x1'),
sig.make_sig('x2'),
sig.make_sig('bias'),
sig.make_sig('residual'),
sig.make_sig('gamma'),
sig.make_sig('epsilon'),
sig.make_sig('group'),
sig.make_sig('reduce_op', default='sum'),
sig.make_sig('comm_turn', default=0),
sig.make_sig('stream_mode', default=1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, x1, x2, bias, residual, gamma, epsilon, group, reduce_op='sum', comm_turn=0, stream_mode=1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_matmul_allreduce_add_rmsnorm(self, [x1, x2, bias, residual, gamma, epsilon, group, str_to_enum('MatmulAllReduceAddRmsNorm', 'reduce_op', reduce_op), comm_turn, stream_mode]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x1, x2, bias, residual, gamma, epsilon, group, str_to_enum('MatmulAllReduceAddRmsNorm', 'reduce_op', reduce_op), comm_turn, stream_mode)
return res
matmul_allreduce_add_rmsnorm_op=MatmulAllReduceAddRmsNorm()
class DynamicQuantExt(Primitive):
r"""
Performs dynamic quantization on the input tensor.
Note:
- Dynamic quantization is performed by adjusting the scale of the input tensor dynamically.
- The `smooth_scales` tensor provides a mechanism to smooth out the scaling factors to avoid sudden changes.
- The input tensor `x` must be at least 1-dimensional, with shape :math:`(batches, n)`.
- The `smooth_scales` tensor must have shape `(n)`.
- The output `scale` tensor has shape `(batches)`.
.. math::
\begin{array}{ll} \\
\text{scale} = \frac{\max(\left| x \right|, \text{axis}=-1)}{127} \\
\text{y} = \text{round}\left(\frac{x}{\text{scale}}\right) \\
\end{array}
Inputs:
x (Tensor): The first input is a tensor of data type float16 or bfloat16.
It contains the data to be quantized.
smooth_scales (Tensor): The second input is a tensor of data type float16 or bfloat16.
It contains the scaling factors used for dynamic quantization.
Outputs:
tuple[Tensor], tuple of 2 tensors, representing the quantized values and the scales used.
- **y** (Tensor) - The quantized tensor.
- **scale** (Tensor) - The scales used for quantization.
Raises:
ValueError: If the rank of `x` is not at least 1.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, ops
>>> input = Tensor(np.random.rand(2, 3), mindspore.float16)
>>> smooth_scales = Tensor(np.random.rand(3), mindspore.float16)
>>> output = ops.auto_generate.DynamicQuantExt()(input, smooth_scales)
>>> print(output[0].shape)
(2, 3)
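The scale output can be cross-checked against the formula above with a plain NumPy
reference (a sketch only; multiplying by `smooth_scales` before taking the maximum is
an assumption about how the smoothing is applied, not stated in this docstring):
>>> x_np = input.asnumpy().astype(np.float32) * smooth_scales.asnumpy().astype(np.float32)
>>> ref_scale = np.abs(x_np).max(axis=-1) / 127.0
>>> print(ref_scale.shape)
(2,)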
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('smooth_scales', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, smooth_scales=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_dynamic_quant_ext(self, [x, smooth_scales]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x, smooth_scales)
return res
dynamic_quant_ext_op=DynamicQuantExt()
class QuantLinearSparse(Primitive):
r"""
Matmul with A8W8 quantization and compressed weight.
.. warning::
This is an experimental API that is subject to change or deletion.
Note:
- The inputs `weight` and `compress_idx` should be generated by the compress tool of model_slim.
- Only Ascend 310P is supported.
Inputs:
x (Tensor): The left matrix with data type of int8.
weight (Tensor): The compressed 1-D weight with data type of int8.
deq_scale (Tensor): The dequant scale with data type of int64.
compress_idx (Tensor): The index for decompress weight with data type of int8.
bias (Tensor): The bias with data type of int32.
Outputs:
A 2-D Tensor with data type of float16.
Supported Platforms:
``Ascend``
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, weight, deq_scale, compress_idx, bias):
return super().__call__(x, weight, deq_scale, compress_idx, bias)
quant_linear_sparse_op=QuantLinearSparse()
class MoeInitRouting(Primitive):
r"""
.. code-block::
prim = ops.MoeInitRouting()
out = prim(x, row_idx, expert_idx, active_num)
is equivalent to
.. code-block::
ops.moe_init_routing(x, row_idx, expert_idx, active_num)
Refer to :func:`mindspore.ops.moe_init_routing` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, row_idx, expert_idx, active_num):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_moe_init_routing(self, [x, row_idx, expert_idx, active_num]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x, row_idx, expert_idx, active_num)
return res
moe_init_routing_op=MoeInitRouting()
class MoeGatingTopKSoftmax(Primitive):
r"""
In MoE computation, softmax is applied to `x` along the last axis and the top-k values and their indices are taken.
Args:
k (int): The k value of the top-k selection, where 0 <= k <= the size of the last axis of `x`.
Inputs:
- **x** (Tensor) - The input to be computed. The tensor must be 2D or 3D. Supported dtypes: float16, bfloat16, float32.
- **finished** (Tensor, optional) - A 1D or 2D Tensor. Supported dtype: bool. The shape of the tensor must be :math:`x.shape[:-1]`. Default: ``None``.
Outputs:
- **y** (Tensor) - The topk value taken after doing softmax on `x`. The data type is the same as `x`. The -1 axis of the shape has the same size as the `k` value, and the rest of the axes have the same size as the corresponding axes of `x`.
- **expert_idx** (Tensor) - The index of the topk value, i.e., the expert's serial number, is taken after doing softmax on `x`. The shape is the same as `y`. The data type is int32.
- **row_idx** (Tensor) - Indicates the original row position corresponding to each position, shape is consistent with `y`. The data type is int32.
Raises:
TypeError: If the data type of input Tensor does not match the description in args.
ValueError: If the shape of input Tensor does not match the description in args.
Supported Platforms:
``Ascend``
Examples:
>>> import mindspore as ms
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore.ops.operations import _infer_ops
>>> x = Tensor(np.array([[0.1, 0.2, 0.1, 0.1],
... [0.2, 0.2, 0.4, 0.2],
... [0.3, 0.3, 0.1, 0.3],
... [0.1, 0.7, 0.1, 0.1]]), ms.float16)
>>> finished = Tensor(np.array([True, True, True, True]), ms.bool_)
>>> net = _infer_ops.MoeGatingTopKSoftmax()
>>> output = net(x, finished, k=4)
>>> print(output[0])
[[0.2693 0.2437 0.2437 0.2437]
[0.2893 0.2369 0.2369 0.2369]
[0.262 0.262 0.262 0.2144]
[0.378 0.2074 0.2074 0.2074]]
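The softmax/top-k part can be approximated with plain NumPy (a reference sketch only;
`row_idx` generation and the exact tie-breaking order of equal probabilities are not
reproduced here):
>>> p = np.exp(x.asnumpy().astype(np.float32))
>>> p = p / p.sum(axis=-1, keepdims=True)
>>> idx = np.argsort(-p, axis=-1)[..., :4]
>>> y_ref = np.take_along_axis(p, idx, axis=-1)
>>> print(y_ref.shape)
(4, 4)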
"""
__mindspore_signature__ = (
sig.make_sig('x'),
sig.make_sig('finished', default=None),
sig.make_sig('k', default=1),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, x, finished=None, k=1):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_moe_gating_top_k_softmax(self, [x, finished, k]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, x, finished, k)
return res
moe_gating_top_k_softmax_op=MoeGatingTopKSoftmax()
class MoeFinalizeRouting(Primitive):
r"""
.. code-block::
prim = ops.MoeFinalizeRouting()
out = prim(expanded_x, x1, x2, bias, scales, expanded_row_idx, expanded_expert_idx)
is equivalent to
.. code-block::
ops.moe_finalize_routing(expanded_x, x1, x2, bias, scales, expanded_row_idx, expanded_expert_idx)
Refer to :func:`mindspore.ops.moe_finalize_routing` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('expanded_x'),
sig.make_sig('x1'),
sig.make_sig('x2', default=None),
sig.make_sig('bias', default=None),
sig.make_sig('scales', default=None),
sig.make_sig('expanded_row_idx', default=None),
sig.make_sig('expanded_expert_idx', default=None),
)
@prim_arg_register
def __init__(self):
pass
def __call__(self, expanded_x, x1, x2=None, bias=None, scales=None, expanded_row_idx=None, expanded_expert_idx=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_moe_finalize_routing(self, [expanded_x, x1, x2, bias, scales, expanded_row_idx, expanded_expert_idx]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, expanded_x, x1, x2, bias, scales, expanded_row_idx, expanded_expert_idx)
return res
moe_finalize_routing_op=MoeFinalizeRouting()
class KVCacheScatterUpdate(Primitive):
r"""
.. code-block::
prim = ops.KVCacheScatterUpdate()
out = prim(var, indices, updates, axis, reduce)
is equivalent to
.. code-block::
ops.kv_cache_scatter_update(var, indices, updates, axis, reduce)
Refer to :func:`mindspore.ops.kv_cache_scatter_update` for more details.
"""
__mindspore_signature__ = (
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('indices', dtype=sig.sig_dtype.T1),
sig.make_sig('updates', dtype=sig.sig_dtype.T),
sig.make_sig('axis', dtype=sig.sig_dtype.T2),
sig.make_sig('reduce', dtype=sig.sig_dtype.T3, default='none'),
)
@prim_arg_register
def __init__(self):
self.add_prim_attr("side_effect_mem", True)
def __call__(self, var, indices, updates, axis, reduce='none'):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_kv_cache_scatter_update(self, [var, indices, updates, axis, str_to_enum('KVCacheScatterUpdate', 'reduce', reduce)]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, var, indices, updates, axis, str_to_enum('KVCacheScatterUpdate', 'reduce', reduce))
return res
kv_cache_scatter_update_op=KVCacheScatterUpdate()
class MoeComputeExpertTokens(Primitive):
r"""
.. code-block::
prim = ops.MoeComputeExpertTokens()
out = prim(sorted_experts, num_expert)
is equivalent to
.. code-block::
ops.moe_compute_expert_tokens(sorted_experts, num_expert)
Refer to :func:`mindspore.ops.moe_compute_expert_tokens` for more details.
"""
@prim_arg_register
def __init__(self):
pass
def __call__(self, sorted_experts, num_expert):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_moe_compute_expert_tokens(self, [sorted_experts, num_expert]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, sorted_experts, num_expert)
return res
moe_compute_expert_tokens_op=MoeComputeExpertTokens()
class FusedInferAttentionScore(Primitive):
r"""
This is a FlashAttention function designed for both incremental and full inference scenarios. It supports full
inference scenarios (PromptFlashAttention) as well as incremental inference scenarios (IncreFlashAttention).
When the S dimension of the query tensor (Q_S) equals 1, it enters the IncreFlashAttention branch; otherwise,
it enters the PromptFlashAttention branch.
.. math::
Attention(Q,K,V) = Softmax(\frac{QK^{T}}{\sqrt{d}})V
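As a plain NumPy reference for a single head, the formula above can be sketched as follows
(a sketch only, ignoring masks, quantization and layout handling; `q`, `k`, `v` are assumed
to be 2-D :math:`(S, D)` arrays):
.. code-block::
d = q.shape[-1]
s = q @ k.T / np.sqrt(d)
p = np.exp(s - s.max(-1, keepdims=True))
attention = (p / p.sum(-1, keepdims=True)) @ v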
.. warning::
- This is an experimental API that is subject to change or deletion.
- For Ascend, only the Atlas A2 training series products and Atlas 800I A2 inference products are currently
supported.
Note:
- The data layout formats of query, key and value can be interpreted from multiple dimensions, as shown below:
- B, Batch size. Represents the batch size of the input samples.
- S, Sequence length. Represents the sequence length of the input samples. S1 represents the sequence length
of the query, and S2 represents the sequence length of the key/value.
- H, Head size. Represents the size of the hidden layer.
- N, Head nums. Represents the number of attention heads.
- D, Head dims. Represents the smallest unit size of the hidden layer, satisfying :math:`D = H / N`.
Args:
num_heads (int, optional): The number of heads in the query, equal to N when input_layout is BNSD. Default: ``1``.
scale_value (double, optional): The scale value indicating the scale coefficient, which serves as the scalar value for
the Muls in the calculation. Generally, the value is :math:`1.0 / \sqrt{d}`. Default: ``1.0``.
pre_tokens (int, optional): Parameter for sparse computation, represents how many tokens are counted forward.
Default: ``2147483647``. Invalid when Q_S is 1.
next_tokens (int, optional): Parameter for sparse computation, represents how many tokens are counted backward.
Default: ``2147483647``. Invalid when Q_S is 1.
input_layout (str, optional): Specifies the layout of input query, key and value. BSH, BNSD, BSND or BNSD_BSND
is supported. When the layout is BNSD_BSND, it means the input is in the BNSD format and the output is
in the BSND format, this is only supported when Q_S > 1.
Default: ``BSH``.
num_key_value_heads (int, optional): Head numbers of key/value which are used in GQA (Grouped-Query Attention) scenario.
Default: ``0``. A value of 0 means it is equal to the number of key/value heads. The num_heads must be
divisible by num_key_value_heads, and the ratio of num_heads to num_key_value_heads must not be greater
than 64. When the layout is BNSD, num_key_value_heads must also equal the N dimension of the
key/value shapes; otherwise, an execution error will occur.
sparse_mode (int, optional): Indicates sparse mode. Default ``0``. Invalid when Q_S is 1.
- 0: Indicates the defaultMask mode. If attn_mask is not passed, the mask operation is not performed,
and pre_tokens and next_tokens (internally assigned as INT_MAX) are ignored. If attn_mask is passed, the complete
attn_mask matrix (S1 * S2) must also be passed in, indicating that the part between pre_tokens and
next_tokens needs to be calculated.
- 1: Represents allMask. The complete attn_mask matrix (S1 * S2) is required.
- 2: Represents the mask in leftUpCausal mode. The optimized attn_mask matrix (2048*2048) is required.
- 3: Represents the mask in rightDownCausal mode, corresponding to the lower triangular scenario divided by
the right vertex. The optimized attn_mask matrix (2048*2048) is required.
- 4: Represents the mask in band mode, that is, the part between counting pre_tokens and next_tokens. The
optimized attn_mask matrix (2048*2048) is required.
- 5: Represents the prefix scenario, not implemented yet.
- 6: Represents the global scenario, not implemented yet.
- 7: Represents the dilated scenario, not implemented yet.
- 8: Represents the block_local scenario, not implemented yet.
inner_precise (int, optional): There are four modes: 0, 1, 2, and 3, represented by 2 bits: bit 0 (bit0) represents the
choice for high precision or high performance, and bit 1 (bit1) indicates whether row-wise invalidity
correction is applied.
- 0: Enable high-precise mode, without row-wise invalidity correction.
- 1: High-performance mode, without row-wise invalidity correction.
- 2: Enable high-precise mode, with row-wise invalidity correction.
- 3: High-performance mode, with row-wise invalidity correction.
When Q_S > 1, if sparse_mode is 0 or 1 and a user-defined mask is provided, it is recommended to enable
row-wise invalidity correction. Only support 0 and 1 when Q_S is 1. Default: ``1``.
High-precise and high-performance are only effective for float16 inputs; Row invalidity correction
is effective for float16, bfloat16, and int8 inputs.
Currently, 0 and 1 are reserved configuration values. If there is a situation where an entire row in the
"mask portion involved in computation" is all 1s, precision may degrade. In such cases, you can try
setting this parameter to 2 or 3 to enable row invalidity correction for improved precision. However,
this configuration will result in decreased performance.
If the function can detect the presence of invalid row scenarios, e.g. in cases where sparse_mode is 3
and S_q > S_kv, it will automatically enable row invalidity computation.
block_size (int, optional): Maximum number of tokens per block in the KV cache block for PageAttention.
Default: ``0``. Invalid when Q_S > 1.
antiquant_mode (int, optional): Fake-quantization mode, 0: per-channel (per-channel includes per-tensor), 1: per-token.
The per-channel and per-tensor modes can be distinguished by the dimension of the input shape. When the
dimension is 1, it runs in per-tensor mode; otherwise, it runs in per-channel mode.
Default: ``0``. Invalid when Q_S > 1.
key_antiquant_mode (int, optional): Fake-quantization mode for the key. 0: per-channel (per-channel includes per-tensor),
1: per-token. Default: ``0``. Invalid when Q_S > 1.
value_antiquant_mode (int, optional): Fake-quantization mode for the value. 0: per-channel (per-channel includes
per-tensor), 1: per-token. Default: ``0``. Invalid when Q_S > 1.
softmax_lse_flag (bool, optional): Whether to output softmax_lse. Default: ``False``.
Inputs:
- **query** (Tensor) - The query input of the attention structure, with data type of float16, bfloat16 or int8.
Input tensor of shape :math:`(B, S, H)`, :math:`(B, N, S, D)`, or :math:`(B, S, N, D)`.
- **key** (Union[tuple[Tensor], list[Tensor]]) - The key input of the attention structure, with data type
of float16, bfloat16 or int8. Input tensor of shape :math:`(B, S, H)`, :math:`(B, N, S, D)`, or
:math:`(B, S, N, D)`.
- **value** (Union[tuple[Tensor], list[Tensor]]) - The value input of the attention structure, with data
type of float16, bfloat16 or int8. Input tensor of shape :math:`(B, S, H)`, :math:`(B, N, S, D)`, or
:math:`(B, S, N, D)`.
- **pse_shift** (Tensor, optional) - The padding mask tensor with data type of float16 or bfloat16.
Default: ``None``.
- When Q_S is not 1, if pse_shift is of type float16, the query must be of type float16 or int8.
If pse_shift is of type bfloat16, the query must also be of type bfloat16. The input shape
must be either :math:`(B, N, Q\_S, KV\_S)` or :math:`(1, N, Q\_S, KV\_S)`, where Q_S corresponds to the
S dimension of the query shape, and KV_S corresponds to the S dimension of the key and value shapes.
For scenarios where the KV_S of pse_shift is not 32-aligned, it is recommended to pad it
to 32 bytes to improve performance. The padding values for the extra portions are not restricted.
- When Q_S is 1, if pse_shift is of type float16, the query must also be of type float16.
If pse_shift is of type bfloat16, the query must be of type bfloat16. The input shape must be
:math:`(B, N, 1, KV\_S)` or :math:`(1, N, 1, KV\_S)`, where KV_S corresponds to the S dimension of the
key/value shapes. For scenarios where the KV\_S of pse_shift is not 32-aligned, it is recommended
to pad it to 32 bytes to improve performance. The padding values for the extra portions are not
restricted.
- **attn_mask** (Tensor, optional) - The attention mask tensor for the result of query*key with data type of int8, uint8 or bool.
For each element, 0 indicates retention and 1 indicates discard.
Default: ``None``.
- When Q_S is not 1, the recommended input shapes are Q_S,KV_S; B,Q_S,KV_S; 1,Q_S,KV_S; B,1,Q_S,KV_S
or 1,1,Q_S,KV_S.
- When Q_S is 1, the recommended input shapes are B,KV_S; B,1,KV_S or B,1,1,KV_S.
- **actual_seq_lengths** (Union[tuple[int], list[int], Tensor], optional) - Describes the actual sequence length of the query with
data type of int64. If this parameter is not specified, it can be set to None, indicating that it matches
the S dimension of the query shape. Constraint: The effective sequence length for each batch in this
parameter should not exceed the corresponding batch's sequence length in the query. When Q_S is 1, this
parameter is ignored.
Default: ``None``.
- **actual_seq_lengths_kv** (Union[tuple[int], list[int], Tensor], optional) - Describes the actual sequence length of the key and
value with data type of int64. If this parameter is not specified, it can be set to None, indicating that
it matches the S dimension of the key and value shape. Constraint: The effective sequence length for each
batch in this parameter should not exceed the corresponding batch's sequence length in the key and value.
Default: ``None``.
- **dequant_scale1** (Tensor, optional) - Quantization factors for inverse quantization after BMM1 with data type of uint64.
Supports per-tensor mode. If not used, set it to None.
Default: ``None``.
- **quant_scale1** (Tensor, optional) - Quantization factors for quantization before BMM2 with data type of float32.
Supports per-tensor mode. If not used, set it to None.
Default: ``None``.
- **dequant_scale2** (Tensor, optional) - Quantization factors for inverse quantization after BMM2 with data type of uint64.
Supports per-tensor mode. If not used, set it to None.
Default: ``None``.
- **quant_scale2** (Tensor, optional) - Quantization factors for output quantization with data type of float32, bfloat16.
Supports per-tensor and per-channel modes. If not used, set it to None.
Default: ``None``.
- **quant_offset2** (Tensor, optional) - Quantization offset for output quantization with data type of float32, bfloat16.
Supports per-tensor and per-channel modes. If not used, set it to None.
Default: ``None``.
For scenarios where the input is int8 and the output is int8: the parameters dequant_scale1, quant_scale1,
dequant_scale2, and quant_scale2 must all be provided. The parameter quant_offset2 is optional and defaults
to 0 if not specified.
- When the output is int8 and quant_scale2 and quant_offset2 are per-channel, left padding, Ring Attention,
or D-axis misalignment (not 32-aligned) scenarios are not supported.
- When the output is int8, scenarios with sparse_mode as band and pre_tokens/next_tokens being negative are
not supported.
- When the output is int8, if quant_offset2 is not None and not an empty tensor, and the sparse_mode, pre_tokens,
and next_tokens meet the following conditions, certain rows of the matrix may not participate in
calculations, leading to errors. This scenario will be intercepted (solution: if this scenario should
not be intercepted, quantization should be performed outside the FIA interface, not enabled inside the
FIA interface):
- sparse_mode = 0, if attn_mask is not None and each batch's
actual_seq_lengths - actual_seq_lengths_kv - pre_tokens > 0 or next_tokens < 0, it will meet the
interception condition.
- sparse_mode = 1 or 2, no interception condition will occur.
- sparse_mode = 3, if each batch's actual_seq_lengths - actual_seq_lengths_kv < 0, it will meet the
interception condition.
- sparse_mode = 4, if pre_tokens < 0 or each batch's
next_tokens + actual_seq_lengths - actual_seq_lengths_kv < 0, it will meet the interception
condition.
For scenarios where the input is int8 and the output is float16: the parameters dequant_scale1,
quant_scale1, and dequant_scale2 must all be provided.
For scenarios where the input is entirely float16 or bfloat16 and the output is int8: the parameter
quant_scale2 must be provided. The parameter quant_offset2 is optional and defaults to 0 if not specified.
The parameters quant_scale2 and quant_offset2 support both per-tensor and per-channel modes and two data
types: float32 and bfloat16. If quant_offset2 is provided, its type and shape must match those of
quant_scale2. When the input is bfloat16, both float32 and bfloat16 are supported; otherwise, only float32
is supported. For per-channel mode: When the output layout is BSH, the product of all dimensions in
quant_scale2 must equal H. For other layouts, the product must equal N * D. When the output layout is BSH,
it is recommended to set the shape of quant_scale2 as :math:`(1, 1, H)` or :math:`(H)`. When the output
layout is BNSD, it is recommended to set the shape as :math:`(1, N, 1, D)` or :math:`(N, D)`. When the
output layout is BSND, it is recommended to set the shape as :math:`(1, 1, N, D)` or :math:`(N, D)`.
- **antiquant_scale** (Tensor, optional) - Inverse quantization factors with data type of float16, float32 or bfloat16.
Only support float16 when Q_S > 1. Supports per-tensor, per-channel and per-token modes.
Default: ``None``.
- **antiquant_offset** (Tensor, optional) - Inverse quantization offset with data type of float16, float32 or bfloat16.
Only support float16 when Q_S > 1. Supports per-tensor, per-channel and per-token modes.
Default: ``None``.
Constraints for antiquant_scale and antiquant_offset parameters:
- Supports three modes: per-channel, per-tensor, and per-token:
- Per-channel mode: The shape of both parameters in the BNSD layout is :math:`(2, N, 1, D)`, the shape
in the BSND layout is :math:`(2, N, D)`, and the shape in the BSH layout is :math:`(2, H)`, where 2
corresponds to the key and value, and N represents num_key_value_heads. The parameter data type is
the same as the query data type, and antiquant_mode should be set to 0.
- Per-tensor mode: The shape of both parameters is :math:`(2)`, the data type is the same as the query
data type, and antiquant_mode should be set to 0.
- Per-token mode: The shape of both parameters is :math:`(2, B, S)`, the data type is fixed to float32,
and antiquant_mode should be set to 1.
- Supports both symmetric and asymmetric quantization:
- Asymmetric quantization mode: Both antiquant_scale and antiquant_offset must be provided.
- Symmetric quantization mode: antiquant_offset can be empty (``None``). If antiquant_offset is empty,
symmetric quantization is performed. If antiquant_offset is provided, asymmetric quantization is
performed.
- **key_antiquant_scale** (Tensor, optional) - Inverse quantization factors for the key, with data type of float16, float32 or
bfloat16, when the KV fake quantization parameters are separated.
Supports per-tensor, per-channel and per-token modes.
Default: ``None``. Invalid when Q_S > 1.
- **key_antiquant_offset** (Tensor, optional) - Inverse quantization offset for the key, with data type of float16, float32 or
bfloat16, when the KV fake quantization parameters are separated.
Supports per-tensor, per-channel and per-token modes.
Default: ``None``. Invalid when Q_S > 1.
- **value_antiquant_scale** (Tensor, optional) - Inverse quantization factors for the value, with data type of float16, float32
or bfloat16, when the KV fake quantization parameters are separated.
Supports per-tensor, per-channel and per-token modes.
Default: ``None``. Invalid when Q_S > 1.
- **value_antiquant_offset** (Tensor, optional) - Inverse quantization offset for the value, with data type of float16, float32
or bfloat16, when the KV fake quantization parameters are separated.
Supports per-tensor, per-channel and per-token modes.
Default: ``None``. Invalid when Q_S > 1.
- **block_table** (Tensor, optional) - Block mapping table in KV cache for PageAttention, with data type of int32.
If not used, set it to None.
Default: ``None``. Invalid when Q_S > 1.
- **query_padding_size** (Tensor, optional) - The query padding size with data type of int64. Indicates whether the data in each
batch of the query is right-aligned, and how many elements are right-aligned.
Default: ``None``. Invalid when Q_S is 1.
- **kv_padding_size** (Tensor, optional) - The key and value padding size with data type of int64. Indicates whether the data
in each batch of the key and value is right-aligned, and how many elements are right-aligned.
Default: ``None``. Invalid when Q_S is 1.
- **key_shared_prefix** (Tensor, optional) - Shared prefix of the key. This is a reserved parameter and is not yet enabled.
Default: ``None``.
- **value_shared_prefix** (Tensor, optional) - Shared prefix of the value. This is a reserved parameter and is not yet enabled.
Default: ``None``.
- **actual_shared_prefix_len** (Union[tuple[int], list[int], Tensor], optional) - Describes the actual length of the shared prefix.
This is a reserved parameter and is not yet enabled.
Default: ``None``.
Outputs:
- **attention_out** (Tensor) - The attention score with data type of float16, bfloat16 or int8. When the input_layout
is BNSD_BSND, the shape is :math:`(B, S, N, D)`. In all other cases, the shape is consistent with the
input query shape.
- **softmax_lse** (Tensor) - The softmax_lse with data type of float32, obtained by taking the lse (log, sum and exp)
of the result of query*key. Specifically, the Ring Attention algorithm first takes the max of the result of
query*key, obtaining softmax_max. The result of query*key is then subtracted by softmax_max, followed by
taking exp, and then the sum is computed to obtain softmax_sum. Finally, the log of softmax_sum is taken,
and softmax_max is added to obtain softmax_lse. The softmax_lse is only calculated when softmax_lse_flag
is True, and the shape would be :math:`(B, N, Q\_S, 1)`. If softmax_lse_flag is False, then a tensor with
shape :math:`(1)` filled with zeros would be returned. In graph mode with JitConfig set to O2, please ensure
that the softmax_lse_flag is enabled before using softmax_lse; otherwise, an exception will occur.
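The softmax_lse reduction described above can be written as a short NumPy reference
(a sketch, not the kernel implementation; here `s` stands for the scaled query*key result):
.. code-block::
m = s.max(axis=-1, keepdims=True)
softmax_sum = np.exp(s - m).sum(axis=-1, keepdims=True)
softmax_lse = np.log(softmax_sum) + m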
Constraints:
- Full Inference Scenario (Q_S > 1):
- Query, key, and value inputs functional usage restrictions:
- The B axis supports values less than or equal to 65535. If the input type includes int8, or
if the input type is float16 or bfloat16 and the D axis is not 16-aligned, the B axis is only
supported up to 128.
- The N axis supports values less than or equal to 256, and the D axis supports values less than
or equal to 512.
- The S axis supports values less than or equal to 20,971,520 (20M). In some long sequence scenarios,
if the computation load is too large, it may cause a timeout in the PFA operator (AICore error
type with errorStr: "timeout or trap error"). In this case, it is recommended to perform an S
split. Note: The computational load is affected by B, S, N, D, etc.; the larger the values, the
greater the computational load. Typical long sequence timeout scenarios (where the product of
B, S, N, and D is large) include, but are not limited to:
1. B=1, Q_N=20, Q_S=2097152, D=256, KV_N=1, KV_S=2097152;
2. B=1, Q_N=2, Q_S=20971520, D=256, KV_N=2, KV_S=20971520;
3. B=20, Q_N=1, Q_S=2097152, D=256, KV_N=1, KV_S=2097152;
4. B=1, Q_N=10, Q_S=2097152, D=512, KV_N=1, KV_S=2097152.
- When the query, key, value, or attention_out type includes int8, the D axis must be 32-aligned.
If all types are float16 or bfloat16, the D axis must be 16-aligned.
- The sparse_mode parameter currently only supports values 0, 1, 2, 3, and 4. Using any other values will
result in an error.
- When sparse_mode = 0, if the attn_mask is None, or if the attn_mask is provided in the left padding
scenario, the input parameters pre_tokens and next_tokens are ignored.
- When sparse_mode = 2, 3, or 4, the shape of the attn_mask must be S,S or 1,S,S or 1,1,S,S, where S
must be fixed at 2048, and the user must ensure the attn_mask is a lower triangular matrix. If no
attn_mask is provided or if the shape is incorrect, an error will occur.
- In sparse_mode = 1, 2, 3 scenarios, the pre_tokens and next_tokens inputs are ignored and assigned
according to the relevant rules.
- The KV cache de-quantization only supports queries of type float16, where int8 keys and values are
de-quantized to float16. The data range of the input key/value and the antiquant_scale must have a
product within the range of (-1, 1). High-performance mode can guarantee precision; otherwise,
high-precision mode should be enabled to ensure accuracy.
- Query left padding scenario:
- In the query left padding scenario, the formula for calculating the starting point of the query
transport is: Q_S - query_padding_size - actual_seq_lengths. The formula for the
ending point of the query transport is: Q_S - query_padding_size. The query transport
starting point must not be less than 0, and the ending point must not exceed Q_S; otherwise,
the results will be incorrect.
- If the kv_padding_size in the query left padding scenario is less than 0, it will be set to 0.
- The query left padding scenario must be enabled together with the actual_seq_lengths parameter,
otherwise, the default is the query right padding scenario.
- The query left padding scenario does not support PageAttention and cannot be enabled together with
the block_table parameter.
- KV left padding scenario:
- In the KV left padding scenario, the formula for calculating the starting point of the key and
value transport is: KV_S - kv_padding_size - actual_seq_lengths_kv. The formula
for the ending point of the key and value transport is: KV_S - kv_padding_size. The
key and value transport starting point must not be less than 0, and the ending point must not
exceed KV_S; otherwise, the results will be incorrect.
- If the kv_padding_size in the KV left padding scenario is less than 0, it will be set to 0.
- The KV left padding scenario must be enabled together with the actual_seq_lengths_kv parameter,
otherwise, the default is the KV right padding scenario.
- The KV left padding scenario does not support PageAttention and cannot be enabled together with
the block_table parameter.
- pse_shift functional usage restrictions:
- This function is supported when the query data type is float16, bfloat16, or int8.
- If the query data type is float16 and pse_shift is enabled, it will force high-precision mode,
inheriting the limitations of high-precision mode.
- Q_S must be greater than or equal to the length of the query S, and KV_S must be greater than
or equal to the length of the key S.
- KV fake quantization parameter separation is not currently supported.
- Incremental Inference Scenario (Q_S is 1):
- Query, key, and value inputs functional usage restrictions:
- The B axis supports values less than or equal to 65,536.
- The N axis supports values less than or equal to 256.
- The D axis supports values less than or equal to 512.
- Scenarios where the input types of query, key, and value are all int8 are not supported.
- Page attention scenario:
- The necessary condition to enable page attention is that the block_table exists and is valid.
The key and value are arranged in contiguous memory according to the indices in the block_table.
The key and value dtypes supported are float16, bfloat16, and int8. In this scenario, the
input_layout parameter for key and value is invalid.
- block_size is a user-defined parameter, and its value will affect the performance of page attention.
When enabling page attention, a non-zero value for block_size must be provided, and the maximum
value for block_size is 512.
- If the input types of key and value are float16 or bfloat16, they must be 16-aligned. If the
input types are int8, they must be 32-aligned, with 128 being recommended. In general, page
attention can increase throughput but may lead to a performance decrease.
- In the page attention enabled scenario, when the KV cache layout is (blocknum, block_size, H) and
num_key_value_heads * D exceeds 64K, an error will be reported due to hardware
instruction constraints. This can be resolved by enabling GQA (reducing num_key_value_heads) or
adjusting the KV cache layout to (blocknum, num_key_value_heads, block_size, D).
- The product of all dimensions of the shape of the key and value tensors in the page attention
scenario must not exceed the representable range of int32.
- In the page attention enabled scenario, the input S must be greater than or equal to
max_block_num_per_seq * block_size.
- Enabling attention mask (e.g., mask shape = (B, 1, 1, S)), enabling pse_shift (e.g., pse_shift shape =
(B, N, 1, S)), and enabling fake quantization in per-token mode (e.g., antiquant_scale and
antiquant_offset shapes = (2, B, S)) are also supported.
- KV left padding scenario:
- In the KV left padding scenario, the formula for calculating the starting point of the KV cache
transport is: KV_S - kv_padding_size - actual_seq_lengths. The formula for the endpoint of the
KV cache transport is: KV_S - kv_padding_size. If the starting point or endpoint of the KV cache
is less than 0, the returned data result will be all zeros.
- If kv_padding_size is less than 0 in the KV left padding scenario, it will be set to 0.
- The KV left padding scenario must be enabled together with the actual_seq_lengths parameter,
otherwise, it defaults to the KV right padding scenario.
- The KV left padding scenario must be enabled together with the attn_mask parameter, and the attn_mask
must be correctly applied to hide invalid data. Otherwise, accuracy issues may arise.
- pse_shift functional usage restrictions:
- The data type of pse_shift must match the data type of the query.
- Only the D axis alignment is supported, meaning the D axis must be divisible by 16.
- KV fake quantization parameter separation:
- key_antiquant_mode and value_antiquant_mode must be consistent.
- key_antiquant_scale and value_antiquant_scale must either both be empty or both non-empty.
- key_antiquant_offset and value_antiquant_offset must either both be empty or both non-empty.
- When both key_antiquant_scale and value_antiquant_scale are non-empty, their shapes must be
consistent.
- When both key_antiquant_offset and value_antiquant_offset are non-empty, their shapes must be
consistent.
Supported Platforms:
``Ascend``
Examples:
>>> from mindspore.ops.operations import _infer_ops as infer_ops
>>> from mindspore import Tensor
>>> import numpy as np
>>> B, N, S, D = 1, 8, 1024, 128
>>> query = Tensor(np.random.rand(B, N, S, D).astype(np.float16))
>>> key = Tensor(np.random.rand(B, N, S, D).astype(np.float16))
>>> value = Tensor(np.random.rand(B, N, S, D).astype(np.float16))
>>> fias = infer_ops.FusedInferAttentionScore(num_heads=N, input_layout='BNSD')
>>> out = fias(query, [key], [value])
>>> print(out[0].shape)
(1, 8, 1024, 128)
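A minimal incremental-inference sketch (Q_S = 1) reusing the tensors above; per the
output description, attention_out keeps the query shape in the BNSD layout:
>>> query_incre = Tensor(np.random.rand(B, N, 1, D).astype(np.float16))
>>> out_incre = fias(query_incre, [key], [value])
>>> print(out_incre[0].shape)
(1, 8, 1, 128)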
"""
__mindspore_signature__ = (
sig.make_sig('query'),
sig.make_sig('key'),
sig.make_sig('value'),
sig.make_sig('pse_shift', default=None),
sig.make_sig('attn_mask', default=None),
sig.make_sig('actual_seq_lengths', default=None),
sig.make_sig('actual_seq_lengths_kv', default=None),
sig.make_sig('dequant_scale1', default=None),
sig.make_sig('quant_scale1', default=None),
sig.make_sig('dequant_scale2', default=None),
sig.make_sig('quant_scale2', default=None),
sig.make_sig('quant_offset2', default=None),
sig.make_sig('antiquant_scale', default=None),
sig.make_sig('antiquant_offset', default=None),
sig.make_sig('block_table', default=None),
sig.make_sig('query_padding_size', default=None),
sig.make_sig('kv_padding_size', default=None),
sig.make_sig('key_antiquant_scale', default=None),
sig.make_sig('key_antiquant_offset', default=None),
sig.make_sig('value_antiquant_scale', default=None),
sig.make_sig('value_antiquant_offset', default=None),
sig.make_sig('key_shared_prefix', default=None),
sig.make_sig('value_shared_prefix', default=None),
sig.make_sig('actual_shared_prefix_len', default=None),
)
@prim_arg_register
def __init__(self, num_heads=1, scale_value=1.0, pre_tokens=2147483647, next_tokens=2147483647, input_layout='BSH', num_key_value_heads=0, sparse_mode=0, inner_precise=1, block_size=0, antiquant_mode=0, softmax_lse_flag=False, key_antiquant_mode=0, value_antiquant_mode=0):
self._set_prim_arg("num_heads", num_heads)
self._set_prim_arg("scale_value", scale_value)
self._set_prim_arg("pre_tokens", pre_tokens)
self._set_prim_arg("next_tokens", next_tokens)
self._set_prim_arg_with_handler("input_layout", input_layout, str_to_enum)
self._set_prim_arg("num_key_value_heads", num_key_value_heads)
self._set_prim_arg("sparse_mode", sparse_mode)
self._set_prim_arg("inner_precise", inner_precise)
self._set_prim_arg("block_size", block_size)
self._set_prim_arg("antiquant_mode", antiquant_mode)
self._set_prim_arg("softmax_lse_flag", softmax_lse_flag)
self._set_prim_arg("key_antiquant_mode", key_antiquant_mode)
self._set_prim_arg("value_antiquant_mode", value_antiquant_mode)
def __call__(self, query, key, value, pse_shift=None, attn_mask=None, actual_seq_lengths=None, actual_seq_lengths_kv=None, dequant_scale1=None, quant_scale1=None, dequant_scale2=None, quant_scale2=None, quant_offset2=None, antiquant_scale=None, antiquant_offset=None, block_table=None, query_padding_size=None, kv_padding_size=None, key_antiquant_scale=None, key_antiquant_offset=None, value_antiquant_scale=None, value_antiquant_offset=None, key_shared_prefix=None, value_shared_prefix=None, actual_shared_prefix_len=None):
# Add for jit context.
if jit_context() and jit_context().compiled:
return None
res = _convert_stub(pyboost_fused_infer_attention_score(self, [query, key, value, pse_shift, attn_mask, actual_seq_lengths, actual_seq_lengths_kv, dequant_scale1, quant_scale1, dequant_scale2, quant_scale2, quant_offset2, antiquant_scale, antiquant_offset, block_table, query_padding_size, kv_padding_size, key_antiquant_scale, key_antiquant_offset, value_antiquant_scale, value_antiquant_offset, key_shared_prefix, value_shared_prefix, actual_shared_prefix_len, self.num_heads, self.scale_value, self.pre_tokens, self.next_tokens, self.input_layout, self.num_key_value_heads, self.sparse_mode, self.inner_precise, self.block_size, self.antiquant_mode, self.softmax_lse_flag, self.key_antiquant_mode, self.value_antiquant_mode]))
# Add for jit context.
if jit_context():
if is_stub_tensor(res):
res = res.stub_sync()
return jit_context().run_op(self, res, query, key, value, pse_shift, attn_mask, actual_seq_lengths, actual_seq_lengths_kv, dequant_scale1, quant_scale1, dequant_scale2, quant_scale2, quant_offset2, antiquant_scale, antiquant_offset, block_table, query_padding_size, kv_padding_size, key_antiquant_scale, key_antiquant_offset, value_antiquant_scale, value_antiquant_offset, key_shared_prefix, value_shared_prefix, actual_shared_prefix_len, self.num_heads, self.scale_value, self.pre_tokens, self.next_tokens, self.input_layout, self.num_key_value_heads, self.sparse_mode, self.inner_precise, self.block_size, self.antiquant_mode, self.softmax_lse_flag, self.key_antiquant_mode, self.value_antiquant_mode)
return res