MindSpore Lite API

Summary of MindSpore Lite API support

The tables below are grouped by class. Each row lists a description, the C++ API, and, where one exists, the corresponding Python API.

Context

| Description | C++ API | Python API |
|---|---|---|
| Set the number of threads at runtime | void SetThreadNum(int32_t thread_num) | Context.cpu.thread_num |
| Get the current thread number setting | int32_t GetThreadNum() const | Context.cpu.thread_num |
| Set the operator parallelism at runtime | void SetInterOpParallelNum(int32_t parallel_num) | Context.cpu.inter_op_parallel_num |
| Get the current operator parallelism setting | int32_t GetInterOpParallelNum() const | Context.cpu.inter_op_parallel_num |
| Set the thread affinity mode for binding runtime threads to CPU cores | void SetThreadAffinity(int mode) | Context.cpu.thread_affinity_mode |
| Get the thread affinity mode | int GetThreadAffinityMode() const | Context.cpu.thread_affinity_mode |
| Set the list of CPU cores that runtime threads are bound to | void SetThreadAffinity(const std::vector<int> &core_list) | Context.cpu.thread_affinity_core_list |
| Get the list of CPU cores that runtime threads are bound to | std::vector<int32_t> GetThreadAffinityCoreList() const | Context.cpu.thread_affinity_core_list |
| Set whether to perform model inference or training in parallel | void SetEnableParallel(bool is_parallel) | |
| Get whether model inference or training is performed in parallel | bool GetEnableParallel() const | |
| Set the built-in delegate mode used to access a third-party AI framework | void SetBuiltInDelegate(DelegateMode mode) | |
| Get the built-in delegate mode of the third-party AI framework | DelegateMode GetBuiltInDelegate() const | |
| Set the delegate used to access a third-party AI framework | set_delegate(const std::shared_ptr<AbstractDelegate> &delegate) | |
| Get the delegate of the third-party AI framework | std::shared_ptr<AbstractDelegate> get_delegate() const | |
| Set whether a quantized model runs as a float model on multiple devices | void SetMultiModalHW(bool float_mode) | |
| Get whether a quantized model runs as a float model on multiple devices | bool GetMultiModalHW() const | |
| Get a mutable reference to the DeviceInfoContext vector in this context | std::vector<std::shared_ptr<DeviceInfoContext>> &MutableDeviceInfo() | Wrapped in Context.target |
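
The Context APIs above are typically combined as shown below. This is a minimal sketch: the include path and the affinity-mode value 1 are assumptions based on the conventional MindSpore Lite layout, not guaranteed by this table; Python equivalents are noted in comments.

```cpp
#include <memory>

#include "include/api/context.h"

// Configure a Context for CPU execution.
std::shared_ptr<mindspore::Context> BuildCpuContext() {
  auto context = std::make_shared<mindspore::Context>();
  context->SetThreadNum(2);            // Python: context.cpu.thread_num = 2
  context->SetInterOpParallelNum(1);   // Python: context.cpu.inter_op_parallel_num = 1
  context->SetThreadAffinity(1);       // Python: context.cpu.thread_affinity_mode = 1 (value assumed)
  // MutableDeviceInfo() is wrapped by Context.target in the Python API.
  auto &device_list = context->MutableDeviceInfo();
  device_list.push_back(std::make_shared<mindspore::CPUDeviceInfo>());
  return context;
}
```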

DeviceInfoContext

| Description | C++ API | Python API |
|---|---|---|
| Get the type of this DeviceInfoContext | enum DeviceType GetDeviceType() const | |
| Convert the DeviceInfoContext to a shared pointer of type T | std::shared_ptr<T> Cast() | |
| Set the provider's name | void SetProvider(const std::string &provider) | |
| Get the provider's name | std::string GetProvider() const | |
| Set the provider's device type | void SetProviderDevice(const std::string &device) | |
| Get the provider's device type | std::string GetProviderDevice() const | |
| Set the memory allocator | void SetAllocator(const std::shared_ptr<Allocator> &allocator) | |
| Get the memory allocator | std::shared_ptr<Allocator> GetAllocator() const | |
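
DeviceInfoContext is the abstract base of the concrete device classes listed below; Cast() recovers the concrete type. A small illustrative sketch follows; the enumerator spelling mindspore::DeviceType::kCPU is an assumption to be checked against your headers.

```cpp
#include <memory>

#include "include/api/context.h"

// Inspect the devices registered in a Context and down-cast the abstract
// DeviceInfoContext to its concrete type.
void InspectDevices(const std::shared_ptr<mindspore::Context> &context) {
  for (auto &device : context->MutableDeviceInfo()) {
    if (device->GetDeviceType() == mindspore::DeviceType::kCPU) {
      auto cpu_info = device->Cast<mindspore::CPUDeviceInfo>();
      cpu_info->SetEnableFP16(false);  // CPU-specific APIs are now available
    }
  }
}
```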

CPUDeviceInfo

| Description | C++ API | Python API |
|---|---|---|
| Get the type of this DeviceInfoContext | enum DeviceType GetDeviceType() const | Context.cpu |
| Enable or disable float16 inference | void SetEnableFP16(bool is_fp16) | Context.cpu.precision_mode |
| Check whether float16 inference is enabled | bool GetEnableFP16() const | Context.cpu.precision_mode |
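
For reference, a hedged sketch of enabling float16 inference on CPU; the Python string value in the comment is an assumption, so check the precision_mode values accepted by your release.

```cpp
#include <memory>

#include "include/api/context.h"

// Create a CPU device descriptor with float16 inference enabled.
// Python: context.cpu.precision_mode = "preferred_fp16" (assumed value)
std::shared_ptr<mindspore::CPUDeviceInfo> MakeCpuDeviceInfo(bool use_fp16) {
  auto cpu_info = std::make_shared<mindspore::CPUDeviceInfo>();
  cpu_info->SetEnableFP16(use_fp16);
  return cpu_info;
}
```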

GPUDeviceInfo

| Description | C++ API | Python API |
|---|---|---|
| Get the type of this DeviceInfoContext | enum DeviceType GetDeviceType() const | Context.gpu |
| Set the device id | void SetDeviceID(uint32_t device_id) | Context.gpu.device_id |
| Get the device id | uint32_t GetDeviceID() const | Context.gpu.device_id |
| Get the distribution rank id | int GetRankID() const | Context.gpu.rank_id |
| Get the distribution group size | int GetGroupSize() const | Context.gpu.group_size |
| Set the precision mode | void SetPrecisionMode(const std::string &precision_mode) | |
| Get the precision mode | std::string GetPrecisionMode() const | |
| Enable or disable float16 inference | void SetEnableFP16(bool is_fp16) | Context.gpu.precision_mode |
| Check whether float16 inference is enabled | bool GetEnableFP16() const | Context.gpu.precision_mode |
| Enable or disable memory sharing with OpenGL | void SetEnableGLTexture(bool is_enable_gl_texture) | |
| Check whether memory sharing with OpenGL is enabled | bool GetEnableGLTexture() const | |
| Set the current OpenGL context | void SetGLContext(void *gl_context) | |
| Get the current OpenGL context | void *GetGLContext() const | |
| Set the current OpenGL display | void SetGLDisplay(void *gl_display) | |
| Get the current OpenGL display | void *GetGLDisplay() const | |
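
A hedged sketch of GPU configuration. The OpenGL-sharing calls are shown commented out because they require application-provided context/display handles; the include path is assumed.

```cpp
#include <memory>

#include "include/api/context.h"

// Register a GPU device with float16 inference enabled.
void AddGpuDevice(const std::shared_ptr<mindspore::Context> &context) {
  auto gpu_info = std::make_shared<mindspore::GPUDeviceInfo>();
  gpu_info->SetDeviceID(0);        // Python: context.gpu.device_id = 0
  gpu_info->SetEnableFP16(true);   // Python: context.gpu.precision_mode
  // To share memory with OpenGL, additionally pass application handles:
  // gpu_info->SetEnableGLTexture(true);
  // gpu_info->SetGLContext(gl_context);
  // gpu_info->SetGLDisplay(gl_display);
  context->MutableDeviceInfo().push_back(gpu_info);
}
```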

AscendDeviceInfo

| Description | C++ API | Python API |
|---|---|---|
| Get the type of this DeviceInfoContext | enum DeviceType GetDeviceType() const | Context.ascend |
| Set the device id | void SetDeviceID(uint32_t device_id) | Context.ascend.device_id |
| Get the device id | uint32_t GetDeviceID() const | Context.ascend.device_id |
| Set the AIPP configuration file path | void SetInsertOpConfigPath(const std::string &cfg_path) | |
| Get the AIPP configuration file path | std::string GetInsertOpConfigPath() const | |
| Set the format of model inputs | void SetInputFormat(const std::string &format) | |
| Get the format of model inputs | std::string GetInputFormat() const | |
| Set the shape of model inputs | void SetInputShape(const std::string &shape) | |
| Get the shape of model inputs | std::string GetInputShape() const | |
| Set the shape of model inputs as a map | void SetInputShapeMap(const std::map<int, std::vector<int>> &shape) | |
| Get the shape of model inputs as a map | std::map<int, std::vector<int>> GetInputShapeMap() const | |
| Set the dynamic batch sizes of model inputs (2 to 100 sizes) | void SetDynamicBatchSize(const std::vector<size_t> &dynamic_batch_size) | |
| Get the dynamic batch sizes of model inputs | std::string GetDynamicBatchSize() const | |
| Set the dynamic image size of model inputs | void SetDynamicImageSize(const std::string &dynamic_image_size) | |
| Get the dynamic image size of model inputs | std::string GetDynamicImageSize() const | |
| Set the type of model outputs | void SetOutputType(enum DataType output_type) | |
| Get the type of model outputs | enum DataType GetOutputType() const | |
| Set the precision mode of the model | void SetPrecisionMode(const std::string &precision_mode) | Context.ascend.precision_mode |
| Get the precision mode of the model | std::string GetPrecisionMode() const | Context.ascend.precision_mode |
| Set the op select implementation mode | void SetOpSelectImplMode(const std::string &op_select_impl_mode) | |
| Get the op select implementation mode | std::string GetOpSelectImplMode() const | |
| Set the fusion switch config file path, which controls which fusion passes are turned off | void SetFusionSwitchConfigPath(const std::string &cfg_path) | |
| Get the fusion switch config file path | std::string GetFusionSwitchConfigPath() const | |
| Set the buffer optimize mode | void SetBufferOptimizeMode(const std::string &buffer_optimize_mode) | |
| Get the buffer optimize mode | std::string GetBufferOptimizeMode() const | |
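
A minimal sketch of Ascend configuration. The precision-mode string and the dynamic batch sizes are illustrative values only; consult the converter/runtime documentation of your release for the accepted strings.

```cpp
#include <memory>

#include "include/api/context.h"

// Register an Ascend device.
void AddAscendDevice(const std::shared_ptr<mindspore::Context> &context) {
  auto ascend_info = std::make_shared<mindspore::AscendDeviceInfo>();
  ascend_info->SetDeviceID(0);                    // Python: context.ascend.device_id = 0
  ascend_info->SetPrecisionMode("enforce_fp16");  // Python: context.ascend.precision_mode (string assumed)
  ascend_info->SetDynamicBatchSize({1, 8});       // between 2 and 100 batch sizes
  context->MutableDeviceInfo().push_back(ascend_info);
}
```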

KirinNPUDeviceInfo

| Description | C++ API | Python API |
|---|---|---|
| Get the type of this DeviceInfoContext | enum DeviceType GetDeviceType() const | |
| Enable or disable float16 inference | void SetEnableFP16(bool is_fp16) | |
| Check whether float16 inference is enabled | bool GetEnableFP16() const | |
| Set the NPU frequency | void SetFrequency(int frequency) | |
| Get the NPU frequency | int GetFrequency() const | |
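
A minimal sketch of Kirin NPU configuration. The frequency level 3 is assumed to mean "high" (typical levels: 1 low, 2 medium, 3 high, 4 extreme); verify against your release.

```cpp
#include <memory>

#include "include/api/context.h"

// Register a Kirin NPU device.
void AddKirinNpuDevice(const std::shared_ptr<mindspore::Context> &context) {
  auto npu_info = std::make_shared<mindspore::KirinNPUDeviceInfo>();
  npu_info->SetEnableFP16(true);
  npu_info->SetFrequency(3);  // assumed "high" level
  context->MutableDeviceInfo().push_back(npu_info);
}
```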

Model

| Description | C++ API | Python API |
|---|---|---|
| Build a model from a model buffer so that it can run on a device | Status Build(const void *model_data, size_t data_size, ModelType model_type, const std::shared_ptr<Context> &model_context = nullptr) | |
| Load and build a model from a model file so that it can run on a device | Status Build(const std::string &model_path, ModelType model_type, const std::shared_ptr<Context> &model_context = nullptr) | Model.build_from_file |
| Build a model from a model buffer, with decryption, so that it can run on a device | Status Build(const void *model_data, size_t data_size, ModelType model_type, const std::shared_ptr<Context> &model_context, const Key &dec_key, const std::string &dec_mode, const std::string &cropto_lib_path) | |
| Load and build a model from a model file, with decryption, so that it can run on a device | Status Build(const std::string &model_path, ModelType model_type, const std::shared_ptr<Context> &model_context, const Key &dec_key, const std::string &dec_mode, const std::string &cropto_lib_path) | |
| Build a training model from a GraphCell so that it can run on a device | Status Build(GraphCell graph, const std::shared_ptr<Context> &model_context = nullptr, const std::shared_ptr<TrainCfg> &train_cfg = nullptr) | |
| Build a training model from a GraphCell with an optimizer so that it can run on a device | Status Build(GraphCell graph, Node *optimizer, std::vector<Expr *> inputs, const std::shared_ptr<Context> &model_context, const std::shared_ptr<TrainCfg> &train_cfg) | |
| Build a transfer-learning model in which the backbone weights are fixed and the head weights are trainable | Status BuildTransferLearning(GraphCell backbone, GraphCell head, const std::shared_ptr<Context> &context, const std::shared_ptr<TrainCfg> &train_cfg = nullptr) | |
| Resize the shapes of model inputs | Status Resize(const std::vector<MSTensor> &inputs, const std::vector<std::vector<int64_t>> &dims) | Model.resize |
| Change the size and/or content of weight tensors | Status UpdateWeights(const std::vector<MSTensor> &new_weights) | |
| Run model inference | Status Predict(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs, const MSKernelCallBack &before = nullptr, const MSKernelCallBack &after = nullptr) | Model.predict |
| Run model inference, passing only callbacks | Status Predict(const MSKernelCallBack &before = nullptr, const MSKernelCallBack &after = nullptr) | |
| Training API: run the model by a single step | Status RunStep(const MSKernelCallBack &before = nullptr, const MSKernelCallBack &after = nullptr) | |
| Run model inference with the preprocessing embedded in the model | Status PredictWithPreprocess(const std::vector<std::vector<MSTensor>> &inputs, std::vector<MSTensor> *outputs, const MSKernelCallBack &before = nullptr, const MSKernelCallBack &after = nullptr) | |
| Apply data preprocessing if it exists in the model | Status Preprocess(const std::vector<std::vector<MSTensor>> &inputs, std::vector<MSTensor> *outputs) | |
| Check whether data preprocessing exists in the model | bool HasPreprocess() | |
| Load a config file | Status LoadConfig(const std::string &config_path) | Wrapped in the config_path parameter of Model.build_from_file |
| Update the config | Status UpdateConfig(const std::string &section, const std::pair<std::string, std::string> &config) | |
| Obtain all input tensors of the model | std::vector<MSTensor> GetInputs() | Model.get_inputs |
| Obtain an input tensor of the model by name | MSTensor GetInputByTensorName(const std::string &tensor_name) | |
| Obtain all gradient tensors of the model | std::vector<MSTensor> GetGradients() const | |
| Update the gradient tensors of the model | Status ApplyGradients(const std::vector<MSTensor> &gradients) | |
| Obtain all weight tensors of the model | std::vector<MSTensor> GetFeatureMaps() const | |
| Obtain all trainable parameters of the model optimizers | std::vector<MSTensor> GetTrainableParams() const | |
| Update the weight tensors of the model | Status UpdateFeatureMaps(const std::vector<MSTensor> &new_weights) | |
| Obtain the optimizer parameter tensors of the model | std::vector<MSTensor> GetOptimizerParams() const | |
| Update the optimizer parameters | Status SetOptimizerParams(const std::vector<MSTensor> &params) | |
| Set up training with virtual batches | Status SetupVirtualBatch(int virtual_batch_multiplier, float lr = -1.0f, float momentum = -1.0f) | |
| Set the learning rate for training | Status SetLearningRate(float learning_rate) | |
| Get the learning rate of the optimizer | float GetLearningRate() | |
| Initialize the object with metrics | Status InitMetrics(std::vector<Metrics *> metrics) | |
| Accessor for the TrainLoop metric objects | std::vector<Metrics *> GetMetrics() | |
| Obtain all output tensors of the model | std::vector<MSTensor> GetOutputs() | Wrapped in the return value of Model.predict |
| Obtain the names of all output tensors of the model | std::vector<std::string> GetOutputTensorNames() | |
| Obtain an output tensor of the model by name | MSTensor GetOutputByTensorName(const std::string &tensor_name) | |
| Get the output MSTensors of the model by node name | std::vector<MSTensor> GetOutputsByNodeName(const std::string &node_name) | |
| Bind a GLTexture2D object to OpenCL memory | Status BindGLTexture2DMemory(const std::map<std::string, unsigned int> &inputGLTexture, std::map<std::string, unsigned int> *outputGLTexture) | |
| Set the model running mode (train or eval) | Status SetTrainMode(bool train) | |
| Get the model running mode | bool GetTrainMode() const | |
| Perform the training loop in train mode | Status Train(int epochs, std::shared_ptr<dataset::Dataset> ds, std::vector<TrainCallBack *> cbs) | |
| Perform the training loop over all data in eval mode | Status Evaluate(std::shared_ptr<dataset::Dataset> ds, std::vector<TrainCallBack *> cbs) | |
| Check whether the device supports the model | static bool CheckModelSupport(enum DeviceType device_type, ModelType model_type) | |
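
The common inference path through the Model APIs above is Build, GetInputs, Predict. A minimal sketch follows, assuming the conventional include paths and the kMindIR model type used in the public samples; Python equivalents are noted in comments.

```cpp
#include <memory>
#include <string>
#include <vector>

#include "include/api/context.h"
#include "include/api/model.h"
#include "include/api/status.h"
#include "include/api/types.h"

// Build a model from a file and run a single inference on its default inputs.
int RunInference(const std::string &model_path) {
  auto context = std::make_shared<mindspore::Context>();
  context->MutableDeviceInfo().push_back(std::make_shared<mindspore::CPUDeviceInfo>());

  mindspore::Model model;
  // Python: model.build_from_file(model_path, mslite.ModelType.MINDIR, context)
  if (model.Build(model_path, mindspore::kMindIR, context) != mindspore::kSuccess) {
    return -1;
  }

  auto inputs = model.GetInputs();            // Python: model.get_inputs()
  std::vector<mindspore::MSTensor> outputs;   // Python: outputs = model.predict(inputs)
  return model.Predict(inputs, &outputs) == mindspore::kSuccess ? 0 : -1;
}
```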

RunnerConfig

| Description | C++ API | Python API |
|---|---|---|
| Set the number of workers at runtime | void SetWorkersNum(int32_t workers_num) | Context.parallel.workers_num |
| Get the current number of parallel workers | int32_t GetWorkersNum() const | Context.parallel.workers_num |
| Set the context at runtime | void SetContext(const std::shared_ptr<Context> &context) | Wrapped in Context.parallel |
| Get the current context setting | std::shared_ptr<Context> GetContext() const | Wrapped in Context.parallel |
| Set the config before runtime | void SetConfigInfo(const std::string &section, const std::map<std::string, std::string> &config) | Context.parallel.config_info |
| Get the current config setting | std::map<std::string, std::map<std::string, std::string>> GetConfigInfo() const | Context.parallel.config_info |
| Set the config path before runtime | void SetConfigPath(const std::string &config_path) | Context.parallel.config_path |
| Get the current config path | std::string GetConfigPath() const | Context.parallel.config_path |
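
A hedged sketch of preparing a RunnerConfig for parallel inference. The include path is assumed, and the section/key/value passed to SetConfigInfo is a hypothetical placeholder rather than a documented setting.

```cpp
#include <map>
#include <memory>
#include <string>

#include "include/api/context.h"
#include "include/api/model_parallel_runner.h"

// Build a RunnerConfig that reuses an existing Context.
std::shared_ptr<mindspore::RunnerConfig> MakeRunnerConfig(
    const std::shared_ptr<mindspore::Context> &context) {
  auto runner_config = std::make_shared<mindspore::RunnerConfig>();
  runner_config->SetContext(context);  // Python: wrapped in Context.parallel
  runner_config->SetWorkersNum(2);     // Python: context.parallel.workers_num = 2
  runner_config->SetConfigInfo("my_section", {{"key", "value"}});  // hypothetical section/key
  return runner_config;
}
```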

ModelParallelRunner

| Description | C++ API | Python API |
|---|---|---|
| Build a model parallel runner from a model file so that it can run on a device | Status Init(const std::string &model_path, const std::shared_ptr<RunnerConfig> &runner_config = nullptr) | Model.parallel_runner.build_from_file |
| Build a model parallel runner from a model buffer so that it can run on a device | Status Init(const void *model_data, const size_t data_size, const std::shared_ptr<RunnerConfig> &runner_config = nullptr) | |
| Obtain information about all input tensors of the model | std::vector<MSTensor> GetInputs() | Model.parallel_runner.get_inputs |
| Obtain information about all output tensors of the model | std::vector<MSTensor> GetOutputs() | Wrapped in the return value of Model.parallel_runner.predict |
| Run inference with the ModelParallelRunner | Status Predict(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs, const MSKernelCallBack &before = nullptr, const MSKernelCallBack &after = nullptr) | Model.parallel_runner.predict |
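
A minimal sketch of using the runner, assuming the RunnerConfig from the previous example and the conventional include paths; the Python counterparts are the Model.parallel_runner entries in the table above.

```cpp
#include <memory>
#include <string>
#include <vector>

#include "include/api/model_parallel_runner.h"
#include "include/api/status.h"
#include "include/api/types.h"

// Initialize a parallel runner from a model file and run one prediction.
int RunParallel(const std::string &model_path,
                const std::shared_ptr<mindspore::RunnerConfig> &runner_config) {
  mindspore::ModelParallelRunner runner;
  if (runner.Init(model_path, runner_config) != mindspore::kSuccess) {
    return -1;
  }
  auto inputs = runner.GetInputs();
  std::vector<mindspore::MSTensor> outputs;
  return runner.Predict(inputs, &outputs) == mindspore::kSuccess ? 0 : -1;
}
```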

MSTensor

| Description | C++ API | Python API |
|---|---|---|
| Create an MSTensor object whose data must be copied before being accessed by the Model | static inline MSTensor *CreateTensor(const std::string &name, DataType type, const std::vector<int64_t> &shape, const void *data, size_t data_len) noexcept | Tensor |
| Create an MSTensor object whose data can be accessed directly by the Model | static inline MSTensor *CreateRefTensor(const std::string &name, DataType type, const std::vector<int64_t> &shape, const void *data, size_t data_len, bool own_data = true) noexcept | |
| Create an MSTensor object whose device data can be accessed directly by the Model | static inline MSTensor CreateDeviceTensor(const std::string &name, DataType type, const std::vector<int64_t> &shape, void *data, size_t data_len) noexcept | |
| Create an MSTensor object from a local file | static inline MSTensor *CreateTensorFromFile(const std::string &file, DataType type = DataType::kNumberTypeUInt8, const std::vector<int64_t> &shape = {}) noexcept | |
| Create a string-type MSTensor object whose data can be accessed by the Model only after being copied | static inline MSTensor *StringsToTensor(const std::string &name, const std::vector<std::string> &str) | |
| Parse a string-type MSTensor object into strings | static inline std::vector<std::string> TensorToStrings(const MSTensor &tensor) | |
| Destroy an object created by Clone, StringsToTensor, CreateRefTensor, or CreateTensor | static void DestroyTensorPtr(MSTensor *tensor) noexcept | |
| Obtain the name of the MSTensor | std::string Name() const | Tensor.name |
| Obtain the data type of the MSTensor | enum DataType DataType() const | Tensor.dtype |
| Obtain the shape of the MSTensor | const std::vector<int64_t> &Shape() const | Tensor.shape |
| Obtain the number of elements of the MSTensor | int64_t ElementNum() const | Tensor.element_num |
| Obtain a shared pointer to a copy of the MSTensor's data | std::shared_ptr<const void> Data() const | |
| Obtain a pointer to the MSTensor's data | void *MutableData() | Wrapped in Tensor.get_data_to_numpy and Tensor.set_data_from_numpy |
| Obtain the length of the MSTensor's data, in bytes | size_t DataSize() const | Tensor.data_size |
| Get whether the MSTensor data is const data | bool IsConst() const | |
| Get whether the memory of the MSTensor is on a device | bool IsDevice() const | |
| Get a deep copy of the MSTensor | MSTensor *Clone() const | |
| Check whether the MSTensor is null | bool operator==(std::nullptr_t) const | |
| Check whether the MSTensor is not null | bool operator!=(std::nullptr_t) const | |
| Check whether the MSTensor equals another tensor | bool operator==(const MSTensor &tensor) const | |
| Check whether the MSTensor does not equal another tensor | bool operator!=(const MSTensor &tensor) const | |
| Set the shape of the MSTensor | void SetShape(const std::vector<int64_t> &shape) | Tensor.shape |
| Set the data type of the MSTensor | void SetDataType(enum DataType data_type) | Tensor.dtype |
| Set the name of the MSTensor | void SetTensorName(const std::string &name) | Tensor.name |
| Set the Allocator for the MSTensor | void SetAllocator(std::shared_ptr<Allocator> allocator) | |
| Obtain the Allocator of the MSTensor | std::shared_ptr<Allocator> allocator() const | |
| Set the format of the MSTensor | void SetFormat(mindspore::Format format) | Tensor.format |
| Obtain the format of the MSTensor | mindspore::Format format() const | Tensor.format |
| Set the data of the MSTensor | void SetData(void *data, bool own_data = true) | |
| Set the device data address of the MSTensor | void SetDeviceData(void *data) | |
| Get the device data address of the MSTensor set by SetDeviceData | void *GetDeviceData() | |
| Get the quantization parameters of the MSTensor | std::vector<QuantParam> QuantParams() const | |
| Set the quantization parameters of the MSTensor | void SetQuantParams(std::vector<QuantParam> quant_params) | |
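
A minimal sketch of the MSTensor creation and destruction APIs above, assuming the conventional include path; CreateTensor copies the provided host buffer, and the returned pointer is released with DestroyTensorPtr.

```cpp
#include <string>
#include <vector>

#include "include/api/types.h"

// Create an input tensor from a host buffer, inspect it, and release it.
void CreateAndReleaseTensor() {
  std::vector<float> data(1 * 3 * 224 * 224, 0.0f);  // example NCHW buffer
  auto *tensor = mindspore::MSTensor::CreateTensor(
      "input", mindspore::DataType::kNumberTypeFloat32,
      {1, 3, 224, 224}, data.data(), data.size() * sizeof(float));
  if (tensor == nullptr) {
    return;
  }
  // Python: tensor.name, tensor.dtype, tensor.shape, tensor.get_data_to_numpy()
  int64_t elements = tensor->ElementNum();  // 1 * 3 * 224 * 224
  (void)elements;
  mindspore::MSTensor::DestroyTensorPtr(tensor);
}
```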
