Class Context
- Defined in File context.h

Class Documentation

class Context
- Context is used to store environment variables during execution.

Public Functions
void SetThreadNum(int32_t thread_num)
- Set the number of threads at runtime.
- Parameters
  - thread_num – [in] the number of threads at runtime.
 
int32_t GetThreadNum() const
- Get the current thread number setting.
- Returns
  - The current thread number setting.
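
A minimal usage sketch for the thread-number accessors; the namespace and header path follow the MindSpore Lite C++ API and are assumptions here:

    #include <memory>
    #include "include/api/context.h"  // assumed header path

    int main() {
      auto context = std::make_shared<mindspore::Context>();
      context->SetThreadNum(4);                   // run with 4 threads
      int32_t threads = context->GetThreadNum();  // threads == 4
      return 0;
    }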
 
void SetGroupInfoFile(std::string group_info_file)
- Set the communication group info file path.
- Parameters
  - group_info_file – [in] communication group info file for distributed inference.
 
std::string GetGroupInfoFile() const
- Get the communication group info file path.
- Returns
  - The communication group info file path setting.
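
A short sketch reusing the context object from the SetThreadNum example; the file name is purely illustrative:

    // Point the runtime at a communication group info file for distributed inference.
    context->SetGroupInfoFile("group_info.pb");            // illustrative name
    std::string group_file = context->GetGroupInfoFile();  // "group_info.pb"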
 
void SetInterOpParallelNum(int32_t parallel_num)
- Set the parallel number of operators at runtime.
- Parameters
  - parallel_num – [in] the parallel number of operators at runtime.
 
int32_t GetInterOpParallelNum() const
- Get the current operator parallel number setting.
- Returns
  - The current operator parallel number setting.
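
A sketch of operator-level parallelism, continuing with the same context object:

    // Allow up to 2 operators to execute concurrently; the threads configured
    // via SetThreadNum are shared among them.
    context->SetInterOpParallelNum(2);
    int32_t op_parallel = context->GetInterOpParallelNum();  // op_parallel == 2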
 
void SetThreadAffinity(int mode)
- Set the thread affinity to CPU cores.
- Parameters
  - mode – [in] 0: no affinities, 1: big cores first, 2: little cores first.
 
int GetThreadAffinityMode() const
- Get the thread affinity of CPU cores.
- Returns
  - Thread affinity to CPU cores. 0: no affinities, 1: big cores first, 2: little cores first.
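
A sketch of mode-based affinity, continuing with the same context object:

    // Bind runtime threads to big cores first (mode 1).
    context->SetThreadAffinity(1);
    int mode = context->GetThreadAffinityMode();  // mode == 1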
 
void SetThreadAffinity(const std::vector<int> &core_list)
- Set the list of CPU cores to which threads are bound.
- Note
  - If both core_list and mode are set through SetThreadAffinity, core_list takes effect and mode is ignored.
- Parameters
  - core_list – [in] a vector of CPU core IDs to bind threads to.
 
std::vector<int32_t> GetThreadAffinityCoreList() const
- Get the list of CPU cores to which threads are bound.
- Returns
  - core_list: a vector of bound CPU core IDs.
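
A sketch of list-based affinity, continuing with the same context object (add #include <vector>); the core IDs are illustrative:

    // Bind runtime threads to cores 0 and 1. Per the note above, the core
    // list overrides any affinity mode set earlier.
    context->SetThreadAffinity(std::vector<int>{0, 1});
    std::vector<int32_t> cores = context->GetThreadAffinityCoreList();  // {0, 1}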
 
void SetEnableParallel(bool is_parallel)
- Set whether to perform model inference or training in parallel.
- Parameters
  - is_parallel – [in] true: run in parallel; false: do not run in parallel.
 
bool GetEnableParallel() const
- Get whether model inference or training is performed in parallel.
- Returns
  - Bool value that indicates whether execution is parallel.
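
A sketch, continuing with the same context object:

    // Enable parallel execution of model inference or training.
    context->SetEnableParallel(true);
    bool is_parallel = context->GetEnableParallel();  // true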
 
void SetBuiltInDelegate(DelegateMode mode)
- Set the built-in delegate mode to access a third-party AI framework.
- Parameters
  - mode – [in] the built-in delegate mode.
 
DelegateMode GetBuiltInDelegate() const
- Get the built-in delegate mode of the third-party AI framework.
- Returns
  - The built-in delegate mode.
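
A sketch, continuing with the same context object; the enumerator kCoreML is an assumption about this API version's DelegateMode values:

    // Route supported operators through a built-in delegate.
    context->SetBuiltInDelegate(mindspore::kCoreML);  // assumed enumerator
    mindspore::DelegateMode mode = context->GetBuiltInDelegate();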
 
void set_delegate(const std::shared_ptr<AbstractDelegate> &delegate)
- Set a delegate to access a third-party AI framework.
- Parameters
  - delegate – [in] the custom delegate.
 
std::shared_ptr<AbstractDelegate> get_delegate() const
- Get the delegate of the third-party AI framework.
- Returns
  - Pointer to the custom delegate.
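
A sketch, continuing with the same context object; MyDelegate is a hypothetical user-defined subclass of AbstractDelegate:

    // Install a custom delegate that hands operators to a third-party framework.
    auto delegate = std::make_shared<MyDelegate>();  // MyDelegate is hypothetical
    context->set_delegate(delegate);
    std::shared_ptr<mindspore::AbstractDelegate> current = context->get_delegate();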
 
void SetMultiModalHW(bool float_mode)
- Set whether a quantized model runs as a float model on multiple devices.
- Parameters
  - float_mode – [in] true: run as a float model; false: do not run as a float model.
 
bool GetMultiModalHW() const
- Get whether the model runs as a float model.
- Returns
  - Bool value that indicates whether the model runs as a float model.
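
A sketch, continuing with the same context object:

    // Run the quantized model as a float model across devices.
    context->SetMultiModalHW(true);
    bool float_mode = context->GetMultiModalHW();  // true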
 
std::vector<std::shared_ptr<DeviceInfoContext>> &MutableDeviceInfo()
- Get a mutable reference to the DeviceInfoContext vector in this context. Only MindSpore Lite supports heterogeneous scenarios with multiple members in the vector.
- Returns
  - Mutable reference to the DeviceInfoContext vector in this context.
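
A self-contained sketch that attaches a CPU device description; CPUDeviceInfo and its SetEnableFP16 setter follow the MindSpore Lite C++ API and are assumptions here:

    #include <memory>
    #include "include/api/context.h"  // assumed header path

    int main() {
      auto context = std::make_shared<mindspore::Context>();
      auto cpu_info = std::make_shared<mindspore::CPUDeviceInfo>();  // assumed subclass
      cpu_info->SetEnableFP16(false);  // assumed setter
      // Only MindSpore Lite supports multiple entries here (heterogeneous execution).
      context->MutableDeviceInfo().push_back(cpu_info);
      return 0;
    }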
 
 