Class Dataset
Defined in File datasets.h
Inheritance Relationships
Base Type
public std::enable_shared_from_this< Dataset >
Derived Types
public mindspore::dataset::AGNewsDataset (Class AGNewsDataset)
public mindspore::dataset::AlbumDataset (Class AlbumDataset)
public mindspore::dataset::AmazonReviewDataset (Class AmazonReviewDataset)
public mindspore::dataset::BatchDataset (Class BatchDataset)
public mindspore::dataset::BucketBatchByLengthDataset (Class BucketBatchByLengthDataset)
public mindspore::dataset::CLUEDataset (Class CLUEDataset)
public mindspore::dataset::CMUArcticDataset (Class CMUArcticDataset)
public mindspore::dataset::CSVDataset (Class CSVDataset)
public mindspore::dataset::Caltech256Dataset (Class Caltech256Dataset)
public mindspore::dataset::CelebADataset (Class CelebADataset)
public mindspore::dataset::Cifar100Dataset (Class Cifar100Dataset)
public mindspore::dataset::Cifar10Dataset (Class Cifar10Dataset)
public mindspore::dataset::CityscapesDataset (Class CityscapesDataset)
public mindspore::dataset::CoNLL2000Dataset (Class CoNLL2000Dataset)
public mindspore::dataset::CocoDataset (Class CocoDataset)
public mindspore::dataset::ConcatDataset (Class ConcatDataset)
public mindspore::dataset::DBpediaDataset (Class DBpediaDataset)
public mindspore::dataset::DIV2KDataset (Class DIV2KDataset)
public mindspore::dataset::EMnistDataset (Class EMnistDataset)
public mindspore::dataset::EnWik9Dataset (Class EnWik9Dataset)
public mindspore::dataset::FakeImageDataset (Class FakeImageDataset)
public mindspore::dataset::FashionMnistDataset (Class FashionMnistDataset)
public mindspore::dataset::FilterDataset (Class FilterDataset)
public mindspore::dataset::FlickrDataset (Class FlickrDataset)
public mindspore::dataset::GTZANDataset (Class GTZANDataset)
public mindspore::dataset::IMDBDataset (Class IMDBDataset)
public mindspore::dataset::IWSLT2016Dataset (Class IWSLT2016Dataset)
public mindspore::dataset::IWSLT2017Dataset (Class IWSLT2017Dataset)
public mindspore::dataset::ImageFolderDataset (Class ImageFolderDataset)
public mindspore::dataset::KITTIDataset (Class KITTIDataset)
public mindspore::dataset::KMnistDataset (Class KMnistDataset)
public mindspore::dataset::LFWDataset (Class LFWDataset)
public mindspore::dataset::LJSpeechDataset (Class LJSpeechDataset)
public mindspore::dataset::LSUNDataset (Class LSUNDataset)
public mindspore::dataset::LibriTTSDataset (Class LibriTTSDataset)
public mindspore::dataset::ManifestDataset (Class ManifestDataset)
public mindspore::dataset::MapDataset (Class MapDataset)
public mindspore::dataset::MindDataDataset (Class MindDataDataset)
public mindspore::dataset::MnistDataset (Class MnistDataset)
public mindspore::dataset::Multi30kDataset (Class Multi30kDataset)
public mindspore::dataset::OmniglotDataset (Class OmniglotDataset)
public mindspore::dataset::PennTreebankDataset (Class PennTreebankDataset)
public mindspore::dataset::PhotoTourDataset (Class PhotoTourDataset)
public mindspore::dataset::Places365Dataset (Class Places365Dataset)
public mindspore::dataset::ProjectDataset (Class ProjectDataset)
public mindspore::dataset::QMnistDataset (Class QMnistDataset)
public mindspore::dataset::RandomDataDataset (Class RandomDataDataset)
public mindspore::dataset::RenameDataset (Class RenameDataset)
public mindspore::dataset::RepeatDataset (Class RepeatDataset)
public mindspore::dataset::SBUDataset (Class SBUDataset)
public mindspore::dataset::SQuADDataset (Class SQuADDataset)
public mindspore::dataset::STL10Dataset (Class STL10Dataset)
public mindspore::dataset::SemeionDataset (Class SemeionDataset)
public mindspore::dataset::ShuffleDataset (Class ShuffleDataset)
public mindspore::dataset::SkipDataset (Class SkipDataset)
public mindspore::dataset::SogouNewsDataset (Class SogouNewsDataset)
public mindspore::dataset::SpeechCommandsDataset (Class SpeechCommandsDataset)
public mindspore::dataset::TFRecordDataset (Class TFRecordDataset)
public mindspore::dataset::TakeDataset (Class TakeDataset)
public mindspore::dataset::TedliumDataset (Class TedliumDataset)
public mindspore::dataset::TextFileDataset (Class TextFileDataset)
public mindspore::dataset::UDPOSDataset (Class UDPOSDataset)
public mindspore::dataset::USPSDataset (Class USPSDataset)
public mindspore::dataset::VOCDataset (Class VOCDataset)
public mindspore::dataset::WIDERFaceDataset (Class WIDERFaceDataset)
public mindspore::dataset::WikiTextDataset (Class WikiTextDataset)
public mindspore::dataset::YahooAnswersDataset (Class YahooAnswersDataset)
public mindspore::dataset::YelpReviewDataset (Class YelpReviewDataset)
public mindspore::dataset::YesNoDataset (Class YesNoDataset)
public mindspore::dataset::ZipDataset (Class ZipDataset)
Class Documentation
-
class Dataset : public std::enable_shared_from_this<Dataset>
A base class to represent a dataset in the data pipeline.
Subclassed by mindspore::dataset::AGNewsDataset, mindspore::dataset::AlbumDataset, mindspore::dataset::AmazonReviewDataset, mindspore::dataset::BatchDataset, mindspore::dataset::BucketBatchByLengthDataset, mindspore::dataset::CLUEDataset, mindspore::dataset::CMUArcticDataset, mindspore::dataset::CSVDataset, mindspore::dataset::Caltech256Dataset, mindspore::dataset::CelebADataset, mindspore::dataset::Cifar100Dataset, mindspore::dataset::Cifar10Dataset, mindspore::dataset::CityscapesDataset, mindspore::dataset::CoNLL2000Dataset, mindspore::dataset::CocoDataset, mindspore::dataset::ConcatDataset, mindspore::dataset::DBpediaDataset, mindspore::dataset::DIV2KDataset, mindspore::dataset::EMnistDataset, mindspore::dataset::EnWik9Dataset, mindspore::dataset::FakeImageDataset, mindspore::dataset::FashionMnistDataset, mindspore::dataset::FilterDataset, mindspore::dataset::FlickrDataset, mindspore::dataset::GTZANDataset, mindspore::dataset::IMDBDataset, mindspore::dataset::IWSLT2016Dataset, mindspore::dataset::IWSLT2017Dataset, mindspore::dataset::ImageFolderDataset, mindspore::dataset::KITTIDataset, mindspore::dataset::KMnistDataset, mindspore::dataset::LFWDataset, mindspore::dataset::LJSpeechDataset, mindspore::dataset::LSUNDataset, mindspore::dataset::LibriTTSDataset, mindspore::dataset::ManifestDataset, mindspore::dataset::MapDataset, mindspore::dataset::MindDataDataset, mindspore::dataset::MnistDataset, mindspore::dataset::Multi30kDataset, mindspore::dataset::OmniglotDataset, mindspore::dataset::PennTreebankDataset, mindspore::dataset::PhotoTourDataset, mindspore::dataset::Places365Dataset, mindspore::dataset::ProjectDataset, mindspore::dataset::QMnistDataset, mindspore::dataset::RandomDataDataset, mindspore::dataset::RenameDataset, mindspore::dataset::RepeatDataset, mindspore::dataset::SBUDataset, mindspore::dataset::SQuADDataset, mindspore::dataset::STL10Dataset, mindspore::dataset::SemeionDataset, mindspore::dataset::ShuffleDataset, mindspore::dataset::SkipDataset, mindspore::dataset::SogouNewsDataset, mindspore::dataset::SpeechCommandsDataset, mindspore::dataset::TFRecordDataset, mindspore::dataset::TakeDataset, mindspore::dataset::TedliumDataset, mindspore::dataset::TextFileDataset, mindspore::dataset::UDPOSDataset, mindspore::dataset::USPSDataset, mindspore::dataset::VOCDataset, mindspore::dataset::WIDERFaceDataset, mindspore::dataset::WikiTextDataset, mindspore::dataset::YahooAnswersDataset, mindspore::dataset::YelpReviewDataset, mindspore::dataset::YesNoDataset, mindspore::dataset::ZipDataset
Public Functions
-
Dataset()
Constructor.
-
virtual ~Dataset() = default
Destructor.
-
int64_t GetDatasetSize(bool estimate = false)
Get the dataset size.
- Parameters
estimate – [in] This is only supported by some of the ops; it speeds up the process of getting the dataset size at the expense of accuracy.
- Returns
Dataset size. If failed, return -1.
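A minimal usage sketch (folder_path is a placeholder for an existing image directory; it is not defined by this API):
/* Query the number of rows the pipeline will produce */
std::shared_ptr<Dataset> ds = ImageFolder(folder_path, true);
int64_t size = ds->GetDatasetSize();
if (size == -1) {
  // -1 indicates the size could not be determined
}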
-
std::vector<mindspore::DataType> GetOutputTypes()
Get the output type.
- Returns
A vector containing the output DataType of the dataset. If failed, return an empty vector.
-
std::vector<std::vector<int64_t>> GetOutputShapes()
Get the output shape.
- Returns
A vector containing the output TensorShape of the dataset. If failed, return an empty vector.
-
int64_t GetBatchSize()
Get the batch size.
- Returns
Batch size configuration of dataset.
-
int64_t GetRepeatCount()
Get the repeat count.
- Returns
Repeat count configuration of dataset.
-
int64_t GetNumClasses()
Get the number of classes.
- Returns
Number of classes of dataset. If failed, return -1.
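A brief sketch that queries the getters above on an existing pipeline (ds is assumed to be a dataset built as in the other examples; the calls and return types follow the signatures listed in this section):
/* Inspect pipeline metadata; each getter reports its own failure value */
std::vector<mindspore::DataType> types = ds->GetOutputTypes();       // empty on failure
std::vector<std::vector<int64_t>> shapes = ds->GetOutputShapes();    // empty on failure
int64_t batch_size = ds->GetBatchSize();
int64_t repeat_count = ds->GetRepeatCount();
int64_t num_classes = ds->GetNumClasses();                           // -1 on failure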
-
inline std::vector<std::string> GetColumnNames()
Get the column names.
- Returns
A vector containing all column names of the dataset. If failed, return an empty vector.
-
inline std::vector<std::pair<std::string, std::vector<int32_t>>> GetClassIndexing()
Get the class indexing.
- Returns
A vector of (class name, class indices) pairs describing the class indexing of the dataset. If failed, return an empty vector.
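A short sketch of the two column-level getters (again, ds is assumed to be an existing Dataset object):
std::vector<std::string> columns = ds->GetColumnNames();
std::vector<std::pair<std::string, std::vector<int32_t>>> indexing = ds->GetClassIndexing();
// Both getters return an empty container on failure.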
-
std::shared_ptr<Dataset> SetNumWorkers(int32_t num_workers)
Function to set runtime number of workers.
- Parameters
num_workers – [in] The number of threads in this operator.
- Returns
Shared pointer to the original object.
Example
/* Set number of workers (threads) to process the dataset in parallel */
std::shared_ptr<Dataset> ds = ImageFolder(folder_path, true);
ds = ds->SetNumWorkers(16);
-
std::shared_ptr<PullIterator> CreatePullBasedIterator(const std::vector<std::vector<char>> &columns = {})
Function to create a PullBasedIterator over the Dataset.
- Parameters
columns – [in] List of columns to be used to specify the order of columns.
- Returns
Shared pointer to the Iterator.
Example
/* dataset is an instance of Dataset object */
std::shared_ptr<PullIterator> iter = dataset->CreatePullBasedIterator();
std::unordered_map<std::string, mindspore::MSTensor> row;
iter->GetNextRow(&row);
-
inline std::shared_ptr<Iterator> CreateIterator(const std::vector<std::string> &columns = {}, int32_t num_epochs = -1)
Function to create an Iterator over the Dataset pipeline.
- Parameters
columns – [in] List of columns to be used to specify the order of columns.
num_epochs – [in] Number of epochs to run through the pipeline (default=-1, which means infinite epochs). An empty row is returned at the end of each epoch.
- Returns
Shared pointer to the Iterator.
Example
/* dataset is an instance of Dataset object */
std::shared_ptr<Iterator> iter = dataset->CreateIterator();
std::unordered_map<std::string, mindspore::MSTensor> row;
iter->GetNextRow(&row);
-
inline bool DeviceQueue(const std::string &queue_name = "", const std::string &device_type = "", int32_t device_id = 0, int32_t num_epochs = -1, bool send_epoch_end = true, int32_t total_batches = 0, bool create_data_info_queue = false)
Function to transfer data through a device.
Note
If the device is Ascend, the features of the data will be transferred one by one. The limit for each transmission is 256 MB.
- Parameters
queue_name – [in] Channel name (default="", create a new unique name).
device_type – [in] Type of device (default="", get from MSContext).
device_id – [in] ID of device (default=0, get from MSContext).
num_epochs – [in] Number of epochs (default=-1, infinite epochs).
send_epoch_end – [in] Whether to send end of sequence to device or not (default=true).
total_batches – [in] Number of batches to be sent to the device (default=0, all data).
create_data_info_queue – [in] Whether to create queue which stores types and shapes of data or not (default=false).
- Returns
Returns true if no error was encountered, otherwise false.
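A minimal sketch of transferring a pipeline to a device with the default arguments (queue name and device type are resolved from MSContext); this is only an illustration of the call, not a complete training setup:
std::shared_ptr<Dataset> ds = ImageFolder(folder_path, true);
ds = ds->SetNumWorkers(4);
bool success = ds->DeviceQueue();  // default arguments: send all data, infinite epochs
if (!success) {
  // handle transfer failure
}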
-
inline bool Save(const std::string &dataset_path, int32_t num_files = 1, const std::string &dataset_type = "mindrecord")
Function to create a Saver to save the dynamic data processed by the dataset pipeline.
Note
Usage restrictions:
Supported dataset formats: ‘mindrecord’ only.
To save the samples in order, set dataset’s shuffle to false and num_files to 1.
Before calling this function, do not use the batch operator, the repeat operator, or data augmentation operators with a random attribute in the map operator.
MindRecord does not support bool, uint64, multi-dimensional uint8 (drop dimension), or multi-dimensional string.
- Parameters
dataset_path – [in] Path to dataset file.
num_files – [in] Number of dataset files (default=1).
dataset_type – [in] Dataset format (default=”mindrecord”).
- Returns
Returns true if no error was encountered, otherwise false.
Example
/* Create a dataset and save its data into MindRecord */
std::string folder_path = "/path/to/cifar_dataset";
std::shared_ptr<Dataset> ds = Cifar10(folder_path, "all", std::make_shared<SequentialSampler>(0, 10));
std::string save_file = "Cifar10Data.mindrecord";
bool rc = ds->Save(save_file);
-
std::shared_ptr<BatchDataset> Batch(int32_t batch_size, bool drop_remainder = false)
Function to create a BatchDataset.
Note
Combines batch_size number of consecutive rows into batches.
- Parameters
batch_size – [in] The number of rows each batch is created with.
drop_remainder – [in] Determines whether or not to drop the last possibly incomplete batch. If true, and if there are less than batch_size rows available to make the last batch, then those rows will be dropped and not propagated to the next node.
- Returns
Shared pointer to the current Dataset.
Example
/* Create a dataset where every 100 rows are combined into a batch */
std::shared_ptr<Dataset> ds = ImageFolder(folder_path, true);
ds = ds->Batch(100, true);
-
inline std::shared_ptr<BucketBatchByLengthDataset> BucketBatchByLength(const std::vector<std::string> &column_names, const std::vector<int32_t> &bucket_boundaries, const std::vector<int32_t> &bucket_batch_sizes, const std::function<MSTensorVec(MSTensorVec)> &element_length_function = nullptr, const std::map<std::string, std::pair<std::vector<int64_t>, MSTensor>> &pad_info = {}, bool pad_to_bucket_boundary = false, bool drop_remainder = false)
Function to create a BucketBatchByLengthDataset.
Note
Bucket elements according to their lengths. Each bucket will be padded and batched when it is full.
- Parameters
column_names – [in] Columns passed to element_length_function.
bucket_boundaries – [in] A list consisting of the upper boundaries of the buckets. Must be strictly increasing. If there are n boundaries, n+1 buckets are created: One bucket for [0, bucket_boundaries[0]), one bucket for [bucket_boundaries[i], bucket_boundaries[i+1]) for each 0<i<n, and one bucket for [bucket_boundaries[n-1], inf).
bucket_batch_sizes – [in] A list consisting of the batch sizes for each bucket. Must contain one more element than bucket_boundaries.
element_length_function – [in] A function pointer that takes in MSTensorVec and outputs a MSTensorVec. The output must contain a single tensor containing a single int32_t. If no value is provided, then size of column_names must be 1, and the size of the first dimension of that column will be taken as the length (default=nullptr).
pad_info – [in] Represents how to batch each column. The key corresponds to the column name, the value must be a tuple of 2 elements. The first element corresponds to the shape to pad to, and the second element corresponds to the value to pad with. If a column is not specified, then that column will be padded to the longest in the current batch, and 0 will be used as the padding value. Any unspecified dimensions will be padded to the longest in the current batch, unless pad_to_bucket_boundary is true. If no padding is wanted, set pad_info to None (default=empty dictionary).
pad_to_bucket_boundary – [in] If true, will pad each unspecified dimension in pad_info to the bucket_boundary minus 1. If there are any elements that fall into the last bucket, an error will occur (default=false).
drop_remainder – [in] If true, will drop the last batch for each bucket if it is not a full batch (default=false).
- Returns
Shared pointer to the current Dataset.
Example
/* Bucket elements according to their lengths */
std::shared_ptr<Dataset> ds = Mnist(folder_path, "all", std::make_shared<RandomSampler>(false, 10));
ds = ds->BucketBatchByLength({"image"}, {1, 2, 3}, {4, 5, 6, 7});
-
inline std::shared_ptr<SentencePieceVocab> BuildSentencePieceVocab(const std::vector<std::string> &col_names, int32_t vocab_size, float character_coverage, SentencePieceModel model_type, const std::unordered_map<std::string, std::string> ¶ms)
Function to create a SentencePieceVocab from source dataset.
Note
Build a SentencePieceVocab from a dataset.
- Parameters
col_names – [in] Column names to get words from. It can be a vector of column names.
vocab_size – [in] Vocabulary size.
character_coverage – [in] Percentage of characters covered by the model; must be between 0.98 and 1.0. Good defaults are 0.9995 for languages with rich character sets like Japanese or Chinese, and 1.0 for other languages with small character sets.
model_type – [in] Model type. Choose from unigram (default), bpe, char, or word. The input sentence must be pretokenized when using word type.
params – [in] A map containing additional option parameters for the sentencepiece library.
- Returns
Shared pointer to the SentencePieceVocab.
Example
/* Build a SentencePieceVocab from TextFile dataset */
std::string vocab_file = "/path/to/txtfile";
std::shared_ptr<Dataset> ds_vocab = TextFile({vocab_file}, 0, ShuffleMode::kFalse);
std::shared_ptr<SentencePieceVocab> vocab =
    ds_vocab->BuildSentencePieceVocab({}, 5000, 0.9995, SentencePieceModel::kUnigram, {});
-
inline std::shared_ptr<Vocab> BuildVocab(const std::vector<std::string> &columns = {}, const std::pair<int64_t, int64_t> &freq_range = {0, kDeMaxFreq}, int64_t top_k = kDeMaxTopk, const std::vector<std::string> &special_tokens = {}, bool special_first = true)
Function to create a Vocab from source dataset.
Note
Build a vocab from a dataset. This would collect all the unique words in a dataset and return a vocab which contains top_k most frequent words (if top_k is specified).
- Parameters
columns – [in] Column names to get words from. It can be a vector of column names.
freq_range – [in] A pair of integers (min_frequency, max_frequency). Words within the frequency range will be kept. 0 <= min_frequency <= max_frequency <= total_words. min_frequency and max_frequency can be left at their defaults, which correspond to 0 and total_words respectively.
top_k – [in] Number of words to be built into the vocab. The top_k most frequent words are taken, after freq_range has been applied. If there are fewer than top_k words, all words will be taken.
special_tokens – [in] A list of strings, each of which is a special token.
special_first – [in] Whether special_tokens will be prepended or appended to the vocab. If special_tokens is specified and special_first is left at its default, special_tokens will be prepended.
- Returns
Shared pointer to the Vocab.
Example
/* Build a Vocab from TextFile dataset */
std::string vocab_file = "/path/to/txtfile";
std::shared_ptr<Dataset> ds = TextFile({vocab_file}, 0, ShuffleMode::kFalse);
std::shared_ptr<Vocab> vocab = ds->BuildVocab();
Function to create a ConcatDataset.
Note
Concatenate the datasets in the input.
- Parameters
datasets – [in] List of shared pointers to the dataset that should be concatenated together.
- Returns
Shared pointer to the current Dataset.
Example
/* Create a dataset by concatenating dataset_1 and dataset_2 with "+" operator */
std::shared_ptr<Dataset> dataset = dataset_1 + dataset_2;

/* Create a dataset by concatenating dataset_1 and dataset_2 with concat operation */
std::shared_ptr<Dataset> dataset = dataset_1->Concat({dataset_2});
-
inline std::shared_ptr<FilterDataset> Filter(const std::function<MSTensorVec(MSTensorVec)> &predicate, const std::vector<std::string> &input_columns = {})
Function to filter dataset by predicate.
Note
If input_columns is not provided or empty, all columns will be used.
- Parameters
predicate – [in] A callable function that returns a boolean value. If it returns false, the element is filtered out.
input_columns – [in] List of names of the input columns to filter.
- Returns
Shared pointer to the current Dataset.
Example
/* Define a predicate function */
MSTensorVec Predicate1(MSTensorVec in) {
  // Return true if input is equal to 3
  uint64_t input_value;
  TensorRow input = VecToRow(in);
  (void)input.at(0)->GetItemAt(&input_value, {0});
  bool result = (input_value == 3);
  // Convert from boolean to TensorRow
  TensorRow output;
  std::shared_ptr<Tensor> out;
  (void)Tensor::CreateEmpty(mindspore::dataset::TensorShape({}),
                            mindspore::dataset::DataType(mindspore::dataset::DataType::Type::DE_BOOL), &out);
  (void)out->SetItemAt({}, result);
  output.push_back(out);
  return RowToVec(output);
}

/* Apply the predicate function to the dataset (ds is an existing Dataset object) */
ds = ds->Filter(Predicate1, {"label"});
Function to create a MapDataset.
Note
Applies each operation in operations to this dataset.
- Parameters
operations – [in] Vector of raw pointers to TensorTransform objects to be applied on the dataset. Operations are applied in the order they appear in this list.
input_columns – [in] Vector of the names of the columns that will be passed to the first operation as input. The size of this list must match the number of input columns expected by the first operator. The default input_columns is the first column.
output_columns – [in] Vector of names assigned to the columns outputted by the last operation. This parameter is mandatory if len(input_columns) != len(output_columns). The size of this list must match the number of output columns of the last operation. The default output_columns will have the same name as the input columns, i.e., the columns will be replaced.
project_columns – [in] A list of column names to project.
cache – [in] Tensor cache to use (default=nullptr, which means no cache is used).
callbacks – [in] List of Dataset callbacks to be called.
- Returns
Shared pointer to the current Dataset.
Example
// Create objects for the tensor ops
std::shared_ptr<TensorTransform> decode_op = std::make_shared<vision::Decode>(true);
std::shared_ptr<TensorTransform> random_color_op = std::make_shared<vision::RandomColor>(0.0, 0.0);

/* 1) Simple map example */
// Apply decode_op on column "image". This column will be replaced by the outputted
// column of decode_op. Since column_order is not provided, both columns "image"
// and "label" will be propagated to the child node in their original order.
dataset = dataset->Map({decode_op}, {"image"});

// Decode and rename column "image" to "decoded_image".
dataset = dataset->Map({decode_op}, {"image"}, {"decoded_image"});

// Specify the order of the output columns.
dataset = dataset->Map({decode_op}, {"image"}, {}, {"label", "image"});

// Rename column "image" to "decoded_image" and also specify the order of the output columns.
dataset = dataset->Map({decode_op}, {"image"}, {"decoded_image"}, {"label", "decoded_image"});

// Rename column "image" to "decoded_image" and keep only this column.
dataset = dataset->Map({decode_op}, {"image"}, {"decoded_image"}, {"decoded_image"});

/* 2) Map example with more than one operation */
// Create a dataset where the images are decoded, then randomly color adjusted.
// decode_op takes column "image" as input and outputs one column. The column
// outputted by decode_op is passed as input to random_color_op.
// random_color_op will output one column. Column "image" will be replaced by
// the column outputted by random_color_op (the very last operation). All other
// columns are unchanged. Since column_order is not specified, the order of the
// columns will remain the same.
dataset = dataset->Map({decode_op, random_color_op}, {"image"});
Function to create a MapDataset.
Note
Applies each operation in operations to this dataset.
- Parameters
operations – [in] Vector of shared pointers to TensorTransform objects to be applied on the dataset. Operations are applied in the order they appear in this list.
input_columns – [in] Vector of the names of the columns that will be passed to the first operation as input. The size of this list must match the number of input columns expected by the first operator. The default input_columns is the first column.
output_columns – [in] Vector of names assigned to the columns outputted by the last operation. This parameter is mandatory if len(input_columns) != len(output_columns). The size of this list must match the number of output columns of the last operation. The default output_columns will have the same name as the input columns, i.e., the columns will be replaced.
project_columns – [in] A list of column names to project.
cache – [in] Tensor cache to use (default=nullptr which means no cache is used).
callbacks – [in] List of Dataset callbacks to be called.
- Returns
Shared pointer to the current Dataset.
Function to create a MapDataset.
Note
Applies each operation in operations to this dataset.
- Parameters
operations – [in] Vector of TensorTransform objects to be applied on the dataset. Operations are applied in the order they appear in this list.
input_columns – [in] Vector of the names of the columns that will be passed to the first operation as input. The size of this list must match the number of input columns expected by the first operator. The default input_columns is the first column.
output_columns – [in] Vector of names assigned to the columns outputted by the last operation. This parameter is mandatory if len(input_columns) != len(output_columns). The size of this list must match the number of output columns of the last operation. The default output_columns will have the same name as the input columns, i.e., the columns will be replaced.
project_columns – [in] A list of column names to project.
cache – [in] Tensor cache to use (default=nullptr which means no cache is used).
callbacks – [in] List of Dataset callbacks to be called.
- Returns
Shared pointer to the current Dataset.
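A hedged sketch for this overload, assuming the TensorTransform objects can be passed directly by reference (the entry above only states "Vector of TensorTransform objects"); decode_op and random_color_op mirror the ops used in the earlier Map example:
// Assumption: this overload takes the transform objects themselves rather than pointers
vision::Decode decode_op(true);
vision::RandomColor random_color_op(0.0, 0.0);
// Apply decode first, then the random color adjustment, on column "image"
dataset = dataset->Map({decode_op, random_color_op}, {"image"});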
-
inline std::shared_ptr<ProjectDataset> Project(const std::vector<std::string> &columns)
Function to create a Project Dataset.
Note
Applies project to the dataset.
- Parameters
columns – [in] The names of the columns to project.
- Returns
Shared pointer to the current Dataset.
Example
/* Reorder the original column names in dataset */
std::shared_ptr<Dataset> ds = Mnist(folder_path, "all", std::make_shared<RandomSampler>(false, 10));
ds = ds->Project({"label", "image"});
-
inline std::shared_ptr<RenameDataset> Rename(const std::vector<std::string> &input_columns, const std::vector<std::string> &output_columns)
Function to create a Rename Dataset.
Note
Renames the columns in the input dataset.
- Parameters
input_columns – [in] List of the input columns to rename.
output_columns – [in] List of the output columns.
- Returns
Shared pointer to the current Dataset.
Example
/* Rename the original column names in dataset */
std::shared_ptr<Dataset> ds = Mnist(folder_path, "all", std::make_shared<RandomSampler>(false, 10));
ds = ds->Rename({"image", "label"}, {"image_output", "label_output"});
-
inline std::shared_ptr<RepeatDataset> Repeat(int32_t count = -1)
Function to create a RepeatDataset.
Note
Repeats this dataset count times. Repeat indefinitely if count is -1.
- Parameters
count – [in] Number of times the dataset should be repeated.
- Returns
Shared pointer to the current Dataset.
Example
/* Create a dataset where the dataset is repeated for 50 epochs */
std::shared_ptr<Dataset> ds = Mnist(folder_path, "all", std::make_shared<RandomSampler>(false, 10));
ds = ds->Repeat(50);
-
inline std::shared_ptr<ShuffleDataset> Shuffle(int32_t buffer_size)
Function to create a Shuffle Dataset.
Note
Randomly shuffles the rows of this dataset.
- Parameters
buffer_size – [in] The size of the buffer (must be larger than 1) for shuffling.
- Returns
Shared pointer to the current Dataset.
Example
/* Create a shuffled dataset using a shuffle buffer of size 4 */
std::shared_ptr<Dataset> ds = Mnist(folder_path, "all", std::make_shared<RandomSampler>(false, 10));
ds = ds->Shuffle(4);
-
inline std::shared_ptr<SkipDataset> Skip(int32_t count)
Function to create a SkipDataset.
Note
Skips count elements in this dataset.
- Parameters
count – [in] Number of elements of this dataset to be skipped.
- Returns
Shared pointer to the current Dataset.
Example
/* Create a dataset which skips first 3 elements from data */
std::shared_ptr<Dataset> ds = Mnist(folder_path, "all", std::make_shared<RandomSampler>(false, 10));
ds = ds->Skip(3);
-
inline std::shared_ptr<TakeDataset> Take(int32_t count = -1)
Function to create a TakeDataset.
Note
Takes count elements in this dataset.
- Parameters
count – [in] Number of elements of this dataset to be taken.
- Returns
Shared pointer to the current Dataset.
Example
/* Create a dataset which takes only the first 50 elements */
std::shared_ptr<Dataset> ds = Mnist(folder_path, "all", std::make_shared<RandomSampler>(false, 10));
ds = ds->Take(50);
Function to create a Zip Dataset.
Note
Applies zip to the dataset.
- Parameters
datasets – [in] A list of shared pointers to the datasets that we want to zip.
- Returns
Shared pointer to the current Dataset.
Example
/* Create a dataset which is the combination of ds1 and ds2 */
std::shared_ptr<Dataset> ds1 = ImageFolder(folder_path, true, std::make_shared<RandomSampler>(false, 10));
std::shared_ptr<Dataset> ds2 = Cifar10(folder_path, "all", std::make_shared<RandomSampler>(false, 10));
std::shared_ptr<Dataset> ds = ds1->Zip({ds2});