Source code for mindspore.train.callback._loss_monitor

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""LossMonitor Callback class."""

import numpy as np

from mindspore.common.tensor import Tensor
from mindspore._checkparam import Validator
from ._callback import Callback


class LossMonitor(Callback):
    """
    Monitor the loss in training. If the loss is NaN or INF, it terminates training.

    Note:
        If per_print_times is 0, the loss is not printed.

    Args:
        per_print_times (int): Print the training loss every `per_print_times` steps.
            In dataset sink mode, the loss is printed at the nearest sink step. Default: 1.

    Raises:
        ValueError: If per_print_times is not an integer or is less than zero.

    Examples:
        >>> from mindspore import Model, nn
        >>>
        >>> net = LeNet5()
        >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
        >>> optim = nn.Momentum(net.trainable_params(), 0.01, 0.9)
        >>> model = Model(net, loss_fn=loss, optimizer=optim)
        >>> data_path = './MNIST_Data'
        >>> dataset = create_dataset(data_path)
        >>> loss_monitor = LossMonitor()
        >>> model.train(10, dataset, callbacks=loss_monitor)
    """

    def __init__(self, per_print_times=1):
        super(LossMonitor, self).__init__()
        Validator.check_non_negative_int(per_print_times)
        self._per_print_times = per_print_times
        self._last_print_time = 0
    def step_end(self, run_context):
        """
        Print the training loss at the end of a step.

        Args:
            run_context (RunContext): Holds information about the model and the current run state.
        """
        cb_params = run_context.original_args()
        loss = cb_params.net_outputs

        # The network may return a tuple/list of outputs; take the first element as the loss.
        if isinstance(loss, (tuple, list)):
            if isinstance(loss[0], Tensor) and isinstance(loss[0].asnumpy(), np.ndarray):
                loss = loss[0]

        # Reduce a Tensor loss to a Python float.
        if isinstance(loss, Tensor) and isinstance(loss.asnumpy(), np.ndarray):
            loss = float(np.mean(loss.asnumpy()))

        cur_step_in_epoch = (cb_params.cur_step_num - 1) % cb_params.batch_num + 1

        if isinstance(loss, float) and (np.isnan(loss) or np.isinf(loss)):
            raise ValueError("epoch: {} step: {}. Invalid loss, terminating training.".format(
                cb_params.cur_epoch_num, cur_step_in_epoch))

        # In disaster recovery scenarios, cb_params.cur_step_num may be rolled back to a
        # previous step and become less than self._last_print_time, so self._last_print_time
        # needs to be rewound to match.
        if self._per_print_times != 0 and (cb_params.cur_step_num <= self._last_print_time):
            while cb_params.cur_step_num <= self._last_print_time:
                self._last_print_time -= \
                    max(self._per_print_times, cb_params.batch_num if cb_params.dataset_sink_mode else 1)

        if self._per_print_times != 0 and (cb_params.cur_step_num - self._last_print_time) >= self._per_print_times:
            self._last_print_time = cb_params.cur_step_num
            print("epoch: %s step: %s, loss is %s" % (cb_params.cur_epoch_num,
                                                      cur_step_in_epoch, loss), flush=True)
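
The disaster-recovery rewind in step_end is easiest to check in isolation. The sketch below (plain Python; the function name is illustrative, not part of this module) restates the arithmetic: the print watermark is decremented one print interval at a time until it drops below the rolled-back step counter. An interval is per_print_times steps, or a full epoch of batch_num steps in sink mode, since step_end then fires once per sink run.

def rewind_last_print_time(last_print_time, cur_step_num, per_print_times,
                           batch_num, dataset_sink_mode):
    # One print interval: per_print_times steps, but never less than a full
    # epoch's worth of steps when data sinking batches callbacks per epoch.
    interval = max(per_print_times, batch_num if dataset_sink_mode else 1)
    while cur_step_num <= last_print_time:
        last_print_time -= interval
    return last_print_time

# A rollback from step 250 to step 90 with per_print_times=100 rewinds the
# watermark from 200 to 0, so printing resumes on schedule at step 100.
assert rewind_last_print_time(200, 90, 100, 32, False) == 0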
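
A minimal usage sketch of the callback's observable behavior, driving step_end by hand. It assumes MindSpore is installed so the module imports above resolve; the _StubRunContext class and the SimpleNamespace fields are illustrative stand-ins for the real RunContext and cb_params, not part of the MindSpore API.

from types import SimpleNamespace

class _StubRunContext:
    """Stands in for RunContext; exposes only the original_args() accessor."""
    def __init__(self, cb_params):
        self._cb_params = cb_params

    def original_args(self):
        return self._cb_params

monitor = LossMonitor(per_print_times=2)
params = SimpleNamespace(net_outputs=0.25, cur_step_num=2, batch_num=10,
                         cur_epoch_num=1, dataset_sink_mode=False)
monitor.step_end(_StubRunContext(params))  # prints "epoch: 1 step: 2, loss is 0.25"

params.net_outputs = float("nan")
params.cur_step_num = 3
monitor.step_end(_StubRunContext(params))  # raises ValueError: invalid loss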