• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

qiskit-community / qiskit-machine-learning / 8545395904

03 Apr 2024 08:55PM UTC coverage: 92.711% (+0.08%) from 92.636%
8545395904

Pull #793

github

web-flow
Merge 240d02fb3 into 97513d377
Pull Request #793: Patches einsum dimensionality in `torch_connector` - #716

25 of 25 new or added lines in 1 file covered. (100.0%)

10 existing lines in 1 file now uncovered.

1908 of 2058 relevant lines covered (92.71%)

0.93 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

92.0
/qiskit_machine_learning/utils/loss_functions/loss_functions.py
1
# This code is part of a Qiskit project.
2
#
3
# (C) Copyright IBM 2021, 2023.
4
#
5
# This code is licensed under the Apache License, Version 2.0. You may
6
# obtain a copy of this license in the LICENSE.txt file in the root directory
7
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
8
#
9
# Any modifications or derivative works of this code must retain this
10
# copyright notice, and modified files need to carry a notice indicating
11
# that they have been altered from the originals.
12

13
""" Loss utilities """
1✔
14

15
from abc import ABC, abstractmethod
1✔
16

17
import numpy as np
1✔
18

19
from ...exceptions import QiskitMachineLearningError
1✔
20

21

22
class Loss(ABC):
    """Abstract base class for computing a loss.

    Concrete subclasses implement :meth:`evaluate` and :meth:`gradient`; both
    operate sample-wise on arrays of shape ``(N, *)``.
    """

    def __call__(self, predict: np.ndarray, target: np.ndarray) -> np.ndarray:
        """Convenience wrapper that simply forwards to :meth:`evaluate`."""
        return self.evaluate(predict, target)

    @abstractmethod
    def evaluate(self, predict: np.ndarray, target: np.ndarray) -> np.ndarray:
        """Evaluate the loss function sample by sample.

        Inputs are expected in a shape of ``(N, *)``, where ``N`` is the number
        of samples; the loss is computed for each sample individually.

        Args:
            predict: an array of predicted values using the model.
            target: an array of the true values.

        Returns:
            An array with values of the loss function of the shape ``(N, 1)``.

        Raises:
            QiskitMachineLearningError: shapes of predict and target do not match
        """
        raise NotImplementedError

    @staticmethod
    def _validate_shapes(predict: np.ndarray, target: np.ndarray) -> None:
        """Ensure both arrays have identical shapes.

        Args:
            predict: an array of predicted values using the model
            target: an array of the true values

        Raises:
            QiskitMachineLearningError: shapes of predict and target do not match.
        """
        # Guard clause: identical shapes are fine, anything else is an error.
        if predict.shape == target.shape:
            return
        raise QiskitMachineLearningError(
            f"Shapes don't match, predict: {predict.shape}, target: {target.shape}!"
        )

    @abstractmethod
    def gradient(self, predict: np.ndarray, target: np.ndarray) -> np.ndarray:
        """Compute the gradient of the loss sample by sample.

        Inputs are expected in a shape of ``(N, *)``, where ``N`` is the number
        of samples; the gradient is computed for each sample individually.

        Args:
            predict: an array of predicted values using the model.
            target: an array of the true values.

        Returns:
            An array with gradient values of the shape ``(N, *)``. The output shape depends on
            the loss function.

        Raises:
            QiskitMachineLearningError: shapes of predict and target do not match.
        """
        raise NotImplementedError
89

90

91
class L1Loss(Loss):
    r"""
    This class computes the L1 loss (i.e. absolute error) for each sample as:

    .. math::

        \text{L1Loss}(predict, target) = \sum_{i=0}^{N_{\text{elements}}} \left| predict_i -
        target_i \right|.
    """

    def evaluate(self, predict: np.ndarray, target: np.ndarray) -> np.ndarray:
        """Return the per-sample sum of absolute differences, shape ``(N,)``
        (or element-wise absolute differences for 0-/1-dimensional input)."""
        self._validate_shapes(predict, target)

        if predict.ndim <= 1:
            return np.abs(predict - target)

        # Sum absolute differences over all non-sample axes. Summing explicitly
        # (instead of ``np.linalg.norm(..., ord=1, axis=tuple(...))``) keeps the
        # documented formula for inputs of rank 3 and higher: when given an axis
        # tuple of length > 1 together with ``ord=1``, ``np.linalg.norm`` computes
        # a *matrix* 1-norm (maximum absolute column sum), not the sum of absolute
        # entries. For 2-D input both forms agree, so this is backward compatible.
        return np.abs(predict - target).sum(axis=tuple(range(1, predict.ndim)))

    def gradient(self, predict: np.ndarray, target: np.ndarray) -> np.ndarray:
        """Return the element-wise sub-gradient ``sign(predict - target)``, same shape
        as the inputs."""
        self._validate_shapes(predict, target)

        return np.sign(predict - target)
113

114

115
class L2Loss(Loss):
    r"""
    This class computes the L2 loss (i.e. squared error) for each sample as:

    .. math::

        \text{L2Loss}(predict, target) = \sum_{i=0}^{N_{\text{elements}}} (predict_i - target_i)^2.

    """

    def evaluate(self, predict: np.ndarray, target: np.ndarray) -> np.ndarray:
        """Return the per-sample sum of squared differences (element-wise squared
        differences for 0-/1-dimensional input)."""
        self._validate_shapes(predict, target)

        diff = predict - target
        if diff.ndim <= 1:
            return diff**2

        # Reduce over every axis except the leading sample axis: the squared
        # Frobenius norm of each sample's difference is exactly the sum of
        # its squared entries.
        reduce_axes = tuple(range(1, diff.ndim))
        return np.linalg.norm(diff, axis=reduce_axes) ** 2

    def gradient(self, predict: np.ndarray, target: np.ndarray) -> np.ndarray:
        """Return the element-wise gradient ``2 * (predict - target)``."""
        self._validate_shapes(predict, target)

        return 2 * (predict - target)
137

138

139
class CrossEntropyLoss(Loss):
    r"""
    This class computes the cross entropy loss for each sample as:

    .. math::

        \text{CrossEntropyLoss}(predict, target) = -\sum_{i=0}^{N_{\text{classes}}}
        target_i * log(predict_i).
    """

    def evaluate(self, predict: np.ndarray, target: np.ndarray) -> np.ndarray:
        """Return the per-sample cross entropy as an array of shape ``(N, 1)``.

        1-D inputs are treated as a single sample of class probabilities.
        """
        self._validate_shapes(predict, target)
        if len(predict.shape) == 1:
            predict = predict.reshape(1, -1)
            target = target.reshape(1, -1)

        # Multiply target and log(predict) matrices row by row and sum each row into
        # a single float, so the einsum output is of shape (N,); then reshape to (N, 1).
        # Before taking the log we clip the predicted probabilities at a small positive
        # number, so a class predicted with 0 probability does not produce `nan`.
        # The natural logarithm is used (not ``np.log2``) so that the loss matches the
        # documented formula and is consistent with :meth:`gradient`, which implements
        # the natural-log softmax cross-entropy gradient without a ``1/ln(2)`` factor.
        val = -np.einsum(
            "ij,ij->i", target, np.log(np.clip(predict, a_min=1e-10, a_max=None))
        ).reshape(-1, 1)
        return val

    def gradient(self, predict: np.ndarray, target: np.ndarray) -> np.ndarray:
        """Assume softmax is used, and target vector may or may not be one-hot encoding"""

        self._validate_shapes(predict, target)
        if len(predict.shape) == 1:
            predict = predict.reshape(1, -1)
            target = target.reshape(1, -1)

        # Sum up target along rows, then multiply predict by this sum element wise,
        # then subtract target. For a softmax output with natural-log cross entropy
        # this is the gradient with respect to the softmax inputs (logits); for a
        # one-hot (or any probability-normalized) target the row sums are 1 and the
        # expression reduces to the familiar ``predict - target``.
        grad = np.einsum("ij,i->ij", predict, np.sum(target, axis=1)) - target

        return grad
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc