Skip to content

loss

L1Loss

Bases: Loss

L1 loss

Source code in slimfit/loss.py
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
class L1Loss(Loss):
    """L1 (absolute error) loss.

    Computes elementwise absolute residuals ``|target - model|`` per dataset,
    optionally scaled by per-dataset weights, then applies the configured
    reduction.
    """

    # todo refactor ydata / ymodel?
    def __call__(
        self, dependent_data: dict[str, np.ndarray], target_data: dict[str, np.ndarray]
    ) -> np.ndarray | float:
        """Evaluate the loss.

        Args:
            dependent_data: Model-evaluated values per dataset name.
            target_data: Target (measured) values per dataset name.

        Returns:
            Reduced loss; scalar or array depending on the reduction strategy.
        """
        if self.weights is None:
            # Fix: take the absolute value of the residuals. Without it,
            # positive and negative residuals cancel under the 'sum'/'mean'
            # reductions and the result is not an L1 loss.
            residuals = {k: np.abs(target_data[k] - dependent_data[k]) for k in target_data}
        else:
            residuals = {
                k: np.abs(target_data[k] - dependent_data[k]) * self.weights[k]
                for k in target_data
            }

        return self.reduce(residuals)

LogLoss

Bases: Loss

Takes the elementwise logarithm of predicted input data

Used in combination with maximum likelihood methods

Returns the negative of the reduction, as it is used in combination with minimizers rather than maximizers.

TODO move minus sign to objective

Source code in slimfit/loss.py
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
class LogLoss(Loss):
    """Takes the elementwise logarithm of the predicted input data.

    Used in combination with maximum likelihood methods.

    Returns the negative of the reduction, as it is used in combination with
    minimizers rather than maximizers.
    #TODO move minus sign to objective
    """

    def __init__(
        self,
        weights: Optional[dict[str, npt.ArrayLike]] = None,
        reduction: Literal["mean", "sum", "concat"] = "sum",
    ):
        """
        Args:
            weights: Optional per-dataset weights, multiplied with the model
                values before the logarithm is taken.
            reduction: Reduction strategy; one of 'mean', 'sum', 'concat'.

        Raises:
            ValueError: If an unsupported reduction is requested.
        """
        if reduction not in ["mean", "sum", "concat"]:
            raise ValueError(
                f"LogLoss does not support reduction {reduction!r}, only 'mean', 'sum', 'concat'"
            )
        super().__init__(weights, reduction)

    def __call__(
        self, y_data: dict[str, np.ndarray], y_model: dict[str, np.ndarray]
    ) -> np.ndarray | float:
        if self.weights is None:
            log_vals = {
                k: np.log(np.clip(y_model[k], a_min=MIN_PROB, a_max=None)) for k in y_model
            }
        else:
            # Fix: clip before taking the log, consistent with the unweighted
            # branch; otherwise zero model probabilities produce -inf / NaN.
            # NOTE(review): weighting as log(y * w) only shifts the loss by a
            # constant per point; the conventional weighted log-likelihood is
            # w * log(y) — confirm the intended semantics.
            log_vals = {
                k: np.log(np.clip(y_model[k] * self.weights[k], a_min=MIN_PROB, a_max=None))
                for k in y_model
            }

        return -self.reduce(log_vals)

LogSumLoss

Bases: Loss

Sums by specified axis, then takes elementwise log, then applies reduction method

Used in combination with maximum likelihood methods

Example

sum along axis 1, then takes elementwise log, then sums the result

LogSumLoss(sum_axis=1, reduction='sum')

Returns the negative of the reduction, as it is used in combination with minimizers rather than maximizers.

Source code in slimfit/loss.py
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
class LogSumLoss(Loss):
    """Sums along the specified axis, then takes the elementwise log, then
    applies the reduction method.

    Used in combination with maximum likelihood methods.

    Example:
        # sum along axis 1, then takes elementwise log, then sums the result
        LogSumLoss(sum_axis=1, reduction='sum')

    Returns the negative of the reduction, as it is used in combination with
    minimizers rather than maximizers.
    """

    def __init__(
        self,
        weights: Optional[dict[str, npt.ArrayLike]] = None,
        sum_axis: Optional[int] = 1,
        reduction: Literal["mean", "sum", "concat"] = "sum",
    ):
        """
        Args:
            weights: Optional per-dataset weights, applied after the axis sum.
            sum_axis: Axis to sum over before taking the logarithm.
            reduction: Reduction strategy; one of 'mean', 'sum', 'concat'.

        Raises:
            ValueError: If an unsupported reduction is requested.
        """
        self.sum_axis = sum_axis
        if reduction not in ["mean", "sum", "concat"]:
            raise ValueError(
                f"LogSumLoss does not support reduction {reduction!r}, only 'mean', 'sum', 'concat'"
            )
        super().__init__(weights, reduction)

    def __call__(
        self, y_data: dict[str, np.ndarray], y_model: dict[str, np.ndarray]
    ) -> np.ndarray | float:
        if self.weights is None:
            log_vals = {
                k: np.log(np.clip(y_model[k].sum(axis=self.sum_axis), a_min=MIN_PROB, a_max=None))
                for k in y_model
            }
        else:
            # Fix: clip before the log, consistent with the unweighted branch;
            # otherwise zero-probability sums produce -inf / NaN. (The original
            # left a malformed, commented-out attempt at exactly this clip.)
            log_vals = {
                k: np.log(
                    np.clip(
                        y_model[k].sum(axis=self.sum_axis) * self.weights[k],
                        a_min=MIN_PROB,
                        a_max=None,
                    )
                )
                for k in y_model
            }

        return -self.reduce(log_vals)

Loss

Bases: object

Loss function base class.

Parameters:

Name Type Description Default
weights Optional[dict[str, ArrayLike]]

Optional dictionary of weights for each data point. Must match ydata in shape.

None
reduction Literal['mean', 'sum', 'concat', 'none', None]

Reduction strategy to use. Defaults to "mean".

'sum'

Attributes:

Name Type Description
reduce ReductionStrategy

Callable that reduces the loss values.

Source code in slimfit/loss.py
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
class Loss(object):
    """
    Loss function base class.

    Args:
        weights: Optional dictionary of weights for each data point. Must match `ydata` in shape.
        reduction: Reduction strategy to use. Defaults to "mean".

    Attributes:
        reduce: Callable that reduces the loss values.

    """

    def __init__(
        self,
        weights: Optional[dict[str, npt.ArrayLike]] = None,
        reduction: Literal["mean", "sum", "concat", "none", None] = "sum",
    ):
        self.weights = weights
        if reduction == "mean":
            self.reduce: ReductionStrategy = mean_reduction
        elif reduction == "sum":
            self.reduce: ReductionStrategy = sum_reduction
        elif reduction == "concat":
            self.reduce: ReductionStrategy = concat_reduction
        elif reduction in [None, "none"]:
            self.reduce = lambda x: x

    @abc.abstractmethod
    def __call__(
        self, y_data: dict[str, np.ndarray], y_model: dict[str, np.ndarray]
    ) -> np.ndarray | float:
        ...

SELoss

Bases: Loss

Squared error loss

Source code in slimfit/loss.py
86
87
88
89
90
91
92
93
94
95
96
97
98
99
class SELoss(Loss):
    """Squared error loss."""

    def __call__(
        self, y_data: dict[str, np.ndarray], y_model: dict[str, np.ndarray]
    ) -> np.ndarray | float:
        """Return the reduced squared residuals, optionally weighted before squaring."""
        if self.weights is None:
            squared = {name: (y_model[name] - y_data[name]) ** 2 for name in y_model}
        else:
            squared = {}
            for name in y_model:
                # Weights scale the residual before squaring (chi-squared style).
                weighted_residual = (y_model[name] - y_data[name]) * self.weights[name]
                squared[name] = weighted_residual ** 2

        return self.reduce(squared)