
bethgelab / foolbox / 8137716344

22 Jan 2024 10:53PM UTC · coverage: 98.47% (unchanged)
Build 8137716344 · push · github · web-flow
Bump pillow from 10.1.0 to 10.2.0 in /tests (#718)

Bumps [pillow](https://github.com/python-pillow/Pillow) from 10.1.0 to 10.2.0.
- [Release notes](https://github.com/python-pillow/Pillow/releases)
- [Changelog](https://github.com/python-pillow/Pillow/blob/main/CHANGES.rst)
- [Commits](https://github.com/python-pillow/Pillow/compare/10.1.0...10.2.0)

---
updated-dependencies:
- dependency-name: pillow
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

3475 of 3529 relevant lines covered (98.47%)

7.22 hits per line

Source File: /foolbox/attacks/gradient_descent_base.py (98.6% covered)
Uncovered lines: the body of Optimizer.__init__ and the "unsupported criterion" branch in BaseGradientDescent.run.
from typing import Union, Any, Optional, Callable, Tuple
from abc import ABC, abstractmethod
import eagerpy as ep

from ..devutils import flatten
from ..devutils import atleast_kd

from ..types import Bounds

from ..models.base import Model

from ..criteria import Misclassification, TargetedMisclassification

from ..distances import l1, l2, linf

from .base import FixedEpsilonAttack
from .base import T
from .base import get_criterion
from .base import raise_if_kwargs
from .base import verify_input_bounds


class Optimizer(ABC):
    def __init__(self, x: ep.Tensor):
        pass

    @abstractmethod
    def __call__(self, gradient: ep.Tensor) -> ep.Tensor:
        pass


class AdamOptimizer(Optimizer):
    def __init__(
        self,
        x: ep.Tensor,
        stepsize: float,
        beta1: float = 0.9,
        beta2: float = 0.999,
        epsilon: float = 1e-8,
    ):

        self.stepsize = stepsize
        self.beta1 = beta1
        self.beta2 = beta2
        self.epsilon = epsilon

        self.m = ep.zeros_like(x)
        self.v = ep.zeros_like(x)
        self.t = 0

    def __call__(self, gradient: ep.Tensor) -> ep.Tensor:
        self.t += 1

        self.m = self.beta1 * self.m + (1 - self.beta1) * gradient
        self.v = self.beta2 * self.v + (1 - self.beta2) * gradient**2

        bias_correction_1 = 1 - self.beta1**self.t
        bias_correction_2 = 1 - self.beta2**self.t

        m_hat = self.m / bias_correction_1
        v_hat = self.v / bias_correction_2

        return self.stepsize * m_hat / (ep.sqrt(v_hat) + self.epsilon)


class GDOptimizer(Optimizer):
    def __init__(self, x: ep.Tensor, stepsize: float):
        self.stepsize = stepsize

    def __call__(
        self,
        gradient: ep.Tensor,
    ) -> ep.Tensor:
        return self.stepsize * gradient


class BaseGradientDescent(FixedEpsilonAttack, ABC):
    def __init__(
        self,
        *,
        rel_stepsize: float,
        abs_stepsize: Optional[float] = None,
        steps: int,
        random_start: bool,
    ):
        self.rel_stepsize = rel_stepsize
        self.abs_stepsize = abs_stepsize
        self.steps = steps
        self.random_start = random_start

    def get_loss_fn(
        self, model: Model, labels: ep.Tensor
    ) -> Callable[[ep.Tensor], ep.Tensor]:
        # can be overridden by users
        def loss_fn(inputs: ep.Tensor) -> ep.Tensor:
            logits = model(inputs)
            return ep.crossentropy(logits, labels).sum()

        return loss_fn

    def get_optimizer(self, x: ep.Tensor, stepsize: float) -> Optimizer:
        # can be overridden by users
        return GDOptimizer(x, stepsize)

    def value_and_grad(
        # can be overridden by users
        self,
        loss_fn: Callable[[ep.Tensor], ep.Tensor],
        x: ep.Tensor,
    ) -> Tuple[ep.Tensor, ep.Tensor]:
        return ep.value_and_grad(loss_fn, x)

    def run(
        self,
        model: Model,
        inputs: T,
        criterion: Union[Misclassification, TargetedMisclassification, T],
        *,
        epsilon: float,
        **kwargs: Any,
    ) -> T:
        raise_if_kwargs(kwargs)
        x0, restore_type = ep.astensor_(inputs)
        criterion_ = get_criterion(criterion)
        del inputs, criterion, kwargs

        verify_input_bounds(x0, model)

        # perform a gradient ascent (untargeted attack) or descent (targeted attack)
        if isinstance(criterion_, Misclassification):
            gradient_step_sign = 1.0
            classes = criterion_.labels
        elif hasattr(criterion_, "target_classes"):
            gradient_step_sign = -1.0
            classes = criterion_.target_classes
        else:
            raise ValueError("unsupported criterion")

        loss_fn = self.get_loss_fn(model, classes)

        if self.abs_stepsize is None:
            stepsize = self.rel_stepsize * epsilon
        else:
            stepsize = self.abs_stepsize

        optimizer = self.get_optimizer(x0, stepsize)

        if self.random_start:
            x = self.get_random_start(x0, epsilon)
            x = ep.clip(x, *model.bounds)
        else:
            x = x0

        for _ in range(self.steps):
            _, gradients = self.value_and_grad(loss_fn, x)
            gradients = self.normalize(gradients, x=x, bounds=model.bounds)
            x = x + gradient_step_sign * optimizer(gradients)
            x = self.project(x, x0, epsilon)
            x = ep.clip(x, *model.bounds)

        return restore_type(x)

    @abstractmethod
    def get_random_start(self, x0: ep.Tensor, epsilon: float) -> ep.Tensor:
        ...

    @abstractmethod
    def normalize(
        self, gradients: ep.Tensor, *, x: ep.Tensor, bounds: Bounds
    ) -> ep.Tensor:
        ...

    @abstractmethod
    def project(self, x: ep.Tensor, x0: ep.Tensor, epsilon: float) -> ep.Tensor:
        ...


def clip_lp_norms(x: ep.Tensor, *, norm: float, p: float) -> ep.Tensor:
    assert 0 < p < ep.inf
    norms = flatten(x).norms.lp(p=p, axis=-1)
    norms = ep.maximum(norms, 1e-12)  # avoid division by zero
    factor = ep.minimum(1, norm / norms)  # clipping -> decreasing but not increasing
    factor = atleast_kd(factor, x.ndim)
    return x * factor


def normalize_lp_norms(x: ep.Tensor, *, p: float) -> ep.Tensor:
    assert 0 < p < ep.inf
    norms = flatten(x).norms.lp(p=p, axis=-1)
    norms = ep.maximum(norms, 1e-12)  # avoid division by zero
    factor = 1 / norms
    factor = atleast_kd(factor, x.ndim)
    return x * factor


def uniform_l1_n_balls(dummy: ep.Tensor, batch_size: int, n: int) -> ep.Tensor:
    # https://mathoverflow.net/a/9188
    u = ep.uniform(dummy, (batch_size, n))
    v = u.sort(axis=-1)
    vp = ep.concatenate([ep.zeros(v, (batch_size, 1)), v[:, : n - 1]], axis=-1)
    assert v.shape == vp.shape
    x = v - vp
    sign = ep.uniform(dummy, (batch_size, n), low=-1.0, high=1.0).sign()
    return sign * x


def uniform_l2_n_spheres(dummy: ep.Tensor, batch_size: int, n: int) -> ep.Tensor:
    x = ep.normal(dummy, (batch_size, n + 1))
    r = x.norms.l2(axis=-1, keepdims=True)
    s = x / r
    return s


def uniform_l2_n_balls(dummy: ep.Tensor, batch_size: int, n: int) -> ep.Tensor:
    """Sampling from the n-ball

    Implementation of the algorithm proposed by Voelker et al. [#Voel17]_

    References:
        .. [#Voel17] Voelker et al., 2017, Efficiently sampling vectors and coordinates
            from the n-sphere and n-ball
            http://compneuro.uwaterloo.ca/files/publications/voelker.2017.pdf
    """
    s = uniform_l2_n_spheres(dummy, batch_size, n + 1)
    b = s[:, :n]
    return b


class L1BaseGradientDescent(BaseGradientDescent):
    distance = l1

    def get_random_start(self, x0: ep.Tensor, epsilon: float) -> ep.Tensor:
        batch_size, n = flatten(x0).shape
        r = uniform_l1_n_balls(x0, batch_size, n).reshape(x0.shape)
        return x0 + epsilon * r

    def normalize(
        self, gradients: ep.Tensor, *, x: ep.Tensor, bounds: Bounds
    ) -> ep.Tensor:
        return normalize_lp_norms(gradients, p=1)

    def project(self, x: ep.Tensor, x0: ep.Tensor, epsilon: float) -> ep.Tensor:
        return x0 + clip_lp_norms(x - x0, norm=epsilon, p=1)


class L2BaseGradientDescent(BaseGradientDescent):
    distance = l2

    def get_random_start(self, x0: ep.Tensor, epsilon: float) -> ep.Tensor:
        batch_size, n = flatten(x0).shape
        r = uniform_l2_n_balls(x0, batch_size, n).reshape(x0.shape)
        return x0 + epsilon * r

    def normalize(
        self, gradients: ep.Tensor, *, x: ep.Tensor, bounds: Bounds
    ) -> ep.Tensor:
        return normalize_lp_norms(gradients, p=2)

    def project(self, x: ep.Tensor, x0: ep.Tensor, epsilon: float) -> ep.Tensor:
        return x0 + clip_lp_norms(x - x0, norm=epsilon, p=2)


class LinfBaseGradientDescent(BaseGradientDescent):
    distance = linf

    def get_random_start(self, x0: ep.Tensor, epsilon: float) -> ep.Tensor:
        return x0 + ep.uniform(x0, x0.shape, -epsilon, epsilon)

    def normalize(
        self, gradients: ep.Tensor, *, x: ep.Tensor, bounds: Bounds
    ) -> ep.Tensor:
        return gradients.sign()

    def project(self, x: ep.Tensor, x0: ep.Tensor, epsilon: float) -> ep.Tensor:
        return x0 + ep.clip(x - x0, -epsilon, epsilon)
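
The three concrete subclasses above back foolbox's fixed-epsilon projected-gradient attacks; for instance, LinfPGD derives from LinfBaseGradientDescent and exposes the rel_stepsize / abs_stepsize / steps / random_start knobs of BaseGradientDescent. A minimal usage sketch, assuming foolbox 3.x with a PyTorch backend; the toy model, the dummy batch, and the epsilon value are illustrative only:

import torch
import foolbox as fb

# Illustrative classifier and data; any model whose inputs live inside `bounds` works.
model = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(28 * 28, 10)).eval()
fmodel = fb.PyTorchModel(model, bounds=(0, 1))

images = torch.rand(8, 1, 28, 28)     # batch already inside the model bounds
labels = torch.randint(0, 10, (8,))   # plain labels; wrapped into a criterion below

# LinfPGD forwards these keyword-only arguments to BaseGradientDescent.__init__.
attack = fb.attacks.LinfPGD(rel_stepsize=0.01 / 0.3, steps=40, random_start=True)
raw, clipped, success = attack(fmodel, images, labels, epsilons=0.03)
print(success.float().mean().item())  # fraction of inputs successfully misclassified

Passing plain integer labels works because get_criterion wraps them into a Misclassification criterion; supplying fb.criteria.TargetedMisclassification(target_classes) instead flips gradient_step_sign in run and turns the same loop into a targeted attack.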