bethgelab / foolbox, build 8139142615 (push via GitHub, committer web-flow)
04 Mar 2024 11:04AM UTC. Coverage: 98.477% (unchanged); 3492 of 3546 relevant lines covered (98.48%), 7.22 hits per line.
Commit: Fix guide compilation (#722) (replace yarn with npm)

Source file: /foolbox/attacks/fast_minimum_norm.py (file coverage: 92.71%)
Per-line hit counts are omitted below; the uncovered lines are the init-attack and start-from-x0 branches, the defensive error paths, and the get_random_start helpers.
import math
from abc import abstractmethod, ABC
from typing import Union, Optional, Any, Tuple

import eagerpy as ep
from eagerpy.astensor import T

from .base import MinimizationAttack, raise_if_kwargs, get_criterion, get_is_adversarial
from .gradient_descent_base import (
    uniform_l1_n_balls,
    normalize_lp_norms,
    uniform_l2_n_balls,
)
from .. import Model, Misclassification, TargetedMisclassification
from ..devutils import atleast_kd, flatten
from ..distances import l1, linf, l2, l0, LpDistance

ps = {l0: 0, l1: 1, l2: 2, linf: ep.inf, LpDistance: ep.nan}
duals = {l0: ep.nan, l1: ep.inf, l2: 2, linf: 1, LpDistance: ep.nan}
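
# Note (editorial): `ps` maps each distance to its Lp exponent, and `duals`
# maps it to the exponent q of the Hölder-dual norm (1/p + 1/q = 1). run()
# divides the logit margin by the dual norm of the gradient to obtain a
# first-order estimate of the distance to the decision boundary.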


def best_other_classes(logits: ep.Tensor, exclude: ep.Tensor) -> ep.Tensor:
    # mask out the excluded class with a -inf logit, then take the argmax
    other_logits = logits - ep.onehot_like(logits, exclude, value=ep.inf)
    return other_logits.argmax(axis=-1)
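

# For instance (illustrative): logits [[1.0, 3.0, 2.0]] with exclude [1]
# yield [2], the best-scoring class once class 1 is masked out.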


def project_onto_l1_ball(x: ep.Tensor, eps: ep.Tensor) -> ep.Tensor:
    """Computes the Euclidean projection onto the L1 ball for a batch. [Duchi08]_

    Adapted from the PyTorch version by Tony Duan:
    https://gist.github.com/tonyduan/1329998205d88c566588e57e3e2c0c55

    Args:
        x: Batch of arbitrary-size tensors to project, possibly on GPU
        eps: radius of the L1 ball to project onto

    References:
      .. [Duchi08] Efficient Projections onto the l1-Ball for Learning in High Dimensions
         John Duchi, Shai Shalev-Shwartz, Yoram Singer, and Tushar Chandra.
         International Conference on Machine Learning (ICML 2008)
    """
    original_shape = x.shape
    x = flatten(x)
    mask = (ep.norms.l1(x, axis=1) <= eps).astype(x.dtype).expand_dims(1)
    mu = ep.flip(ep.sort(ep.abs(x)), axis=-1).astype(x.dtype)
    cumsum = ep.cumsum(mu, axis=-1)
    arange = ep.arange(x, 1, x.shape[1] + 1).astype(x.dtype)
    rho = (
        ep.max(
            ((mu * arange > (cumsum - eps.expand_dims(1)))).astype(x.dtype) * arange,
            axis=-1,
        )
        - 1
    )
    # clamp rho to 0 so the gather below stays a valid index; samples already
    # inside the ball are returned unchanged via `mask` regardless
    rho = ep.maximum(rho, 0)
    theta = (
        cumsum[ep.arange(x, x.shape[0]), rho.astype(ep.arange(x, 1).dtype)] - eps
    ) / (rho + 1.0)
    proj = (ep.abs(x) - theta.expand_dims(1)).clip(min_=0, max_=ep.inf)
    x = mask * x + (1 - mask) * proj * ep.sign(x)
    return x.reshape(original_shape)
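

# Editorial sketch, not part of the library: a small demonstration of the
# projection on a NumPy-backed eagerpy batch. The helper name
# `_l1_projection_demo` is hypothetical and exists only for illustration.
def _l1_projection_demo() -> None:
    import numpy as np

    x = ep.astensor(np.array([[0.5, -0.2, 0.1], [2.0, -2.0, 1.0]]))
    eps = ep.astensor(np.array([1.0, 1.0]))
    projected = project_onto_l1_ball(x, eps)
    # row 0 has L1 norm 0.8 <= 1 and is returned unchanged; row 1 is
    # soft-thresholded to [0.5, -0.5, 0.0], whose L1 norm equals eps = 1
    print(projected.numpy())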


class FMNAttackLp(MinimizationAttack, ABC):
    """The Fast Minimum Norm adversarial attack, in Lp norm. [Pintor21]_

    Args:
        steps: Number of iterations.
        max_stepsize: Initial stepsize for the gradient update.
        min_stepsize: Final stepsize for the gradient update. The
            stepsize is reduced with a cosine annealing policy.
        gamma: Initial stepsize for the epsilon update. It is
            decayed with cosine annealing down to 0.001.
        init_attack: Optional initial attack. If an initial attack
            is specified (or starting points are provided to run), the
            attack first searches for the decision boundary between the
            original inputs and the adversarial starting points.
        binary_search_steps: Number of binary search steps used to
            refine the boundary point. Ignored if neither an initial
            attack nor adversarial starting points are provided.
    """

    def __init__(
        self,
        *,
        steps: int = 100,
        max_stepsize: float = 1.0,
        min_stepsize: Optional[float] = None,
        gamma: float = 0.05,
        init_attack: Optional[MinimizationAttack] = None,
        binary_search_steps: int = 10,
    ):
        self.steps = steps
        self.max_stepsize = max_stepsize
        self.init_attack = init_attack
        if min_stepsize is not None:
            self.min_stepsize = min_stepsize
        else:
            self.min_stepsize = max_stepsize / 100
        self.binary_search_steps = binary_search_steps
        self.gamma = gamma

        # each concrete subclass binds `distance`; p and its dual follow from it
        self.p = ps[self.distance]
        self.dual = duals[self.distance]
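
    # Editorial note: run() anneals the stepsize as
    #   min_stepsize + (max_stepsize - min_stepsize) * (1 + cos(pi * i / steps)) / 2.
    # For example, with max_stepsize=1.0, min_stepsize=0.01 and steps=100 the
    # stepsize is 1.0 at i=0, 0.505 at i=50, and about 0.0102 at i=99. gamma
    # follows the same schedule with a floor of 0.001.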

    def run(
        self,
        model: Model,
        inputs: T,
        criterion: Union[Misclassification, TargetedMisclassification, T],
        *,
        starting_points: Optional[ep.Tensor] = None,
        early_stop: Optional[float] = None,
        **kwargs: Any,
    ) -> T:
        raise_if_kwargs(kwargs)
        criterion_ = get_criterion(criterion)

        if isinstance(criterion_, Misclassification):
            targeted = False
            classes = criterion_.labels
        elif isinstance(criterion_, TargetedMisclassification):
            targeted = True
            classes = criterion_.target_classes
        else:
            raise ValueError("unsupported criterion")

        def loss_fn(
            inputs: ep.Tensor, labels: ep.Tensor
        ) -> Tuple[ep.Tensor, Tuple[ep.Tensor, ep.Tensor]]:
            logits = model(inputs)

            # the logit margin: negative once the sample is adversarial
            if targeted:
                c_minimize = best_other_classes(logits, labels)
                c_maximize = labels  # target_classes
            else:
                c_minimize = labels  # labels
                c_maximize = best_other_classes(logits, labels)

            # `rows` is defined in the enclosing scope before the first call
            loss = logits[rows, c_minimize] - logits[rows, c_maximize]

            return -loss.sum(), (logits, loss)
        x, restore_type = ep.astensor_(inputs)
        del inputs, criterion, kwargs
        N = len(x)
        initialized = False
        # start from initialization points/attack
        if starting_points is not None:
            x1 = starting_points
            initialized = True
        else:
            if self.init_attack is not None:
                x1 = self.init_attack.run(model, x, criterion_)
                initialized = True

        # if initial points or an initialization attack are provided,
        # binary-search for the decision boundary between x and x1
        if initialized:
            is_adv = get_is_adversarial(criterion_, model)
            # starting points must already be adversarial
            assert is_adv(x1).all()
            lower_bound = ep.zeros(x, shape=(N,))
            upper_bound = ep.ones(x, shape=(N,))
            for _ in range(self.binary_search_steps):
                epsilons = (lower_bound + upper_bound) / 2
                mid_points = self.mid_points(x, x1, epsilons, model.bounds)
                is_advs = is_adv(mid_points)
                # shrink the interval toward x while staying adversarial
                lower_bound = ep.where(is_advs, lower_bound, epsilons)
                upper_bound = ep.where(is_advs, epsilons, upper_bound)
            starting_points = self.mid_points(x, x1, upper_bound, model.bounds)
            delta = starting_points - x
        else:
            # start from x0
            delta = ep.zeros_like(x)

        if classes.shape != (N,):
            name = "target_classes" if targeted else "labels"
            raise ValueError(
                f"expected {name} to have shape ({N},), got {classes.shape}"
            )

        min_, max_ = model.bounds
        rows = range(N)
        grad_and_logits = ep.value_and_grad_fn(x, loss_fn, has_aux=True)

        if self.p != 0:
            epsilon = ep.inf * ep.ones(x, len(x))
        else:
            epsilon = ep.maximum(
                ep.ones(x, len(x)), ep.norms.l0(flatten(delta), axis=-1)
            )
        # the largest possible perturbation norm, used to clip epsilon
        if self.p != 0:
            worst_norm = ep.norms.lp(
                flatten(ep.maximum(x - min_, max_ - x)), p=self.p, axis=-1
            )
        else:
            worst_norm = flatten(ep.ones_like(x)).bool().sum(axis=1).float32()

        best_lp = worst_norm
        best_delta = delta
        adv_found = ep.zeros(x, len(x)).bool()

        for i in range(self.steps):
            # perform cosine annealing of the learning rates
            stepsize = (
                self.min_stepsize
                + (self.max_stepsize - self.min_stepsize)
                * (1 + math.cos(math.pi * i / self.steps))
                / 2
            )
            gamma = (
                0.001
                + (self.gamma - 0.001) * (1 + math.cos(math.pi * (i / self.steps))) / 2
            )

            x_adv = x + delta

            loss, (logits, loss_batch), gradients = grad_and_logits(x_adv, classes)
            is_adversarial = criterion_(x_adv, logits)
            lp = ep.norms.lp(flatten(delta), p=self.p, axis=-1)
            is_smaller = lp <= best_lp
            is_both = ep.logical_and(is_adversarial, is_smaller)
            adv_found = ep.logical_or(adv_found, is_adversarial)
            best_lp = ep.where(is_both, lp, best_lp)
            best_delta = ep.where(atleast_kd(is_both, x.ndim), delta, best_delta)

            # update epsilon
            if self.p != 0:
                # first-order estimate of the distance to the decision
                # boundary: |margin| divided by the dual norm of the gradient
                distance_to_boundary = abs(loss_batch) / ep.norms.lp(
                    flatten(gradients), p=self.dual, axis=-1
                )
                # shrink epsilon where the sample is adversarial; otherwise
                # grow it (or step toward the estimated boundary if no
                # adversarial has been found yet)
                epsilon = ep.where(
                    is_adversarial,
                    ep.minimum(
                        epsilon * (1 - gamma),
                        ep.norms.lp(flatten(best_delta), p=self.p, axis=-1),
                    ),
                    ep.where(
                        adv_found,
                        epsilon * (1 + gamma),
                        ep.norms.lp(flatten(delta), p=self.p, axis=-1)
                        + distance_to_boundary,
                    ),
                )
            else:
                # for L0, epsilon counts features and must change by at least 1
                epsilon = ep.where(
                    is_adversarial,
                    ep.minimum(
                        ep.minimum(
                            epsilon - 1,
                            (epsilon * (1 - gamma))
                            .astype(ep.arange(x, 1).dtype)
                            .astype(epsilon.dtype),
                        ),
                        ep.norms.lp(flatten(best_delta), p=self.p, axis=-1),
                    ),
                    ep.maximum(
                        epsilon + 1,
                        (epsilon * (1 + gamma))
                        .astype(ep.arange(x, 1).dtype)
                        .astype(epsilon.dtype),
                    ),
                )
                epsilon = ep.maximum(1, epsilon).astype(epsilon.dtype)

            # clip epsilon to the largest possible norm
            epsilon = ep.minimum(epsilon, worst_norm)

            # compute the normalized gradient update
            grad_ = self.normalize(gradients) * stepsize

            # do step
            delta = delta + grad_

            # project onto the epsilon-ball of the given norm
            delta = self.project(x=x + delta, x0=x, epsilon=epsilon) - x

            # clip to valid bounds
            delta = ep.clip(x + delta, *model.bounds) - x

        x_adv = x + best_delta
        return restore_type(x_adv)

    def normalize(self, gradients: ep.Tensor) -> ep.Tensor:
        # gradient direction normalized to unit L2 norm per sample
        return normalize_lp_norms(gradients, p=2)

    @abstractmethod
    def project(self, x: ep.Tensor, x0: ep.Tensor, epsilon: ep.Tensor) -> ep.Tensor:
        ...

    @abstractmethod
    def mid_points(
        self,
        x0: ep.Tensor,
        x1: ep.Tensor,
        epsilons: ep.Tensor,
        bounds: Tuple[float, float],
    ) -> ep.Tensor:
        raise NotImplementedError


class L1FMNAttack(FMNAttackLp):
    """The Fast Minimum Norm adversarial attack, in the L1 norm. [Pintor21]_

    Args:
        steps: Number of iterations.
        max_stepsize: Initial stepsize for the gradient update.
        min_stepsize: Final stepsize for the gradient update. The
            stepsize is reduced with a cosine annealing policy.
        gamma: Initial stepsize for the epsilon update. It is
            decayed with cosine annealing down to 0.001.
        init_attack: Optional initial attack. If an initial attack
            is specified (or starting points are provided to run), the
            attack first searches for the decision boundary between the
            original inputs and the adversarial starting points.
        binary_search_steps: Number of binary search steps used to
            refine the boundary point. Ignored if neither an initial
            attack nor adversarial starting points are provided.

    References:
        .. [Pintor21] Maura Pintor, Fabio Roli, Wieland Brendel,
            Battista Biggio, "Fast Minimum-norm Adversarial
            Attacks through Adaptive Norm Constraints."
            arXiv preprint arXiv:2102.12827 (2021).
            https://arxiv.org/abs/2102.12827
    """

    distance = l1
    p = 1
    dual = ep.inf

    def get_random_start(self, x0: ep.Tensor, epsilon: float) -> ep.Tensor:
        batch_size, n = flatten(x0).shape
        r = uniform_l1_n_balls(x0, batch_size, n).reshape(x0.shape)
        return x0 + epsilon * r

    def project(self, x: ep.Tensor, x0: ep.Tensor, epsilon: ep.Tensor) -> ep.Tensor:
        return x0 + project_onto_l1_ball(x - x0, epsilon)

    def mid_points(
        self,
        x0: ep.Tensor,
        x1: ep.Tensor,
        epsilons: ep.Tensor,
        bounds: Tuple[float, float],
    ) -> ep.Tensor:
        # returns a point between x0 and x1, where epsilon = 0 returns x0
        # and epsilon = 1 returns x1

        # get epsilons in the right shape for broadcasting
        epsilons = epsilons.reshape(epsilons.shape + (1,) * (x0.ndim - 1))

        threshold = (bounds[1] - bounds[0]) * (1 - epsilons)
        mask = (x1 - x0).abs() > threshold
        new_x = ep.where(
            mask, x0 + (x1 - x0).sign() * ((x1 - x0).abs() - threshold), x0
        )
        return new_x


class L2FMNAttack(FMNAttackLp):
    """The Fast Minimum Norm adversarial attack, in the L2 norm. [Pintor21]_

    Args:
        steps: Number of iterations.
        max_stepsize: Initial stepsize for the gradient update.
        min_stepsize: Final stepsize for the gradient update. The
            stepsize is reduced with a cosine annealing policy.
        gamma: Initial stepsize for the epsilon update. It is
            decayed with cosine annealing down to 0.001.
        init_attack: Optional initial attack. If an initial attack
            is specified (or starting points are provided to run), the
            attack first searches for the decision boundary between the
            original inputs and the adversarial starting points.
        binary_search_steps: Number of binary search steps used to
            refine the boundary point. Ignored if neither an initial
            attack nor adversarial starting points are provided.
    """

    distance = l2
    p = 2
    dual = 2

    def get_random_start(self, x0: ep.Tensor, epsilon: float) -> ep.Tensor:
        batch_size, n = flatten(x0).shape
        r = uniform_l2_n_balls(x0, batch_size, n).reshape(x0.shape)
        return x0 + epsilon * r

    def project(self, x: ep.Tensor, x0: ep.Tensor, epsilon: ep.Tensor) -> ep.Tensor:
        # rescale the perturbation so its L2 norm is at most epsilon
        norms = flatten(x - x0).norms.l2(axis=-1)
        norms = ep.maximum(norms, 1e-12)  # avoid division by zero
        factor = ep.minimum(1, epsilon / norms)  # only shrink, never enlarge
        factor = atleast_kd(factor, x.ndim)
        return x0 + (x - x0) * factor

    def mid_points(
        self,
        x0: ep.Tensor,
        x1: ep.Tensor,
        epsilons: ep.Tensor,
        bounds: Tuple[float, float],
    ) -> ep.Tensor:
        # returns a point between x0 and x1, where epsilon = 0 returns x0
        # and epsilon = 1 returns x1: linear interpolation

        # get epsilons in the right shape for broadcasting
        epsilons = epsilons.reshape(epsilons.shape + (1,) * (x0.ndim - 1))
        return epsilons * x1 + (1 - epsilons) * x0
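

# For instance (illustrative): in L2FMNAttack.project, a perturbation of norm
# 4.0 with epsilon 2.0 is scaled by 0.5, while one of norm 1.0 is returned
# unchanged, since the factor is capped at 1.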


class LInfFMNAttack(FMNAttackLp):
    """The Fast Minimum Norm adversarial attack, in the L-infinity norm. [Pintor21]_

    Args:
        steps: Number of iterations.
        max_stepsize: Initial stepsize for the gradient update.
        min_stepsize: Final stepsize for the gradient update. The
            stepsize is reduced with a cosine annealing policy.
        gamma: Initial stepsize for the epsilon update. It is
            decayed with cosine annealing down to 0.001.
        init_attack: Optional initial attack. If an initial attack
            is specified (or starting points are provided to run), the
            attack first searches for the decision boundary between the
            original inputs and the adversarial starting points.
        binary_search_steps: Number of binary search steps used to
            refine the boundary point. Ignored if neither an initial
            attack nor adversarial starting points are provided.
    """

    distance = linf
    p = ep.inf
    dual = 1

    def get_random_start(self, x0: ep.Tensor, epsilon: float) -> ep.Tensor:
        return x0 + ep.uniform(x0, x0.shape, -epsilon, epsilon)

    def project(self, x: ep.Tensor, x0: ep.Tensor, epsilon: ep.Tensor) -> ep.Tensor:
        # transpose so that the per-sample epsilon broadcasts over features
        clipped = ep.maximum(flatten(x - x0).T, -epsilon)
        clipped = ep.minimum(clipped, epsilon).T
        return x0 + clipped.reshape(x0.shape)

    def mid_points(
        self,
        x0: ep.Tensor,
        x1: ep.Tensor,
        epsilons: ep.Tensor,
        bounds: Tuple[float, float],
    ) -> ep.Tensor:
        # returns a point between x0 and x1, where epsilon = 0 returns x0
        # and epsilon = 1 returns x1
        delta = x1 - x0
        min_, max_ = bounds
        s = max_ - min_
        # get epsilons in the right shape for broadcasting
        epsilons = epsilons.reshape(epsilons.shape + (1,) * (x0.ndim - 1))

        clipped_delta = ep.where(delta < -epsilons * s, -epsilons * s, delta)
        clipped_delta = ep.where(
            clipped_delta > epsilons * s, epsilons * s, clipped_delta
        )
        return x0 + clipped_delta


class L0FMNAttack(FMNAttackLp):
    """The Fast Minimum Norm adversarial attack, in the L0 norm. [Pintor21]_

    Args:
        steps: Number of iterations.
        max_stepsize: Initial stepsize for the gradient update.
        min_stepsize: Final stepsize for the gradient update. The
            stepsize is reduced with a cosine annealing policy.
        gamma: Initial stepsize for the epsilon update. It is
            decayed with cosine annealing down to 0.001.
        init_attack: Optional initial attack. If an initial attack
            is specified (or starting points are provided to run), the
            attack first searches for the decision boundary between the
            original inputs and the adversarial starting points.
        binary_search_steps: Number of binary search steps used to
            refine the boundary point. Ignored if neither an initial
            attack nor adversarial starting points are provided.
    """

    distance = l0
    p = 0

    def project(self, x: ep.Tensor, x0: ep.Tensor, epsilon: ep.Tensor) -> ep.Tensor:
        # hard sparsification: keep only the largest-magnitude entries of the
        # perturbation (those at or above the value at sorted position
        # epsilon), zeroing the rest
        flatten_delta = flatten(x - x0)
        n, d = flatten_delta.shape
        abs_delta = abs(flatten_delta)
        epsilon = epsilon.astype(ep.arange(x, 1).dtype)
        rows = range(n)
        idx_sorted = ep.flip(ep.argsort(abs_delta, axis=1), -1)[rows, epsilon]
        thresholds = (ep.ones_like(flatten_delta).T * abs_delta[rows, idx_sorted]).T
        clipped = ep.where(abs_delta >= thresholds, flatten_delta, 0)
        return x0 + clipped.reshape(x0.shape).astype(x0.dtype)

    def mid_points(
        self,
        x0: ep.Tensor,
        x1: ep.Tensor,
        epsilons: ep.Tensor,
        bounds: Tuple[float, float],
    ) -> ep.Tensor:
        # returns a point between x0 and x1, where epsilon = 0 returns x0
        # and epsilon = 1 returns x1; epsilons here are the fraction of
        # features to keep
        n_features = flatten(ep.ones_like(x0)).bool().sum(axis=1).float32()
        new_x = self.project(x1, x0, n_features * epsilons)
        return new_x
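

if __name__ == "__main__":
    # Editorial usage sketch, not part of the library; in practice this lives
    # in a separate script, since this module uses relative imports. Assumes
    # foolbox >= 3.3 with PyTorch and torchvision installed.
    import torchvision.models as models
    import foolbox as fb

    model = models.resnet18(pretrained=True).eval()  # weights=... on newer torchvision
    preprocessing = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], axis=-3)
    fmodel = fb.PyTorchModel(model, bounds=(0, 1), preprocessing=preprocessing)

    # a small batch of labeled ImageNet samples shipped with foolbox
    images, labels = fb.utils.samples(fmodel, dataset="imagenet", batchsize=8)

    # FMN is a minimization attack: run() returns minimum-norm adversarials,
    # so no fixed epsilon has to be chosen up front
    attack = fb.attacks.L2FMNAttack(steps=100)
    advs = attack.run(fmodel, images, labels)

    perturbation_norms = (advs - images).flatten(start_dim=1).norm(dim=1)
    print("median L2 perturbation:", perturbation_norms.median().item())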