• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

bethgelab / foolbox / 8139141456

04 Mar 2024 11:03AM UTC coverage: 37.923% (-60.6%) from 98.477%
8139141456

Pull #722

github

web-flow
Merge 5663238db into 17e0e9b31
Pull Request #722: Fix guide compilation

1344 of 3544 relevant lines covered (37.92%)

0.38 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

38.46
/foolbox/attacks/sparse_l1_descent_attack.py
1
from typing import Optional
1✔
2
import eagerpy as ep
1✔
3
import numpy as np
1✔
4

5
from ..devutils import flatten
1✔
6
from ..devutils import atleast_kd
1✔
7

8
from ..types import Bounds
1✔
9

10
from .gradient_descent_base import L1BaseGradientDescent
1✔
11
from .gradient_descent_base import normalize_lp_norms
1✔
12

13

14
class SparseL1DescentAttack(L1BaseGradientDescent):
    """Sparse L1 Descent Attack [#Tra19]_.

    Args:
        quantile: Controls the sparsity of the update direction: only the
            gradient entries whose magnitude is at or above this per-sample
            quantile are kept; must be in [0, 1].
        rel_stepsize: Stepsize relative to epsilon.
        abs_stepsize: If given, it takes precedence over rel_stepsize.
        steps : Number of update steps.
        random_start : Controls whether to randomly start within allowed epsilon ball.

    References:
        .. [#Tra19] Florian Tramèr, Dan Boneh, "Adversarial Training and
        Robustness for Multiple Perturbations"
        https://arxiv.org/abs/1904.13000
    """

    def normalize(
        self, gradients: ep.Tensor, *, x: ep.Tensor, bounds: Bounds
    ) -> ep.Tensor:
        """Turn raw gradients into a sparse, unit-L1-norm update direction."""
        # Zero out gradient components that would push x outside the valid
        # input bounds (negative gradient at the lower bound, positive at
        # the upper bound) so the step cannot waste budget there.
        bad_pos = ep.logical_or(
            ep.logical_and(x == bounds.lower, gradients < 0),
            ep.logical_and(x == bounds.upper, gradients > 0),
        )
        gradients = ep.where(bad_pos, ep.zeros_like(gradients), gradients)

        # Keep only entries whose magnitude reaches the per-sample quantile;
        # this is what makes the descent direction sparse.
        abs_gradients = gradients.abs()
        quantiles = np.quantile(
            flatten(abs_gradients).numpy(), q=self.quantile, axis=-1
        )
        keep = abs_gradients >= atleast_kd(
            ep.from_numpy(gradients, quantiles), gradients.ndim
        )
        # Surviving entries contribute only their sign; rescale to unit L1 norm.
        e = ep.where(keep, gradients.sign(), ep.zeros_like(gradients))
        return normalize_lp_norms(e, p=1)

    def project(self, x: ep.Tensor, x0: ep.Tensor, epsilon: float) -> ep.Tensor:
        """Project x onto the L1 ball of radius epsilon centered at x0."""
        # based on https://github.com/ftramer/MultiRobustness/blob/ad41b63235d13b1b2a177c5f270ab9afa74eee69/pgd_attack.py#L110
        delta = flatten(x - x0)
        norms = delta.norms.l1(axis=-1)
        if (norms <= epsilon).all():
            # Every sample is already inside the ball; nothing to project.
            return x

        # Sorting-based simplex/L1-ball projection: find, per sample, the
        # soft-thresholding value theta such that the shrunk delta has L1
        # norm exactly epsilon.
        n, d = delta.shape
        abs_delta = abs(delta)
        mu = -ep.sort(-abs_delta, axis=-1)  # magnitudes in descending order
        cumsums = mu.cumsum(axis=-1)
        js = 1.0 / ep.arange(x, 1, d + 1).astype(x.dtype)
        temp = mu - js * (cumsums - epsilon)
        guarantee_first = ep.arange(x, d).astype(x.dtype) / d
        # guarantee_first are small values (< 1) that we add to the boolean
        # tensor (only 0 and 1) to break the ties and always return the first
        # argmin, i.e. the first value where the boolean tensor is 0
        # (otherwise, this is not guaranteed on GPUs, see e.g. PyTorch)
        rho = ep.argmin((temp > 0).astype(x.dtype) + guarantee_first, axis=-1)
        theta = 1.0 / (1 + rho.astype(x.dtype)) * (cumsums[range(n), rho] - epsilon)
        # Soft-threshold: shrink magnitudes by theta, keeping the signs.
        delta = delta.sign() * ep.maximum(abs_delta - theta[..., ep.newaxis], 0)
        delta = delta.reshape(x.shape)
        return x0 + delta

    def __init__(
        self,
        *,
        quantile: float = 0.99,
        rel_stepsize: float = 0.2,
        abs_stepsize: Optional[float] = None,
        steps: int = 10,
        random_start: bool = False,
    ):
        # Validate before constructing the base class so an invalid quantile
        # never leaves a partially initialized instance behind.
        if not 0 <= quantile <= 1:
            raise ValueError(f"quantile needs to be between 0 and 1, got {quantile}")
        super().__init__(
            rel_stepsize=rel_stepsize,
            abs_stepsize=abs_stepsize,
            steps=steps,
            random_start=random_start,
        )
        self.quantile = quantile
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc