• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

bethgelab / foolbox / 8137716344

22 Jan 2024 10:53PM UTC coverage: 98.47%. Remained the same
8137716344

push

github

web-flow
Bump pillow from 10.1.0 to 10.2.0 in /tests (#718)

Bumps [pillow](https://github.com/python-pillow/Pillow) from 10.1.0 to 10.2.0.
- [Release notes](https://github.com/python-pillow/Pillow/releases)
- [Changelog](https://github.com/python-pillow/Pillow/blob/main/CHANGES.rst)
- [Commits](https://github.com/python-pillow/Pillow/compare/10.1.0...10.2.0)

---
updated-dependencies:
- dependency-name: pillow
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

3475 of 3529 relevant lines covered (98.47%)

7.22 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

97.44
/foolbox/attacks/sparse_l1_descent_attack.py
1
from typing import Optional
10✔
2
import eagerpy as ep
10✔
3
import numpy as np
10✔
4

5
from ..devutils import flatten
10✔
6
from ..devutils import atleast_kd
10✔
7

8
from ..types import Bounds
10✔
9

10
from .gradient_descent_base import L1BaseGradientDescent
10✔
11
from .gradient_descent_base import normalize_lp_norms
10✔
12

13

14
class SparseL1DescentAttack(L1BaseGradientDescent):
    """Sparse L1 Descent Attack [#Tra19]_.

    A projected gradient attack that, at every step, moves along the sign of
    only the largest-magnitude gradient components (selected per sample by a
    quantile threshold) and then projects the perturbation back onto the L1
    ball of radius epsilon.

    Args:
        quantile: Only gradient components whose magnitude is at or above this
            per-sample quantile are updated; controls the sparsity of each step.
        rel_stepsize: Stepsize relative to epsilon.
        abs_stepsize: If given, it takes precedence over rel_stepsize.
        steps: Number of update steps.
        random_start: Controls whether to randomly start within allowed epsilon ball.

    References:
        .. [#Tra19] Florian Tramèr, Dan Boneh, "Adversarial Training and
        Robustness for Multiple Perturbations"
        https://arxiv.org/abs/1904.13000
    """

    def normalize(
        self, gradients: ep.Tensor, *, x: ep.Tensor, bounds: Bounds
    ) -> ep.Tensor:
        """Turn raw gradients into a sparse, L1-normalized descent direction."""
        # Zero out components that would push x outside its bounds: at the
        # lower bound we cannot decrease, at the upper we cannot increase.
        bad_pos = ep.logical_or(
            ep.logical_and(x == bounds.lower, gradients < 0),
            ep.logical_and(x == bounds.upper, gradients > 0),
        )
        gradients = ep.where(bad_pos, ep.zeros_like(gradients), gradients)

        # Keep only the components whose magnitude reaches the per-sample
        # quantile of |gradient| — this is what makes the direction sparse.
        abs_gradients = gradients.abs()
        quantiles = np.quantile(
            flatten(abs_gradients).numpy(), q=self.quantile, axis=-1
        )
        keep = abs_gradients >= atleast_kd(
            ep.from_numpy(gradients, quantiles), gradients.ndim
        )
        # Use only the sign of the surviving components, then rescale the
        # whole direction to unit L1 norm.
        e = ep.where(keep, gradients.sign(), ep.zeros_like(gradients))
        return normalize_lp_norms(e, p=1)

    def project(self, x: ep.Tensor, x0: ep.Tensor, epsilon: float) -> ep.Tensor:
        """Project x onto the L1 ball of radius epsilon centered at x0.

        Uses the sorting-based projection of Duchi et al. 2008 ("Efficient
        Projections onto the l1-Ball for Learning in High Dimensions");
        originally based on
        https://github.com/ftramer/MultiRobustness/blob/ad41b63235d13b1b2a177c5f270ab9afa74eee69/pgd_attack.py#L110
        """
        delta = flatten(x - x0)
        norms = delta.norms.l1(axis=-1)
        # Fast path: nothing to project when every sample is inside the ball.
        if (norms <= epsilon).all():
            return x

        _, d = delta.shape
        abs_delta = abs(delta)
        # |delta| sorted in descending order per sample.
        mu = -ep.sort(-abs_delta, axis=-1)
        cumsums = mu.cumsum(axis=-1)
        js = 1.0 / ep.arange(x, 1, d + 1).astype(x.dtype)
        temp = mu - js * (cumsums - epsilon)
        # The positive entries of temp form a prefix: if the condition failed
        # at j but held at j + 1, mu would have to be increasing, which
        # contradicts the descending sort. rho is the length of that prefix
        # and is at least 1, because temp[:, 0] == epsilon > 0.
        #
        # BUGFIX: the previous code took rho = argmin(temp > 0, ...), i.e.
        # the index of the first NON-positive entry (one past the prefix).
        # That under-thresholds and can return points with an L1 norm larger
        # than epsilon (e.g. delta = [3, 0.5], epsilon = 2 gave norm 2.25).
        positive = (temp > 0).astype(x.dtype)
        rho = positive.sum(axis=-1)
        # Summing mu over the positive prefix equals cumsums at the last
        # positive index, so no integer gather is needed.
        theta = ((mu * positive).sum(axis=-1) - epsilon) / rho
        # Samples that are already inside the ball (possible in a mixed
        # batch, or when delta is zero) yield theta <= 0; without clamping,
        # abs_delta - theta would INFLATE them instead of leaving them alone.
        theta = ep.maximum(theta, 0)
        # Soft-thresholding: shrink every component towards zero by theta.
        delta = delta.sign() * ep.maximum(abs_delta - theta[..., ep.newaxis], 0)
        delta = delta.reshape(x.shape)
        return x0 + delta

    def __init__(
        self,
        *,
        quantile: float = 0.99,
        rel_stepsize: float = 0.2,
        abs_stepsize: Optional[float] = None,
        steps: int = 10,
        random_start: bool = False,
    ):
        super().__init__(
            rel_stepsize=rel_stepsize,
            abs_stepsize=abs_stepsize,
            steps=steps,
            random_start=random_start,
        )
        # quantile is a probability level for np.quantile, so it must lie in [0, 1].
        if not 0 <= quantile <= 1:
            raise ValueError(f"quantile needs to be between 0 and 1, got {quantile}")
        self.quantile = quantile
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc