bethgelab / foolbox / 8137716344

22 Jan 2024 10:53PM UTC. Coverage: 98.47% (unchanged from the previous build).

Build 8137716344, triggered by a push via GitHub (committer: web-flow).
Bump pillow from 10.1.0 to 10.2.0 in /tests (#718)

Bumps [pillow](https://github.com/python-pillow/Pillow) from 10.1.0 to 10.2.0.
- [Release notes](https://github.com/python-pillow/Pillow/releases)
- [Changelog](https://github.com/python-pillow/Pillow/blob/main/CHANGES.rst)
- [Commits](https://github.com/python-pillow/Pillow/compare/10.1.0...10.2.0)

---
updated-dependencies:
- dependency-name: pillow
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

3475 of 3529 relevant lines covered (98.47%)

7.22 hits per line

Source File: /foolbox/attacks/deepfool.py (100.0% of lines covered)
from typing import Union, Optional, Tuple, Any, Callable
from typing_extensions import Literal
import eagerpy as ep
import logging
from abc import ABC
from abc import abstractmethod

from ..devutils import flatten
from ..devutils import atleast_kd

from ..models import Model

from ..criteria import Criterion

from ..distances import l2, linf

from .base import MinimizationAttack
from .base import T
from .base import get_criterion
from .base import raise_if_kwargs
from .base import verify_input_bounds


class DeepFoolAttack(MinimizationAttack, ABC):
    """A simple and fast gradient-based adversarial attack.

    Implements the `DeepFool`_ attack.

    Args:
        steps : Maximum number of steps to perform.
        candidates : Limit on the number of the most likely classes that should
            be considered. A small value is usually sufficient and much faster.
        overshoot : How much to overshoot the boundary.
        loss : Loss function to use inside the update function.


    .. _DeepFool:
            Seyed-Mohsen Moosavi-Dezfooli, Alhussein Fawzi, Pascal Frossard,
            "DeepFool: a simple and accurate method to fool deep neural
            networks", https://arxiv.org/abs/1511.04599

    """

    def __init__(
        self,
        *,
        steps: int = 50,
        candidates: Optional[int] = 10,
        overshoot: float = 0.02,
        loss: Union[Literal["logits"], Literal["crossentropy"]] = "logits",
    ):
        self.steps = steps
        self.candidates = candidates
        self.overshoot = overshoot
        self.loss = loss

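    # _get_loss_fn() builds, for a fixed ranking of candidate classes, a
    # function loss_fun(x, k) whose value compares candidate class k against
    # the originally predicted class i0; its gradient gives the direction
    # towards the k-th decision boundary used by the DeepFool update below.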
    def _get_loss_fn(
        self,
        model: Model,
        classes: ep.Tensor,
    ) -> Callable[[ep.Tensor, int], Tuple[ep.Tensor, Tuple[ep.Tensor, ep.Tensor]]]:

        N = len(classes)
        rows = range(N)
        i0 = classes[:, 0]

        if self.loss == "logits":

            def loss_fun(
                x: ep.Tensor, k: int
            ) -> Tuple[ep.Tensor, Tuple[ep.Tensor, ep.Tensor]]:
                logits = model(x)
                ik = classes[:, k]
                l0 = logits[rows, i0]
                lk = logits[rows, ik]
                loss = lk - l0
                return loss.sum(), (loss, logits)

        elif self.loss == "crossentropy":

            def loss_fun(
                x: ep.Tensor, k: int
            ) -> Tuple[ep.Tensor, Tuple[ep.Tensor, ep.Tensor]]:
                logits = model(x)
                ik = classes[:, k]
                l0 = -ep.crossentropy(logits, i0)
                lk = -ep.crossentropy(logits, ik)
                loss = lk - l0
                return loss.sum(), (loss, logits)

        else:
            raise ValueError(
                f"expected loss to be 'logits' or 'crossentropy', got '{self.loss}'"
            )

        return loss_fun

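    # run() implements the DeepFool iteration: at each step the decision
    # boundaries to the candidate classes are linearized around the current
    # point, the closest boundary is selected via get_distances(), and a step
    # towards it is computed via get_perturbations() and scaled by overshoot.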
    def run(
        self,
        model: Model,
        inputs: T,
        criterion: Union[Criterion, T],
        *,
        early_stop: Optional[float] = None,
        **kwargs: Any,
    ) -> T:
        raise_if_kwargs(kwargs)
        x, restore_type = ep.astensor_(inputs)
        del inputs, kwargs

        verify_input_bounds(x, model)

        criterion = get_criterion(criterion)

        min_, max_ = model.bounds

        logits = model(x)
        classes = logits.argsort(axis=-1).flip(axis=-1)
        if self.candidates is None:
            candidates = logits.shape[-1]  # pragma: no cover
        else:
            candidates = min(self.candidates, logits.shape[-1])
            if candidates < 2:
                raise ValueError(  # pragma: no cover
                    f"expected the model output to have at least 2 classes, got {logits.shape[-1]}"
                )
            logging.info(f"Only testing the top-{candidates} classes")
            classes = classes[:, :candidates]

        N = len(x)
        rows = range(N)

        loss_fun = self._get_loss_fn(model, classes)
        loss_aux_and_grad = ep.value_and_grad_fn(x, loss_fun, has_aux=True)

        x0 = x
        p_total = ep.zeros_like(x)
        for _ in range(self.steps):
            # let's first get the logits using k = 1 to see if we are done
            diffs = [loss_aux_and_grad(x, 1)]
            _, (_, logits), _ = diffs[0]

            is_adv = criterion(x, logits)
            if is_adv.all():
                break

            # then run all the other k's as well
            # we could avoid repeated forward passes and only repeat
            # the backward pass, but this cannot currently be done in eagerpy
            diffs += [loss_aux_and_grad(x, k) for k in range(2, candidates)]

            # we don't need the logits
            diffs_ = [(losses, grad) for _, (losses, _), grad in diffs]
            losses = ep.stack([lo for lo, _ in diffs_], axis=1)
            grads = ep.stack([g for _, g in diffs_], axis=1)
            assert losses.shape == (N, candidates - 1)
            assert grads.shape == (N, candidates - 1) + x0.shape[1:]

            # calculate the distances
            distances = self.get_distances(losses, grads)
            assert distances.shape == (N, candidates - 1)

            # determine the best directions
            best = distances.argmin(axis=1)
            distances = distances[rows, best]
            losses = losses[rows, best]
            grads = grads[rows, best]
            assert distances.shape == (N,)
            assert losses.shape == (N,)
            assert grads.shape == x0.shape

            # apply perturbation
            distances = distances + 1e-4  # for numerical stability
            p_step = self.get_perturbations(distances, grads)
            assert p_step.shape == x0.shape

            p_total += p_step
            # don't do anything for those that are already adversarial
            x = ep.where(
                atleast_kd(is_adv, x.ndim), x, x0 + (1.0 + self.overshoot) * p_total
            )
            x = ep.clip(x, min_, max_)

        return restore_type(x)

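    # Subclasses supply the norm-specific pieces: get_distances() ranks the
    # linearized boundaries, and get_perturbations() turns the chosen distance
    # and gradient into an input-space step.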
    @abstractmethod
    def get_distances(self, losses: ep.Tensor, grads: ep.Tensor) -> ep.Tensor:
        ...

    @abstractmethod
    def get_perturbations(self, distances: ep.Tensor, grads: ep.Tensor) -> ep.Tensor:
        ...


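# For the L2 variant, the distance to a linearized boundary is |loss| divided
# by the l2 norm of its gradient, and the step moves along the gradient
# direction, as in the closed-form projection of the DeepFool paper.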
class L2DeepFoolAttack(DeepFoolAttack):
    """A simple and fast gradient-based adversarial attack.

    Implements the DeepFool L2 attack. [#Moos15]_

    Args:
        steps : Maximum number of steps to perform.
        candidates : Limit on the number of the most likely classes that should
            be considered. A small value is usually sufficient and much faster.
        overshoot : How much to overshoot the boundary.
        loss : Loss function to use inside the update function.

    References:
        .. [#Moos15] Seyed-Mohsen Moosavi-Dezfooli, Alhussein Fawzi, Pascal Frossard,
            "DeepFool: a simple and accurate method to fool deep neural
            networks", https://arxiv.org/abs/1511.04599

    """

    distance = l2

    def get_distances(self, losses: ep.Tensor, grads: ep.Tensor) -> ep.Tensor:
        return abs(losses) / (flatten(grads, keep=2).norms.l2(axis=-1) + 1e-8)

    def get_perturbations(self, distances: ep.Tensor, grads: ep.Tensor) -> ep.Tensor:
        return (
            atleast_kd(
                distances / (flatten(grads).norms.l2(axis=-1) + 1e-8),
                grads.ndim,
            )
            * grads
        )


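# For the L-infinity variant, the dual norm applies: the distance uses the l1
# norm of the gradient, and the step moves along the gradient's sign.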
class LinfDeepFoolAttack(DeepFoolAttack):
    """A simple and fast gradient-based adversarial attack.

    Implements the `DeepFool`_ L-Infinity attack.

    Args:
        steps : Maximum number of steps to perform.
        candidates : Limit on the number of the most likely classes that should
            be considered. A small value is usually sufficient and much faster.
        overshoot : How much to overshoot the boundary.
        loss : Loss function to use inside the update function.


    .. _DeepFool:
            Seyed-Mohsen Moosavi-Dezfooli, Alhussein Fawzi, Pascal Frossard,
            "DeepFool: a simple and accurate method to fool deep neural
            networks", https://arxiv.org/abs/1511.04599

    """

    distance = linf

    def get_distances(self, losses: ep.Tensor, grads: ep.Tensor) -> ep.Tensor:
        return abs(losses) / (flatten(grads, keep=2).abs().sum(axis=-1) + 1e-8)

    def get_perturbations(self, distances: ep.Tensor, grads: ep.Tensor) -> ep.Tensor:
        return atleast_kd(distances, grads.ndim) * grads.sign()
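
For context, here is a minimal usage sketch of the attacks above via the public foolbox API. It assumes a PyTorch classifier; the torchvision resnet18, the preprocessing values, and the epsilon are illustrative choices, not part of this file.

import foolbox as fb
import torchvision.models as models

# Illustrative model; any PyTorch classifier in eval mode works.
model = models.resnet18(pretrained=True).eval()
preprocessing = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], axis=-3)
fmodel = fb.PyTorchModel(model, bounds=(0, 1), preprocessing=preprocessing)

# A small batch of sample images in [0, 1] with their labels.
images, labels = fb.utils.samples(fmodel, dataset="imagenet", batchsize=4)

# Run the L2 variant defined above; epsilons bounds the allowed perturbation.
attack = fb.attacks.L2DeepFoolAttack(steps=50, candidates=10, overshoot=0.02)
raw, clipped, is_adv = attack(fmodel, images, labels, epsilons=0.5)
print(is_adv)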