
bethgelab / foolbox / build 8137716344 (push, via GitHub, committed by web-flow)

22 Jan 2024 10:53PM UTC. Coverage: 98.47% (remained the same)
Bump pillow from 10.1.0 to 10.2.0 in /tests (#718)

Bumps [pillow](https://github.com/python-pillow/Pillow) from 10.1.0 to 10.2.0.
- [Release notes](https://github.com/python-pillow/Pillow/releases)
- [Changelog](https://github.com/python-pillow/Pillow/blob/main/CHANGES.rst)
- [Commits](https://github.com/python-pillow/Pillow/compare/10.1.0...10.2.0)

---
updated-dependencies:
- dependency-name: pillow
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

3475 of 3529 relevant lines covered (98.47%)

7.22 hits per line

Source File: /foolbox/attacks/ead.py (100.0% covered)
from typing import Union, Tuple, Any, Optional
from typing_extensions import Literal

import math

import eagerpy as ep

from ..models import Model

from ..criteria import Misclassification, TargetedMisclassification

from ..distances import l1

from ..devutils import atleast_kd, flatten

from .base import MinimizationAttack
from .base import get_criterion
from .base import T
from .base import raise_if_kwargs
from .base import verify_input_bounds

class EADAttack(MinimizationAttack):
    """Implementation of the EAD Attack with EN Decision Rule. [#Chen18]_

    Args:
        binary_search_steps : Number of steps to perform in the binary search
            over the const c.
        steps : Number of optimization steps within each binary search step.
        initial_stepsize : Initial stepsize to update the examples.
        confidence : Confidence required for an example to be marked as adversarial.
            Controls the gap between example and decision boundary.
        initial_const : Initial value of the const c with which the binary search starts.
        regularization : Controls the L1 regularization.
        decision_rule : Rule according to which the best adversarial examples are selected.
            They either minimize the L1 or ElasticNet distance.
        abort_early : Stop inner search as soon as an adversarial example has been found.
            Does not affect the binary search over the const c.

    References:
        .. [#Chen18] Pin-Yu Chen, Yash Sharma, Huan Zhang, Jinfeng Yi, Cho-Jui Hsieh,
            "EAD: Elastic-Net Attacks to Deep Neural Networks via Adversarial Examples",
            https://www.aaai.org/ocs/index.php/AAAI/AAAI18/paper/viewPaper/16893
    """

    distance = l1

    def __init__(
        self,
        binary_search_steps: int = 9,
        steps: int = 10000,
        initial_stepsize: float = 1e-2,
        confidence: float = 0.0,
        initial_const: float = 1e-3,
        regularization: float = 1e-2,
        decision_rule: Union[Literal["EN"], Literal["L1"]] = "EN",
        abort_early: bool = True,
    ):
        if decision_rule not in ("EN", "L1"):
            raise ValueError("invalid decision rule")

        self.binary_search_steps = binary_search_steps
        self.steps = steps
        self.confidence = confidence
        self.initial_stepsize = initial_stepsize
        self.regularization = regularization
        self.initial_const = initial_const
        self.abort_early = abort_early
        self.decision_rule = decision_rule

    def run(
        self,
        model: Model,
        inputs: T,
        criterion: Union[Misclassification, TargetedMisclassification, T],
        *,
        early_stop: Optional[float] = None,
        **kwargs: Any,
    ) -> T:
        raise_if_kwargs(kwargs)
        x, restore_type = ep.astensor_(inputs)
        criterion_ = get_criterion(criterion)
        del inputs, criterion, kwargs

        verify_input_bounds(x, model)

        N = len(x)

        if isinstance(criterion_, Misclassification):
            targeted = False
            classes = criterion_.labels
            change_classes_logits = self.confidence
        elif isinstance(criterion_, TargetedMisclassification):
            targeted = True
            classes = criterion_.target_classes
            change_classes_logits = -self.confidence
        else:
            raise ValueError("unsupported criterion")

        def is_adversarial(perturbed: ep.Tensor, logits: ep.Tensor) -> ep.Tensor:
            if change_classes_logits != 0:
                logits += ep.onehot_like(logits, classes, value=change_classes_logits)
            return criterion_(perturbed, logits)

        if classes.shape != (N,):
            name = "target_classes" if targeted else "labels"
            raise ValueError(
                f"expected {name} to have shape ({N},), got {classes.shape}"
            )

        min_, max_ = model.bounds
        rows = range(N)

        def loss_fun(y_k: ep.Tensor, consts: ep.Tensor) -> Tuple[ep.Tensor, ep.Tensor]:
            assert y_k.shape == x.shape
            assert consts.shape == (N,)

            logits = model(y_k)

            if targeted:
                c_minimize = _best_other_classes(logits, classes)
                c_maximize = classes
            else:
                c_minimize = classes
                c_maximize = _best_other_classes(logits, classes)

            is_adv_loss = logits[rows, c_minimize] - logits[rows, c_maximize]
            assert is_adv_loss.shape == (N,)

            is_adv_loss = is_adv_loss + self.confidence
            is_adv_loss = ep.maximum(0, is_adv_loss)
            is_adv_loss = is_adv_loss * consts

            squared_norms = flatten(y_k - x).square().sum(axis=-1)
            loss = is_adv_loss.sum() + squared_norms.sum()
            return loss, logits

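        # Note: loss_fun covers only the smooth terms (hinge loss scaled by
        # consts plus the squared L2 norm); the non-smooth L1 term of the
        # elastic-net objective is handled by the shrinkage-thresholding
        # step further below.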
        loss_aux_and_grad = ep.value_and_grad_fn(x, loss_fun, has_aux=True)

        consts = self.initial_const * ep.ones(x, (N,))
        lower_bounds = ep.zeros(x, (N,))
        upper_bounds = ep.inf * ep.ones(x, (N,))

        best_advs = ep.zeros_like(x)
        best_advs_norms = ep.ones(x, (N,)) * ep.inf

        # the binary search searches for the smallest consts that produce adversarials
        for binary_search_step in range(self.binary_search_steps):
            if (
                binary_search_step == self.binary_search_steps - 1
                and self.binary_search_steps >= 10
            ):
                # in the last binary search step, repeat the search once
                consts = ep.minimum(upper_bounds, 1e10)

            # create a new optimizer to find the delta that minimizes the loss
            x_k = x
            y_k = x

            # found adv with the current consts
            found_advs = ep.full(x, (N,), value=False).bool()
            loss_at_previous_check = ep.inf

            for iteration in range(self.steps):
                # square-root learning rate decay
                stepsize = self.initial_stepsize * (1.0 - iteration / self.steps) ** 0.5

                loss, logits, gradient = loss_aux_and_grad(y_k, consts)

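                # ISTA/FISTA step: take a gradient step on the smooth part of
                # the objective at the lookahead point y_k, apply the
                # shrinkage-thresholding projection (which handles the L1
                # term), then extrapolate with momentum factor k / (k + 3).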
                x_k_old = x_k
                x_k = _project_shrinkage_thresholding(
                    y_k - stepsize * gradient, x, self.regularization, min_, max_
                )
                y_k = x_k + iteration / (iteration + 3.0) * (x_k - x_k_old)

                if self.abort_early and iteration % (math.ceil(self.steps / 10)) == 0:
                    # after each tenth of the iterations, check progress
                    if not loss.item() <= 0.9999 * loss_at_previous_check:
                        break  # stop optimization if there has been no progress
                    loss_at_previous_check = loss.item()

                found_advs_iter = is_adversarial(x_k, model(x_k))

                best_advs, best_advs_norms = _apply_decision_rule(
                    self.decision_rule,
                    self.regularization,
                    best_advs,
                    best_advs_norms,
                    x_k,
                    x,
                    found_advs_iter,
                )

                found_advs = ep.logical_or(found_advs, found_advs_iter)

            upper_bounds = ep.where(found_advs, consts, upper_bounds)
            lower_bounds = ep.where(found_advs, lower_bounds, consts)

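            # rows whose upper bound is still infinite never produced an
            # adversarial, so their const keeps growing exponentially;
            # all other rows bisect between the known lower and upper bounds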
            consts_exponential_search = consts * 10
            consts_binary_search = (lower_bounds + upper_bounds) / 2
            consts = ep.where(
                ep.isinf(upper_bounds), consts_exponential_search, consts_binary_search
            )

        return restore_type(best_advs)


def _best_other_classes(logits: ep.Tensor, exclude: ep.Tensor) -> ep.Tensor:
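    # exclude the given classes by pushing their logits to -inf before the argmax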
    other_logits = logits - ep.onehot_like(logits, exclude, value=ep.inf)
    return other_logits.argmax(axis=-1)


def _apply_decision_rule(
    decision_rule: Union[Literal["EN"], Literal["L1"]],
    beta: float,
    best_advs: ep.Tensor,
    best_advs_norms: ep.Tensor,
    x_k: ep.Tensor,
    x: ep.Tensor,
    found_advs: ep.Tensor,
) -> Tuple[ep.Tensor, ep.Tensor]:
    if decision_rule == "EN":
        # elastic-net distance: beta * L1 + squared L2
        norms = beta * flatten(x_k - x).abs().sum(axis=-1) + flatten(
            x_k - x
        ).square().sum(axis=-1)
    else:
        # decision rule = L1
        norms = flatten(x_k - x).abs().sum(axis=-1)

    new_best = ep.logical_and(norms < best_advs_norms, found_advs)
    new_best_kd = atleast_kd(new_best, best_advs.ndim)
    best_advs = ep.where(new_best_kd, x_k, best_advs)
    best_advs_norms = ep.where(new_best, norms, best_advs_norms)

    return best_advs, best_advs_norms

def _project_shrinkage_thresholding(
    z: ep.Tensor, x0: ep.Tensor, regularization: float, min_: float, max_: float
) -> ep.Tensor:
    """Performs the element-wise projected shrinkage-thresholding
    operation"""

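    # soft-threshold the difference z - x0 at threshold `regularization`,
    # then clip the shifted values to the model bounds [min_, max_]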
    upper_mask = z - x0 > regularization
    lower_mask = z - x0 < -regularization

    projection = ep.where(upper_mask, ep.minimum(z - regularization, max_), x0)
    projection = ep.where(lower_mask, ep.maximum(z + regularization, min_), projection)

    return projection
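
For context, the attack above is normally invoked through foolbox's generic attack interface rather than by calling run() directly. A minimal usage sketch follows; it assumes foolbox v3 with the PyTorch backend, and the model choice, preprocessing values, and batch size are illustrative, not part of this file.

import foolbox as fb
import torchvision.models as models

# Wrap an off-the-shelf classifier (illustrative choice).
net = models.resnet18(pretrained=True).eval()
preprocessing = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], axis=-3)
fmodel = fb.PyTorchModel(net, bounds=(0, 1), preprocessing=preprocessing)

# Sample a small batch of inputs and labels in the model's input format.
images, labels = fb.utils.samples(fmodel, dataset="imagenet", batchsize=8)

# EADAttack is a MinimizationAttack: with epsilons=None it returns the
# smallest adversarial perturbations found under the chosen decision rule.
attack = fb.attacks.EADAttack(binary_search_steps=9, steps=1000, decision_rule="EN")
raw, clipped, success = attack(fmodel, images, labels, epsilons=None)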