• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

bethgelab / foolbox / 8137716344

22 Jan 2024 10:53PM UTC coverage: 98.47%. Remained the same
8137716344

push

github

web-flow
Bump pillow from 10.1.0 to 10.2.0 in /tests (#718)

Bumps [pillow](https://github.com/python-pillow/Pillow) from 10.1.0 to 10.2.0.
- [Release notes](https://github.com/python-pillow/Pillow/releases)
- [Changelog](https://github.com/python-pillow/Pillow/blob/main/CHANGES.rst)
- [Commits](https://github.com/python-pillow/Pillow/compare/10.1.0...10.2.0)

---
updated-dependencies:
- dependency-name: pillow
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

3475 of 3529 relevant lines covered (98.47%)

7.22 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

100.0
/foolbox/attacks/ddn.py
1
from typing import Union, Tuple, Optional, Any
10✔
2
import math
10✔
3
import eagerpy as ep
10✔
4

5
from ..models import Model
10✔
6

7
from ..criteria import Misclassification, TargetedMisclassification
10✔
8

9
from ..distances import l2
10✔
10

11
from ..devutils import atleast_kd, flatten
10✔
12

13
from .base import MinimizationAttack
10✔
14
from .base import get_criterion
10✔
15
from .base import T
10✔
16
from .base import raise_if_kwargs
10✔
17
from .base import verify_input_bounds
10✔
18

19

20
def normalize_gradient_l2_norms(grad: ep.Tensor) -> ep.Tensor:
    """Rescale each sample's gradient to unit L2 norm.

    Samples whose gradient is exactly zero are first replaced with
    random normal noise, so every sample ends up with a valid
    (unit-length) direction instead of a zero vector.

    Args:
        grad: Batched gradient tensor; the first axis indexes samples.

    Returns:
        A tensor of the same shape where each per-sample slice has
        L2 norm (approximately) 1.
    """
    sample_norms = ep.norms.l2(flatten(grad), -1)

    # substitute a random direction wherever the gradient vanished
    vanished = atleast_kd(sample_norms == 0, grad.ndim)
    grad = ep.where(vanished, ep.normal(grad, shape=grad.shape), grad)

    # the replaced entries changed the norms, so compute them again
    sample_norms = ep.norms.l2(flatten(grad), -1)
    sample_norms = ep.maximum(sample_norms, 1e-12)  # avoid division by zero

    scale = atleast_kd(1 / sample_norms, grad.ndim)
    return grad * scale
34

35

36
class DDNAttack(MinimizationAttack):
    """The Decoupled Direction and Norm L2 adversarial attack. [#Rony18]_

    The attack keeps a perturbation ``delta`` and a per-sample radius
    ``epsilon``. Each step moves ``delta`` along the (normalized) loss
    gradient, then shrinks ``epsilon`` for samples that are already
    adversarial and grows it for those that are not, projecting ``delta``
    back onto the epsilon ball. The smallest adversarial perturbation
    seen over all steps is returned.

    Args:
        init_epsilon : Initial value for the norm/epsilon ball.
        steps : Number of steps for the optimization.
        gamma : Factor by which the norm will be modified: new_norm = norm * (1 + or - gamma).

    References:
        .. [#Rony18] Jérôme Rony, Luiz G. Hafemann, Luiz S. Oliveira, Ismail Ben Ayed,
            Robert Sabourin, Eric Granger, "Decoupling Direction and Norm for
            Efficient Gradient-Based L2 Adversarial Attacks and Defenses",
            https://arxiv.org/abs/1811.09600
    """

    # minimization target: this attack minimizes the L2 distance
    distance = l2

    def __init__(
        self,
        *,
        init_epsilon: float = 1.0,
        steps: int = 100,
        gamma: float = 0.05,
    ):
        # initial radius of the per-sample epsilon ball
        self.init_epsilon = init_epsilon
        # number of optimization iterations
        self.steps = steps
        # multiplicative factor applied to epsilon each step (shrink/grow)
        self.gamma = gamma

    def run(
        self,
        model: Model,
        inputs: T,
        criterion: Union[Misclassification, TargetedMisclassification, T],
        *,
        early_stop: Optional[float] = None,
        **kwargs: Any,
    ) -> T:
        """Run the attack and return adversarial examples.

        Args:
            model: The model to attack.
            inputs: Batch of clean inputs.
            criterion: Either a :class:`Misclassification` (untargeted) or a
                :class:`TargetedMisclassification` (targeted) criterion, or
                something convertible to one via ``get_criterion``.
            early_stop: Accepted for interface compatibility; not used here.
            kwargs: Must be empty; any extra keyword raises via
                ``raise_if_kwargs``.

        Returns:
            Adversarial inputs in the same tensor type as ``inputs``.

        Raises:
            ValueError: If the criterion type is unsupported or the label
                tensor does not have shape ``(N,)``.
        """
        raise_if_kwargs(kwargs)
        # convert to an eagerpy tensor; restore_type converts back at the end
        x, restore_type = ep.astensor_(inputs)
        criterion_ = get_criterion(criterion)
        del inputs, criterion, kwargs

        verify_input_bounds(x, model)

        N = len(x)  # batch size

        # resolve attack direction (targeted vs. untargeted) and the labels
        if isinstance(criterion_, Misclassification):
            targeted = False
            classes = criterion_.labels
        elif isinstance(criterion_, TargetedMisclassification):
            targeted = True
            classes = criterion_.target_classes
        else:
            raise ValueError("unsupported criterion")

        if classes.shape != (N,):
            name = "target_classes" if targeted else "labels"
            raise ValueError(
                f"expected {name} to have shape ({N},), got {classes.shape}"
            )

        max_stepsize = 1.0
        min_, max_ = model.bounds

        def loss_fn(
            inputs: ep.Tensor, labels: ep.Tensor
        ) -> Tuple[ep.Tensor, ep.Tensor]:
            # summed cross-entropy; negated for targeted attacks so that
            # gradient ascent moves toward the target class
            logits = model(inputs)

            sign = -1.0 if targeted else 1.0
            loss = sign * ep.crossentropy(logits, labels).sum()

            return loss, logits

        # value_and_grad_fn returns (loss, aux_logits, gradient) per call
        grad_and_logits = ep.value_and_grad_fn(x, loss_fn, has_aux=True)

        # perturbation, initialized at zero
        delta = ep.zeros_like(x)

        # per-sample epsilon ball radius
        epsilon = self.init_epsilon * ep.ones(x, len(x))
        # largest possible L2 perturbation per sample within the input bounds;
        # used both as the initial "best" distance and as an epsilon cap
        worst_norm = ep.norms.l2(flatten(ep.maximum(x - min_, max_ - x)), -1)

        best_l2 = worst_norm
        best_delta = delta
        adv_found = ep.zeros(x, len(x)).bool()

        for i in range(self.steps):
            # perform cosine annealing of LR starting from 1.0 to 0.01
            stepsize = (
                0.01
                + (max_stepsize - 0.01) * (1 + math.cos(math.pi * i / self.steps)) / 2
            )

            x_adv = x + delta

            _, logits, gradients = grad_and_logits(x_adv, classes)
            gradients = normalize_gradient_l2_norms(gradients)
            is_adversarial = criterion_(x_adv, logits)

            # NOTE: this local shadows the module-level `l2` distance,
            # but only within this loop body
            l2 = ep.norms.l2(flatten(delta), axis=-1)
            is_smaller = l2 <= best_l2

            # track the smallest adversarial perturbation seen so far
            is_both = ep.logical_and(is_adversarial, is_smaller)
            adv_found = ep.logical_or(adv_found, is_adversarial)
            best_l2 = ep.where(is_both, l2, best_l2)

            best_delta = ep.where(atleast_kd(is_both, x.ndim), delta, best_delta)

            # do step
            delta = delta + stepsize * gradients

            # shrink epsilon for already-adversarial samples, grow otherwise
            epsilon = epsilon * ep.where(
                is_adversarial, 1.0 - self.gamma, 1.0 + self.gamma
            )
            epsilon = ep.minimum(epsilon, worst_norm)

            # project to epsilon ball
            delta *= atleast_kd(epsilon / ep.norms.l2(flatten(delta), -1), x.ndim)

            # clip to valid bounds
            delta = ep.clip(x + delta, *model.bounds) - x

        # return the best (smallest) adversarial perturbation found
        x_adv = x + best_delta

        return restore_type(x_adv)
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc