• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

bethgelab / foolbox / 8137716344

22 Jan 2024 10:53PM UTC coverage: 98.47%. Remained the same
8137716344

push

github

web-flow
Bump pillow from 10.1.0 to 10.2.0 in /tests (#718)

Bumps [pillow](https://github.com/python-pillow/Pillow) from 10.1.0 to 10.2.0.
- [Release notes](https://github.com/python-pillow/Pillow/releases)
- [Changelog](https://github.com/python-pillow/Pillow/blob/main/CHANGES.rst)
- [Commits](https://github.com/python-pillow/Pillow/compare/10.1.0...10.2.0)

---
updated-dependencies:
- dependency-name: pillow
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

3475 of 3529 relevant lines covered (98.47%)

7.22 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

100.0
/foolbox/attacks/newtonfool.py
1
from typing import Union, Tuple, Any, Optional
10✔
2
import eagerpy as ep
10✔
3

4
from ..models import Model
10✔
5

6
from ..criteria import Misclassification
10✔
7

8
from ..distances import l2
10✔
9

10
from ..devutils import atleast_kd, flatten
10✔
11

12
from .base import MinimizationAttack
10✔
13
from .base import get_criterion
10✔
14
from .base import T
10✔
15
from .base import raise_if_kwargs
10✔
16
from .base import verify_input_bounds
10✔
17

18

19
class NewtonFoolAttack(MinimizationAttack):
    """NewtonFool attack by Jang et al. [#Jang17]_

    Iteratively lowers the softmax probability of the true class by taking
    gradient steps whose magnitude is derived from a first-order
    (Newton-style) approximation of the model's class score.

    Args:
        steps : Number of update steps to perform.
        stepsize : Size of each update step.

    References:
        .. [#Jang17] Uyeong Jang et al., "Objective Metrics and Gradient Descent
            Algorithms for Adversarial Examples in Machine Learning",
            https://dl.acm.org/citation.cfm?id=3134635
    """

    distance = l2

    def __init__(self, steps: int = 100, stepsize: float = 0.01):
        self.steps = steps
        self.stepsize = stepsize

    def run(
        self,
        model: Model,
        inputs: T,
        criterion: Union[Misclassification, T],
        *,
        early_stop: Optional[float] = None,
        **kwargs: Any,
    ) -> T:
        raise_if_kwargs(kwargs)
        x, restore_type = ep.astensor_(inputs)
        criterion_ = get_criterion(criterion)
        del inputs, criterion, kwargs

        verify_input_bounds(x, model)

        n = len(x)

        # only untargeted misclassification is supported
        if not isinstance(criterion_, Misclassification):
            raise ValueError("unsupported criterion")
        classes = criterion_.labels

        if classes.shape != (n,):
            raise ValueError(
                f"expected labels to have shape ({n},), got {classes.shape}"
            )

        min_, max_ = model.bounds

        # squared L2 norm of each flattened input; constant across steps
        # NOTE(review): the paper's formula uses plain (non-squared) norms
        # here and in step (5) — confirm against the reference tests.
        x_sq_norm = flatten(x.square()).sum(1)

        def label_prob_sum(
            x: ep.Tensor,
        ) -> Tuple[ep.Tensor, Tuple[ep.Tensor, ep.Tensor]]:
            # Sum of the softmax probabilities of the true classes; the full
            # score matrix and per-sample probabilities come back as aux data.
            probs = ep.softmax(model(x))
            true_probs = probs[range(n), classes]
            return true_probs.sum(), (probs, true_probs)

        for _ in range(self.steps):
            # (1) current scores and the gradient of the true-class probability
            _, (probs, true_probs), grad = ep.value_aux_and_grad(label_prob_sum, x)

            predictions = probs.argmax(-1)
            n_classes = probs.shape[-1]

            # (2) squared L2 norm of the gradient, per sample
            grad_sq_norm = flatten(grad.square()).sum(1)

            # (3) step magnitude, capped so the true-class probability is not
            # pushed below chance level (1 / number of classes)
            bound = self.stepsize * x_sq_norm * grad_sq_norm
            gap = true_probs - 1.0 / n_classes
            delta = ep.minimum(bound, gap)

            # (4) freeze samples that are already adversarial; this is not
            # described in the paper, but otherwise once the probability drops
            # below chance level the likelihood is increased instead of
            # decreased
            delta = delta * (predictions == classes).float32()

            # (5) take the Newton-style step along the gradient direction
            scale = atleast_kd(delta / grad_sq_norm.square(), grad.ndim)
            x = x - scale * grad

            # keep iterates inside the model's valid input range
            x = ep.clip(x, min_, max_)

        return restore_type(x)
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2026 Coveralls, Inc