bethgelab / foolbox / build 8137716344

22 Jan 2024 10:53PM UTC · coverage: 98.47% (remained the same)

Event: push · CI: github · Committer: web-flow
Bump pillow from 10.1.0 to 10.2.0 in /tests (#718)

Bumps [pillow](https://github.com/python-pillow/Pillow) from 10.1.0 to 10.2.0.
- [Release notes](https://github.com/python-pillow/Pillow/releases)
- [Changelog](https://github.com/python-pillow/Pillow/blob/main/CHANGES.rst)
- [Commits](https://github.com/python-pillow/Pillow/compare/10.1.0...10.2.0)

---
updated-dependencies:
- dependency-name: pillow
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

3475 of 3529 relevant lines covered (98.47%)

7.22 hits per line

Source File: /foolbox/attacks/virtual_adversarial_attack.py (100.0% covered)

from typing import Union, Any
import eagerpy as ep

from ..models import Model

from ..criteria import Misclassification

from ..distances import l2

from ..devutils import flatten, atleast_kd

from .base import FixedEpsilonAttack
from .base import get_criterion
from .base import T
from .base import raise_if_kwargs
from .base import verify_input_bounds


class VirtualAdversarialAttack(FixedEpsilonAttack):
    """Second-order gradient-based attack on the logits. [#Miy15]_

    The attack calculates an untargeted adversarial perturbation by performing an
    approximated second-order optimization step on the KL divergence between
    the unperturbed predictions and the predictions for the adversarial
    perturbation. This attack was originally introduced as the
    Virtual Adversarial Training [#Miy15]_ method.

    Args:
        steps : Number of update steps.
        xi : L2 distance between original image and first adversarial proposal.

    References:
        .. [#Miy15] Takeru Miyato, Shin-ichi Maeda, Masanori Koyama, Ken Nakae,
            Shin Ishii, "Distributional Smoothing with Virtual Adversarial Training",
            https://arxiv.org/abs/1507.00677
    """

    distance = l2

    def __init__(self, steps: int, xi: float = 1e-6):
        self.steps = steps
        self.xi = xi

    def run(
        self,
        model: Model,
        inputs: T,
        criterion: Union[Misclassification, T],
        *,
        epsilon: float,
        **kwargs: Any,
    ) -> T:
        raise_if_kwargs(kwargs)
        x, restore_type = ep.astensor_(inputs)
        criterion_ = get_criterion(criterion)
        del inputs, criterion, kwargs

        verify_input_bounds(x, model)

        N = len(x)

        if isinstance(criterion_, Misclassification):
            classes = criterion_.labels
        else:
            raise ValueError("unsupported criterion")

        if classes.shape != (N,):
            raise ValueError(
                f"expected labels to have shape ({N},), got {classes.shape}"
            )

        bounds = model.bounds

        def loss_fun(delta: ep.Tensor, logits: ep.Tensor) -> ep.Tensor:
            assert x.shape[0] == logits.shape[0]
            assert delta.shape == x.shape

            x_hat = x + delta
            logits_hat = model(x_hat)
            loss = ep.kl_div_with_logits(logits, logits_hat).sum()

            return loss

        value_and_grad = ep.value_and_grad_fn(x, loss_fun, has_aux=False)

        clean_logits = model(x)

        # start with a random vector as the search vector
        d = ep.normal(x, shape=x.shape, mean=0, stddev=1)
        for it in range(self.steps):
            # rescale the proposal to have L2 norm xi
            d = d * self.xi / atleast_kd(ep.norms.l2(flatten(d), axis=-1), x.ndim)

            # use the gradient of the KL divergence as the new search vector
            _, grad = value_and_grad(d, clean_logits)
            d = grad

            # rescale the search vector by the input range
            d = (bounds[1] - bounds[0]) * d

            if ep.any(ep.norms.l2(flatten(d), axis=-1) < 1e-64):
                raise RuntimeError(  # pragma: no cover
                    "Gradient vanished; this can happen if xi is too small."
                )

        final_delta = epsilon / atleast_kd(ep.norms.l2(flatten(d), axis=-1), d.ndim) * d
        x_adv = ep.clip(x + final_delta, *bounds)
        return restore_type(x_adv)
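
For orientation, here is a minimal usage sketch of this attack through foolbox's fixed-epsilon attack interface. It is not part of the file above: the tiny untrained classifier, the random 32x32 batch, and the choices steps=10, xi=1e-6, and epsilons=0.5 are illustrative assumptions, not values taken from this repository or its tests.

# Usage sketch (illustrative, not part of virtual_adversarial_attack.py).
import torch
import torch.nn as nn
import foolbox as fb

# Untrained stand-in classifier; in practice this would be a trained network in eval mode.
net = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10)).eval()

# Random batch standing in for real data: 8 RGB 32x32 images in [0, 1] with integer labels.
images = torch.rand(8, 3, 32, 32)
labels = torch.randint(0, 10, (8,))

# Wrap the model for foolbox and run the attack at a fixed L2 epsilon.
fmodel = fb.PyTorchModel(net, bounds=(0, 1))
attack = fb.attacks.VirtualAdversarialAttack(steps=10, xi=1e-6)
raw, clipped, is_adv = attack(fmodel, images, labels, epsilons=0.5)

# is_adv is a boolean tensor marking which inputs ended up misclassified.
print(is_adv.float().mean().item())

Note that, as in the implementation above, the labels are only shape-checked: the perturbation is computed purely from the clean logits via the KL-divergence gradient loop, in the spirit of the "virtual" adversarial direction of the VAT paper, and larger steps values refine that power-iteration-like search direction.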