bethgelab / foolbox · build 8139141456
04 Mar 2024 11:03AM UTC · coverage: 37.923% (-60.6%) from 98.477%

Pull Request #722: Fix guide compilation
Merge 5663238db into 17e0e9b31 (committed via github / web-flow)

1344 of 3544 relevant lines covered (37.92%) · 0.38 hits per line

Source File: /foolbox/attacks/virtual_adversarial_attack.py (35.42% of lines covered)
from typing import Union, Any

import eagerpy as ep

from ..models import Model
from ..criteria import Misclassification
from ..distances import l2
from ..devutils import flatten, atleast_kd

from .base import FixedEpsilonAttack
from .base import get_criterion
from .base import T
from .base import raise_if_kwargs
from .base import verify_input_bounds

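# Note: eagerpy wraps PyTorch, TensorFlow, JAX, and NumPy tensors behind a
# single API; this is what keeps the attack framework-agnostic. `T` is the
# generic tensor type variable used throughout foolbox.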
class VirtualAdversarialAttack(FixedEpsilonAttack):
    """Second-order gradient-based attack on the logits. [Miy15]_

    The attack calculates an untargeted adversarial perturbation by performing
    an approximated second-order optimization step on the KL divergence between
    the predictions for the unperturbed input and the predictions for the
    perturbed input. This attack was originally introduced as the
    Virtual Adversarial Training [Miy15]_ method.

    Args:
        steps : Number of update steps.
        xi : L2 distance between the original input and the first adversarial
            proposal.

    References:
        .. [Miy15] Takeru Miyato, Shin-ichi Maeda, Masanori Koyama, Ken Nakae,
            Shin Ishii, "Distributional Smoothing with Virtual Adversarial
            Training", https://arxiv.org/abs/1507.00677
    """

    distance = l2

    def __init__(self, steps: int, xi: float = 1e-6):
        self.steps = steps
        self.xi = xi

    def run(
        self,
        model: Model,
        inputs: T,
        criterion: Union[Misclassification, T],
        *,
        epsilon: float,
        **kwargs: Any,
    ) -> T:
        raise_if_kwargs(kwargs)
        x, restore_type = ep.astensor_(inputs)
        criterion_ = get_criterion(criterion)
        del inputs, criterion, kwargs

        verify_input_bounds(x, model)

        N = len(x)

        if isinstance(criterion_, Misclassification):
            classes = criterion_.labels
        else:
            raise ValueError("unsupported criterion")

        if classes.shape != (N,):
            raise ValueError(
                f"expected labels to have shape ({N},), got {classes.shape}"
            )

        bounds = model.bounds

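        # The loss below is the KL divergence between the model's predictions
        # for the clean input and for the perturbed input: its gradient with
        # respect to delta points in the direction in which the predictions
        # change fastest.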
        def loss_fun(delta: ep.Tensor, logits: ep.Tensor) -> ep.Tensor:
            assert x.shape[0] == logits.shape[0]
            assert delta.shape == x.shape

            x_hat = x + delta
            logits_hat = model(x_hat)
            loss = ep.kl_div_with_logits(logits, logits_hat).sum()

            return loss

        value_and_grad = ep.value_and_grad_fn(x, loss_fun, has_aux=False)

        clean_logits = model(x)

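        # The loop below is in effect a power iteration: the KL divergence is
        # minimal at delta = 0, so its gradient at the small offset xi * d
        # approximates a Hessian-vector product, and iterating converges
        # towards the dominant eigenvector of the Hessian, i.e. the direction
        # to which the predictions are most sensitive.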
        # start with a random vector as the search vector
        d = ep.normal(x, shape=x.shape, mean=0, stddev=1)
        for it in range(self.steps):
            # rescale the proposal to have L2 norm xi
            d = d * self.xi / atleast_kd(ep.norms.l2(flatten(d), axis=-1), x.ndim)

            # use the gradient of the KL divergence as the new search vector
            _, grad = value_and_grad(d, clean_logits)
            d = grad

            # rescale the search vector to the size of the input domain
            d = (bounds[1] - bounds[0]) * d

            if ep.any(ep.norms.l2(flatten(d), axis=-1) < 1e-64):
                raise RuntimeError(  # pragma: no cover
                    "Gradient vanished; this can happen if xi is too small."
                )

        # scale the final search direction to L2 norm epsilon and clip the
        # adversarial example back into the model's input bounds
        final_delta = epsilon / atleast_kd(ep.norms.l2(flatten(d), axis=-1), d.ndim) * d
        x_adv = ep.clip(x + final_delta, *bounds)
        return restore_type(x_adv)
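
For context, here is a minimal sketch of how this attack is typically invoked
through the foolbox 3 API. The model choice, dataset, and epsilon value below
are illustrative assumptions, not part of the file above:

    import torchvision.models as models
    import foolbox as fb

    # hypothetical victim model: a pretrained torchvision ResNet-18
    model = models.resnet18(pretrained=True).eval()
    preprocessing = dict(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], axis=-3
    )
    fmodel = fb.PyTorchModel(model, bounds=(0, 1), preprocessing=preprocessing)

    # a small batch of sample images in [0, 1] with their true labels
    images, labels = fb.utils.samples(fmodel, dataset="imagenet", batchsize=8)

    attack = fb.attacks.VirtualAdversarialAttack(steps=50)
    # epsilon bounds the L2 norm of the final perturbation
    raw, clipped, is_adv = attack(fmodel, images, labels, epsilons=0.5)

Passing integer labels works because get_criterion wraps them in a
Misclassification criterion, which is the only criterion run() accepts.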