bethgelab / foolbox, build 8139141456
04 Mar 2024 11:03AM UTC, coverage: 37.923% (-60.6%) from 98.477%
Pull Request #722: Fix guide compilation (merge 5663238db into 17e0e9b31)

1344 of 3544 relevant lines covered (37.92%)
0.38 hits per line

Source File: /foolbox/attacks/gen_attack.py (42.59% covered)

from typing import Optional, Any, Tuple, Union
import numpy as np
import eagerpy as ep

from ..devutils import atleast_kd

from ..models import Model

from ..criteria import TargetedMisclassification

from ..distances import linf

from .base import FixedEpsilonAttack
from .base import T
from .base import get_channel_axis
from .base import raise_if_kwargs
from .base import verify_input_bounds
import math

from .gen_attack_utils import rescale_images


class GenAttack(FixedEpsilonAttack):
    """A black-box algorithm for L-infinity adversarials. [#Alz18]_
25

26
    This attack is performs a genetic search in order to find an adversarial
27
    perturbation in a black-box scenario in as few queries as possible.
28

29
    References:
30
        .. [#Alz18] Moustafa Alzantot, Yash Sharma, Supriyo Chakraborty, Huan Zhang,
31
           Cho-Jui Hsieh, Mani Srivastava,
32
           "GenAttack: Practical Black-box Attacks with Gradient-Free
33
           Optimization",
34
           https://arxiv.org/abs/1805.11090
35

36
    """

    def __init__(
        self,
        *,
        steps: int = 1000,
        population: int = 10,
        mutation_probability: float = 0.10,
        mutation_range: float = 0.15,
        sampling_temperature: float = 0.3,
        channel_axis: Optional[int] = None,
        reduced_dims: Optional[Tuple[int, int]] = None,
    ):
        self.steps = steps
        self.population = population
        self.min_mutation_probability = mutation_probability
        self.min_mutation_range = mutation_range
        self.sampling_temperature = sampling_temperature
        self.channel_axis = channel_axis
        self.reduced_dims = reduced_dims

    distance = linf

    def apply_noise(
        self,
        x: ep.TensorType,
        noise: ep.TensorType,
        epsilon: float,
        channel_axis: Optional[int],
    ) -> ep.TensorType:
        if noise.shape != x.shape and channel_axis is not None:
            # upscale noise
            noise = rescale_images(noise, x.shape, channel_axis)

        # clip noise to valid linf bounds
        noise = ep.clip(noise, -epsilon, +epsilon)

        # clip to image bounds
        return ep.clip(x + noise, 0.0, 1.0)

    def choice(
        self, a: int, size: Union[int, ep.TensorType], replace: bool, p: ep.TensorType
    ) -> Any:
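        # draw `size` indices from range(a) with numpy's weighted sampling;
        # run() uses this to pick parents according to the selection probabilities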
        p_np: np.ndarray = p.numpy()
        x = np.random.choice(a, size, replace, p_np)  # type: ignore
        return x

    def run(
        self,
        model: Model,
        inputs: T,
        criterion: TargetedMisclassification,
        *,
        epsilon: float,
        **kwargs: Any,
    ) -> T:
        raise_if_kwargs(kwargs)
        x, restore_type = ep.astensor_(inputs)
        del inputs, kwargs

        verify_input_bounds(x, model)

        N = len(x)

        if isinstance(criterion, TargetedMisclassification):
            classes = criterion.target_classes
        else:
            raise ValueError("unsupported criterion")

        if classes.shape != (N,):
            raise ValueError(
                f"expected target_classes to have shape ({N},), got {classes.shape}"
            )

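        # when reduced_dims is set, the genetic search runs on a smaller noise
        # tensor that apply_noise later upscales to the input resolution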
        noise_shape: Union[Tuple[int, int, int, int], Tuple[int, ...]]
        channel_axis: Optional[int] = None
        if self.reduced_dims is not None:
            if x.ndim != 4:
                raise NotImplementedError(
                    "only implemented for inputs with two spatial dimensions"
                    " (and one channel and one batch dimension)"
                )

            if self.channel_axis is None:
                maybe_axis = get_channel_axis(model, x.ndim)
                if maybe_axis is None:
                    raise ValueError(
                        "cannot infer the data_format from the model, please"
                        " specify channel_axis when initializing the attack"
                    )
                else:
                    channel_axis = maybe_axis
            else:
                channel_axis = self.channel_axis % x.ndim

            if channel_axis == 1:
                noise_shape = (x.shape[1], *self.reduced_dims)
            elif channel_axis == 3:
                noise_shape = (*self.reduced_dims, x.shape[3])
            else:
                raise ValueError(
                    f"expected 'channel_axis' to be 1 or 3, got {channel_axis}"
                )
        else:
            noise_shape = x.shape[1:]  # pragma: no cover

        def is_adversarial(logits: ep.TensorType) -> ep.TensorType:
            return ep.argmax(logits, 1) == classes

        num_plateaus = ep.zeros(x, len(x))
        mutation_probability = (
            ep.ones_like(num_plateaus) * self.min_mutation_probability
        )
        mutation_range = ep.ones_like(num_plateaus) * self.min_mutation_range

        noise_pops = ep.uniform(
            x, (N, self.population, *noise_shape), -epsilon, epsilon
        )

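        # fitness used for selection: the target-class logit against a
        # log-sum-exp aggregate of the logits; higher values mean the candidate
        # is closer to the targeted misclassification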
        def calculate_fitness(logits: ep.TensorType) -> ep.TensorType:
            first = logits[range(N), classes]
            second = ep.log(ep.exp(logits).sum(1) - first)

            return first - second

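        # evolve the population: evaluate every member, keep the fittest one as
        # the elite, sample parents from a softmax over the fitness values, then
        # apply crossover and mutation to form the next generation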
        n_its_wo_change = ep.zeros(x, (N,))
        for step in range(self.steps):
            fitness_l, is_adv_l = [], []

            for i in range(self.population):
                it = self.apply_noise(x, noise_pops[:, i], epsilon, channel_axis)
                logits = model(it)
                f = calculate_fitness(logits)
                a = is_adversarial(logits)
                fitness_l.append(f)
                is_adv_l.append(a)

            fitness = ep.stack(fitness_l)
            is_adv = ep.stack(is_adv_l, 1)
            elite_idxs = ep.argmax(fitness, 0)

            elite_noise = noise_pops[range(N), elite_idxs]
            is_adv = is_adv[range(N), elite_idxs]

            # early stopping
            if is_adv.all():
                return restore_type(  # pragma: no cover
                    self.apply_noise(x, elite_noise, epsilon, channel_axis)
                )

            probs = ep.softmax(fitness / self.sampling_temperature, 0)
            parents_idxs = np.stack(
                [
                    self.choice(
                        self.population,
                        2 * self.population - 2,
                        replace=True,
                        p=probs[:, i],
                    )
                    for i in range(N)
                ],
                1,
            )

            new_noise_pops = [elite_noise]
            for i in range(self.population - 1):
                parents_1 = noise_pops[range(N), parents_idxs[2 * i]]
                parents_2 = noise_pops[range(N), parents_idxs[2 * i + 1]]

                # calculate crossover
                p = probs[parents_idxs[2 * i], range(N)] / (
                    probs[parents_idxs[2 * i], range(N)]
                    + probs[parents_idxs[2 * i + 1], range(N)]
                )
                p = atleast_kd(p, x.ndim)
                p = ep.tile(p, (1, *noise_shape))

                crossover_mask = ep.uniform(p, p.shape, 0, 1) < p
                children = ep.where(crossover_mask, parents_1, parents_2)

                # calculate mutation
                mutations = ep.stack(
                    [
                        ep.uniform(
                            x,
                            noise_shape,
                            -mutation_range[i].item() * epsilon,
                            mutation_range[i].item() * epsilon,
                        )
                        for i in range(N)
                    ],
                    0,
                )

                mutation_mask = ep.uniform(children, children.shape)
                mutation_mask = mutation_mask <= atleast_kd(
                    mutation_probability, children.ndim
                )
                children = ep.where(mutation_mask, children + mutations, children)

                # project back to epsilon range
                children = ep.clip(children, -epsilon, epsilon)

                new_noise_pops.append(children)

            noise_pops = ep.stack(new_noise_pops, 1)

            # increase num_plateaus if fitness does not improve
            # for 100 consecutive steps
            n_its_wo_change = ep.where(
                elite_idxs == 0, n_its_wo_change + 1, ep.zeros_like(n_its_wo_change)
            )
            num_plateaus = ep.where(
                n_its_wo_change >= 100, num_plateaus + 1, num_plateaus
            )
            n_its_wo_change = ep.where(
                n_its_wo_change >= 100, ep.zeros_like(n_its_wo_change), n_its_wo_change
            )

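            # adapt the search: decay mutation probability and range
            # geometrically in the number of plateaus (0.9 ** num_plateaus),
            # but never below the configured minima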
            mutation_probability = ep.maximum(
                self.min_mutation_probability,
                0.5 * ep.exp(math.log(0.9) * ep.ones_like(num_plateaus) * num_plateaus),
            )
            mutation_range = ep.maximum(
                self.min_mutation_range,
                0.4 * ep.exp(math.log(0.9) * ep.ones_like(num_plateaus) * num_plateaus),
            )

        return restore_type(self.apply_noise(x, elite_noise, epsilon, channel_axis))
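
A minimal usage sketch for the attack listed above, assuming foolbox 3.x with a PyTorch image classifier whose inputs lie in [0, 1]; `net`, `images`, and `target_classes` are placeholder names, not part of the file.

import foolbox as fb

# wrap a trained PyTorch classifier (placeholder `net`); bounds must match
# the [0, 1] image range the attack clips to
fmodel = fb.PyTorchModel(net.eval(), bounds=(0, 1))

# GenAttack is targeted: one target label per input image
# (`images` and `target_classes` are placeholder torch tensors)
criterion = fb.criteria.TargetedMisclassification(target_classes)

# search in a reduced 14x14 noise space that apply_noise upscales to the
# input resolution
attack = fb.attacks.GenAttack(steps=1000, population=10, reduced_dims=(14, 14))

# run at a fixed L-infinity budget; success marks inputs now classified
# as their target class
raw, clipped, success = attack(fmodel, images, criterion, epsilons=8 / 255)

With a single epsilons value foolbox returns single tensors; passing a list of epsilons returns one result per value.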