
bethgelab / foolbox / 8139141456

04 Mar 2024 11:03AM UTC coverage: 37.923% (-60.6%) from 98.477%
Pull Request #722: Fix guide compilation (merge 5663238db into 17e0e9b31)
1344 of 3544 relevant lines covered (37.92%)
0.38 hits per line

Source File: /foolbox/attacks/gradient_descent_base.py (36.36% covered)
from typing import Union, Any, Optional, Callable, Tuple
from abc import ABC, abstractmethod
import eagerpy as ep

from ..devutils import flatten
from ..devutils import atleast_kd

from ..types import Bounds

from ..models.base import Model

from ..criteria import Misclassification, TargetedMisclassification

from ..distances import l1, l2, linf

from .base import FixedEpsilonAttack
from .base import T
from .base import get_criterion
from .base import raise_if_kwargs
from .base import verify_input_bounds


class Optimizer(ABC):
    def __init__(self, x: ep.Tensor):
        pass

    @abstractmethod
    def __call__(self, gradient: ep.Tensor) -> ep.Tensor:
        pass


class AdamOptimizer(Optimizer):
    def __init__(
        self,
        x: ep.Tensor,
        stepsize: float,
        beta1: float = 0.9,
        beta2: float = 0.999,
        epsilon: float = 1e-8,
    ):
        self.stepsize = stepsize
        self.beta1 = beta1
        self.beta2 = beta2
        self.epsilon = epsilon

        self.m = ep.zeros_like(x)
        self.v = ep.zeros_like(x)
        self.t = 0

    def __call__(self, gradient: ep.Tensor) -> ep.Tensor:
        self.t += 1

        self.m = self.beta1 * self.m + (1 - self.beta1) * gradient
        self.v = self.beta2 * self.v + (1 - self.beta2) * gradient**2

        bias_correction_1 = 1 - self.beta1**self.t
        bias_correction_2 = 1 - self.beta2**self.t

        m_hat = self.m / bias_correction_1
        v_hat = self.v / bias_correction_2

        return self.stepsize * m_hat / (ep.sqrt(v_hat) + self.epsilon)


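# Illustrative sketch (assuming eagerpy with a NumPy backend is available; the
# tensors below are made-up examples): a single Adam step on a toy gradient.
# On the first call the bias-corrected update is roughly stepsize * sign(gradient).
#
#     import numpy as np
#     x = ep.astensor(np.zeros(3))
#     opt = AdamOptimizer(x, stepsize=0.1)
#     grad = ep.astensor(np.array([1.0, -2.0, 0.5]))
#     opt(grad)  # approximately [0.1, -0.1, 0.1]

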
class GDOptimizer(Optimizer):
    def __init__(self, x: ep.Tensor, stepsize: float):
        self.stepsize = stepsize

    def __call__(
        self,
        gradient: ep.Tensor,
    ) -> ep.Tensor:
        return self.stepsize * gradient


class BaseGradientDescent(FixedEpsilonAttack, ABC):
    def __init__(
        self,
        *,
        rel_stepsize: float,
        abs_stepsize: Optional[float] = None,
        steps: int,
        random_start: bool,
    ):
        self.rel_stepsize = rel_stepsize
        self.abs_stepsize = abs_stepsize
        self.steps = steps
        self.random_start = random_start

    def get_loss_fn(
        self, model: Model, labels: ep.Tensor
    ) -> Callable[[ep.Tensor], ep.Tensor]:
        # can be overridden by users
        def loss_fn(inputs: ep.Tensor) -> ep.Tensor:
            logits = model(inputs)
            return ep.crossentropy(logits, labels).sum()

        return loss_fn

    def get_optimizer(self, x: ep.Tensor, stepsize: float) -> Optimizer:
        # can be overridden by users
        return GDOptimizer(x, stepsize)
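
    # Illustrative sketch (an assumed user-defined subclass, not taken from
    # this file): overriding this hook swaps in the Adam update rule above.
    #
    #     class MyAdamPGD(LinfBaseGradientDescent):
    #         def get_optimizer(self, x: ep.Tensor, stepsize: float) -> Optimizer:
    #             return AdamOptimizer(x, stepsize)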

    def value_and_grad(  # can be overridden by users
        self,
        loss_fn: Callable[[ep.Tensor], ep.Tensor],
        x: ep.Tensor,
    ) -> Tuple[ep.Tensor, ep.Tensor]:
        return ep.value_and_grad(loss_fn, x)

    def run(
        self,
        model: Model,
        inputs: T,
        criterion: Union[Misclassification, TargetedMisclassification, T],
        *,
        epsilon: float,
        **kwargs: Any,
    ) -> T:
        raise_if_kwargs(kwargs)
        x0, restore_type = ep.astensor_(inputs)
        criterion_ = get_criterion(criterion)
        del inputs, criterion, kwargs

        verify_input_bounds(x0, model)

        # perform gradient ascent on the loss (untargeted attack) or
        # gradient descent on the loss w.r.t. the target classes (targeted attack)
        if isinstance(criterion_, Misclassification):
            gradient_step_sign = 1.0
            classes = criterion_.labels
        elif hasattr(criterion_, "target_classes"):
            gradient_step_sign = -1.0
            classes = criterion_.target_classes
        else:
            raise ValueError("unsupported criterion")

        loss_fn = self.get_loss_fn(model, classes)

        if self.abs_stepsize is None:
            stepsize = self.rel_stepsize * epsilon
        else:
            stepsize = self.abs_stepsize

        optimizer = self.get_optimizer(x0, stepsize)

        if self.random_start:
            x = self.get_random_start(x0, epsilon)
            x = ep.clip(x, *model.bounds)
        else:
            x = x0

        for _ in range(self.steps):
            _, gradients = self.value_and_grad(loss_fn, x)
            gradients = self.normalize(gradients, x=x, bounds=model.bounds)
            x = x + gradient_step_sign * optimizer(gradients)
            x = self.project(x, x0, epsilon)  # back into the epsilon ball around x0
            x = ep.clip(x, *model.bounds)  # back into the model's input bounds

        return restore_type(x)

    @abstractmethod
    def get_random_start(self, x0: ep.Tensor, epsilon: float) -> ep.Tensor:
        ...

    @abstractmethod
    def normalize(
        self, gradients: ep.Tensor, *, x: ep.Tensor, bounds: Bounds
    ) -> ep.Tensor:
        ...

    @abstractmethod
    def project(self, x: ep.Tensor, x0: ep.Tensor, epsilon: float) -> ep.Tensor:
        ...


def clip_lp_norms(x: ep.Tensor, *, norm: float, p: float) -> ep.Tensor:
    assert 0 < p < ep.inf
    norms = flatten(x).norms.lp(p=p, axis=-1)
    norms = ep.maximum(norms, 1e-12)  # avoid division by zero
    factor = ep.minimum(1, norm / norms)  # clipping -> decreasing but not increasing
    factor = atleast_kd(factor, x.ndim)
    return x * factor


def normalize_lp_norms(x: ep.Tensor, *, p: float) -> ep.Tensor:
    assert 0 < p < ep.inf
    norms = flatten(x).norms.lp(p=p, axis=-1)
    norms = ep.maximum(norms, 1e-12)  # avoid division by zero
    factor = 1 / norms
    factor = atleast_kd(factor, x.ndim)
    return x * factor

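# Illustrative check (assuming a NumPy-backed eagerpy tensor; the values are
# made-up examples): clip_lp_norms only ever shrinks norms down to `norm`,
# while normalize_lp_norms rescales every row to unit norm.
#
#     import numpy as np
#     x = ep.astensor(np.array([[3.0, 4.0], [0.3, 0.4]]))  # L2 norms 5.0 and 0.5
#     clip_lp_norms(x, norm=1.0, p=2).norms.l2(axis=-1)  # -> [1.0, 0.5]
#     normalize_lp_norms(x, p=2).norms.l2(axis=-1)  # -> [1.0, 1.0]
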

def uniform_l1_n_balls(dummy: ep.Tensor, batch_size: int, n: int) -> ep.Tensor:
    # https://mathoverflow.net/a/9188
    u = ep.uniform(dummy, (batch_size, n))
    v = u.sort(axis=-1)
    vp = ep.concatenate([ep.zeros(v, (batch_size, 1)), v[:, : n - 1]], axis=-1)
    assert v.shape == vp.shape
    x = v - vp
    sign = ep.uniform(dummy, (batch_size, n), low=-1.0, high=1.0).sign()
    return sign * x


def uniform_l2_n_spheres(dummy: ep.Tensor, batch_size: int, n: int) -> ep.Tensor:
    x = ep.normal(dummy, (batch_size, n + 1))
    r = x.norms.l2(axis=-1, keepdims=True)
    s = x / r
    return s


def uniform_l2_n_balls(dummy: ep.Tensor, batch_size: int, n: int) -> ep.Tensor:
    """Sampling from the n-ball

    Implementation of the algorithm proposed by Voelker et al. [#Voel17]_

    References:
        .. [#Voel17] Voelker et al., 2017, Efficiently sampling vectors and coordinates
            from the n-sphere and n-ball
            http://compneuro.uwaterloo.ca/files/publications/voelker.2017.pdf
    """
    s = uniform_l2_n_spheres(dummy, batch_size, n + 1)
    b = s[:, :n]
    return b

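# Illustrative check (assuming a NumPy-backed eagerpy tensor; batch size and
# dimension are made-up): samples from the helpers above should never leave
# the unit ball of the corresponding norm.
#
#     import numpy as np
#     dummy = ep.astensor(np.zeros(1))
#     r1 = uniform_l1_n_balls(dummy, batch_size=1000, n=10)
#     r2 = uniform_l2_n_balls(dummy, batch_size=1000, n=10)
#     assert r1.norms.lp(p=1, axis=-1).max().item() <= 1.0 + 1e-6
#     assert r2.norms.l2(axis=-1).max().item() <= 1.0 + 1e-6
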

class L1BaseGradientDescent(BaseGradientDescent):
    distance = l1

    def get_random_start(self, x0: ep.Tensor, epsilon: float) -> ep.Tensor:
        batch_size, n = flatten(x0).shape
        r = uniform_l1_n_balls(x0, batch_size, n).reshape(x0.shape)
        return x0 + epsilon * r

    def normalize(
        self, gradients: ep.Tensor, *, x: ep.Tensor, bounds: Bounds
    ) -> ep.Tensor:
        return normalize_lp_norms(gradients, p=1)

    def project(self, x: ep.Tensor, x0: ep.Tensor, epsilon: float) -> ep.Tensor:
        return x0 + clip_lp_norms(x - x0, norm=epsilon, p=1)


class L2BaseGradientDescent(BaseGradientDescent):
    distance = l2

    def get_random_start(self, x0: ep.Tensor, epsilon: float) -> ep.Tensor:
        batch_size, n = flatten(x0).shape
        r = uniform_l2_n_balls(x0, batch_size, n).reshape(x0.shape)
        return x0 + epsilon * r

    def normalize(
        self, gradients: ep.Tensor, *, x: ep.Tensor, bounds: Bounds
    ) -> ep.Tensor:
        return normalize_lp_norms(gradients, p=2)

    def project(self, x: ep.Tensor, x0: ep.Tensor, epsilon: float) -> ep.Tensor:
        return x0 + clip_lp_norms(x - x0, norm=epsilon, p=2)


class LinfBaseGradientDescent(BaseGradientDescent):
    distance = linf

    def get_random_start(self, x0: ep.Tensor, epsilon: float) -> ep.Tensor:
        return x0 + ep.uniform(x0, x0.shape, -epsilon, epsilon)

    def normalize(
        self, gradients: ep.Tensor, *, x: ep.Tensor, bounds: Bounds
    ) -> ep.Tensor:
        return gradients.sign()

    def project(self, x: ep.Tensor, x0: ep.Tensor, epsilon: float) -> ep.Tensor:
        return x0 + ep.clip(x - x0, -epsilon, epsilon)
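

# Illustrative usage sketch (assuming `model` is a foolbox-wrapped model and
# `images`, `labels` are matching tensors; the numbers are example values):
# the concrete subclasses above are run via BaseGradientDescent.run, e.g. an
# untargeted L-infinity attack with a perturbation budget of 0.03.
#
#     attack = LinfBaseGradientDescent(rel_stepsize=0.1, steps=40, random_start=True)
#     x_adv = attack.run(model, images, Misclassification(labels), epsilon=0.03)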