bethgelab / foolbox / build 8137716344

22 Jan 2024 10:53PM UTC · coverage: 98.47% (remained the same)

Triggered by a push via github (web-flow)

Bump pillow from 10.1.0 to 10.2.0 in /tests (#718)

Bumps [pillow](https://github.com/python-pillow/Pillow) from 10.1.0 to 10.2.0.
- [Release notes](https://github.com/python-pillow/Pillow/releases)
- [Changelog](https://github.com/python-pillow/Pillow/blob/main/CHANGES.rst)
- [Commits](https://github.com/python-pillow/Pillow/compare/10.1.0...10.2.0)

---
updated-dependencies:
- dependency-name: pillow
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

3475 of 3529 relevant lines covered (98.47%)

7.22 hits per line

Source File: /foolbox/attacks/projected_gradient_descent.py (100.0% covered)

from typing import Optional

from .gradient_descent_base import L1BaseGradientDescent, AdamOptimizer, Optimizer
from .gradient_descent_base import L2BaseGradientDescent
from .gradient_descent_base import LinfBaseGradientDescent
import eagerpy as ep


class L1ProjectedGradientDescentAttack(L1BaseGradientDescent):
    """L1 Projected Gradient Descent

    Args:
        rel_stepsize: Stepsize relative to epsilon.
        abs_stepsize: If given, it takes precedence over rel_stepsize.
        steps: Number of update steps to perform.
        random_start: Whether the perturbation is initialized randomly or starts at zero.
    """

    def __init__(
        self,
        *,
        rel_stepsize: float = 0.025,
        abs_stepsize: Optional[float] = None,
        steps: int = 50,
        random_start: bool = True,
    ):
        super().__init__(
            rel_stepsize=rel_stepsize,
            abs_stepsize=abs_stepsize,
            steps=steps,
            random_start=random_start,
        )


class L2ProjectedGradientDescentAttack(L2BaseGradientDescent):
    """L2 Projected Gradient Descent

    Args:
        rel_stepsize: Stepsize relative to epsilon.
        abs_stepsize: If given, it takes precedence over rel_stepsize.
        steps: Number of update steps to perform.
        random_start: Whether the perturbation is initialized randomly or starts at zero.
    """

    def __init__(
        self,
        *,
        rel_stepsize: float = 0.025,
        abs_stepsize: Optional[float] = None,
        steps: int = 50,
        random_start: bool = True,
    ):
        super().__init__(
            rel_stepsize=rel_stepsize,
            abs_stepsize=abs_stepsize,
            steps=steps,
            random_start=random_start,
        )


class LinfProjectedGradientDescentAttack(LinfBaseGradientDescent):
    """Linf Projected Gradient Descent

    Args:
        rel_stepsize: Stepsize relative to epsilon (defaults to 0.01 / 0.3).
        abs_stepsize: If given, it takes precedence over rel_stepsize.
        steps: Number of update steps to perform.
        random_start: Whether the perturbation is initialized randomly or starts at zero.
    """

    def __init__(
        self,
        *,
        rel_stepsize: float = 0.01 / 0.3,
        abs_stepsize: Optional[float] = None,
        steps: int = 40,
        random_start: bool = True,
    ):
        super().__init__(
            rel_stepsize=rel_stepsize,
            abs_stepsize=abs_stepsize,
            steps=steps,
            random_start=random_start,
        )


class L1AdamProjectedGradientDescentAttack(L1ProjectedGradientDescentAttack):
    """L1 Projected Gradient Descent with Adam optimizer

    Args:
        rel_stepsize: Stepsize relative to epsilon.
        abs_stepsize: If given, it takes precedence over rel_stepsize.
        steps: Number of update steps to perform.
        random_start: Whether the perturbation is initialized randomly or starts at zero.
        adam_beta1: beta_1 parameter of the Adam optimizer.
        adam_beta2: beta_2 parameter of the Adam optimizer.
        adam_epsilon: epsilon parameter of the Adam optimizer, responsible for numerical stability.
    """

    def __init__(
        self,
        *,
        rel_stepsize: float = 0.025,
        abs_stepsize: Optional[float] = None,
        steps: int = 50,
        random_start: bool = True,
        adam_beta1: float = 0.9,
        adam_beta2: float = 0.999,
        adam_epsilon: float = 1e-8,
    ):
        super().__init__(
            rel_stepsize=rel_stepsize,
            abs_stepsize=abs_stepsize,
            steps=steps,
            random_start=random_start,
        )

        self.adam_beta1 = adam_beta1
        self.adam_beta2 = adam_beta2
        self.adam_epsilon = adam_epsilon

    def get_optimizer(self, x: ep.Tensor, stepsize: float) -> Optimizer:
        return AdamOptimizer(
            x,
            stepsize,
            self.adam_beta1,
            self.adam_beta2,
            self.adam_epsilon,
        )


class L2AdamProjectedGradientDescentAttack(L2ProjectedGradientDescentAttack):
    """L2 Projected Gradient Descent with Adam optimizer

    Args:
        rel_stepsize: Stepsize relative to epsilon.
        abs_stepsize: If given, it takes precedence over rel_stepsize.
        steps: Number of update steps to perform.
        random_start: Whether the perturbation is initialized randomly or starts at zero.
        adam_beta1: beta_1 parameter of the Adam optimizer.
        adam_beta2: beta_2 parameter of the Adam optimizer.
        adam_epsilon: epsilon parameter of the Adam optimizer, responsible for numerical stability.
    """

    def __init__(
        self,
        *,
        rel_stepsize: float = 0.025,
        abs_stepsize: Optional[float] = None,
        steps: int = 50,
        random_start: bool = True,
        adam_beta1: float = 0.9,
        adam_beta2: float = 0.999,
        adam_epsilon: float = 1e-8,
    ):
        super().__init__(
            rel_stepsize=rel_stepsize,
            abs_stepsize=abs_stepsize,
            steps=steps,
            random_start=random_start,
        )

        self.adam_beta1 = adam_beta1
        self.adam_beta2 = adam_beta2
        self.adam_epsilon = adam_epsilon

    def get_optimizer(self, x: ep.Tensor, stepsize: float) -> Optimizer:
        return AdamOptimizer(
            x,
            stepsize,
            self.adam_beta1,
            self.adam_beta2,
            self.adam_epsilon,
        )


class LinfAdamProjectedGradientDescentAttack(LinfProjectedGradientDescentAttack):
    """Linf Projected Gradient Descent with Adam optimizer

    Args:
        rel_stepsize: Stepsize relative to epsilon.
        abs_stepsize: If given, it takes precedence over rel_stepsize.
        steps: Number of update steps to perform.
        random_start: Whether the perturbation is initialized randomly or starts at zero.
        adam_beta1: beta_1 parameter of the Adam optimizer.
        adam_beta2: beta_2 parameter of the Adam optimizer.
        adam_epsilon: epsilon parameter of the Adam optimizer, responsible for numerical stability.
    """

    def __init__(
        self,
        *,
        rel_stepsize: float = 0.025,
        abs_stepsize: Optional[float] = None,
        steps: int = 50,
        random_start: bool = True,
        adam_beta1: float = 0.9,
        adam_beta2: float = 0.999,
        adam_epsilon: float = 1e-8,
    ):
        super().__init__(
            rel_stepsize=rel_stepsize,
            abs_stepsize=abs_stepsize,
            steps=steps,
            random_start=random_start,
        )

        self.adam_beta1 = adam_beta1
        self.adam_beta2 = adam_beta2
        self.adam_epsilon = adam_epsilon

    def get_optimizer(self, x: ep.Tensor, stepsize: float) -> Optimizer:
        return AdamOptimizer(
            x,
            stepsize,
            self.adam_beta1,
            self.adam_beta2,
            self.adam_epsilon,
        )
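
For orientation, here is a minimal usage sketch of the attacks defined above. It assumes foolbox 3.x together with PyTorch; the toy classifier, the random batch, and the 8/255 epsilon are illustrative placeholders, not anything prescribed by this file.

import torch
from torch import nn
import foolbox as fb
from foolbox.attacks import LinfProjectedGradientDescentAttack

# Toy stand-in classifier; any trained PyTorch model in eval() mode works here.
model = nn.Sequential(
    nn.Conv2d(3, 8, kernel_size=3, padding=1),
    nn.ReLU(),
    nn.AdaptiveAvgPool2d(1),
    nn.Flatten(),
    nn.Linear(8, 10),
).eval()

# Wrap the model for foolbox; inputs are assumed to live in [0, 1].
fmodel = fb.PyTorchModel(model, bounds=(0, 1))

# Random placeholder batch; replace with real, preprocessed data.
images = torch.rand(16, 3, 32, 32)
labels = torch.randint(0, 10, (16,))

# Linf PGD with the defaults defined above (40 steps, random start).
attack = LinfProjectedGradientDescentAttack()
raw, clipped, is_adv = attack(fmodel, images, labels, epsilons=8 / 255)
print("attack success rate:", is_adv.float().mean().item())

The Adam variants (for example LinfAdamProjectedGradientDescentAttack) are drop-in replacements: they accept the same constructor arguments plus the adam_beta1, adam_beta2, and adam_epsilon parameters documented in their docstrings, and override get_optimizer to return an AdamOptimizer instead of the base gradient-descent update.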