
tonegas / nnodely — build 16502811447 (push via GitHub, web-flow)

24 Jul 2025 04:44PM UTC — coverage: 97.767% (+0.1%) from 97.651%
New version 1.5.0

This pull request introduces version 1.5.0 of **nnodely**, featuring several updates:
1. Improved clarity of documentation and examples.
2. Support for managing multiple datasets is now available.
3. DataFrames can now be used to create datasets.
4. Datasets can now be resampled.
5. Random data training has been fixed for both classic and recurrent training.
6. The `state` variable has been removed.
7. It is now possible to add or remove a connection or a closed loop.
8. Partial models can now be exported.
9. The `train` function and the result analysis have been separated (see the sketch after this list).
10. A new function, `trainAndAnalyse`, is now available.
11. The report now works across all network types.
12. The training function code has been reorganized.
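
As a rough illustration of points 9 and 10, training and result analysis can now be run separately or in one call. The snippet below is only a sketch: the object name `model` and the argument-free calls are placeholders, not the actual nnodely signatures.

```python
# Hypothetical usage; names and arguments are placeholders, not the real nnodely API.
model.train()            # training only (result analysis is no longer bundled in)
model.trainAndAnalyse()  # training followed by result analysis in a single call
```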

2901 of 2967 new or added lines in 53 files covered. (97.78%)

16 existing lines in 6 files now uncovered.

12652 of 12941 relevant lines covered (97.77%)

0.98 hits per line

Source file: /nnodely/basic/optimizer.py — 87.84% of lines covered (uncovered lines are marked in the listing below)

import copy

import torch

from nnodely.support.utils import check

class Optimizer:
    """
    Represents an optimizer for training neural network models.

    Parameters
    ----------
    name : str
        The name of the optimizer.
    optimizer_defaults : dict, optional
        A dictionary of default optimizer settings.
    optimizer_params : list, optional
        A list of parameter groups for the optimizer.

    Attributes
    ----------
    name : str
        The name of the optimizer.
    optimizer_defaults : dict
        A dictionary of default optimizer settings.
    optimizer_params : list
        A list of parameter groups for the optimizer.
    all_params : dict or None
        A dictionary of all parameters in the model.
    params_to_train : list or None
        A list of parameters to be trained.
    """
    def __init__(self, name, optimizer_defaults = {}, optimizer_params = []):
        """
        Initializes the Optimizer object.

        Parameters
        ----------
        name : str
            The name of the optimizer.
        optimizer_defaults : dict, optional
            A dictionary of default optimizer settings. Default is an empty dictionary.
        optimizer_params : list, optional
            A list of parameter groups for the optimizer. Default is an empty list.
        """
        self.name = name
        self.optimizer_defaults = copy.deepcopy(optimizer_defaults)
        self.optimizer_params = self.unfold(copy.deepcopy(optimizer_params))
        self.all_params = None
        self.params_to_train = None

    def set_params_to_train(self, all_params, params_to_train):
        """
        Sets the parameters to be trained by the optimizer.

        Parameters
        ----------
        all_params : dict
            A dictionary of all parameters in the model.
        params_to_train : list
            A list of parameters to be trained.
        """
        self.all_params = all_params
        self.params_to_train = params_to_train
        if self.optimizer_params == []:
            for param_name in self.all_params.keys():
                if param_name in self.params_to_train:
                    self.optimizer_params.append({'params': param_name})
                else:
                    # Parameters excluded from training are frozen with a zero learning rate.
                    self.optimizer_params.append({'params': param_name, 'lr': 0.0})

    def set_defaults(self, optimizer_defaults):
        """
        Sets the default optimizer settings.

        Parameters
        ----------
        optimizer_defaults : dict
            A dictionary of default optimizer settings.
        """
        self.optimizer_defaults = copy.deepcopy(optimizer_defaults)

    def set_params(self, optimizer_params):
        """
        Sets the parameter groups for the optimizer.

        Parameters
        ----------
        optimizer_params : list
            A list of parameter groups for the optimizer.
        """
        self.optimizer_params = self.unfold(optimizer_params)

    def unfold(self, params):
        """
        Unfolds the parameter groups into a flat list.

        Parameters
        ----------
        params : list
            A list of parameter groups.

        Returns
        -------
        list
            A flat list of parameter groups.

        Raises
        ------
        KeyError
            If the params argument is not a list.
        """
        optimizer_params = []
        check(type(params) is list, KeyError, f'The params {params} must be a list')
        for param in params:
            if type(param['params']) is list:
                # A group listing several parameter names is split into one group per name,
                # each inheriting the remaining options of the original group.
                par_copy = copy.deepcopy(param)
                del par_copy['params']
                for par in param['params']:
                    optimizer_params.append({'params': par} | par_copy)
            else:
                optimizer_params.append(param)
        return optimizer_params

    def add_defaults(self, option_name, params, overwrite = True):
        """
        Adds default settings to the optimizer.

        Parameters
        ----------
        option_name : str
            The name of the option to add.
        params : any
            The parameters for the option.
        overwrite : bool, optional
            Whether to overwrite existing settings. Default is True.
        """
        if params is not None:
            if overwrite:
                self.optimizer_defaults[option_name] = params
            elif option_name not in self.optimizer_defaults:  # uncovered
                self.optimizer_defaults[option_name] = params  # uncovered

    def add_option_to_params(self, option_name, params, overwrite = True):
        # Attaches a per-parameter option (e.g. a learning rate) to the matching parameter groups.
        if params is None:
            return  # uncovered
        for key, value in params.items():
            check(self.all_params is not None, RuntimeError, "Call set_params before add_option_to_params")
            old_key = False
            for param in self.optimizer_params:
                if param['params'] == key:
                    old_key = True
                    if overwrite:
                        param[option_name] = value
                    elif option_name not in param:  # uncovered
                        param[option_name] = value  # uncovered
            if old_key == False:
                self.optimizer_params.append({'params': key, option_name: value})  # uncovered

    def replace_key_with_params(self):
        # Returns a copy of the parameter groups in which the parameter names are replaced
        # by the corresponding model parameters from all_params.
        params = copy.deepcopy(self.optimizer_params)
        for param in params:
            if type(param['params']) is list:
                for ind, par in enumerate(param['params']):  # uncovered
                    param['params'][ind] = self.all_params[par]  # uncovered
            else:
                param['params'] = self.all_params[param['params']]
        return params

    def get_torch_optimizer(self):
        # Subclasses must build and return the corresponding torch.optim optimizer.
        raise NotImplementedError('The function get_torch_optimizer must be implemented.')  # uncovered
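
To see how the base class keeps parameter groups as parameter *names* until a torch optimizer is actually built, here is a minimal usage sketch. The parameter names `'w'` and `'b'` are placeholders, and the import path is inferred from the file path above; this is an illustration, not code from the repository.

```python
import torch
from nnodely.basic.optimizer import Optimizer  # module path inferred from the file path above

# Placeholder model parameters, keyed by name.
all_params = {'w': torch.nn.Parameter(torch.randn(3)),
              'b': torch.nn.Parameter(torch.zeros(1))}

opt = Optimizer('custom')
opt.set_params_to_train(all_params, ['w'])
print(opt.optimizer_params)
# [{'params': 'w'}, {'params': 'b', 'lr': 0.0}]  <- 'b' is frozen via lr 0.0

# Attach a per-parameter option to an existing group.
opt.add_option_to_params('lr', {'w': 0.05})
print(opt.optimizer_params)
# [{'params': 'w', 'lr': 0.05}, {'params': 'b', 'lr': 0.0}]

# unfold() splits a group that lists several parameter names.
print(opt.unfold([{'params': ['w', 'b'], 'weight_decay': 0.0001}]))
# [{'params': 'w', 'weight_decay': 0.0001}, {'params': 'b', 'weight_decay': 0.0001}]
```

The `SGD` and `Adam` subclasses below turn these named groups into real `torch.optim` optimizers through `get_torch_optimizer`.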

class SGD(Optimizer):
    """
    Stochastic Gradient Descent (SGD) optimizer.

    See also:
            Official PyTorch SGD documentation:
            `torch.optim.SGD <https://pytorch.org/docs/stable/generated/torch.optim.SGD.html>`_

    Parameters
    ----------
    optimizer_defaults : dict
        A dictionary of default optimizer settings.
    optimizer_params : list
        A list of parameter groups for the optimizer.

    Attributes
    ----------
    name : str
        The name of the optimizer.
    lr : float, optional
        Learning rate. Default is 0.01.
    momentum : float, optional
        Momentum factor. Default is 0.0.
    dampening : float, optional
        Dampening for momentum. Default is 0.0.
    weight_decay : float, optional
        Weight decay (L2 penalty). Default is 0.0.
    nesterov : bool, optional
        Enables Nesterov momentum. Default is False.
    """
    def __init__(self, optimizer_defaults = {}, optimizer_params = []):
        super(SGD, self).__init__('SGD', optimizer_defaults, optimizer_params)

    def get_torch_optimizer(self):
        return torch.optim.SGD(self.replace_key_with_params(), **self.optimizer_defaults)
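
As a sketch of how the wrapper is used end to end (again with placeholder parameter names and an import path inferred from the file path above), `get_torch_optimizer` hands the resolved parameter groups to `torch.optim.SGD`:

```python
import torch
from nnodely.basic.optimizer import SGD  # module path inferred from the file path above

all_params = {'w': torch.nn.Parameter(torch.randn(3)),  # placeholder parameters
              'b': torch.nn.Parameter(torch.zeros(1))}

opt = SGD(optimizer_defaults={'lr': 0.01, 'momentum': 0.9})
opt.set_params_to_train(all_params, ['w'])

torch_opt = opt.get_torch_optimizer()
# Two parameter groups: 'w' trained with lr=0.01 and momentum=0.9,
# 'b' effectively frozen because its group overrides lr to 0.0.
print([group['lr'] for group in torch_opt.param_groups])  # [0.01, 0.0]
```

The `Adam` wrapper below follows the same pattern with `torch.optim.Adam`.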

class Adam(Optimizer):
    """
    Adam (Adaptive Moment Estimation) optimizer.

    See also:
            Official PyTorch Adam documentation:
            `torch.optim.Adam <https://pytorch.org/docs/stable/generated/torch.optim.Adam.html>`_

    Parameters
    ----------
    optimizer_defaults : dict
        A dictionary of default optimizer settings.
    optimizer_params : list
        A list of parameter groups for the optimizer.

    Attributes
    ----------
    name : str
        The name of the optimizer.
    lr : float, optional
        Learning rate. Default is 0.001.
    betas : tuple of (float, float), optional
        Coefficients used for computing running averages of gradient and its square. Default is (0.9, 0.999).
    eps : float, optional
        Term added to the denominator to improve numerical stability. Default is 1e-8.
    weight_decay : float, optional
        Weight decay (L2 penalty). Default is 0.0.
    amsgrad : bool, optional
        Whether to use the AMSGrad variant of this algorithm. Default is False.
    """
    def __init__(self, optimizer_defaults = {}, optimizer_params = []):
        super(Adam, self).__init__('Adam', optimizer_defaults, optimizer_params)

    def get_torch_optimizer(self):
        return torch.optim.Adam(self.replace_key_with_params(), **self.optimizer_defaults)
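
The same pattern extends to other torch optimizers: subclass `Optimizer`, pass a name to the base constructor, and override `get_torch_optimizer`. The `RMSprop` wrapper below is a hypothetical example (not part of nnodely) showing the contract that the base class's `NotImplementedError` enforces:

```python
import torch
from nnodely.basic.optimizer import Optimizer  # module path inferred from the file path above

class RMSprop(Optimizer):
    """Hypothetical wrapper around torch.optim.RMSprop, following the SGD/Adam pattern above."""
    def __init__(self, optimizer_defaults = {}, optimizer_params = []):
        super(RMSprop, self).__init__('RMSprop', optimizer_defaults, optimizer_params)

    def get_torch_optimizer(self):
        # replace_key_with_params() resolves parameter names to the actual tensors
        # before they are handed to torch.
        return torch.optim.RMSprop(self.replace_key_with_params(), **self.optimizer_defaults)
```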