
tonegas / nnodely · build 16502811447 (push, github, web-flow)

24 Jul 2025 04:44PM UTC · coverage: 97.767% (+0.1%) from 97.651%

New version 1.5.0

This pull request introduces version 1.5.0 of **nnodely**, featuring several updates:
1. Improved clarity of documentation and examples.
2. Managing multiple datasets is now supported.
3. DataFrames can now be used to create datasets.
4. Datasets can now be resampled.
5. Random data training has been fixed for both classic and recurrent training.
6. The `state` variable has been removed.
7. It is now possible to add or remove a connection or a closed loop.
8. Partial models can now be exported.
9. The `train` function and the result analysis have been separated.
10. A new function, `trainAndAnalyze`, is now available (see the sketch after this list).
11. The report now works across all network types.
12. The training function code has been reorganized.
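
The sketch below pulls several 1.5.0 items together. It is a minimal example adapted from the docstring examples in `nnodely/nnodely.py` (shown further down); the top-level imports of `Input`, `Output`, and `Fir`, the dataset folder, and the hyperparameters are illustrative assumptions, not values shipped with the release.

```python
# Hedged sketch of the 1.5.0 workflow, adapted from the docstring examples
# in nnodely/nnodely.py. Assumes the package re-exports Modely, Input,
# Output, and Fir at the top level, as those examples suggest.
import os
from nnodely import Modely, Input, Output, Fir

x = Input('x')
F = Input('F')
xk1 = Output('x[k+1]', Fir()(x.tw(0.2)) + Fir()(F.last()))

model = Modely(seed=0)
model.addModel('xk1', xk1)
model.addClosedLoop(xk1, x)             # 1.5.0: closed loops can be added (and removed)
model.neuralizeModel(sample_time=0.05)

data_struct = ['time', 'x', 'dx', 'F']
data_folder = os.path.join('dataset', 'data')   # assumed CSV location
model.loadData(name='mass_spring_dataset', source=data_folder,
               format=data_struct, delimiter=';')

# 1.5.0: training and result analysis are separate steps; trainAndAnalyze
# runs the training and then analyzes train/validation/test results.
params = {'num_of_epochs': 100, 'train_batch_size': 128, 'lr': 0.001}
model.trainAndAnalyze(splits=[70, 20, 10], prediction_samples=10,
                      training_params=params)
```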

2901 of 2967 new or added lines in 53 files covered. (97.78%)

16 existing lines in 6 files now uncovered.

12652 of 12941 relevant lines covered (97.77%)

0.98 hits per line

Source File: /nnodely/nnodely.py · 96.74% covered
# Extern packages
import random, torch, copy
import numpy as np

# Main operators
from nnodely.operators.composer import Composer
from nnodely.operators.trainer import Trainer
from nnodely.operators.loader import Loader
from nnodely.operators.validator import Validator
from nnodely.operators.exporter import Exporter

# nnodely packages
from nnodely.visualizer import EmptyVisualizer, TextVisualizer
from nnodely.exporter import EmptyExporter
from nnodely.basic.relation import NeuObj
from nnodely.support.utils import ReadOnlyDict, ParamDict, enforce_types, check

from nnodely.support.logger import logging, nnLogger
log = nnLogger(__name__, logging.INFO)


@enforce_types
def clearNames(names:str|list|None = None):
    NeuObj.clearNames(names)

class Modely(Composer, Trainer, Loader, Validator, Exporter):
    """
    Create the main nnodely object, used to compose the network, train it, and export it.

    Parameters
    ----------
    visualizer : str, Visualizer, optional
        The visualizer to be used. Default is the 'Standard' visualizer.
    exporter : str, Exporter, optional
        The exporter to be used. Default is the 'Standard' exporter.
    seed : int, optional
        Seed for all the random modules inside the nnodely framework. Default is None.
    workspace : str, optional
        The path of the workspace where all the exported files will be saved.
    log_internal : bool
        Whether or not to save the internal logs. Default is False.
    save_history : bool
        Whether or not to save the history. Default is False.

    Example
    -------
        >>> model = Modely()
    """
    @enforce_types
    def __init__(self, *,
                 visualizer:str|EmptyVisualizer|None = 'Standard',
                 exporter:str|EmptyExporter|None = 'Standard',
                 seed:int|None = None,
                 workspace:str|None = None,
                 log_internal:bool = False,
                 save_history:bool = False):

        ## Set the random seed for reproducibility
        if seed is not None:
            self.resetSeed(seed)

        ## Visualizer
        if visualizer == 'Standard':
            self.visualizer = TextVisualizer(1)
        elif visualizer is not None:
            self.visualizer = visualizer
        else:
            self.visualizer = EmptyVisualizer()
        self.visualizer.setModely(self)

        Composer.__init__(self)
        Loader.__init__(self)
        Trainer.__init__(self)
        Validator.__init__(self)
        Exporter.__init__(self, exporter, workspace, save_history=save_history)

        self._set_log_internal(log_internal)
        self._clean_log_internal()

    @property
    def internals(self):
        return ReadOnlyDict(self._internals)

    @property
    def neuralized(self):
        return self._neuralized

    @neuralized.setter
    def neuralized(self, value):
        raise AttributeError("Cannot modify read-only property 'neuralized'; use neuralizeModel() instead.")

    @property
    def traced(self):
        return self._traced  # not covered by tests

    @traced.setter
    def traced(self, value):
        raise AttributeError("Cannot modify read-only property 'traced'.")

    @property
    def parameters(self):
        if self._neuralized:
            return ParamDict(self._model_def['Parameters'], self._model.all_parameters)
        else:
            return ParamDict(self._model_def['Parameters'])  # new in this build, not covered by tests

    @property
    def constants(self):
        return ReadOnlyDict({key:value.detach().numpy().tolist() for key,value in self._model.all_constants.items()})  # not covered by tests

    @property
    def states(self):
        return {key:value.detach().numpy().tolist() for key,value in self._states.items()}

    @property
    def json(self):
        return copy.deepcopy(self._model_def._ModelDef__json)

    @enforce_types
    def resetSeed(self, seed:int) -> None:
        """
        Resets the random seed for reproducibility.

        This method sets the seed of the various random number generators used in the project to ensure reproducibility of results.

        :param seed: The seed value to be used for the random number generators.
        :type seed: int

        Example:
            >>> model = nnodely()
            >>> model.resetSeed(42)
        """
        torch.manual_seed(seed)  ## set the pytorch seed
        torch.cuda.manual_seed_all(seed)  ## set the seed for all CUDA devices
        random.seed(seed)  ## set the random module seed
        np.random.seed(seed)  ## set the numpy seed

    def trainAndAnalyze(self, *, test_dataset: str | list | dict | None = None, test_batch_size: int = 128, **kwargs):
        """
        Trains the model using the provided datasets and parameters. After training, it analyzes the results on the training, validation, and test datasets.

        Parameters
        ----------
        test_dataset : str, list, dict, or None, optional
            The name of the dataset used for testing. Default is None.
        test_batch_size : int, optional
            The batch size for testing. Default is 128.
        models : list or None, optional
            A list of models to train. Default is None.
        train_dataset : str or None, optional
            The name of the dataset to use for training. Default is None.
        validation_dataset : str or None, optional
            The name of the dataset to use for validation. Default is None.
        dataset : str or None, optional
            The name of the dataset to use for training, validation, and testing.
        splits : list or None, optional
            A list of 3 elements specifying the percentage splits for training, validation, and testing; the three elements must sum to 100.
            The splits parameter is used only when dataset is not None.
        closed_loop : dict or None, optional
            A dictionary specifying closed-loop connections. The keys are input names and the values are output names. Default is None.
        connect : dict or None, optional
            A dictionary specifying connections. The keys are input names and the values are output names. Default is None.
        step : int or None, optional
            The step size for training. A larger value uses less data in each epoch and speeds up training. Default is None.
        prediction_samples : int or None, optional
            The size of the prediction horizon, i.e. the number of samples in each recurrent window. Default is None.
        shuffle_data : bool or None, optional
            Whether to shuffle the data during training. Default is None.
        early_stopping : Callable or None, optional
            A callable for early stopping. Default is None.
        early_stopping_params : dict or None, optional
            A dictionary of parameters for early stopping. Default is None.
        select_model : Callable or None, optional
            A callable for selecting the best model. Default is None.
        select_model_params : dict or None, optional
            A dictionary of parameters for selecting the best model. Default is None.
        minimize_gain : dict or None, optional
            A dictionary specifying the gain of each minimization loss function. Default is None.
        num_of_epochs : int or None, optional
            The number of epochs to train the model. Default is None.
        train_batch_size : int or None, optional
            The batch size for training. Default is None.
        val_batch_size : int or None, optional
            The batch size for validation. Default is None.
        optimizer : Optimizer or None, optional
            The optimizer to use for training. Default is None.
        lr : float or None, optional
            The learning rate. Default is None.
        lr_param : dict or None, optional
            A dictionary of learning rate parameters. Default is None.
        optimizer_params : list or dict or None, optional
            A dictionary of optimizer parameters. Default is None.
        optimizer_defaults : dict or None, optional
            A dictionary of default optimizer settings. Default is None.
        training_params : dict or None, optional
            A dictionary of training parameters. Default is None.
        add_optimizer_params : list or None, optional
            Additional optimizer parameters. Default is None.
        add_optimizer_defaults : dict or None, optional
            Additional default optimizer settings. Default is None.

        Raises
        ------
        RuntimeError
            If no data is loaded or if there are no modules with learnable parameters.
        KeyError
            If the sample horizon is not positive.
        ValueError
            If an input or output variable is not in the model definition.

        Examples
        --------
        .. image:: https://colab.research.google.com/assets/colab-badge.svg
            :target: https://colab.research.google.com/github/tonegas/nnodely/blob/main/examples/training.ipynb
            :alt: Open in Colab

        Example - basic feed-forward training:
            >>> x = Input('x')
            >>> F = Input('F')

            >>> xk1 = Output('x[k+1]', Fir()(x.tw(0.2))+Fir()(F.last()))

            >>> mass_spring_damper = Modely(seed=0)
            >>> mass_spring_damper.addModel('xk1',xk1)
            >>> mass_spring_damper.neuralizeModel(sample_time = 0.05)

            >>> data_struct = ['time','x','dx','F']
            >>> data_folder = os.path.join(os.path.dirname(os.path.realpath(__file__)),'dataset','data')
            >>> mass_spring_damper.loadData(name='mass_spring_dataset', source=data_folder, format=data_struct, delimiter=';')

            >>> params = {'num_of_epochs': 100,'train_batch_size': 128,'lr':0.001}
            >>> mass_spring_damper.trainAndAnalyze(splits=[70,20,10], training_params = params)

        Example - recurrent training:
            >>> x = Input('x')
            >>> F = Input('F')

            >>> xk1 = Output('x[k+1]', Fir()(x.tw(0.2))+Fir()(F.last()))

            >>> mass_spring_damper = Modely(seed=0)
            >>> mass_spring_damper.addModel('xk1',xk1)
            >>> mass_spring_damper.addClosedLoop(xk1, x)
            >>> mass_spring_damper.neuralizeModel(sample_time = 0.05)

            >>> data_struct = ['time','x','dx','F']
            >>> data_folder = os.path.join(os.path.dirname(os.path.realpath(__file__)),'dataset','data')
            >>> mass_spring_damper.loadData(name='mass_spring_dataset', source=data_folder, format=data_struct, delimiter=';')

            >>> params = {'num_of_epochs': 100,'train_batch_size': 128,'lr':0.001}
            >>> mass_spring_damper.trainAndAnalyze(splits=[70,20,10], prediction_samples=10, training_params = params)
        """
        ## Train the model
        self.trainModel(**kwargs)
        params = self.running_parameters

        minimize_gain = params['minimize_gain']
        closed_loop, connect, prediction_samples = params['closed_loop'], params['connect'], params['prediction_samples']

        if kwargs.get('train_dataset', None) is None:
            check(test_dataset is None, ValueError, 'If train_dataset is None, test_dataset must also be None.')
        else:
            params['test_tag'] = self._get_tag(test_dataset)
            params['XY_test'] = self._get_data(test_dataset)
            params['n_samples_test'] = next(iter(params['XY_test'].values())).size(0) if params['XY_test'] else 0
            params['test_indexes'] = self._get_batch_indexes(test_dataset, params['n_samples_test'], prediction_samples)

        ## Training set results
        self.resultAnalysis(params['XY_train'], name = params['train_tag'], minimize_gain = minimize_gain,
                            closed_loop = closed_loop, connect = connect, prediction_samples = prediction_samples, step = params['train_step'], batch_size = params['train_batch_size'])

        ## Validation set results
        if params['n_samples_val'] > 0:
            self.resultAnalysis(params['XY_val'], name = params['val_tag'], minimize_gain = minimize_gain,
                                closed_loop = closed_loop, connect = connect, prediction_samples = prediction_samples, step = params['val_step'], batch_size = params['val_batch_size'])
        else:
            log.warning("Validation dataset is empty. Skipping validation results analysis.")

        ## Test set results
        if params['n_samples_test'] > 0:
            params['test_batch_size'] = self._clip_batch_size(len(params['test_indexes']), test_batch_size)
            params['test_step'] = self._clip_step(params['step'], params['test_indexes'], params['test_batch_size'])
            self.resultAnalysis(params['XY_test'], name = params['test_tag'], minimize_gain = minimize_gain,
                                closed_loop = closed_loop, connect = connect, prediction_samples = prediction_samples, step = params['test_step'], batch_size = params['test_batch_size'])
        else:
            log.warning("Test dataset is empty. Skipping test results analysis.")

        ## Show the results
        self.visualizer.showResults()
        return self.get_training_info()

nnodely = Modely
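
As a usage note, the seed handling and read-only properties defined above can be exercised as in the following short sketch. It is built only from the listed source; the initial value of `neuralized` and the all-clearing behavior of `clearNames()` with no argument are assumptions.

```python
# Sketch exercising the API listed above. Per the source, resetSeed seeds
# torch, CUDA, random, and numpy, and 'neuralized' is read-only; the alias
# nnodely = Modely is defined at the end of the file.
from nnodely import nnodely, clearNames

model = nnodely(visualizer=None, seed=42)  # visualizer=None selects EmptyVisualizer
model.resetSeed(7)                         # reseed all random number generators

print(model.neuralized)                    # assumed False before neuralizeModel()
try:
    model.neuralized = True                # setter always raises
except AttributeError as err:
    print(err)                             # "Cannot modify read-only property 'neuralized' ..."

clearNames()                               # assumed to clear all registered NeuObj names when called with None
```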