
tonegas / nnodely / build 13520426074

25 Feb 2025 11:43AM UTC coverage: 95.385% (-0.01% from 95.395%)

Pull Request #63: 49 general bug fix (github / web-flow)
Merge 6e8d2fa3d into 16b35cf8f

185 of 189 new or added lines in 16 files covered (97.88%)
5 existing lines in 3 files now uncovered
10521 of 11030 relevant lines covered (95.39%)
0.95 hits per line

Source File
/nnodely/nnodely.py (90.63%)
1
# Extern packages
2
import random, torch, copy, os
1✔
3
import numpy as np
1✔
4
import pandas as pd
1✔
5

6
# nnodely packages
7
from nnodely.visualizer import TextVisualizer, Visualizer
1✔
8
from nnodely.loss import CustomLoss
1✔
9
from nnodely.model import Model
1✔
10
from nnodely.optimizer import Optimizer, SGD, Adam
1✔
11
from nnodely.exporter import Exporter, StandardExporter
1✔
12
from nnodely.modeldef import ModelDef
1✔
13
from nnodely import relation
1✔
14

15
from nnodely.utils import check, argmax_dict, argmin_dict, tensor_to_list, TORCH_DTYPE, NP_DTYPE
1✔
16

17
from nnodely.logger import logging, nnLogger
1✔
18
log = nnLogger(__name__, logging.INFO)
1✔
19

20

21
class Modely:
1✔
22
    """
23
    Creates the main nnodely object, used to define the network, train it, and export it.
24

25
    Parameters
26
    ----------
27
    visualizer : str, Visualizer, optional
28
        The visualizer to be used. Default is the 'Standard' visualizer.
29
    exporter : str, Exporter, optional
30
        The exporter to be used. Default is the 'Standard' exporter.
31
    seed : int, optional
32
        Set the seed for all the random modules inside the nnodely framework. Default is None.
33
    workspace : str
34
        The path of the workspace where all the exported files will be saved.
35
    log_internal : bool
36
        Whether or not to save the internal logs. Default is False.
37
    save_history : bool
38
        Whether or not to save the history. Default is False.
39

40
    Example
41
    -------
42
        >>> model = Modely()
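        A sketch with non-default options is shown below (the values are only illustrative; 'Standard' remains the default visualizer and exporter):
        >>> model = Modely(visualizer=None, seed=42, workspace='./exports', log_internal=True)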
43
    """
44
    def __init__(self,
1✔
45
                 visualizer:str|Visualizer|None = 'Standard',
46
                 exporter:str|Exporter|None = 'Standard',
47
                 seed:int|None = None,
48
                 workspace:str|None = None,
49
                 log_internal:bool = False,
50
                 save_history:bool = False):
51

52
        # Visualizer
53
        if visualizer == 'Standard':
1✔
54
            self.visualizer = TextVisualizer(1)
1✔
55
        elif visualizer != None:
1✔
56
            self.visualizer = visualizer
1✔
57
        else:
58
            self.visualizer = Visualizer()
1✔
59
        self.visualizer.set_n4m(self)
1✔
60

61
        # Exporter
62
        if exporter == 'Standard':
1✔
63
            self.exporter = StandardExporter(workspace, self.visualizer, save_history)
1✔
64
        elif exporter != None:
×
65
            self.exporter = exporter
×
66
        else:
67
            self.exporter = Exporter()
×
68

69
        ## Set the random seed for reproducibility
70
        if seed is not None:
1✔
71
            self.resetSeed(seed)
1✔
72

73
        # Save internal
74
        self.log_internal = log_internal
1✔
75
        if self.log_internal == True:
1✔
76
            self.internals = {}
1✔
77

78
        # Models definition
79
        self.model_def = ModelDef()
1✔
80
        self.input_n_samples = {}
1✔
81
        self.max_n_samples = 0
1✔
82
        self.neuralized = False
1✔
83
        self.traced = False
1✔
84
        self.model = None
1✔
85
        self.states = {}
1✔
86

87
        # Dataset Parameters
88
        self.data_loaded = False
1✔
89
        self.file_count = 0
1✔
90
        self.num_of_samples = {}
1✔
91
        self.data = {}
1✔
92
        self.n_datasets = 0
1✔
93
        self.datasets_loaded = set()
1✔
94
        self.multifile = {}
1✔
95

96
        # Training Parameters
97
        self.standard_train_parameters = {
1✔
98
            'models' : None,
99
            'train_dataset' : None, 'validation_dataset' : None, 'test_dataset' : None, 'splits' : [70, 20, 10],
100
            'closed_loop' : {}, 'connect' : {}, 'step' : 0, 'prediction_samples' : 0,
101
            'shuffle_data' : True,
102
            'early_stopping' : None, 'early_stopping_params' : {},
103
            'select_model' : 'last', 'select_model_params' : {},
104
            'minimize_gain' : {},
105
            'num_of_epochs': 100,
106
            'train_batch_size' : 128, 'val_batch_size' : None, 'test_batch_size' : None,
107
            'optimizer' : 'Adam',
108
            'lr' : 0.001, 'lr_param' : {},
109
            'optimizer_params' : [], 'add_optimizer_params' : [],
110
            'optimizer_defaults' : {}, 'add_optimizer_defaults' : {}
111
        }
112

113
        # Optimizer
114
        self.optimizer = None
1✔
115

116
        # Training Losses
117
        self.loss_functions = {}
1✔
118

119
        # Validation Parameters
120
        self.training = {}
1✔
121
        self.performance = {}
1✔
122
        self.prediction = {}
1✔
123

124

125
    def resetSeed(self, seed):
1✔
126
        """
127
        Resets the random seed for reproducibility.
128

129
        This method sets the seed for various random number generators used in the project to ensure reproducibility of results.
130

131
        :param seed: The seed value to be used for the random number generators.
132
        :type seed: int
133

134
        Example:
135
            >>> model = nnodely()
136
            >>> model.resetSeed(42)
137
        """
138
        torch.manual_seed(seed)  ## set the pytorch seed
1✔
139
        torch.cuda.manual_seed_all(seed)
1✔
140
        random.seed(seed)  ## set the random module seed
1✔
141
        np.random.seed(seed)  ## set the numpy seed
1✔
142

143

144
    def __call__(self, inputs={}, sampled=False, closed_loop={}, connect={}, prediction_samples='auto', num_of_samples=None): ##, align_input=False):
1✔
145
        """
146
        Performs inference on the model.
147

148
        Parameters
149
        ----------
150
        inputs : dict, optional
151
            A dictionary of input data. The keys are input names and the values are the corresponding data. Default is an empty dictionary.
152
        sampled : bool, optional
153
            A boolean indicating whether the inputs are already sampled. Default is False.
154
        closed_loop : dict, optional
155
            A dictionary specifying closed loop connections. The keys are input names and the values are output names. Default is an empty dictionary.
156
        connect : dict, optional
157
            A dictionary specifying connections. The keys are input names and the values are output names. Default is an empty dictionary.
158
        prediction_samples : str or int, optional
159
            The number of prediction samples. Can be 'auto', None or an integer. Default is 'auto'.
160
        num_of_samples : int or None, optional
161
            The number of samples to predict. Can be None or an integer. Default is None.
162

163
        Returns
164
        -------
165
        dict
166
            A dictionary containing the model's prediction outputs.
167

168
        Raises
169
        ------
170
        RuntimeError
171
            If the network is not neuralized.
172
        ValueError
173
            If an input variable is not in the model definition or if an output variable is not in the model definition.
174

175
        Example
176
        -------
177
        Example usage:
178
            >>> model = Modely()
179
            >>> x = Input('x')
180
            >>> out = Output('out', Fir(x.last()))
181
            >>> model.addModel('example_model', [out])
182
            >>> model.neuralizeModel()
183
            >>> predictions = model(inputs={'x': [1, 2, 3]})
184
        """
185

186
        ## Deep-copy the dicts to avoid modifying the caller's arguments
187
        inputs = copy.deepcopy(inputs)
1✔
188
        closed_loop = copy.deepcopy(closed_loop)
1✔
189
        connect = copy.deepcopy(connect)
1✔
190

191
        ## Check neuralize
192
        check(self.neuralized, RuntimeError, "The network is not neuralized.")
1✔
193

194
        ## Check closed loop integrity
195
        for close_in, close_out in closed_loop.items():
1✔
196
            check(close_in in self.model_def['Inputs'], ValueError, f'the tag {close_in} is not an input variable.')
1✔
197
            check(close_out in self.model_def['Outputs'], ValueError, f'the tag {close_out} is not an output of the network')
1✔
198

199
        ## List of keys
200
        model_inputs = list(self.model_def['Inputs'].keys())
1✔
201
        model_states = list(self.model_def['States'].keys())
1✔
202
        state_closed_loop = [key for key, value in self.model_def['States'].items() if 'closedLoop' in value.keys()] + list(closed_loop.keys())
1✔
203
        state_connect = [key for key, value in self.model_def['States'].items() if 'connect' in value.keys()] + list(connect.keys())
1✔
204
        extra_inputs = list(set(list(inputs.keys())) - set(model_inputs) - set(model_states))
1✔
205
        non_mandatory_inputs = state_closed_loop + state_connect 
1✔
206
        mandatory_inputs = list(set(model_inputs) - set(non_mandatory_inputs))
1✔
207

208
        ## Remove extra inputs
209
        for key in extra_inputs:
1✔
210
            log.warning(f'The provided input {key} is not used inside the network. The inference will continue without using it.')
1✔
211
            del inputs[key]
1✔
212

213
        ## Get the number of data windows for each input/state
214
        num_of_windows = {key: len(value) for key, value in inputs.items()} if sampled else {key: len(value) - self.input_n_samples[key] + 1 for key, value in inputs.items()}
1✔
215

216
        ## Get the maximum inference window
217
        if num_of_samples:
1✔
218
            window_dim = num_of_samples
1✔
219
            for key in inputs.keys():
1✔
220
                input_dim = self.model_def['Inputs'][key]['dim'] if key in model_inputs else self.model_def['States'][key]['dim']
1✔
221
                if input_dim > 1:
1✔
222
                    inputs[key] += [[0 for _ in range(input_dim)] for _ in range(num_of_samples - (len(inputs[key]) - self.input_n_samples[key] + 1))]
1✔
223
                else:
224
                    inputs[key] += [0 for _ in range(num_of_samples - (len(inputs[key]) - self.input_n_samples[key] + 1))]
1✔
225
        elif inputs:
1✔
226
            windows = []
1✔
227
            for key in inputs.keys():
1✔
228
                if key in mandatory_inputs:
1✔
229
                    n_samples = len(inputs[key]) if sampled else len(inputs[key]) - self.model_def['Inputs'][key]['ntot'] + 1
1✔
230
                    windows.append(n_samples)
1✔
231
            if not windows:
1✔
232
                for key in inputs.keys():
1✔
233
                    if key in non_mandatory_inputs:
1✔
234
                        if key in model_inputs:
1✔
235
                            n_samples = len(inputs[key]) if sampled else len(inputs[key]) - self.model_def['Inputs'][key]['ntot'] + 1
1✔
236
                        else:
237
                            n_samples = len(inputs[key]) if sampled else len(inputs[key]) - self.model_def['States'][key]['ntot'] + 1
1✔
238
                        windows.append(n_samples)
1✔
239
            window_dim = min(windows) if windows else 0
1✔
240
        else: ## No inputs
241
            window_dim = 1 if non_mandatory_inputs else 0
1✔
242
        check(window_dim > 0, StopIteration, f'Missing samples in the input window')
1✔
243

244
        if len(set(num_of_windows.values())) > 1:
1✔
245
            max_ind_key, max_dim = argmax_dict(num_of_windows)
1✔
246
            min_ind_key, min_dim = argmin_dict(num_of_windows)
1✔
247
            log.warning(f'Different number of samples between inputs [MAX {max_ind_key} = {max_dim}; MIN {min_ind_key} = {min_dim}]')
1✔
248

249
        ## Autofill the missing inputs
250
        provided_inputs = list(inputs.keys())
1✔
251
        missing_inputs = list(set(mandatory_inputs) - set(provided_inputs))
1✔
252
        if missing_inputs:
1✔
253
            log.warning(f'Inputs not provided: {missing_inputs}. Autofilling with zeros..')
1✔
254
            for key in missing_inputs:
1✔
255
                inputs[key] = np.zeros(shape=(self.input_n_samples[key] + window_dim - 1, self.model_def['Inputs'][key]['dim']),dtype=NP_DTYPE).tolist()
1✔
256

257
        ## Transform inputs in 3D Tensors
258
        for key, val in inputs.items():
1✔
259
            input_dim = self.model_def['Inputs'][key]['dim'] if key in model_inputs else self.model_def['States'][key]['dim']
1✔
260
            inputs[key] = torch.from_numpy(np.array(inputs[key])).to(TORCH_DTYPE)
1✔
261

262
            if input_dim > 1:
1✔
263
                correct_dim = 3 if sampled else 2
1✔
264
                check(len(inputs[key].shape) == correct_dim, ValueError,f'The input {key} must have {correct_dim} dimensions')
1✔
265
                check(inputs[key].shape[correct_dim - 1] == input_dim, ValueError,f'The second dimension of the input "{key}" must be equal to {input_dim}')
1✔
266

267
            if input_dim == 1 and inputs[key].shape[-1] != 1: ## add the input dimension
1✔
268
                inputs[key] = inputs[key].unsqueeze(-1)
1✔
269
            if inputs[key].ndim <= 1: ## add the batch dimension
1✔
270
                inputs[key] = inputs[key].unsqueeze(0)
1✔
271
            if inputs[key].ndim <= 2: ## add the time dimension
1✔
272
                inputs[key] = inputs[key].unsqueeze(0)
1✔
273

274
        ## initialize the resulting dictionary
275
        result_dict = {}
1✔
276
        for key in self.model_def['Outputs'].keys():
1✔
277
            result_dict[key] = []
1✔
278

279
        ## Inference
280
        with torch.inference_mode():
1✔
281
            self.model.eval()
1✔
282
            ## Update with virtual states
283
            if prediction_samples is not None:
1✔
284
                self.model.update(closed_loop=closed_loop, connect=connect)
1✔
285
            else:
286
                prediction_samples = 0
1✔
287
            X = {}
1✔
288
            count = 0
1✔
289
            first = True
1✔
290
            for idx in range(window_dim):
1✔
291
                ## Get mandatory data inputs
292
                for key in mandatory_inputs:
1✔
293
                    X[key] = inputs[key][idx:idx+1] if sampled else inputs[key][:, idx:idx + self.input_n_samples[key]]
1✔
294
                ## reset states
295
                if count == 0 or prediction_samples=='auto':
1✔
296
                    count = prediction_samples
1✔
297
                    for key in non_mandatory_inputs: ## Get non mandatory data (from inputs, from states, or with zeros)
1✔
298
                        ## if prediction_samples is 'auto' and there are enough samples
299
                        ## if prediction_samples is NOT 'auto' but the window can be extended (with zeros)
300
                        if (key in inputs.keys() and prediction_samples == 'auto' and idx < num_of_windows[key]) or (key in inputs.keys() and prediction_samples != 'auto' and idx < inputs[key].shape[1]):
1✔
301
                            X[key] = inputs[key][idx:idx+1].clone() if sampled else inputs[key][:, idx:idx + self.input_n_samples[key]].clone()
1✔
302
                        ## if this is the first reset
303
                        ## if a state is already in memory
304
                        ## if prediction_samples is 'auto' and there are not enough samples
305
                        elif (key in self.states.keys() and (first or prediction_samples == 'auto')) and (prediction_samples == 'auto' or prediction_samples == None):
1✔
306
                            X[key] = self.states[key]
1✔
307
                        else: ## no samples provided and no stored states
308
                            window_size = self.input_n_samples[key]
1✔
309
                            dim = self.model_def['Inputs'][key]['dim'] if key in model_inputs else self.model_def['States'][key]['dim']
1✔
310
                            X[key] = torch.zeros(size=(1, window_size, dim), dtype=TORCH_DTYPE, requires_grad=False)
1✔
311
                            self.states[key] = X[key]
1✔
312
                    first = False
1✔
313
                else:
314
                    count -= 1
1✔
315
                ## Forward pass
316
                result, _, out_closed_loop, out_connect = self.model(X)
1✔
317

318
                ## Append the prediction of the current sample to the result dictionary
319
                for key in self.model_def['Outputs'].keys():
1✔
320
                    if result[key].shape[-1] == 1:
1✔
321
                        result[key] = result[key].squeeze(-1)
1✔
322
                        if result[key].shape[-1] == 1:
1✔
323
                            result[key] = result[key].squeeze(-1)
1✔
324
                    result_dict[key].append(result[key].detach().squeeze(dim=0).tolist())
1✔
325

326
                ## Update closed_loop and connect
327
                if prediction_samples:
1✔
328
                    for key, val in out_closed_loop.items():
1✔
329
                        shift = val.shape[1]  ## take the output time dimension
1✔
330
                        X[key] = torch.roll(X[key], shifts=-1, dims=1) ## Roll the time window
1✔
331
                        X[key][:, -shift:, :] = val ## substitute with the predicted value
1✔
332
                        self.states[key] = X[key]
1✔
333
                    for key, val in out_connect.items():
1✔
334
                        X[key] = val
1✔
335
                        self.states[key] = X[key]
1✔
336

337
        ## Remove virtual states
338
        for key in (connect.keys() | closed_loop.keys()):
1✔
339
            if key in self.states.keys():
1✔
340
                del self.states[key]
1✔
341
        
342
        return result_dict
1✔
343
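A closed-loop inference sketch (hedged; it reuses the model from the docstring example above and feeds the output 'out' back into the input 'x' through the closed_loop argument documented in this method):

    >>> model = Modely()
    >>> x = Input('x')
    >>> out = Output('out', Fir(x.last()))
    >>> model.addModel('example_model', [out])
    >>> model.neuralizeModel()
    >>> # recursive prediction: 'out' is fed back into 'x' for 5 samples
    >>> predictions = model(inputs={'x': [1.0]}, closed_loop={'x': 'out'}, num_of_samples=5)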
    
344
    def clearTags(self):
1✔
NEW
345
        relation.NeuObj_names = []
×
346

347
    def getSamples(self, dataset, index = None, window=1):
1✔
348
        """
349
        Retrieves a window of samples from a given dataset.
350

351
        Parameters
352
        ----------
353
        dataset : str
354
            The name of the dataset to retrieve samples from.
355
        index : int, optional
356
            The starting index of the samples. If None, a random index is chosen. Default is None.
357
        window : int, optional
358
            The number of consecutive samples to retrieve. Default is 1.
359

360
        Returns
361
        -------
362
        dict
363
            A dictionary containing the retrieved samples. The keys are input and state names, and the values are lists of samples.
364

365
        Raises
366
        ------
367
        ValueError
368
            If the dataset is not loaded.
369

370
        Example
371
        -------
372
        Example usage:
373
            >>> model = Modely()
374
            >>> model.loadData('dataset_name')
375
            >>> samples = model.getSamples('dataset_name', index=10, window=5)
376
        """
377
        if index is None:
1✔
378
            index = random.randint(0, self.num_of_samples[dataset] - window)
1✔
379
        check(self.data_loaded, ValueError, 'The Dataset must first be loaded using <loadData> function!')
1✔
380
        if self.data_loaded:
1✔
381
            result_dict = {}
1✔
382
            for key in (self.model_def['Inputs'].keys() | self.model_def['States'].keys()):
1✔
383
                result_dict[key] = []
1✔
384
            for idx in range(window):
1✔
385
                for key, samples in self.data[dataset].items():
1✔
386
                    if key in (self.model_def['Inputs'].keys() | self.model_def['States'].keys()):
1✔
387
                        result_dict[key].append(samples[index+idx])
1✔
388
            return result_dict
1✔
389
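The windows returned by getSamples are already sampled, so they can be passed straight to inference with sampled=True (a usage sketch, assuming a dataset named 'example_dataset' has been loaded as in the docstring example above):

    >>> sample = model.getSamples('example_dataset', index=10, window=1)
    >>> prediction = model(sample, sampled=True)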

390
    def addConnect(self, stream_out, state_list_in):
1✔
391
        """
392
        Adds a connection from a relation stream to an input state.
393

394
        Parameters
395
        ----------
396
        stream_out : Stream
397
            The relation stream to connect from.
398
        state_list_in : list of State
399
            The list of input states to connect to.
400

401
        Example
402
        -------
403
        Example usage:
404
            >>> model = Modely()
405
            >>> x = Input('x')
406
            >>> y = State('y')
407
            >>> relation = Fir(x.last())
408
            >>> model.addConnect(relation, y)
409
        """
410
        self.model_def.addConnect(stream_out, state_list_in)
1✔
411

412
    def addClosedLoop(self, stream_out, state_list_in):
1✔
413
        """
414
        Adds a closed loop connection from a relation stream to an input state.
415

416
        Parameters
417
        ----------
418
        stream_out : Stream
419
            The relation stream to connect from.
420
        state_list_in : list of State
421
            The list of input states to connect to.
422

423
        Example
424
        -------
425
        Example usage:
426
            >>> model = Modely()
427
            >>> x = Input('x')
428
            >>> y = State('y')
429
            >>> relation = Fir(x.last())
430
            >>> model.addClosedLoop(relation, y)
431
        """
432
        self.model_def.addClosedLoop(stream_out, state_list_in)
1✔
433

434
    def addModel(self, name, stream_list):
1✔
435
        """
436
        Adds a new model with the given name along with a list of Outputs.
437

438
        Parameters
439
        ----------
440
        name : str
441
            The name of the model.
442
        stream_list : list of Stream
443
            The list of Output streams in the model.
444

445
        Example
446
        -------
447
        Example usage:
448
            >>> model = Modely()
449
            >>> x = Input('x')
450
            >>> out = Output('out', Fir(x.last()))
451
            >>> model.addModel('example_model', [out])
452
        """
453
        try:
1✔
454
            self.model_def.addModel(name, stream_list)
1✔
455
        except Exception as e:
1✔
456
            self.model_def.removeModel(name)
1✔
457
            raise e
1✔
458

459
    def removeModel(self, name_list):
1✔
460
        """
461
        Removes models with the given list of names.
462

463
        Parameters
464
        ----------
465
        name_list : list of str
466
            The list of model names to remove.
467

468
        Example
469
        -------
470
        Example usage:
471
            >>> model.removeModel(['sub_model1', 'sub_model2'])
472
        """
473
        self.model_def.removeModel(name_list)
×
474

475
    def addMinimize(self, name, streamA, streamB, loss_function='mse'):
1✔
476
        """
477
        Adds a minimize loss function to the model.
478

479
        Parameters
480
        ----------
481
        name : str
482
            The name of the cost function.
483
        streamA : Stream
484
            The first relation stream for the minimize operation.
485
        streamB : Stream
486
            The second relation stream for the minimize operation.
487
        loss_function : str, optional
488
            The loss function to use from the ones provided. Default is 'mse'.
489

490
        Example
491
        -------
492
        Example usage:
493
            >>> model.addMinimize('minimize_op', streamA, streamB, loss_function='mse')
494
        """
495
        self.model_def.addMinimize(name, streamA, streamB, loss_function)
1✔
496
        self.visualizer.showaddMinimize(name)
1✔
497
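A more concrete sketch (hedged; the 'target' input and the use of an Output together with a stream are only illustrative of the usual nnodely building blocks):

    >>> x = Input('x')
    >>> target = Input('target')
    >>> out = Output('out', Fir(x.last()))
    >>> model.addModel('example_model', [out])
    >>> model.addMinimize('error', out, target.last(), loss_function='mse')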

498
    def removeMinimize(self, name_list):
1✔
499
        """
500
        Removes minimize loss functions using the given list of names.
501

502
        Parameters
503
        ----------
504
        name_list : list of str
505
            The list of minimize operation names to remove.
506

507
        Example
508
        -------
509
        Example usage:
510
            >>> model.removeMinimize(['minimize_op1', 'minimize_op2'])
511
        """
512
        self.model_def.removeMinimize(name_list)
1✔
513

514
    def resetStates(self, states=[], batch=1):
1✔
515
        if states: ## reset only specific states
1✔
516
            for key in states:
1✔
517
                window_size = self.input_n_samples[key]
1✔
518
                dim = self.model_def['States'][key]['dim']
1✔
519
                self.states[key] = torch.zeros(size=(batch, window_size, dim), dtype=TORCH_DTYPE, requires_grad=False)
1✔
520
        else: ## reset all states
521
            self.states = {}
1✔
522
            for key, state in self.model_def['States'].items():
1✔
523
                window_size = self.input_n_samples[key]
1✔
524
                dim = state['dim']
1✔
525
                self.states[key] = torch.zeros(size=(batch, window_size, dim), dtype=TORCH_DTYPE, requires_grad=False)
1✔
526
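resetStates zeroes the stored state windows, either for every state variable or only for the names passed in the states list. A usage sketch (assuming a State variable named 'y' is defined in the model):

    >>> model.resetStates()              # reset all state variables to zero
    >>> model.resetStates(states=['y'])  # reset only the state 'y'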

527
    def neuralizeModel(self, sample_time = None, clear_model = False, model_def = None):
1✔
528
        """
529
        Neuralizes the model, preparing it for inference and training. This method creates a neural network model starting from the model definition.
530
        It will also create all the time windows for the inputs and states.
531

532
        Parameters
533
        ----------
534
        sample_time : float or None, optional
535
            The sample time for the model. Default is None.
536
        clear_model : bool, optional
537
            Whether to clear the existing model definition. Default is False.
538
        model_def : dict or None, optional
539
            A dictionary defining the model. If provided, it overrides the existing model definition. Default is None.
540

541
        Raises
542
        ------
543
        ValueError
544
            If sample_time is not None and model_def is provided.
545
            If clear_model is True and model_def is provided.
546

547
        Example
548
        -------
549
        Example usage:
550
            >>> model = Modely()
551
            >>> model.neuralizeModel(sample_time=0.1, clear_model=True)
552
        """
553
        if model_def is not None:
1✔
554
            check(sample_time == None, ValueError, 'The sample_time must be None if a model_def is provided')
1✔
555
            check(clear_model == False, ValueError, 'The clear_model must be False if a model_def is provided')
1✔
556
            self.model_def = ModelDef(model_def)
1✔
557
        else:
558
            if clear_model:
1✔
559
                self.model_def.update()
1✔
560
            else:
561
                self.model_def.updateParameters(self.model)
1✔
562

563
        for key, state in self.model_def['States'].items():
1✔
564
            check("connect" in state.keys() or  'closedLoop' in state.keys(), KeyError, f'The connect or closed loop missing for state "{key}"')
1✔
565

566
        self.model_def.setBuildWindow(sample_time)
1✔
567
        self.model = Model(self.model_def.json)
1✔
568

569
        input_ns_backward = {key:value['ns'][0] for key, value in (self.model_def['Inputs']|self.model_def['States']).items()}
1✔
570
        input_ns_forward = {key:value['ns'][1] for key, value in (self.model_def['Inputs']|self.model_def['States']).items()}
1✔
571
        self.input_n_samples = {}
1✔
572
        for key, value in (self.model_def['Inputs'] | self.model_def['States']).items():
1✔
573
            self.input_n_samples[key] = input_ns_backward[key] + input_ns_forward[key]
1✔
574
        self.max_n_samples = max(input_ns_backward.values()) + max(input_ns_forward.values())
1✔
575

576
        ## Initialize States 
577
        self.resetStates()
1✔
578

579
        self.neuralized = True
1✔
580
        self.traced = False
1✔
581
        self.visualizer.showModel(self.model_def.json)
1✔
582
        self.visualizer.showModelInputWindow()
1✔
583
        self.visualizer.showBuiltModel()
1✔
584

585
    def loadData(self, name, source, format=None, skiplines=0, delimiter=',', header=None):
1✔
586
        """
587
        Loads data into the model. The data can be loaded from a directory path containing the csv files or from a crafted dataset.
588

589
        Parameters
590
        ----------
591
        name : str
592
            The name of the dataset.
593
        source : str or list
594
            The source of the data. Can be a directory path containing the csv files or a dictionary of custom data.
595
        format : list or None, optional
596
            The format of the data. When loading csv files, it defines how each column of the file is read. Default is None.
597
        skiplines : int, optional
598
            The number of lines to skip at the beginning of the file. Default is 0.
599
        delimiter : str, optional
600
            The delimiter used in the data files. Default is ','.
601
        header : list or None, optional
602
            The header of the data files. Default is None.
603

604
        Raises
605
        ------
606
        ValueError
607
            If the network is not neuralized.
608
            If the delimiter is not valid.
609

610
        Example
611
        -------
612
        Example - load data from files:
613
            >>> x = Input('x')
614
            >>> y = Input('y')
615
            >>> out = Output('out',Fir(x.tw(0.05)))
616
            >>> test = Modely(visualizer=None)
617
            >>> test.addModel('example_model', out)
618
            >>> test.neuralizeModel(0.01)
619
            >>> data_struct = ['x', '', 'y']
620
            >>> test.loadData(name='example_dataset', source='path/to/data', format=data_struct)
621

622
        Example - load data from a crafted dataset:
623
            >>> x = Input('x')
624
            >>> y = Input('y')
625
            >>> out = Output('out',Fir(x.tw(0.05)))
626
            >>> test = Modely(visualizer=None)
627
            >>> test.addModel('example_model', out)
628
            >>> test.neuralizeModel(0.01)
629
            >>> data_x = np.array(range(10))
630
            >>> dataset = {'x': data_x, 'y': (2*data_x)}
631
            >>> test.loadData(name='example_dataset',source=dataset)
632
        """
633
        check(self.neuralized, ValueError, "The network is not neuralized.")
1✔
634
        check(delimiter in ['\t', '\n', ';', ',', ' '], ValueError, 'delimiter not valid!')
1✔
635

636
        json_inputs = self.model_def['Inputs'] | self.model_def['States']
1✔
637
        model_inputs = list(json_inputs.keys())
1✔
638
        ## Initialize the dictionary containing the data
639
        if name in list(self.data.keys()):
1✔
640
            log.warning(f'Dataset named {name} already loaded! Overriding the existing one.')
1✔
641
        self.data[name] = {}
1✔
642

643
        input_ns_backward = {key:value['ns'][0] for key, value in json_inputs.items()}
1✔
644
        input_ns_forward = {key:value['ns'][1] for key, value in json_inputs.items()}
1✔
645
        max_samples_backward = max(input_ns_backward.values())
1✔
646
        max_samples_forward = max(input_ns_forward.values())
1✔
647
        max_n_samples = max_samples_backward + max_samples_forward
1✔
648

649
        num_of_samples = {}
1✔
650
        if type(source) is str: ## we have a directory path containing the files
1✔
651
            ## collect column indexes
652
            format_idx = {}
1✔
653
            idx = 0
1✔
654
            for item in format:
1✔
655
                if isinstance(item, tuple):
1✔
656
                    for key in item:
×
657
                        if key not in model_inputs:
×
658
                            idx += 1
×
659
                            break
×
660
                        n_cols = json_inputs[key]['dim']
×
661
                        format_idx[key] = (idx, idx+n_cols)
×
662
                    idx += n_cols
×
663
                else:
664
                    if item not in model_inputs:
1✔
665
                        idx += 1
1✔
666
                        continue
1✔
667
                    n_cols = json_inputs[item]['dim']
1✔
668
                    format_idx[item] = (idx, idx+n_cols)
1✔
669
                    idx += n_cols
1✔
670

671
            ## Initialize each input key
672
            for key in format_idx.keys():
1✔
673
                self.data[name][key] = []
1✔
674

675
            ## obtain the file names
676
            try:
1✔
677
                _,_,files = next(os.walk(source))
1✔
678
                files.sort()
1✔
679
            except StopIteration as e:
×
680
                check(False,StopIteration, f'ERROR: The path "{source}" does not exist!')
×
681
                return
×
682
            self.file_count = len(files)
1✔
683
            if self.file_count > 1: ## Multifile
1✔
684
                self.multifile[name] = []
1✔
685

686
            ## Cycle through all the files
687
            for file in files:
1✔
688
                try:
1✔
689
                    ## read the csv
690
                    df = pd.read_csv(os.path.join(source,file), skiprows=skiplines, delimiter=delimiter, header=header)
1✔
691
                except:
×
692
                    log.warning(f'Cannot read file {os.path.join(source,file)}')
×
693
                    continue
×
694
                if self.file_count > 1:
1✔
695
                    self.multifile[name].append((self.multifile[name][-1] + (len(df) - max_n_samples + 1)) if self.multifile[name] else len(df) - max_n_samples + 1)
1✔
696
                ## Cycle through all the windows
697
                for key, idxs in format_idx.items():
1✔
698
                    back, forw = input_ns_backward[key], input_ns_forward[key]
1✔
699
                    ## Save as numpy array the data
700
                    data = df.iloc[:, idxs[0]:idxs[1]].to_numpy()
1✔
701
                    self.data[name][key] += [data[i-back:i+forw] for i in range(max_samples_backward, len(df)-max_samples_forward+1)]
1✔
702

703
            ## Stack the files
704
            for key in format_idx.keys():
1✔
705
                self.data[name][key] = np.stack(self.data[name][key])
1✔
706
                num_of_samples[key] = self.data[name][key].shape[0]
1✔
707

708
        elif type(source) is dict:  ## we have a crafted dataset
1✔
709
            self.file_count = 1
1✔
710

711
            ## Check if the inputs are correct
712
            #assert set(model_inputs).issubset(source.keys()), f'The dataset is missing some inputs. Inputs needed for the model: {model_inputs}'
713

714
            # Build the sample windows for each provided input
715
            for key in model_inputs:
1✔
716
                if key not in source.keys():
1✔
717
                    continue
1✔
718

719
                self.data[name][key] = []  ## Initialize the dataset
1✔
720

721
                back, forw = input_ns_backward[key], input_ns_forward[key]
1✔
722
                for idx in range(len(source[key]) - max_n_samples+1):
1✔
723
                    self.data[name][key].append(source[key][idx + (max_samples_backward - back):idx + (max_samples_backward + forw)])
1✔
724

725
            ## Stack the files
726
            for key in model_inputs:
1✔
727
                if key not in source.keys():
1✔
728
                    continue
1✔
729
                self.data[name][key] = np.stack(self.data[name][key])
1✔
730
                if self.data[name][key].ndim == 2: ## Add the sample dimension
1✔
731
                    self.data[name][key] = np.expand_dims(self.data[name][key], axis=-1)
1✔
732
                if self.data[name][key].ndim > 3:
1✔
733
                    self.data[name][key] = np.squeeze(self.data[name][key], axis=1)
×
734
                num_of_samples[key] = self.data[name][key].shape[0]
1✔
735

736
        # Check dim of the samples
737
        check(len(set(num_of_samples.values())) == 1, ValueError,
1✔
738
              f"The number of the sample of the dataset {name} are not the same for all input in the dataset: {num_of_samples}")
739
        self.num_of_samples[name] = num_of_samples[list(num_of_samples.keys())[0]]
1✔
740

741
        ## Set the Loaded flag to True
742
        self.data_loaded = True
1✔
743
        ## Update the number of datasets loaded
744
        self.n_datasets = len(self.data.keys())
1✔
745
        self.datasets_loaded.add(name)
1✔
746
        ## Show the dataset
747
        self.visualizer.showDataset(name=name)
1✔
748

749
    def filterData(self, filter_function, dataset_name = None):
1✔
750
        """
751
        Filters the data in the dataset using the provided filter function.
752

753
        Parameters
754
        ----------
755
        filter_function : Callable
756
            A function that takes a sample as input and returns True if the sample should be kept, and False if it should be removed.
757
        dataset_name : str or None, optional
758
            The name of the dataset to filter. If None, all datasets are filtered. Default is None.
759

760
        Example
761
        -------
762
        Example usage:
763
            >>> model = Modely()
764
            >>> model.loadData('dataset_name', 'path/to/data')
765
            >>> def filter_fn(sample):
766
            >>>     return sample['input1'] > 0
767
            >>> model.filterData(filter_fn, 'dataset_name')
768
        """
769
        idx_to_remove = []
×
770
        if dataset_name is None:
×
771
            for name in self.data.keys():
×
772
                dataset = self.data[name]
×
773
                n_samples = len(dataset[list(dataset.keys())[0]])
×
774

775
                data_for_filter = []
×
776
                for i in range(n_samples):
×
777
                    new_sample = {key: val[i] for key, val in dataset.items()}
×
778
                    data_for_filter.append(new_sample)
×
779

780
                for idx, sample in enumerate(data_for_filter):
×
781
                    if not filter_function(sample):
×
782
                        idx_to_remove.append(idx)
×
783

784
                for key in self.data[name].keys():
×
785
                    self.data[name][key] = np.delete(self.data[name][key], idx_to_remove, axis=0)
×
786
                    self.num_of_samples[name] = self.data[name][key].shape[0]
×
787
                self.visualizer.showDataset(name=name)
×
788

789
        else:
790
            dataset = self.data[dataset_name]
×
791
            n_samples = len(dataset[list(dataset.keys())[0]])
×
792

793
            data_for_filter = []
×
794
            for i in range(n_samples):
×
795
                new_sample = {key: val[i] for key, val in dataset.items()}
×
796
                data_for_filter.append(new_sample)
×
797

798
            for idx, sample in enumerate(data_for_filter):
×
799
                if not filter_function(sample):
×
800
                    idx_to_remove.append(idx)
×
801

802
            for key in self.data[dataset_name].keys():
×
803
                self.data[dataset_name][key] = np.delete(self.data[dataset_name][key], idx_to_remove, axis=0)
×
804
                self.num_of_samples[dataset_name] = self.data[dataset_name][key].shape[0]
×
805
            self.visualizer.showDataset(name=dataset_name)
×
806

807
    def __save_internal(self, key, value):
1✔
808
        self.internals[key] = tensor_to_list(value)
1✔
809

810
    def __get_train_parameters(self, training_params):
1✔
811
        run_train_parameters = copy.deepcopy(self.standard_train_parameters)
1✔
812
        if training_params is None:
1✔
813
            return run_train_parameters
1✔
814
        for key, value in training_params.items():
1✔
815
            check(key in run_train_parameters, KeyError, f"The param {key} does not exist among the standard training parameters")
1✔
816
            run_train_parameters[key] = value
1✔
817
        return run_train_parameters
1✔
818

819
    def __get_parameter(self, **parameter):
1✔
820
        assert len(parameter) == 1
1✔
821
        name = list(parameter.keys())[0]
1✔
822
        self.run_training_params[name] =  parameter[name] if parameter[name] is not None else self.run_training_params[name]
1✔
823
        return self.run_training_params[name]
1✔
824

825
    def __get_batch_sizes(self, train_batch_size, val_batch_size, test_batch_size):
1✔
826
        ## Check if the batch_size can be used for the current dataset, otherwise set the batch_size to the maximum value
827
        self.__get_parameter(train_batch_size = train_batch_size)
1✔
828
        self.__get_parameter(val_batch_size = val_batch_size)
1✔
829
        self.__get_parameter(test_batch_size = test_batch_size)
1✔
830

831
        if self.run_training_params['recurrent_train']:
1✔
832
            if self.run_training_params['train_batch_size'] > self.run_training_params['n_samples_train']:
1✔
833
                self.run_training_params['train_batch_size'] = self.run_training_params['n_samples_train'] - self.run_training_params['prediction_samples']
1✔
834
            if self.run_training_params['val_batch_size'] is None or self.run_training_params['val_batch_size'] > self.run_training_params['n_samples_val']:
1✔
835
                self.run_training_params['val_batch_size'] = max(0,self.run_training_params['n_samples_val'] - self.run_training_params['prediction_samples'])
1✔
836
            if self.run_training_params['test_batch_size'] is None or self.run_training_params['test_batch_size'] > self.run_training_params['n_samples_test']:
1✔
837
                self.run_training_params['test_batch_size'] = max(0,self.run_training_params['n_samples_test'] - self.run_training_params['prediction_samples'])
1✔
838
        else:
839
            if self.run_training_params['train_batch_size'] > self.run_training_params['n_samples_train']:
1✔
840
                self.run_training_params['train_batch_size'] = self.run_training_params['n_samples_train']
1✔
841
            if self.run_training_params['val_batch_size'] is None or self.run_training_params['val_batch_size'] > self.run_training_params['n_samples_val']:
1✔
842
                self.run_training_params['val_batch_size'] = self.run_training_params['n_samples_val']
1✔
843
            if self.run_training_params['test_batch_size'] is None or self.run_training_params['test_batch_size'] > self.run_training_params['n_samples_test']:
1✔
844
                self.run_training_params['test_batch_size'] = self.run_training_params['n_samples_test']
1✔
845

846
        check(self.run_training_params['train_batch_size'] > 0, ValueError, f'The auto train_batch_size ({self.run_training_params["train_batch_size"] }) = n_samples_train ({self.run_training_params["n_samples_train"]}) - prediction_samples ({self.run_training_params["prediction_samples"]}), must be greater than 0.')
1✔
847

848
        return self.run_training_params['train_batch_size'], self.run_training_params['val_batch_size'], self.run_training_params['test_batch_size']
1✔
849

850
    def __inizilize_optimizer(self, optimizer, optimizer_params, optimizer_defaults, add_optimizer_params, add_optimizer_defaults, models, lr, lr_param):
1✔
851
        # Get optimizer and initialization parameters
852
        optimizer = copy.deepcopy(self.__get_parameter(optimizer=optimizer))
1✔
853
        optimizer_params = copy.deepcopy(self.__get_parameter(optimizer_params=optimizer_params))
1✔
854
        optimizer_defaults = copy.deepcopy(self.__get_parameter(optimizer_defaults=optimizer_defaults))
1✔
855
        add_optimizer_params = copy.deepcopy(self.__get_parameter(add_optimizer_params=add_optimizer_params))
1✔
856
        add_optimizer_defaults = copy.deepcopy(self.__get_parameter(add_optimizer_defaults=add_optimizer_defaults))
1✔
857

858
        ## Get parameter to be trained
859
        json_models = []
1✔
860
        models = self.__get_parameter(models=models)
1✔
861
        if 'Models' in self.model_def:
1✔
862
            json_models = list(self.model_def['Models'].keys()) if type(self.model_def['Models']) is dict else [self.model_def['Models']]
1✔
863
        if models is None:
1✔
864
            models = json_models
1✔
865
        self.run_training_params['models'] = models
1✔
866
        params_to_train = set()
1✔
867
        if isinstance(models, str):
1✔
868
            models = [models]
1✔
869
        for model in models:
1✔
870
            check(model in json_models, ValueError, f'The model {model} is not in the model definition')
1✔
871
            if type(self.model_def['Models']) is dict:
1✔
872
                params_to_train |= set(self.model_def['Models'][model]['Parameters'])
1✔
873
            else:
874
                params_to_train |= set(self.model_def['Parameters'].keys())
1✔
875

876
        # Get the optimizer
877
        if type(optimizer) is str:
1✔
878
            if optimizer == 'SGD':
1✔
879
                optimizer = SGD({},[])
1✔
880
            elif optimizer == 'Adam':
1✔
881
                optimizer = Adam({},[])
1✔
882
        else:
883
            check(issubclass(type(optimizer), Optimizer), TypeError,
1✔
884
                  "The optimizer must be an Optimizer or str")
885

886
        optimizer.set_params_to_train(self.model.all_parameters, params_to_train)
1✔
887

888
        optimizer.add_defaults('lr', self.run_training_params['lr'])
1✔
889
        optimizer.add_option_to_params('lr', self.run_training_params['lr_param'])
1✔
890

891
        if optimizer_defaults != {}:
1✔
892
            optimizer.set_defaults(optimizer_defaults)
1✔
893
        if optimizer_params != []:
1✔
894
            optimizer.set_params(optimizer_params)
1✔
895

896
        for key, value in add_optimizer_defaults.items():
1✔
897
            optimizer.add_defaults(key, value)
1✔
898

899
        add_optimizer_params = optimizer.unfold(add_optimizer_params)
1✔
900
        for param in add_optimizer_params:
1✔
901
            par = param['params']
1✔
902
            del param['params']
1✔
903
            for key, value in param.items():
1✔
904
                optimizer.add_option_to_params(key, {par:value})
1✔
905

906
        # Modify the parameter
907
        optimizer.add_defaults('lr', lr)
1✔
908
        optimizer.add_option_to_params('lr', lr_param)
1✔
909

910
        return optimizer
1✔
911

912
    def trainModel(self,
1✔
913
                    models=None,
914
                    train_dataset = None, validation_dataset = None, test_dataset = None, splits = None,
915
                    closed_loop = None, connect = None, step = None, prediction_samples = None,
916
                    shuffle_data = None,
917
                    early_stopping = None, early_stopping_params = None,
918
                    select_model = None, select_model_params = None,
919
                    minimize_gain = None,
920
                    num_of_epochs = None,
921
                    train_batch_size = None, val_batch_size = None, test_batch_size = None,
922
                    optimizer = None,
923
                    lr = None, lr_param = None,
924
                    optimizer_params = None, optimizer_defaults = None,
925
                    training_params = None,
926
                    add_optimizer_params = None, add_optimizer_defaults = None
927
                   ):
928
        """
929
        Trains the model using the provided datasets and parameters.
930

931
        Parameters
932
        ----------
933
        models : list or None, optional
934
            A list of models to train. Default is None.
935
        train_dataset : str or None, optional
936
            The name of the training dataset. Default is None.
937
        validation_dataset : str or None, optional
938
            The name of the validation dataset. Default is None.
939
        test_dataset : str or None, optional
940
            The name of the test dataset. Default is None.
941
        splits : list or None, optional
942
            A list of 3 elements specifying the percentage of splits for training, validation, and testing. The three elements must sum up to 100!
943
            The parameter splits is only used when there is only 1 dataset loaded. Default is None.
944
        closed_loop : dict or None, optional
945
            A dictionary specifying closed loop connections. The keys are input names and the values are output names. Default is None.
946
        connect : dict or None, optional
947
            A dictionary specifying connections. The keys are input names and the values are output names. Default is None.
948
        step : int or None, optional
949
            The step size for training. A large value results in less data being used per epoch and faster training. Default is None.
950
        prediction_samples : int or None, optional
951
            The size of the prediction horizon, i.e. the number of samples in each recurrent window. Default is None.
952
        shuffle_data : bool or None, optional
953
            Whether to shuffle the data during training. Default is None.
954
        early_stopping : Callable or None, optional
955
            A callable for early stopping. Default is None.
956
        early_stopping_params : dict or None, optional
957
            A dictionary of parameters for early stopping. Default is None.
958
        select_model : Callable or None, optional
959
            A callable for selecting the best model. Default is None.
960
        select_model_params : dict or None, optional
961
            A dictionary of parameters for selecting the best model. Default is None.
962
        minimize_gain : dict or None, optional
963
            A dictionary specifying the gain for each minimization loss function. Default is None.
964
        num_of_epochs : int or None, optional
965
            The number of epochs to train the model. Default is None.
966
        train_batch_size : int or None, optional
967
            The batch size for training. Default is None.
968
        val_batch_size : int or None, optional
969
            The batch size for validation. Default is None.
970
        test_batch_size : int or None, optional
971
            The batch size for testing. Default is None.
972
        optimizer : Optimizer or None, optional
973
            The optimizer to use for training. Default is None.
974
        lr : float or None, optional
975
            The learning rate. Default is None.
976
        lr_param : dict or None, optional
977
            A dictionary of learning rate parameters. Default is None.
978
        optimizer_params : dict or None, optional
979
            A dictionary of optimizer parameters. Default is None.
980
        optimizer_defaults : dict or None, optional
981
            A dictionary of default optimizer settings. Default is None.
982
        training_params : dict or None, optional
983
            A dictionary of training parameters. Default is None.
984
        add_optimizer_params : dict or None, optional
985
            Additional optimizer parameters. Default is None.
986
        add_optimizer_defaults : dict or None, optional
987
            Additional default optimizer settings. Default is None.
988

989
        Raises
990
        ------
991
        RuntimeError
992
            If no data is loaded or if there are no modules with learnable parameters.
993
        KeyError
994
            If the sample horizon is not positive.
995
        ValueError
996
            If an input or output variable is not in the model definition.
997

998
        Example
999
        -------
1000
        Example - basic feed-forward training:
1001
            >>> x = Input('x')
1002
            >>> F = Input('F')
1003

1004
            >>> xk1 = Output('x[k+1]', Fir()(x.tw(0.2))+Fir()(F.last()))
1005

1006
            >>> mass_spring_damper = Modely(seed=0)
1007
            >>> mass_spring_damper.addModel('xk1',xk1)
1008
            >>> mass_spring_damper.neuralizeModel(sample_time = 0.05) 
1009

1010
            >>> data_struct = ['time','x','dx','F']
1011
            >>> data_folder = os.path.join(os.path.dirname(os.path.realpath(__file__)),'dataset','data')
1012
            >>> mass_spring_damper.loadData(name='mass_spring_dataset', source=data_folder, format=data_struct, delimiter=';')
1013

1014
            >>> params = {'num_of_epochs': 100,'train_batch_size': 128,'lr':0.001}
1015
            >>> mass_spring_damper.trainModel(splits=[70,20,10], training_params = params)
1016

1017
        Example - recurrent training:
1018
            >>> x = Input('x')
1019
            >>> F = Input('F')
1020

1021
            >>> xk1 = Output('x[k+1]', Fir()(x.tw(0.2))+Fir()(F.last()))
1022

1023
            >>> mass_spring_damper = Modely(seed=0)
1024
            >>> mass_spring_damper.addModel('xk1',xk1)
1025
            >>> mass_spring_damper.addClosedLoop(xk1, x)
1026
            >>> mass_spring_damper.neuralizeModel(sample_time = 0.05) 
1027

1028
            >>> data_struct = ['time','x','dx','F']
1029
            >>> data_folder = os.path.join(os.path.dirname(os.path.realpath(__file__)),'dataset','data')
1030
            >>> mass_spring_damper.loadData(name='mass_spring_dataset', source=data_folder, format=data_struct, delimiter=';')
1031

1032
            >>> params = {'num_of_epochs': 100,'train_batch_size': 128,'lr':0.001}
1033
            >>> mass_spring_damper.trainModel(splits=[70,20,10], prediction_samples=10, training_params = params)
1034
        """
1035
        check(self.data_loaded, RuntimeError, 'There is no data loaded! The Training will stop.')
1✔
1036
        check(list(self.model.parameters()), RuntimeError, 'There are no modules with learnable parameters! The Training will stop.')
1✔
1037

1038
        ## Get running parameter from dict
1039
        self.run_training_params = copy.deepcopy(self.__get_train_parameters(training_params))
1✔
1040

1041
        ## Get connect and closed_loop
1042
        prediction_samples = self.__get_parameter(prediction_samples = prediction_samples)
1✔
1043
        check(prediction_samples >= 0, KeyError, 'The sample horizon must be positive!')
1✔
1044

1045
        ## Check close loop and connect
1046
        if self.log_internal:
1✔
1047
            self.internals = {}
1✔
1048
        step = self.__get_parameter(step = step)
1✔
1049
        closed_loop = self.__get_parameter(closed_loop = closed_loop)
1✔
1050
        connect = self.__get_parameter(connect = connect)
1✔
1051
        recurrent_train = True
1✔
1052
        if closed_loop:
1✔
1053
            for input, output in closed_loop.items():
1✔
1054
                check(input in self.model_def['Inputs'], ValueError, f'the tag {input} is not an input variable.')
1✔
1055
                check(output in self.model_def['Outputs'], ValueError, f'the tag {output} is not an output of the network')
1✔
1056
                log.warning(f'Recurrent train: closing the loop between the input port {input} and the output port {output} for {prediction_samples} samples')
1✔
1057
        elif connect:
1✔
1058
            for connect_in, connect_out in connect.items():
1✔
1059
                check(connect_in in self.model_def['Inputs'], ValueError, f'the tag {connect_in} is not an input variable.')
1✔
1060
                check(connect_out in self.model_def['Outputs'], ValueError, f'the tag {connect_out} is not an output of the network')
1✔
1061
                log.warning(f'Recurrent train: connecting the input port {connect_in} with the output port {connect_out} for {prediction_samples} samples')
1✔
1062
        elif self.model_def['States']: ## if we have state variables we have to do the recurrent train
1✔
1063
            log.warning(f"Recurrent train: update States variables {list(self.model_def['States'].keys())} for {prediction_samples} samples")
1✔
1064
        else:
1065
            if prediction_samples != 0:
1✔
1066
                log.warning(
1✔
1067
                    f"The value of the prediction_samples={prediction_samples} is not used in not recursive network.")
1068
            recurrent_train = False
1✔
1069
        self.run_training_params['recurrent_train'] = recurrent_train
1✔
1070

1071
        ## Get early stopping
1072
        early_stopping = self.__get_parameter(early_stopping = early_stopping)
1✔
1073
        if early_stopping:
1✔
1074
            self.run_training_params['early_stopping'] = early_stopping.__name__
×
1075
        early_stopping_params = self.__get_parameter(early_stopping_params = early_stopping_params)
1✔
1076

1077
        ## Get dataset for training
1078
        shuffle_data = self.__get_parameter(shuffle_data = shuffle_data)
1✔
1079

1080
        ## Get the dataset name
1081
        train_dataset = self.__get_parameter(train_dataset = train_dataset)
1✔
1082
        #TODO manage multiple datasets
1083
        if train_dataset is None: ## If we use all datasets with the splits
1✔
1084
            splits = self.__get_parameter(splits = splits)
1✔
1085
            check(len(splits)==3, ValueError, 'The splits must contain 3 elements: training, validation and test percentages.')
1✔
1086
            check(sum(splits)==100, ValueError, 'Training, Validation and Test splits must sum up to 100.')
1✔
1087
            check(splits[0] > 0, ValueError, 'The training split cannot be zero.')
1✔
1088

1089
            ## Get the dataset name
1090
            dataset = list(self.data.keys())[0] ## take the dataset name
1✔
1091
            train_dataset_name = val_dataset_name = test_dataset_name = dataset
1✔
1092

1093
            ## Collect the split sizes
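            ## Worked example of the split arithmetic below: with num_of_samples=1000 and splits=[70,20,10],
            ## train_size=0.7 -> n_samples_train=700, n_samples_test=round(1000*0.10)=100, n_samples_val=1000-700-100=200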
1094
            train_size = splits[0] / 100.0
1✔
1095
            val_size = splits[1] / 100.0
1✔
1096
            test_size = 1 - (train_size + val_size)
1✔
1097
            num_of_samples = self.num_of_samples[dataset]
1✔
1098
            n_samples_train = round(num_of_samples*train_size)
1✔
1099
            if splits[1] == 0:
1✔
1100
                n_samples_test = num_of_samples-n_samples_train
1✔
1101
                n_samples_val = 0
1✔
1102
            else:
1103
                n_samples_test = round(num_of_samples*test_size)
1✔
1104
                n_samples_val = num_of_samples-n_samples_train-n_samples_test
1✔
1105

1106
            ## Split into train, validation and test
1107
            XY_train, XY_val, XY_test = {}, {}, {}
1✔
1108
            for key, samples in self.data[dataset].items():
1✔
1109
                if val_size == 0.0 and test_size == 0.0: ## we have only training set
1✔
1110
                    XY_train[key] = torch.from_numpy(samples).to(TORCH_DTYPE)
1✔
1111
                elif val_size == 0.0 and test_size != 0.0: ## we have only training and test set
1✔
1112
                    XY_train[key] = torch.from_numpy(samples[:n_samples_train]).to(TORCH_DTYPE)
1✔
1113
                    XY_test[key] = torch.from_numpy(samples[n_samples_train:]).to(TORCH_DTYPE)
1✔
1114
                elif val_size != 0.0 and test_size == 0.0: ## we have only training and validation set
1✔
1115
                    XY_train[key] = torch.from_numpy(samples[:n_samples_train]).to(TORCH_DTYPE)
1✔
1116
                    XY_val[key] = torch.from_numpy(samples[n_samples_train:]).to(TORCH_DTYPE)
1✔
1117
                else: ## we have training, validation and test set
1118
                    XY_train[key] = torch.from_numpy(samples[:n_samples_train]).to(TORCH_DTYPE)
1✔
1119
                    XY_val[key] = torch.from_numpy(samples[n_samples_train:-n_samples_test]).to(TORCH_DTYPE)
1✔
1120
                    XY_test[key] = torch.from_numpy(samples[n_samples_train+n_samples_val:]).to(TORCH_DTYPE)
1✔
1121

1122
            ## Set names for resultAnalysis
1123
            train_dataset = self.__get_parameter(train_dataset = f"train_{dataset}_{train_size:0.2f}")
1✔
1124
            validation_dataset = self.__get_parameter(validation_dataset =f"validation_{dataset}_{val_size:0.2f}")
1✔
1125
            test_dataset = self.__get_parameter(test_dataset = f"test_{dataset}_{test_size:0.2f}")
1✔
1126
        else: ## Multi-Dataset
1127
            ## Get the names of the datasets
1128
            datasets = list(self.data.keys())
1✔
1129
            validation_dataset = self.__get_parameter(validation_dataset=validation_dataset)
1✔
1130
            test_dataset = self.__get_parameter(test_dataset=test_dataset)
1✔
1131
            train_dataset_name, val_dataset_name, test_dataset_name = train_dataset, validation_dataset, test_dataset
1✔
1132

1133
            ## Collect the number of samples for each dataset
1134
            n_samples_train, n_samples_val, n_samples_test = 0, 0, 0
1✔
1135

1136
            check(train_dataset in datasets, KeyError, f'{train_dataset} Not Loaded!')
1✔
1137
            if validation_dataset is not None and validation_dataset not in datasets:
1✔
1138
                log.warning(f'Validation Dataset [{validation_dataset}] Not Loaded. The training will continue without validation')
×
1139
            if test_dataset is not None and test_dataset not in datasets:
1✔
1140
                log.warning(f'Test Dataset [{test_dataset}] Not Loaded. The training will continue without test')
×
1141

1142
            ## Split into train, validation and test
1143
            XY_train, XY_val, XY_test = {}, {}, {}
1✔
1144
            n_samples_train = self.num_of_samples[train_dataset]
1✔
1145
            XY_train = {key: torch.from_numpy(val).to(TORCH_DTYPE) for key, val in self.data[train_dataset].items()}
1✔
1146
            if validation_dataset in datasets:
1✔
1147
                n_samples_val = self.num_of_samples[validation_dataset]
1✔
1148
                XY_val = {key: torch.from_numpy(val).to(TORCH_DTYPE) for key, val in self.data[validation_dataset].items()}
1✔
1149
            if test_dataset in datasets:
1✔
1150
                n_samples_test = self.num_of_samples[test_dataset]
1✔
1151
                XY_test = {key: torch.from_numpy(val).to(TORCH_DTYPE) for key, val in self.data[test_dataset].items()}
1✔
1152

1153
        for key in XY_train.keys():
1✔
1154
            assert n_samples_train == XY_train[key].shape[0], f'The number of train samples {n_samples_train}!={XY_train[key].shape[0]} not compliant.'
1✔
1155
            if key in XY_val:
1✔
1156
                assert n_samples_val == XY_val[key].shape[0], f'The number of val samples {n_samples_val}!={XY_val[key].shape[0]} not compliant.'
1✔
1157
            if key in XY_test:
1✔
1158
                assert n_samples_test == XY_test[key].shape[0], f'The number of test samples {n_samples_test}!={XY_test[key].shape[0]} not compliant.'
1✔
1159

1160
        assert n_samples_train > 0, f'There are {n_samples_train} samples for training.'
1✔
1161
        self.run_training_params['n_samples_train'] = n_samples_train
1✔
1162
        self.run_training_params['n_samples_val'] = n_samples_val
1✔
1163
        self.run_training_params['n_samples_test'] = n_samples_test
1✔
1164
        train_batch_size, val_batch_size, test_batch_size = self.__get_batch_sizes(train_batch_size, val_batch_size, test_batch_size)
1✔
1165

1166
        ## Define the optimizer
1167
        optimizer = self.__inizilize_optimizer(optimizer, optimizer_params, optimizer_defaults, add_optimizer_params, add_optimizer_defaults, models, lr, lr_param)
1✔
1168
        self.run_training_params['optimizer'] = optimizer.name
1✔
1169
        self.run_training_params['optimizer_params'] = optimizer.optimizer_params
1✔
1170
        self.run_training_params['optimizer_defaults'] = optimizer.optimizer_defaults
1✔
1171
        self.optimizer = optimizer.get_torch_optimizer()
1✔
1172

1173
        ## Get num_of_epochs
1174
        num_of_epochs = self.__get_parameter(num_of_epochs = num_of_epochs)
1✔
1175

1176
        ## Define the loss functions
1177
        minimize_gain = self.__get_parameter(minimize_gain = minimize_gain)
1✔
1178
        self.run_training_params['minimizers'] = {}
1✔
1179
        for name, values in self.model_def['Minimizers'].items():
1✔
1180
            self.loss_functions[name] = CustomLoss(values['loss'])
1✔
1181
            self.run_training_params['minimizers'][name] = {}
1✔
1182
            self.run_training_params['minimizers'][name]['A'] = values['A']
1✔
1183
            self.run_training_params['minimizers'][name]['B'] = values['B']
1✔
1184
            self.run_training_params['minimizers'][name]['loss'] = values['loss']
1✔
1185
            if name in minimize_gain:
1✔
1186
                self.run_training_params['minimizers'][name]['gain'] = minimize_gain[name]
1✔
1187

1188
        ## Clean the dict of the training parameter
1189
        del self.run_training_params['minimize_gain']
1✔
1190
        del self.run_training_params['lr']
1✔
1191
        del self.run_training_params['lr_param']
1✔
1192
        if not recurrent_train:
1✔
1193
            del self.run_training_params['connect']
1✔
1194
            del self.run_training_params['closed_loop']
1✔
1195
            del self.run_training_params['step']
1✔
1196
            del self.run_training_params['prediction_samples']
1✔
1197
        if early_stopping is None:
1✔
1198
            del self.run_training_params['early_stopping']
1✔
1199
            del self.run_training_params['early_stopping_params']
1✔
1200

1201
        ## Create the train, validation and test loss dictionaries
1202
        train_losses, val_losses, test_losses = {}, {}, {}
1✔
1203
        for key in self.model_def['Minimizers'].keys():
1✔
1204
            train_losses[key] = []
1✔
1205
            if n_samples_val > 0:
1✔
1206
                val_losses[key] = []
1✔
1207

1208
        ## Check the needed keys are in the datasets
1209
        keys = set(self.model_def['Inputs'].keys())
1✔
1210
        keys |= {value['A'] for value in self.model_def['Minimizers'].values()}|{value['B'] for value in self.model_def['Minimizers'].values()}
1✔
1211
        keys -= set(self.model_def['Relations'].keys())
1✔
1212
        keys -= set(self.model_def['States'].keys())
1✔
1213
        keys -= set(self.model_def['Outputs'].keys())
1✔
1214
        if 'connect' in self.run_training_params:
1✔
1215
            keys -= set(self.run_training_params['connect'].keys())
1✔
1216
        if 'closed_loop' in self.run_training_params:
1✔
1217
            keys -= set(self.run_training_params['closed_loop'].keys())
1✔
1218
        check(set(keys).issubset(set(XY_train.keys())), KeyError, f"Not all the mandatory keys {keys} are present in the training dataset {set(XY_train.keys())}.")
1✔
1219

1220
        # Evaluate the number of update for epochs and the unsued samples
1221
        if recurrent_train:
1✔
1222
            list_of_batch_indexes = range(0, (n_samples_train - train_batch_size - prediction_samples + 1), (train_batch_size + step))
1✔
1223
            check(n_samples_train - train_batch_size - prediction_samples + 1 > 0, ValueError,
1✔
1224
                  f"The number of available sample are (n_samples_train ({n_samples_train}) - train_batch_size ({train_batch_size}) - prediction_samples ({prediction_samples}) + 1) = {n_samples_train - train_batch_size - prediction_samples + 1}.")
1225
            update_per_epochs = (n_samples_train - train_batch_size - prediction_samples + 1)//(train_batch_size + step) + 1
1✔
1226
            unused_samples = n_samples_train - list_of_batch_indexes[-1] - train_batch_size - prediction_samples
1✔
1227
        else:
1228
            update_per_epochs = (n_samples_train - train_batch_size)//train_batch_size + 1
1✔
1229
            unused_samples = n_samples_train - update_per_epochs * train_batch_size
1✔
1230

1231
        self.run_training_params['update_per_epochs'] = update_per_epochs
1✔
1232
        self.run_training_params['unused_samples'] = unused_samples
1✔
1233

1234
        ## Select the model
1235
        select_model = self.__get_parameter(select_model = select_model)
1✔
1236
        select_model_params = self.__get_parameter(select_model_params = select_model_params)
1✔
1237
        selected_model_def = ModelDef(self.model_def.json)
        best_model_epoch = 0  ## fallback epoch in case select_model never selects one
1✔
1238

1239
        ## Show the training parameters
1240
        self.visualizer.showTrainParams()
1✔
1241

1242
        import time
1✔
1243
        ## start the train timer
1244
        start = time.time()
1✔
1245
        self.visualizer.showStartTraining()
1✔
1246

1247
        for epoch in range(num_of_epochs):
1✔
1248
            ## TRAIN
1249
            self.model.train()
1✔
1250
            if recurrent_train:
1✔
1251
                losses = self.__recurrentTrain(XY_train, n_samples_train, train_dataset_name, train_batch_size, minimize_gain, closed_loop, connect, prediction_samples, step, shuffle=shuffle_data, train=True)
1✔
1252
            else:
1253
                losses = self.__Train(XY_train, n_samples_train, train_batch_size, minimize_gain, shuffle=shuffle_data, train=True)
1✔
1254
            ## save the losses
1255
            for ind, key in enumerate(self.model_def['Minimizers'].keys()):
1✔
1256
                train_losses[key].append(torch.mean(losses[ind]).tolist())
1✔
1257

1258
            if n_samples_val > 0:
1✔
1259
                ## VALIDATION
1260
                self.model.eval()
1✔
1261
                if recurrent_train:
1✔
1262
                    losses = self.__recurrentTrain(XY_val, n_samples_val, val_dataset_name, val_batch_size, minimize_gain, closed_loop, connect, prediction_samples, step, shuffle=False, train=False)
1✔
1263
                else:
1264
                    losses = self.__Train(XY_val, n_samples_val, val_batch_size, minimize_gain, shuffle=False, train=False)
1✔
1265
                ## save the losses
1266
                for ind, key in enumerate(self.model_def['Minimizers'].keys()):
1✔
1267
                    val_losses[key].append(torch.mean(losses[ind]).tolist())
1✔
1268

1269
            ## Early-stopping
1270
            if callable(early_stopping):
1✔
1271
                if early_stopping(train_losses, val_losses, early_stopping_params):
×
1272
                    log.info(f'Stopping the training at epoch {epoch} due to early stopping.')
×
1273
                    break
×
1274

1275
            if callable(select_model):
1✔
1276
                if select_model(train_losses, val_losses, select_model_params):
×
1277
                    best_model_epoch = epoch
×
1278
                    selected_model_def.updateParameters(self.model)
×
1279

1280
            ## Visualize the training...
1281
            self.visualizer.showTraining(epoch, train_losses, val_losses)
1✔
1282
            self.visualizer.showWeightsInTrain(epoch = epoch)
1✔
1283

1284
        ## Save the training time
1285
        end = time.time()
1✔
1286
        ## Visualize the training time
1287
        for key in self.model_def['Minimizers'].keys():
1✔
1288
            self.training[key] = {'train': train_losses[key]}
1✔
1289
            if n_samples_val > 0:
1✔
1290
                self.training[key]['val'] = val_losses[key]
1✔
1291
        self.visualizer.showEndTraining(num_of_epochs-1, train_losses, val_losses)
1✔
1292
        self.visualizer.showTrainingTime(end-start)
1✔
1293

1294
        ## Select the model
1295
        if callable(select_model):
1✔
1296
            log.info(f'Selected the model at the epoch {best_model_epoch+1}.')
×
1297
            self.model = Model(selected_model_def)
×
1298
        else:
1299
            log.info('The selected model is the LAST model of the training.')
1✔
1300

1301
        self.resultAnalysis(train_dataset, XY_train, minimize_gain, closed_loop, connect,  prediction_samples, step, train_batch_size)
1✔
1302
        if self.run_training_params['n_samples_val'] > 0:
1✔
1303
            self.resultAnalysis(validation_dataset, XY_val, minimize_gain, closed_loop, connect,  prediction_samples, step, val_batch_size)
1✔
1304
        if self.run_training_params['n_samples_test'] > 0:
1✔
1305
            self.resultAnalysis(test_dataset, XY_test, minimize_gain, closed_loop, connect,  prediction_samples, step, test_batch_size)
1✔
1306

1307
        self.visualizer.showResults()
1✔
1308

1309
        ## Get trained model from torch and set the model_def
1310
        self.model_def.updateParameters(self.model)
1✔
1311

1312
    def __recurrentTrain(self, data, n_samples, dataset_name, batch_size, loss_gains, closed_loop, connect, prediction_samples, step, shuffle=False, train=True):
1✔
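        """
        Runs one epoch of recurrent training (or evaluation when train=False).

        Closed-loop and connected inputs are fed back from the model outputs for `prediction_samples`
        consecutive steps, while the remaining (mandatory) inputs are read from `data` at every step.
        Batches of `batch_size` window starts are drawn (randomly when shuffle=True) and `step` extra
        starts are discarded between batches. Returns a tensor of shape [n_minimizers, n_batches]
        with the per-batch losses.
        """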
1313
        model_inputs = list(self.model_def['Inputs'].keys())
1✔
1314
        state_closed_loop = [key for key, value in self.model_def['States'].items() if 'closedLoop' in value.keys()] + list(closed_loop.keys())
1✔
1315
        state_connect = [key for key, value in self.model_def['States'].items() if 'connect' in value.keys()] + list(connect.keys())
1✔
1316
        non_mandatory_inputs = state_closed_loop + state_connect 
1✔
1317
        mandatory_inputs = list(set(model_inputs) - set(non_mandatory_inputs))
1✔
1318

1319
        n_available_samples = n_samples - prediction_samples
1✔
1320
        list_of_batch_indexes = list(range(n_available_samples))
1✔
1321

1322
        ## Remove forbidden indexes in case of a multi-file dataset
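        ## (self.multifile[dataset_name] presumably holds the sample indexes where a new file begins;
        ## window starts closer than prediction_samples to such a boundary are dropped so that no
        ## recurrent window spans two different files)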
1323
        if dataset_name in self.multifile.keys(): ## Multi-file Dataset
1✔
1324
            if n_samples == self.run_training_params['n_samples_train']: ## Training
1✔
1325
                start_idx, end_idx = 0, n_samples
1✔
1326
            elif n_samples == self.run_training_params['n_samples_val']: ## Validation
1✔
1327
                start_idx, end_idx = self.run_training_params['n_samples_train'], self.run_training_params['n_samples_train'] + n_samples
1✔
1328
            else: ## Test
1329
                start_idx, end_idx = self.run_training_params['n_samples_train'] + self.run_training_params['n_samples_val'], self.run_training_params['n_samples_train'] + self.run_training_params['n_samples_val'] + n_samples
×
1330
            forbidden_idxs = []
1✔
1331
            for i in self.multifile[dataset_name]:
1✔
1332
                if i < end_idx and i > start_idx:
1✔
1333
                    forbidden_idxs.extend(range(i-prediction_samples, i, 1))
1✔
1334
            list_of_batch_indexes = [idx for idx in list_of_batch_indexes if idx not in forbidden_idxs]
1✔
1335

1336
        ## Clip the step 
1337
        if step < 0: ## clip the step to zero
1✔
1338
            log.warning(f"The step is negative ({step}). The step is set to zero.", stacklevel=5)
1✔
1339
            step = 0
1✔
1340
        if step > (len(list_of_batch_indexes)-batch_size): ## Clip the step to the maximum number of samples
1✔
1341
            log.warning(f"The step ({step}) is greater than the number of available samples ({len(list_of_batch_indexes)-batch_size}). The step is set to the maximum number.", stacklevel=5)
1✔
1342
            step = len(list_of_batch_indexes)-batch_size
1✔
1343
        ## Loss vector 
1344
        check((batch_size+step)>0, ValueError, f"The batch_size+step must be greater than 0.")
1✔
1345
        aux_losses = torch.zeros([len(self.model_def['Minimizers']), round(len(list_of_batch_indexes)/(batch_size+step))])
1✔
1346

1347
        ## Update with virtual states
1348
        self.model.update(closed_loop=closed_loop, connect=connect)
1✔
1349
        X = {}
1✔
1350
        batch_val = 0
1✔
1351
        while len(list_of_batch_indexes) >= batch_size:
1✔
1352
            idxs = random.sample(list_of_batch_indexes, batch_size) if shuffle else list_of_batch_indexes[:batch_size]
1✔
1353
            for num in idxs:
1✔
1354
                list_of_batch_indexes.remove(num)
1✔
1355
            if step > 0:
1✔
1356
                if len(list_of_batch_indexes) >= step:
1✔
1357
                    step_idxs = random.sample(list_of_batch_indexes, step) if shuffle else list_of_batch_indexes[:step]
1✔
1358
                    for num in step_idxs:
1✔
1359
                        list_of_batch_indexes.remove(num)
1✔
1360
            if train:
1✔
1361
                self.optimizer.zero_grad() ## Reset the gradient
1✔
1362
            ## Reset 
1363
            horizon_losses = {ind: [] for ind in range(len(self.model_def['Minimizers']))}
1✔
1364
            for key in non_mandatory_inputs:
1✔
1365
                if key in data.keys():
1✔
1366
                    ## with data
1367
                    X[key] = data[key][idxs]
1✔
1368
                else: ## with zeros
1369
                    window_size = self.input_n_samples[key]
1✔
1370
                    dim = self.model_def['Inputs'][key]['dim'] if key in model_inputs else self.model_def['States'][key]['dim']
1✔
1371
                    X[key] = torch.zeros(size=(batch_size, window_size, dim), dtype=TORCH_DTYPE, requires_grad=False)
1✔
1372
                    self.states[key] = X[key]
1✔
1373

1374
            for horizon_idx in range(prediction_samples + 1):
1✔
1375
                ## Get data 
1376
                for key in mandatory_inputs:
1✔
1377
                    X[key] = data[key][[idx+horizon_idx for idx in idxs]]
1✔
1378
                ## Forward pass
1379
                out, minimize_out, out_closed_loop, out_connect = self.model(X)
1✔
1380

1381
                if self.log_internal and train:
1✔
1382
                    internals_dict = {'XY':tensor_to_list(X),'out':out,'param':self.model.all_parameters,'closedLoop':self.model.closed_loop_update,'connect':self.model.connect_update}
1✔
1383

1384
                ## Loss Calculation
1385
                for ind, (key, value) in enumerate(self.model_def['Minimizers'].items()):
1✔
1386
                    loss = self.loss_functions[key](minimize_out[value['A']], minimize_out[value['B']])
1✔
1387
                    loss = (loss * loss_gains[key]) if key in loss_gains.keys() else loss  ## Multiply by the gain if necessary
1✔
1388
                    horizon_losses[ind].append(loss)
1✔
1389

1390
                ## Update
1391
                for key, val in out_closed_loop.items():
1✔
1392
                    shift = val.shape[1]  ## take the output time dimension
1✔
1393
                    X[key] = torch.roll(X[key], shifts=-1, dims=1) ## Roll the time window
1✔
1394
                    X[key][:, -shift:, :] = val ## substitute with the predicted value
1✔
1395
                    self.states[key] = X[key].clone()
1✔
1396
                for key, value in out_connect.items():
1✔
1397
                    X[key] = value
1✔
1398
                    self.states[key] = X[key].clone()
1✔
1399

1400
                if self.log_internal and train:
1✔
1401
                    internals_dict['state'] = self.states
1✔
1402
                    self.__save_internal('inout_'+str(batch_val)+'_'+str(horizon_idx),internals_dict)
1✔
1403

1404
            ## Calculate the total loss
1405
            total_loss = 0
1✔
1406
            for ind in range(len(self.model_def['Minimizers'])):
1✔
1407
                loss = sum(horizon_losses[ind])/(prediction_samples+1)
1✔
1408
                aux_losses[ind][batch_val] = loss.item()
1✔
1409
                total_loss += loss
1✔
1410

1411
            ## Gradient Step
1412
            if train:
1✔
1413
                total_loss.backward() ## Backpropagate the error
1✔
1414
                self.optimizer.step()
1✔
1415
                self.visualizer.showWeightsInTrain(batch = batch_val)
1✔
1416
            batch_val += 1
1✔
1417

1418
        ## Remove virtual states
1419
        for key in (connect.keys() | closed_loop.keys()):
1✔
1420
            if key in self.states.keys():
1✔
1421
                del self.states[key]
1✔
1422

1423
        ## return the losses
1424
        return aux_losses
1✔
1425

1426
    def __Train(self, data, n_samples, batch_size, loss_gains, shuffle=True, train=True):
1✔
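        """
        Runs one epoch of standard (non-recurrent) training or evaluation.

        The dataset is optionally shuffled and processed in contiguous mini-batches of `batch_size`
        samples. Returns a tensor of shape [n_minimizers, n_batches] with the per-batch losses.
        """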
1427
        check((n_samples - batch_size + 1) > 0, ValueError,
1✔
1428
              f"The number of available sample are (n_samples_train - train_batch_size + 1) = {n_samples - batch_size + 1}.")
1429
        if shuffle:
1✔
1430
            randomize = torch.randperm(n_samples)
1✔
1431
            data = {key: val[randomize] for key, val in data.items()}
1✔
1432
        ## Initialize the train losses vector
1433
        aux_losses = torch.zeros([len(self.model_def['Minimizers']),n_samples//batch_size])
1✔
1434
        for idx in range(0, (n_samples - batch_size + 1), batch_size):
1✔
1435
            ## Build the input tensor
1436
            XY = {key: val[idx:idx+batch_size] for key, val in data.items()}
1✔
1437
            ## Reset gradient
1438
            if train:
1✔
1439
                self.optimizer.zero_grad()
1✔
1440
            ## Model Forward
1441
            _, minimize_out, _, _ = self.model(XY)  ## Forward pass
1✔
1442
            ## Loss Calculation
1443
            total_loss = 0
1✔
1444
            for ind, (key, value) in enumerate(self.model_def['Minimizers'].items()):
1✔
1445
                loss = self.loss_functions[key](minimize_out[value['A']], minimize_out[value['B']])
1✔
1446
                loss = (loss * loss_gains[key]) if key in loss_gains.keys() else loss  ## Multiply by the gain if necessary
1✔
1447
                aux_losses[ind][idx//batch_size] = loss.item()
1✔
1448
                total_loss += loss
1✔
1449
            ## Gradient step
1450
            if train:
1✔
1451
                total_loss.backward()
1✔
1452
                self.optimizer.step()
1✔
1453
                self.visualizer.showWeightsInTrain(batch = idx//batch_size)
1✔
1454

1455
        ## return the losses
1456
        return aux_losses
1✔
1457

1458
    def resultAnalysis(self, dataset, data = None, minimize_gain = {}, closed_loop = {}, connect = {},  prediction_samples = None, step = 0, batch_size = None):
1✔
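        """
        Evaluates the (trained) model on a dataset and fills self.performance and self.prediction.

        For every minimizer the chosen loss, the fraction of variance unexplained (fvu) and the
        Akaike information criterion (aic) are computed; the evaluation is recurrent whenever the
        network has states or closed_loop/connect mappings and prediction_samples is not None.

        Example
        -------
        Example usage:
            >>> model.resultAnalysis('test_dataset')
        """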
1459
        import warnings
1✔
1460
        with torch.inference_mode():
1✔
1461
            ## Init model for results analysis
1462
            self.model.eval()
1✔
1463
            self.performance[dataset] = {}
1✔
1464
            self.prediction[dataset] = {}
1✔
1465
            A = {}
1✔
1466
            B = {}
1✔
1467
            total_losses = {}
1✔
1468

1469
            # Create the losses
1470
            losses = {}
1✔
1471
            for name, values in self.model_def['Minimizers'].items():
1✔
1472
                losses[name] = CustomLoss(values['loss'])
1✔
1473

1474
            recurrent = False
1✔
1475
            if (closed_loop or connect or self.model_def['States']) and prediction_samples is not None:
1✔
1476
                recurrent = True
1✔
1477

1478
            if data is None:
1✔
1479
                check(dataset in self.data.keys(), ValueError, f'The dataset {dataset} is not loaded!')
1✔
1480
                data = {key: torch.from_numpy(val).to(TORCH_DTYPE) for key, val in self.data[dataset].items()}
1✔
1481
            n_samples = len(data[list(data.keys())[0]])
1✔
1482

1483
            if recurrent:
1✔
1484
                batch_size = batch_size if batch_size is not None else n_samples - prediction_samples
1✔
1485

1486
                model_inputs = list(self.model_def['Inputs'].keys())
1✔
1487

1488
                state_closed_loop = [key for key, value in self.model_def['States'].items() if 'closedLoop' in value.keys()] + list(closed_loop.keys())
1✔
1489
                state_connect = [key for key, value in self.model_def['States'].items() if 'connect' in value.keys()] + list(connect.keys())
1✔
1490

1491
                non_mandatory_inputs = state_closed_loop + state_connect 
1✔
1492
                mandatory_inputs = list(set(model_inputs) - set(non_mandatory_inputs))
1✔
1493

1494
                for key, value in self.model_def['Minimizers'].items():
1✔
1495
                    total_losses[key], A[key], B[key] = [], [], []
1✔
1496
                    for horizon_idx in range(prediction_samples + 1):
1✔
1497
                        A[key].append([])
1✔
1498
                        B[key].append([])
1✔
1499
                
1500
                list_of_batch_indexes = list(range(n_samples - prediction_samples))
1✔
1501
                ## Remove forbidden indexes in case of a multi-file dataset
1502
                if dataset in self.multifile.keys(): ## Multi-file Dataset
1✔
1503
                    if n_samples == self.run_training_params['n_samples_train']: ## Training
×
1504
                        start_idx, end_idx = 0, n_samples
×
1505
                    elif n_samples == self.run_training_params['n_samples_val']: ## Validation
×
1506
                        start_idx, end_idx = self.run_training_params['n_samples_train'], self.run_training_params['n_samples_train'] + n_samples
×
1507
                    else: ## Test
1508
                        start_idx, end_idx = self.run_training_params['n_samples_train'] + self.run_training_params['n_samples_val'], self.run_training_params['n_samples_train'] + self.run_training_params['n_samples_val'] + n_samples
×
1509
                    forbidden_idxs = []
×
1510
                    for i in self.multifile[dataset]:
×
1511
                        if i < end_idx and i > start_idx:
×
1512
                            forbidden_idxs.extend(range(i-prediction_samples, i, 1))
×
1513
                    list_of_batch_indexes = [idx for idx in list_of_batch_indexes if idx not in forbidden_idxs]
×
1514

1515
                ## Clip the step 
1516
                if step < 0: ## clip the step to zero
1✔
1517
                    log.warning(f"The step is negative ({step}). The step is set to zero.", stacklevel=5)
1✔
1518
                    step = 0
1✔
1519
                if step > (len(list_of_batch_indexes)-batch_size): ## Clip the step to the maximum number of samples
1✔
1520
                    log.warning(f"The step ({step}) is greater than the number of available samples ({len(list_of_batch_indexes)-batch_size}). The step is set to the maximum number.", stacklevel=5)
1✔
1521
                    step = len(list_of_batch_indexes)-batch_size
1✔
1522

1523
                X = {}
1✔
1524
                ## Update with virtual states
1525
                self.model.update(closed_loop=closed_loop, connect=connect)
1✔
1526
                while len(list_of_batch_indexes) >= batch_size:
1✔
1527
                    idxs = list_of_batch_indexes[:batch_size]
1✔
1528
                    for num in idxs:
1✔
1529
                        list_of_batch_indexes.remove(num)
1✔
1530
                    if step > 0:
1✔
1531
                        if len(list_of_batch_indexes) >= step:
1✔
1532
                            step_idxs = list_of_batch_indexes[:step]
1✔
1533
                            for num in step_idxs:
1✔
1534
                                list_of_batch_indexes.remove(num)
1✔
1535
                    ## Reset 
1536
                    horizon_losses = {key: [] for key in self.model_def['Minimizers'].keys()}
1✔
1537
                    for key in non_mandatory_inputs:
1✔
1538
                        if key in data.keys(): # and len(data[key]) >= (idx + self.input_n_samples[key]): 
1✔
1539
                        ## with data
1540
                            X[key] = data[key][idxs]
1✔
1541
                        else: ## with zeros
1542
                            window_size = self.input_n_samples[key]
1✔
1543
                            dim = self.model_def['Inputs'][key]['dim'] if key in model_inputs else self.model_def['States'][key]['dim']
1✔
1544
                            X[key] = torch.zeros(size=(batch_size, window_size, dim), dtype=TORCH_DTYPE, requires_grad=False)
1✔
1545
                            self.states[key] = X[key]
1✔
1546

1547
                    for horizon_idx in range(prediction_samples + 1):
1✔
1548
                        ## Get data 
1549
                        for key in mandatory_inputs:
1✔
1550
                            X[key] = data[key][[idx+horizon_idx for idx in idxs]]
1✔
1551
                        ## Forward pass
1552
                        out, minimize_out, out_closed_loop, out_connect = self.model(X)
1✔
1553

1554
                        ## Loss Calculation
1555
                        for key, value in self.model_def['Minimizers'].items():
1✔
1556
                            A[key][horizon_idx].append(minimize_out[value['A']])
1✔
1557
                            B[key][horizon_idx].append(minimize_out[value['B']])
1✔
1558
                            loss = losses[key](minimize_out[value['A']], minimize_out[value['B']])
1✔
1559
                            loss = (loss * minimize_gain[key]) if key in minimize_gain.keys() else loss  ## Multiply by the gain if necessary
1✔
1560
                            horizon_losses[key].append(loss)
1✔
1561

1562
                        ## Update
1563
                        for key, val in out_closed_loop.items():
1✔
1564
                            shift = val.shape[1]  ## take the output time dimension
1✔
1565
                            X[key] = torch.roll(X[key], shifts=-1, dims=1) ## Roll the time window
1✔
1566
                            X[key][:, -shift:, :] = val ## substitute with the predicted value
1✔
1567
                            self.states[key] = X[key].clone()
1✔
1568
                        for key, value in out_connect.items():
1✔
1569
                            X[key] = value
1✔
1570
                            self.states[key] = X[key].clone()
1✔
1571

1572
                    ## Calculate the total loss
1573
                    for key in self.model_def['Minimizers'].keys():
1✔
1574
                        loss = sum(horizon_losses[key]) / (prediction_samples + 1)
1✔
1575
                        total_losses[key].append(loss.detach().numpy())
1✔
1576

1577
                for key, value in self.model_def['Minimizers'].items():
1✔
1578
                    for horizon_idx in range(prediction_samples + 1):
1✔
1579
                        A[key][horizon_idx] = np.concatenate(A[key][horizon_idx])
1✔
1580
                        B[key][horizon_idx] = np.concatenate(B[key][horizon_idx])
1✔
1581
                    total_losses[key] = np.mean(total_losses[key])
1✔
1582

1583
            else:
1584
                if batch_size is None:
1✔
1585
                    batch_size = n_samples
1✔
1586

1587
                for key, value in self.model_def['Minimizers'].items():
1✔
1588
                    total_losses[key], A[key], B[key] = [], [], []
1✔
1589

1590
                for idx in range(0, (n_samples - batch_size + 1), batch_size):
1✔
1591
                    ## Build the input tensor
1592
                    XY = {key: val[idx:idx + batch_size] for key, val in data.items()}
1✔
1593

1594
                    ## Model Forward
1595
                    _, minimize_out, _, _ = self.model(XY)  ## Forward pass
1✔
1596
                    ## Loss Calculation
1597
                    for key, value in self.model_def['Minimizers'].items():
1✔
1598
                        A[key].append(minimize_out[value['A']].numpy())
1✔
1599
                        B[key].append(minimize_out[value['B']].numpy())
1✔
1600
                        loss = losses[key](minimize_out[value['A']], minimize_out[value['B']])
1✔
1601
                        loss = (loss * minimize_gain[key]) if key in minimize_gain.keys() else loss
1✔
1602
                        total_losses[key].append(loss.detach().numpy())
1✔
1603

1604
                for key, value in self.model_def['Minimizers'].items():
1✔
1605
                    A[key] = np.concatenate(A[key])
1✔
1606
                    B[key] = np.concatenate(B[key])
1✔
1607
                    total_losses[key] = np.mean(total_losses[key])
1✔
1608

1609
            for ind, (key, value) in enumerate(self.model_def['Minimizers'].items()):
1✔
1610
                A_np = np.array(A[key])
1✔
1611
                B_np = np.array(B[key])
1✔
1612
                self.performance[dataset][key] = {}
1✔
1613
                self.performance[dataset][key][value['loss']] = np.mean(total_losses[key]).item()
1✔
1614
                self.performance[dataset][key]['fvu'] = {}
1✔
1615
                # Compute FVU
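                # FVU (fraction of variance unexplained) = Var(A - B) / Var(reference), computed below
                # against both A and B: ~0 means the minimized pair match almost perfectly, ~1 means the
                # error variance is as large as the signal variance.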
1616
                residual = A_np - B_np
1✔
1617
                error_var = np.var(residual)
1✔
1618
                error_mean = np.mean(residual)
1✔
1619
                #error_var_manual = np.sum((residual-error_mean) ** 2) / (len(self.prediction['B'][ind]) - 0)
1620
                #print(f"{key} var np:{new_error_var} and var manual:{error_var_manual}")
1621
                with warnings.catch_warnings(record=True) as w:
1✔
1622
                    self.performance[dataset][key]['fvu']['A'] = (error_var / np.var(A_np)).item()
1✔
1623
                    self.performance[dataset][key]['fvu']['B'] = (error_var / np.var(B_np)).item()
1✔
1624
                    if w and np.var(A_np) == 0.0 and  np.var(B_np) == 0.0:
1✔
1625
                        self.performance[dataset][key]['fvu']['A'] = np.nan
1✔
1626
                        self.performance[dataset][key]['fvu']['B'] = np.nan
1✔
1627
                self.performance[dataset][key]['fvu']['total'] = np.mean([self.performance[dataset][key]['fvu']['A'],self.performance[dataset][key]['fvu']['B']]).item()
1✔
1628
                # Compute AIC
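                # AIC = -2*log_likelihood + 2*total_params, where the log-likelihood of the residuals
                # (p1 + p2 + p3 below) assumes a Gaussian error model with variance error_var; lower is better.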
1629
                #normal_dist = norm(0, error_var ** 0.5)
1630
                #probability_of_residual = normal_dist.pdf(residual)
1631
                #log_likelihood_first = sum(np.log(probability_of_residual))
1632
                p1 = -len(residual)/2.0*np.log(2*np.pi)
1✔
1633
                with warnings.catch_warnings(record=True) as w:
1✔
1634
                    p2 = -len(residual)/2.0*np.log(error_var)
1✔
1635
                    p3 = -1 / (2.0 * error_var) * np.sum(residual ** 2)
1✔
1636
                    if w and p2 == np.float32(np.inf) and p3 == np.float32(-np.inf):
1✔
1637
                        p2 = p3 = 0.0
1✔
1638
                log_likelihood = p1+p2+p3
1✔
1639
                #print(f"{key} log likelihood second mode:{log_likelihood} = {p1}+{p2}+{p3} first mode: {log_likelihood_first}")
1640
                total_params = sum(p.numel() for p in self.model.parameters() if p.requires_grad) #TODO check whether the number is doubled
1✔
1641
                #print(f"{key} total_params:{total_params}")
1642
                aic = - 2 * log_likelihood + 2 * total_params
1✔
1643
                #print(f"{key} aic:{aic}")
1644
                self.performance[dataset][key]['aic'] = {'value':aic,'total_params':total_params,'log_likelihood':log_likelihood}
1✔
1645
                # Prediction and target
1646
                self.prediction[dataset][key] = {}
1✔
1647
                self.prediction[dataset][key]['A'] = A_np.tolist()
1✔
1648
                self.prediction[dataset][key]['B'] = B_np.tolist()
1✔
1649

1650
            ## Remove virtual states
1651
            for key in (connect.keys() | closed_loop.keys()):
1✔
1652
                if key in self.states.keys():
1✔
1653
                    del self.states[key]
1✔
1654

1655
            self.performance[dataset]['total'] = {}
1✔
1656
            self.performance[dataset]['total']['mean_error'] = np.mean([value for key,value in total_losses.items()])
1✔
1657
            self.performance[dataset]['total']['fvu'] = np.mean([self.performance[dataset][key]['fvu']['total'] for key in self.model_def['Minimizers'].keys()])
1✔
1658
            self.performance[dataset]['total']['aic'] = np.mean([self.performance[dataset][key]['aic']['value']for key in self.model_def['Minimizers'].keys()])
1✔
1659

1660
        self.visualizer.showResult(dataset)
1✔
1661

1662
    def getWorkspace(self):
1✔
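        """
        Returns the path of the workspace folder used by the exporter.
        """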
1663
        return self.exporter.getWorkspace()
1✔
1664

1665
    def saveTorchModel(self, name = 'net', model_folder = None, models = None):
1✔
1666
        """
1667
        Saves the neural network model in PyTorch format.
1668

1669
        Parameters
1670
        ----------
1671
        name : str, optional
1672
            The name of the saved model file. Default is 'net'.
1673
        model_folder : str or None, optional
1674
            The folder to save the model file in. Default is None.
1675
        models : list or None, optional
1676
            A list of model names to save. If None, the entire model is saved. Default is None.
1677

1678
        Raises
1679
        ------
1680
        RuntimeError
1681
            If the model is not neuralized.
1682

1683
        Example
1684
        -------
1685
        Example usage:
1686
            >>> model = Modely()
1687
            >>> model.neuralizeModel()
1688
            >>> model.saveTorchModel(name='example_model', model_folder='path/to/save')
1689
        """
1690
        check(self.neuralized == True, RuntimeError, 'The model is not neuralized yet!')
1✔
1691
        if models is not None:
1✔
1692
            if name == 'net':
×
1693
                name += '_' + '_'.join(models)
×
1694
            model_def = ModelDef()
×
1695
            model_def.update(model_dict = {key: self.model_dict[key] for key in models if key in self.model_dict})
×
1696
            model_def.setBuildWindow(self.model_def['Info']['SampleTime'])
×
1697
            model_def.updateParameters(self.model)
×
1698
            model = Model(model_def.json)
×
1699
        else:
1700
            model = self.model
1✔
1701
        self.exporter.saveTorchModel(model, name, model_folder)
1✔
1702

1703
    def loadTorchModel(self, name = 'net', model_folder = None):
1✔
1704
        """
1705
        Loads a neural network model from a PyTorch format file.
1706

1707
        Parameters
1708
        ----------
1709
        name : str, optional
1710
            The name of the model file to load. Default is 'net'.
1711
        model_folder : str or None, optional
1712
            The folder to load the model file from. Default is None.
1713

1714
        Raises
1715
        ------
1716
        RuntimeError
1717
            If the model is not neuralized.
1718

1719
        Example
1720
        -------
1721
        Example usage:
1722
            >>> model = Modely()
1723
            >>> model.neuralizeModel()
1724
            >>> model.loadTorchModel(name='example_model', model_folder='path/to/load')
1725
        """
1726
        check(self.neuralized == True, RuntimeError, 'The model is not neuralized yet.')
1✔
1727
        self.exporter.loadTorchModel(self.model, name, model_folder)
1✔
1728

1729
    def saveModel(self, name = 'net', model_path = None, models = None):
1✔
1730
        """
1731
        Saves the neural network model definition in a json file.
1732

1733
        Parameters
1734
        ----------
1735
        name : str, optional
1736
            The name of the saved model file. Default is 'net'.
1737
        model_path : str or None, optional
1738
            The path to save the model file. Default is None.
1739
        models : list or None, optional
1740
            A list of model names to save. If None, the entire model is saved. Default is None.
1741

1742
        Raises
1743
        ------
1744
        RuntimeError
1745
            If the network has not been defined.
1746

1747
        Example
1748
        -------
1749
        Example usage:
1750
            >>> model = Modely()
1751
            >>> model.neuralizeModel()
1752
            >>> model.saveModel(name='example_model', model_path='path/to/save')
1753
        """
1754
        if models is not None:
1✔
1755
            if name == 'net':
×
1756
                name += '_' + '_'.join(models)
×
1757
            model_def = ModelDef()
×
1758
            model_def.update(model_dict = {key: self.model_dict[key] for key in models if key in self.model_dict})
×
1759
            model_def.setBuildWindow(self.model_def['Info']['SampleTime'])
×
1760
            model_def.updateParameters(self.model)
×
1761
        else:
1762
            model_def = self.model_def
1✔
1763
        check(model_def.isDefined(), RuntimeError, "The network has not been defined.")
1✔
1764
        self.exporter.saveModel(model_def.json, name, model_path)
1✔
1765

1766
    def loadModel(self, name = None, model_folder = None):
1✔
1767
        """
1768
        Loads a neural network model from a json file containing the model definition.
1769

1770
        Parameters
1771
        ----------
1772
        name : str or None, optional
1773
            The name of the model file to load. Default is 'net'.
1774
        model_folder : str or None, optional
1775
            The folder to load the model file from. Default is None.
1776

1777
        Raises
1778
        ------
1779
        RuntimeError
1780
            If there is an error loading the network.
1781

1782
        Example
1783
        -------
1784
        Example usage:
1785
            >>> model = Modely()
1786
            >>> model.loadModel(name='example_model', model_folder='path/to/load')
1787
        """
1788
        if name is None:
1✔
1789
            name = 'net'
1✔
1790
        model_def = self.exporter.loadModel(name, model_folder)
1✔
1791
        check(model_def, RuntimeError, "Error loading the network.")
1✔
1792
        self.model_def = ModelDef(model_def)
1✔
1793
        self.model = None
1✔
1794
        self.neuralized = False
1✔
1795
        self.traced = False
1✔
1796

1797
    def exportPythonModel(self, name = 'net', model_path = None, models = None):
1✔
1798
        """
1799
        Exports the neural network model as a standalone PyTorch Module class.
1800

1801
        Parameters
1802
        ----------
1803
        name : str, optional
1804
            The name of the exported model file. Default is 'net'.
1805
        model_path : str or None, optional
1806
            The path to save the exported model file. Default is None.
1807
        models : list or None, optional
1808
            A list of model names to export. If None, the entire model is exported. Default is None.
1809

1810
        Raises
1811
        ------
1812
        RuntimeError
1813
            If the network has not been defined.
1814
            If the model is traced and cannot be exported to Python.
1815
            If the model is not neuralized.
1816

1817
        Example
1818
        -------
1819
        Example usage:
1820
            >>> model = Modely()
1821
            >>> model.neuralizeModel()
1822
            >>> model.exportPythonModel(name='example_model', model_path='path/to/export')
1823
        """
1824
        if models is not None:
1✔
1825
            if name == 'net':
×
1826
                name += '_' + '_'.join(models)
×
1827
            model_def = ModelDef()
×
1828
            model_def.update(model_dict = {key: self.model_dict[key] for key in models if key in self.model_dict})
×
1829
            model_def.setBuildWindow(self.model_def['Info']['SampleTime'])
×
1830
            model_def.updateParameters(self.model)
×
1831
            model = Model(model_def.json)
×
1832
        else:
1833
            model_def = self.model_def
1✔
1834
            model = self.model
1✔
1835
        #check(model_def['States'] == {}, TypeError, "The network has state variables. The export to python is not possible.")
1836
        check(model_def.isDefined(), RuntimeError, "The network has not been defined.")
1✔
1837
        check(self.traced == False, RuntimeError,
1✔
1838
                  'The model is traced and cannot be exported to Python.\n Run neuralizeModel() to recreate a standard model.')
1839
        check(self.neuralized == True, RuntimeError, 'The model is not neuralized yet.')
1✔
1840
        self.exporter.saveModel(model_def.json, name, model_path)
1✔
1841
        self.exporter.exportPythonModel(model_def, model, name, model_path)
1✔
1842

1843
    def importPythonModel(self, name = None, model_folder = None):
1✔
1844
        """
1845
        Imports a neural network model from a standalone PyTorch Module class.
1846

1847
        Parameters
1848
        ----------
1849
        name : str or None, optional
1850
            The name of the model file to import. Default is 'net'.
1851
        model_folder : str or None, optional
1852
            The folder to import the model file from. Default is None.
1853

1854
        Raises
1855
        ------
1856
        RuntimeError
1857
            If there is an error loading the network.
1858

1859
        Example
1860
        -------
1861
        Example usage:
1862
            >>> model = Modely()
1863
            >>> model.importPythonModel(name='example_model', model_folder='path/to/import')
1864
        """
1865
        if name is None:
1✔
1866
            name = 'net'
1✔
1867
        model_def = self.exporter.loadModel(name, model_folder)
1✔
1868
        check(model_def is not None, RuntimeError, "Error loading the network.")
1✔
1869
        self.neuralizeModel(model_def=model_def)
1✔
1870
        self.model = self.exporter.importPythonModel(name, model_folder)
1✔
1871
        self.traced = True
1✔
1872
        self.model_def.updateParameters(self.model)
1✔
1873

1874
    def exportONNX(self, inputs_order=None, outputs_order=None,  models = None, name = 'net', model_folder = None):
1✔
1875
        """
1876
        Exports the neural network model to an ONNX file.
1877

1878
        -----
1879
        .. note::
1880
            The inputs_order must contain all the inputs and states of the model in the order in which you want to export them.
1881

1882
        Parameters
1883
        ----------
1884
        inputs_order : list
1885
            The order of the input and state variables.
1886
        outputs_order : list
1887
            The order of the output variables.
1888
        models : list or None, optional
1889
            A list of model names to export. If None, the entire model is exported. Default is None.
1890
        name : str, optional
1891
            The name of the exported ONNX file. Default is 'net'.
1892
        model_folder : str or None, optional
1893
            The folder to save the exported ONNX file. Default is None.
1894

1895
        Raises
1896
        ------
1897
        RuntimeError
1898
            If the network has not been defined.
1899
            If the model is traced and cannot be exported to ONNX.
1900
            If the model is not neuralized.
1901
            If the model is loaded and not created.
1902

1903
        Example
1904
        -------
1905
        Example usage:
1906
            >>> input1 = Input('input1').last()
1907
            >>> input2 = Input('input2').last()
1908
            >>> out = Output('output1', input1+input2)
1909
 
1910
            >>> model = Modely()
1911
            >>> model.neuralizeModel()
1912
            >>> model.exportONNX(inputs_order=['input1', 'input2'], outputs_order=['output1'], name='example_model', model_folder='path/to/export')
1913
        """
1914
        check(self.model_def.isDefined(), RuntimeError, "The network has not been defined.")
1✔
1915
        check(self.traced == False, RuntimeError, 'The model is traced and cannot be exported to ONNX.\n Run neuralizeModel() to recreate a standard model.')
1✔
1916
        check(self.neuralized == True, RuntimeError, 'The model is not neuralized yet.')
1✔
1917
        check(self.model_def.model_dict != {}, RuntimeError, 'The model is loaded and not created.')
1✔
1918
        model_def = ModelDef()
1✔
1919
        if models is not None:
1✔
1920
            if name == 'net':
1✔
1921
                name += '_' + '_'.join(models)
1✔
1922
            model_def.update(model_dict = {key: self.model_def.model_dict[key] for key in models if key in self.model_def.model_dict})
1✔
1923
        else:
1924
            model_def.update(model_dict = self.model_def.model_dict)
1✔
1925
        model_def.setBuildWindow(self.model_def['Info']['SampleTime'])
1✔
1926
        model_def.updateParameters(self.model)
1✔
1927
        model = Model(model_def.json)
1✔
1928
        model.update()
1✔
1929
        self.exporter.exportONNX(model_def, model, inputs_order, outputs_order, name, model_folder)
1✔
1930

1931
    def onnxInference(self, inputs:dict, path:str):
1✔
1932
        """
1933
        Run an inference session on an ONNX model previously exported with the nnodely framework.
1934

1935
        -----
1936
        .. note:: Feed-Forward ONNX model
1937
            For feed-forward models, the ONNX model expects all the inputs and states to have 3 dimensions. The first dimension is the batch size, the second is the time window and the third is the feature dimension.
1938
        .. note:: Recurrent ONNX model
1939
            For recurrent models, the ONNX model expects all the inputs to have 4 dimensions. The first dimension is the prediction horizon, the second is the batch size, the third is the time window and the fourth is the feature dimension.
1940
            For recurrent models, the ONNX model expects all the states to have 3 dimensions. The first dimension is the batch size, the second is the time window and the third is the feature dimension.
1941

1942
        Parameters
1943
        ----------
1944
        inputs : dict
1945
            A dictionary containing the input and state variables to be used to make the inference. 
1946
            State variables are mandatory and are used to initialize the states of the model.
1947
        path : str
1948
            The path to the ONNX file to use.
1949

1950
        Raises
1951
        ------
1952
        RuntimeError
1953
            If the shapes of the inputs are not equal to the ones defined in the ONNX model.
1954
            If the batch size is not equal for all the inputs and states.
1955

1956
        Example
1957
        -------
1958
        feed-forward Example:
1959
            >>> x = Input('x')
1960
 
1961
            >>> onnx_model_path = 'path/to/net.onnx'
1962
            >>> dummy_input = {'x':np.ones(shape=(3, 1, 1)).astype(np.float32)}
1963
            >>> predictions = Modely().onnxInference(dummy_input, onnx_model_path)
1964
        Recurrent Example:
1965
            >>> x = Input('x')
1966
            >>> y = State('y')
1967
 
1968
            >>> onnx_model_path = 'path/to/net.onnx'
1969
            >>> dummy_input = {'x':np.ones(shape=(3, 1, 1, 1)).astype(np.float32),
1970
                                'y':np.ones(shape=(1, 1, 1)).astype(np.float32)}
1971
            >>> predictions = Modely().onnxInference(dummy_input, onnx_model_path)
1972
        """
1973
        return self.exporter.onnxInference(inputs, path)
1✔
1974

1975
    def exportReport(self, name = 'net', model_folder = None):
1✔
1976
        """
1977
        Generates a PDF report with plots containing the results of the training and validation of the neural network.
1978

1979
        Parameters
1980
        ----------
1981
        name : str, optional
1982
            The name of the exported report file. Default is 'net'.
1983
        model_folder : str or None, optional
1984
            The folder to save the exported report file. Default is None.
1985

1986
        Example
1987
        -------
1988
        Example usage:
1989
            >>> model = Modely()
1990
            >>> model.neuralizeModel()
1991
            >>> model.trainModel(train_dataset='train_dataset', validation_dataset='val_dataset', num_of_epochs=10)
1992
            >>> model.exportReport(name='example_model', model_folder='path/to/export')
1993
        """
1994
        self.exporter.exportReport(self, name, model_folder)
1✔
1995

1996
nnodely = Modely
1✔