
tonegas / nnodely / build 13056267505 (push, via github web-flow)

30 Jan 2025 04:04PM UTC coverage: 94.525% (+0.6%) from 93.934%

Merge pull request #48 from tonegas/develop: Develop merge on main release 1.0.0

1185 of 1215 new or added lines in 21 files covered (97.53%).
3 existing lines in 2 files are now uncovered.
9426 of 9972 relevant lines covered (94.52%).
0.95 hits per line.

Source File: /nnodely/nnodely.py (90.5% covered)
1
# Extern packages
2
import random, torch, copy, os
1✔
3
import numpy as np
1✔
4
import pandas as pd
1✔
5

6
# nnodely packages
7
from nnodely.visualizer import TextVisualizer, Visualizer
1✔
8
from nnodely.loss import CustomLoss
1✔
9
from nnodely.model import Model
1✔
10
from nnodely.optimizer import Optimizer, SGD, Adam
1✔
11
from nnodely.exporter import Exporter, StandardExporter
1✔
12
from nnodely.modeldef import ModelDef
1✔
13

14
from nnodely.utils import check, argmax_dict, argmin_dict, tensor_to_list, TORCH_DTYPE, NP_DTYPE
1✔
15

16
from nnodely.logger import logging, nnLogger
1✔
17
log = nnLogger(__name__, logging.INFO)
1✔
18

19

20
class Modely:
1✔
21
    """
22
    Create the main nnodely object, which is used to define the network, train it, and export it.
23

24
    Parameters
25
    ----------
26
    visualizer : str, Visualizer, optional
27
        The visualizer to be used. Default is the 'Standard' visualizer.
28
    exporter : str, Exporter, optional
29
        The exporter to be used. Default is the 'Standard' exporter.
30
    seed : int, optional
31
        Set the seed for all the random modules inside the nnodely framework. Default is None.
32
    workspace : str, optional
33
        The path of the workspace where all the exported files will be saved.
34
    log_internal : bool, optional
35
        Whether or not to save the internal logs. Default is False.
36
    save_history : bool, optional
37
        Whether or not to save the history. Default is False.
38

39
    Example
40
    -------
41
        >>> model = Modely()
42
    """
43
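    # Hedged construction sketch (not part of the original docstring): the keyword names
    # follow the __init__ signature below; the seed and workspace values are only examples.
    #     >>> model = Modely(visualizer=None, seed=42, workspace='./results', save_history=True)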
    def __init__(self,
1✔
44
                 visualizer:str|Visualizer|None = 'Standard',
45
                 exporter:str|Exporter|None = 'Standard',
46
                 seed:int|None = None,
47
                 workspace:str|None = None,
48
                 log_internal:bool = False,
49
                 save_history:bool = False):
50

51
        # Visualizer
52
        if visualizer == 'Standard':
1✔
53
            self.visualizer = TextVisualizer(1)
1✔
54
        elif visualizer != None:
1✔
55
            self.visualizer = visualizer
1✔
56
        else:
57
            self.visualizer = Visualizer()
1✔
58
        self.visualizer.set_n4m(self)
1✔
59

60
        # Exporter
61
        if exporter == 'Standard':
1✔
62
            self.exporter = StandardExporter(workspace, self.visualizer, save_history)
1✔
63
        elif exporter != None:
×
64
            self.exporter = exporter
×
65
        else:
66
            self.exporter = Exporter()
×
67

68
        ## Set the random seed for reproducibility
69
        if seed is not None:
1✔
70
            self.resetSeed(seed)
1✔
71

72
        # Save internal
73
        self.log_internal = log_internal
1✔
74
        if self.log_internal == True:
1✔
75
            self.internals = {}
1✔
76

77
        # Models definition
78
        self.model_def = ModelDef()
1✔
79
        self.input_n_samples = {}
1✔
80
        self.max_n_samples = 0
1✔
81
        self.neuralized = False
1✔
82
        self.traced = False
1✔
83
        self.model = None
1✔
84
        self.states = {}
1✔
85

86
        # Dataset Parameters
87
        self.data_loaded = False
1✔
88
        self.file_count = 0
1✔
89
        self.num_of_samples = {}
1✔
90
        self.data = {}
1✔
91
        self.n_datasets = 0
1✔
92
        self.datasets_loaded = set()
1✔
93
        self.multifile = {}
1✔
94

95
        # Training Parameters
96
        self.standard_train_parameters = {
1✔
97
            'models' : None,
98
            'train_dataset' : None, 'validation_dataset' : None, 'test_dataset' : None, 'splits' : [70, 20, 10],
99
            'closed_loop' : {}, 'connect' : {}, 'step' : 0, 'prediction_samples' : 0,
100
            'shuffle_data' : True,
101
            'early_stopping' : None, 'early_stopping_params' : {},
102
            'select_model' : 'last', 'select_model_params' : {},
103
            'minimize_gain' : {},
104
            'num_of_epochs': 100,
105
            'train_batch_size' : 128, 'val_batch_size' : None, 'test_batch_size' : None,
106
            'optimizer' : 'Adam',
107
            'lr' : 0.001, 'lr_param' : {},
108
            'optimizer_params' : [], 'add_optimizer_params' : [],
109
            'optimizer_defaults' : {}, 'add_optimizer_defaults' : {}
110
        }
111

112
        # Optimizer
113
        self.optimizer = None
1✔
114

115
        # Training Losses
116
        self.loss_functions = {}
1✔
117

118
        # Validation Parameters
119
        self.training = {}
1✔
120
        self.performance = {}
1✔
121
        self.prediction = {}
1✔
122

123

124
    def resetSeed(self, seed):
1✔
125
        """
126
        Resets the random seed for reproducibility.
127

128
        This method sets the seed for various random number generators used in the project to ensure reproducibility of results.
129

130
        :param seed: The seed value to be used for the random number generators.
131
        :type seed: int
132

133
        Example:
134
            >>> model = Modely()
135
            >>> model.resetSeed(42)
136
        """
137
        torch.manual_seed(seed)  ## set the pytorch seed
1✔
138
        torch.cuda.manual_seed_all(seed)
1✔
139
        random.seed(seed)  ## set the random module seed
1✔
140
        np.random.seed(seed)  ## set the numpy seed
1✔
141

142

143
    def __call__(self, inputs={}, sampled=False, closed_loop={}, connect={}, prediction_samples='auto', num_of_samples=None): ##, align_input=False):
1✔
144
        """
145
        Performs inference on the model.
146

147
        Parameters
148
        ----------
149
        inputs : dict, optional
150
            A dictionary of input data. The keys are input names and the values are the corresponding data. Default is an empty dictionary.
151
        sampled : bool, optional
152
            A boolean indicating whether the inputs are already sampled. Default is False.
153
        closed_loop : dict, optional
154
            A dictionary specifying closed loop connections. The keys are input names and the values are output names. Default is an empty dictionary.
155
        connect : dict, optional
156
            A dictionary specifying connections. The keys are input names and the values are output names. Default is an empty dictionary.
157
        prediction_samples : str or int, optional
158
            The number of prediction samples. Can be 'auto', None or an integer. Default is 'auto'.
159
        num_of_samples : int or None, optional
160
            The number of samples (inference windows) to produce. Missing input samples are padded with zeros. Default is None.
161

162
        Returns
163
        -------
164
        dict
165
            A dictionary containing the model's prediction outputs.
166

167
        Raises
168
        ------
169
        RuntimeError
170
            If the network is not neuralized.
171
        ValueError
172
            If an input variable is not in the model definition or if an output variable is not in the model definition.
173

174
        Example
175
        -------
176
        Example usage:
177
            >>> model = Modely()
178
            >>> x = Input('x')
179
            >>> out = Output('out', Fir(x.last()))
180
            >>> model.addModel('example_model', [out])
181
            >>> model.neuralizeModel()
182
            >>> predictions = model(inputs={'x': [1, 2, 3]})
183
        """
184
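        # Hedged usage sketch (illustrative, not from the original docstring): with the input 'x'
        # and the output 'out' of the example above, a recurrent call could look like
        #     >>> model(inputs={'x': [1, 2, 3]}, closed_loop={'x': 'out'}, prediction_samples=3, num_of_samples=5)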

185
        ## Deep copy the argument dicts to avoid mutating the caller's objects
186
        inputs = copy.deepcopy(inputs)
1✔
187
        closed_loop = copy.deepcopy(closed_loop)
1✔
188
        connect = copy.deepcopy(connect)
1✔
189

190
        ## Check neuralize
191
        check(self.neuralized, RuntimeError, "The network is not neuralized.")
1✔
192

193
        ## Check closed loop integrity
194
        for close_in, close_out in closed_loop.items():
1✔
195
            check(close_in in self.model_def['Inputs'], ValueError, f'the tag {close_in} is not an input variable.')
1✔
196
            check(close_out in self.model_def['Outputs'], ValueError, f'the tag {close_out} is not an output of the network')
1✔
197

198
        ## List of keys
199
        model_inputs = list(self.model_def['Inputs'].keys())
1✔
200
        model_states = list(self.model_def['States'].keys())
1✔
201
        state_closed_loop = [key for key, value in self.model_def['States'].items() if 'closedLoop' in value.keys()] + list(closed_loop.keys())
1✔
202
        state_connect = [key for key, value in self.model_def['States'].items() if 'connect' in value.keys()] + list(connect.keys())
1✔
203
        extra_inputs = list(set(list(inputs.keys())) - set(model_inputs) - set(model_states))
1✔
204
        non_mandatory_inputs = state_closed_loop + state_connect 
1✔
205
        mandatory_inputs = list(set(model_inputs) - set(non_mandatory_inputs))
1✔
206

207
        ## Remove extra inputs
208
        for key in extra_inputs:
1✔
209
            log.warning(f'The provided input {key} is not used inside the network. The inference will continue without using it.')
1✔
210
            del inputs[key]
1✔
211

212
        ## Get the number of data windows for each input/state
213
        num_of_windows = {key: len(value) for key, value in inputs.items()} if sampled else {key: len(value) - self.input_n_samples[key] + 1 for key, value in inputs.items()}
1✔
214

215
        ## Get the maximum inference window
216
        if num_of_samples:
1✔
217
            window_dim = num_of_samples
1✔
218
            for key in inputs.keys():
1✔
219
                input_dim = self.model_def['Inputs'][key]['dim'] if key in model_inputs else self.model_def['States'][key]['dim']
1✔
220
                if input_dim > 1:
1✔
221
                    inputs[key] += [[0 for _ in range(input_dim)] for _ in range(num_of_samples - (len(inputs[key]) - self.input_n_samples[key] + 1))]
1✔
222
                else:
223
                    inputs[key] += [0 for _ in range(num_of_samples - (len(inputs[key]) - self.input_n_samples[key] + 1))]
1✔
224
        elif inputs:
1✔
225
            windows = []
1✔
226
            for key in inputs.keys():
1✔
227
                if key in mandatory_inputs:
1✔
228
                    n_samples = len(inputs[key]) if sampled else len(inputs[key]) - self.model_def['Inputs'][key]['ntot'] + 1
1✔
229
                    windows.append(n_samples)
1✔
230
            if not windows:
1✔
231
                for key in inputs.keys():
1✔
232
                    if key in non_mandatory_inputs:
1✔
233
                        if key in model_inputs:
1✔
234
                            n_samples = len(inputs[key]) if sampled else len(inputs[key]) - self.model_def['Inputs'][key]['ntot'] + 1
1✔
235
                        else:
236
                            n_samples = len(inputs[key]) if sampled else len(inputs[key]) - self.model_def['States'][key]['ntot'] + 1
1✔
237
                        windows.append(n_samples)
1✔
238
            window_dim = min(windows) if windows else 0
1✔
239
        else: ## No inputs
240
            window_dim = 1 if non_mandatory_inputs else 0
1✔
241
        check(window_dim > 0, StopIteration, f'Missing samples in the input window')
1✔
242

243
        if len(set(num_of_windows.values())) > 1:
1✔
244
            max_ind_key, max_dim = argmax_dict(num_of_windows)
1✔
245
            min_ind_key, min_dim = argmin_dict(num_of_windows)
1✔
246
            log.warning(f'Different number of samples between inputs [MAX {max_ind_key} = {max_dim}; MIN {min_ind_key} = {min_dim}]')
1✔
247

248
        ## Autofill the missing inputs
249
        provided_inputs = list(inputs.keys())
1✔
250
        missing_inputs = list(set(mandatory_inputs) - set(provided_inputs))
1✔
251
        if missing_inputs:
1✔
252
            log.warning(f'Inputs not provided: {missing_inputs}. Autofilling with zeros..')
1✔
253
            for key in missing_inputs:
1✔
254
                inputs[key] = np.zeros(shape=(self.input_n_samples[key] + window_dim - 1, self.model_def['Inputs'][key]['dim']),dtype=NP_DTYPE).tolist()
1✔
255

256
        ## Transform inputs in 3D Tensors
257
        for key, val in inputs.items():
1✔
258
            input_dim = self.model_def['Inputs'][key]['dim'] if key in model_inputs else self.model_def['States'][key]['dim']
1✔
259
            inputs[key] = torch.from_numpy(np.array(inputs[key])).to(TORCH_DTYPE)
1✔
260

261
            if input_dim > 1:
1✔
262
                correct_dim = 3 if sampled else 2
1✔
263
                check(len(inputs[key].shape) == correct_dim, ValueError,f'The input {key} must have {correct_dim} dimensions')
1✔
264
                check(inputs[key].shape[correct_dim - 1] == input_dim, ValueError,f'The second dimension of the input "{key}" must be equal to {input_dim}')
1✔
265

266
            if input_dim == 1 and inputs[key].shape[-1] != 1: ## add the input dimension
1✔
267
                inputs[key] = inputs[key].unsqueeze(-1)
1✔
268
            if inputs[key].ndim <= 1: ## add the batch dimension
1✔
269
                inputs[key] = inputs[key].unsqueeze(0)
1✔
270
            if inputs[key].ndim <= 2: ## add the time dimension
1✔
271
                inputs[key] = inputs[key].unsqueeze(0)
1✔
272
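        ## Shape note (illustrative values): after these unsqueeze calls a scalar input given as
        ## [1, 2, 3] ends up as a tensor of shape (1, 3, 1), i.e. (batch, time, feature).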

273
        ## initialize the resulting dictionary
274
        result_dict = {}
1✔
275
        for key in self.model_def['Outputs'].keys():
1✔
276
            result_dict[key] = []
1✔
277

278
        ## Inference
279
        with torch.inference_mode():
1✔
280
            self.model.eval()
1✔
281
            ## Update with virtual states
282
            if prediction_samples is not None:
1✔
283
                self.model.update(closed_loop=closed_loop, connect=connect)
1✔
284
            else:
285
                prediction_samples = 0
1✔
286
            X = {}
1✔
287
            count = 0
1✔
288
            first = True
1✔
289
            for idx in range(window_dim):
1✔
290
                ## Get mandatory data inputs
291
                for key in mandatory_inputs:
1✔
292
                    X[key] = inputs[key][idx:idx+1] if sampled else inputs[key][:, idx:idx + self.input_n_samples[key]]
1✔
293
                ## reset states
294
                if count == 0 or prediction_samples=='auto':
1✔
295
                    count = prediction_samples
1✔
296
                    for key in non_mandatory_inputs: ## Get non mandatory data (from inputs, from states, or with zeros)
1✔
297
                        ## if prediction_samples is 'auto' and there are enough samples
298
                        ## if prediction_samples is NOT 'auto' but the window has been extended (with zeros) to cover it
299
                        if (key in inputs.keys() and prediction_samples == 'auto' and idx < num_of_windows[key]) or (key in inputs.keys() and prediction_samples != 'auto' and idx < inputs[key].shape[1]):
1✔
300
                            X[key] = inputs[key][idx:idx+1].clone() if sampled else inputs[key][:, idx:idx + self.input_n_samples[key]].clone()
1✔
301
                        ## if this is the first reset
302
                        ## if a state is already stored in memory
303
                        ## if prediction_samples is 'auto' and there are not enough samples
304
                        elif (key in self.states.keys() and (first or prediction_samples == 'auto')) and (prediction_samples == 'auto' or prediction_samples == None):
1✔
305
                            X[key] = self.states[key]
1✔
306
                        else: ## if there are no samples and no stored states
307
                            window_size = self.input_n_samples[key]
1✔
308
                            dim = self.model_def['Inputs'][key]['dim'] if key in model_inputs else self.model_def['States'][key]['dim']
1✔
309
                            X[key] = torch.zeros(size=(1, window_size, dim), dtype=TORCH_DTYPE, requires_grad=False)
1✔
310
                            self.states[key] = X[key]
1✔
311
                    first = False
1✔
312
                else:
313
                    count -= 1
1✔
314
                ## Forward pass
315
                result, _, out_closed_loop, out_connect = self.model(X)
1✔
316

317
                ## Append the prediction of the current sample to the result dictionary
318
                for key in self.model_def['Outputs'].keys():
1✔
319
                    if result[key].shape[-1] == 1:
1✔
320
                        result[key] = result[key].squeeze(-1)
1✔
321
                        if result[key].shape[-1] == 1:
1✔
322
                            result[key] = result[key].squeeze(-1)
1✔
323
                    result_dict[key].append(result[key].detach().squeeze(dim=0).tolist())
1✔
324

325
                ## Update closed_loop and connect
326
                if prediction_samples:
1✔
327
                    for key, val in out_closed_loop.items():
1✔
328
                        shift = val.shape[1]  ## take the output time dimension
1✔
329
                        X[key] = torch.roll(X[key], shifts=-1, dims=1) ## Roll the time window
1✔
330
                        X[key][:, -shift:, :] = val ## substitute with the predicted value
1✔
331
                        self.states[key] = X[key]
1✔
332
                    for key, val in out_connect.items():
1✔
333
                        X[key] = val
1✔
334
                        self.states[key] = X[key]
1✔
335

336
        ## Remove virtual states
337
        for key in (connect.keys() | closed_loop.keys()):
1✔
338
            if key in self.states.keys():
1✔
339
                del self.states[key]
1✔
340
        
341
        return result_dict
1✔
342

343
    def getSamples(self, dataset, index = None, window=1):
1✔
344
        """
345
        Retrieves a window of samples from a given dataset.
346

347
        Parameters
348
        ----------
349
        dataset : str
350
            The name of the dataset to retrieve samples from.
351
        index : int, optional
352
            The starting index of the samples. If None, a random index is chosen. Default is None.
353
        window : int, optional
354
            The number of consecutive samples to retrieve. Default is 1.
355

356
        Returns
357
        -------
358
        dict
359
            A dictionary containing the retrieved samples. The keys are input and state names, and the values are lists of samples.
360

361
        Raises
362
        ------
363
        ValueError
364
            If the dataset is not loaded.
365

366
        Example
367
        -------
368
        Example usage:
369
            >>> model = Modely()
370
            >>> model.loadData('dataset_name')
371
            >>> samples = model.getSamples('dataset_name', index=10, window=5)
372
        """
373
        if index is None:
1✔
374
            index = random.randint(0, self.num_of_samples[dataset] - window)
1✔
375
        check(self.data_loaded, ValueError, 'The Dataset must first be loaded using <loadData> function!')
1✔
376
        if self.data_loaded:
1✔
377
            result_dict = {}
1✔
378
            for key in (self.model_def['Inputs'].keys() | self.model_def['States'].keys()):
1✔
379
                result_dict[key] = []
1✔
380
            for idx in range(window):
1✔
381
                for key ,samples in self.data[dataset].items():
1✔
382
                    if key in (self.model_def['Inputs'].keys() | self.model_def['States'].keys()):
1✔
383
                        result_dict[key].append(samples[index+idx])
1✔
384
            return result_dict
1✔
385
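    ## Hedged usage sketch (illustrative): the retrieved window can be fed back to the model
    ## as already-sampled data, e.g.
    ##     >>> samples = model.getSamples('dataset_name', index=10, window=5)
    ##     >>> predictions = model(samples, sampled=True)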

386
    def addConnect(self, stream_out, state_list_in):
1✔
387
        """
388
        Adds a connection from a relation stream to an input state.
389

390
        Parameters
391
        ----------
392
        stream_out : Stream
393
            The relation stream to connect from.
394
        state_list_in : list of State
395
            The list of input states to connect to.
396

397
        Example
398
        -------
399
        Example usage:
400
            >>> model = Modely()
401
            >>> x = Input('x')
402
            >>> y = State('y')
403
            >>> relation = Fir(x.last())
404
            >>> model.addConnect(relation, y)
405
        """
406
        self.model_def.addConnect(stream_out, state_list_in)
1✔
407

408
    def addClosedLoop(self, stream_out, state_list_in):
1✔
409
        """
410
        Adds a closed loop connection from a relation stream to an input state.
411

412
        Parameters
413
        ----------
414
        stream_out : Stream
415
            The relation stream to connect from.
416
        state_list_in : list of State
417
            The list of input states to connect to.
418

419
        Example
420
        -------
421
        Example usage:
422
            >>> model = Modely()
423
            >>> x = Input('x')
424
            >>> y = State('y')
425
            >>> relation = Fir(x.last())
426
            >>> model.addClosedLoop(relation, y)
427
        """
428
        self.model_def.addClosedLoop(stream_out, state_list_in)
1✔
429

430
    def addModel(self, name, stream_list):
1✔
431
        """
432
        Adds a new model with the given name along with a list of Outputs.
433

434
        Parameters
435
        ----------
436
        name : str
437
            The name of the model.
438
        stream_list : list of Stream
439
            The list of Output streams of the model.
440

441
        Example
442
        -------
443
        Example usage:
444
            >>> model = Modely()
445
            >>> x = Input('x')
446
            >>> out = Output('out', Fir(x.last()))
447
            >>> model.addModel('example_model', [out])
448
        """
449
        self.model_def.addModel(name, stream_list)
1✔
450

451
    def removeModel(self, name_list):
1✔
452
        """
453
        Removes models with the given list of names.
454

455
        Parameters
456
        ----------
457
        name_list : list of str
458
            The list of model names to remove.
459

460
        Example
461
        -------
462
        Example usage:
463
            >>> model.removeModel(['sub_model1', 'sub_model2'])
464
        """
465
        self.model_def.removeModel(name_list)
×
466

467
    def addMinimize(self, name, streamA, streamB, loss_function='mse'):
1✔
468
        """
469
        Adds a minimize loss function to the model.
470

471
        Parameters
472
        ----------
473
        name : str
474
            The name of the cost function.
475
        streamA : Stream
476
            The first relation stream for the minimize operation.
477
        streamB : Stream
478
            The second relation stream for the minimize operation.
479
        loss_function : str, optional
480
            The loss function to use from the ones provided. Default is 'mse'.
481

482
        Example
483
        -------
484
        Example usage:
485
            >>> model.addMinimize('minimize_op', streamA, streamB, loss_function='mse')
486
        """
487
        self.model_def.addMinimize(name, streamA, streamB, loss_function)
1✔
488
        self.visualizer.showaddMinimize(name)
1✔
489

490
    def removeMinimize(self, name_list):
1✔
491
        """
492
        Removes minimize loss functions using the given list of names.
493

494
        Parameters
495
        ----------
496
        name_list : list of str
497
            The list of minimize operation names to remove.
498

499
        Example
500
        -------
501
        Example usage:
502
            >>> model.removeMinimize(['minimize_op1', 'minimize_op2'])
503
        """
504
        self.model_def.removeMinimize(name_list)
1✔
505

506
    def resetStates(self, states=[], batch=1):
1✔
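        ## Hedged usage sketch (this method has no docstring; the state name and batch size are illustrative):
        ##     >>> model.resetStates()                       # reset every state to zeros
        ##     >>> model.resetStates(states=['y'], batch=8)  # reset only state 'y' with batch size 8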
507
        if states: ## reset only specific states
1✔
508
            for key in states:
1✔
509
                window_size = self.input_n_samples[key]
1✔
510
                dim = self.model_def['States'][key]['dim']
1✔
511
                self.states[key] = torch.zeros(size=(batch, window_size, dim), dtype=TORCH_DTYPE, requires_grad=False)
1✔
512
        else: ## reset all states
513
            self.states = {}
1✔
514
            for key, state in self.model_def['States'].items():
1✔
515
                window_size = self.input_n_samples[key]
1✔
516
                dim = state['dim']
1✔
517
                self.states[key] = torch.zeros(size=(batch, window_size, dim), dtype=TORCH_DTYPE, requires_grad=False)
1✔
518

519
    def neuralizeModel(self, sample_time = None, clear_model = False, model_def = None):
1✔
520
        """
521
        Neuralizes the model, preparing it for inference and training. This method creates a neural network model starting from the model definition.
522
        It will also create all the time windows for the inputs and states.
523

524
        Parameters
525
        ----------
526
        sample_time : float or None, optional
527
            The sample time for the model. Default is None.
528
        clear_model : bool, optional
529
            Whether to clear the existing model definition. Default is False.
530
        model_def : dict or None, optional
531
            A dictionary defining the model. If provided, it overrides the existing model definition. Default is None.
532

533
        Raises
534
        ------
535
        ValueError
536
            If sample_time is not None and model_def is provided.
537
            If clear_model is True and model_def is provided.
538

539
        Example
540
        -------
541
        Example usage:
542
            >>> model = Modely()
543
            >>> model.neuralizeModel(sample_time=0.1, clear_model=True)
544
        """
545
        if model_def is not None:
1✔
546
            check(sample_time == None, ValueError, 'The sample_time must be None if a model_def is provided')
1✔
547
            check(clear_model == False, ValueError, 'The clear_model must be False if a model_def is provided')
1✔
548
            self.model_def = ModelDef(model_def)
1✔
549
        else:
550
            if clear_model:
1✔
551
                self.model_def.update()
1✔
552
            else:
553
                self.model_def.updateParameters(self.model)
1✔
554

555
        self.model_def.setBuildWindow(sample_time)
1✔
556
        self.model = Model(self.model_def.json)
1✔
557

558
        input_ns_backward = {key:value['ns'][0] for key, value in (self.model_def['Inputs']|self.model_def['States']).items()}
1✔
559
        input_ns_forward = {key:value['ns'][1] for key, value in (self.model_def['Inputs']|self.model_def['States']).items()}
1✔
560
        self.input_n_samples = {}
1✔
561
        for key, value in (self.model_def['Inputs'] | self.model_def['States']).items():
1✔
562
            self.input_n_samples[key] = input_ns_backward[key] + input_ns_forward[key]
1✔
563
        self.max_n_samples = max(input_ns_backward.values()) + max(input_ns_forward.values())
1✔
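        ## Illustrative sizing (example values, not from the source): an input built with
        ## x.tw(0.05) and sample_time=0.01 gets ns = [5, 0], so input_n_samples['x'] = 5
        ## and it contributes 5 backward samples to max_n_samples.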
564

565
        ## Initialize States 
566
        self.resetStates()
1✔
567

568
        self.neuralized = True
1✔
569
        self.traced = False
1✔
570
        self.visualizer.showModel(self.model_def.json)
1✔
571
        self.visualizer.showModelInputWindow()
1✔
572
        self.visualizer.showBuiltModel()
1✔
573

574
    def loadData(self, name, source, format=None, skiplines=0, delimiter=',', header=None):
1✔
575
        """
576
        Loads data into the model. The data can be loaded from a directory path containing the csv files or from a crafted dataset.
577

578
        Parameters
579
        ----------
580
        name : str
581
            The name of the dataset.
582
        source : str or list
583
            The source of the data. Can be a directory path containing the csv files or a list of custom data.
584
        format : list or None, optional
585
            The format of the data. When loading csv files, the format parameter defines how each column of the files is mapped to the model inputs. Default is None.
586
        skiplines : int, optional
587
            The number of lines to skip at the beginning of the file. Default is 0.
588
        delimiter : str, optional
589
            The delimiter used in the data files. Default is ','.
590
        header : list or None, optional
591
            The header of the data files. Default is None.
592

593
        Raises
594
        ------
595
        ValueError
596
            If the network is not neuralized.
597
            If the delimiter is not valid.
598

599
        Example
600
        -------
601
        Example - load data from files:
602
            >>> x = Input('x')
603
            >>> y = Input('y')
604
            >>> out = Output('out',Fir(x.tw(0.05)))
605
            >>> test = Modely(visualizer=None)
606
            >>> test.addModel('example_model', out)
607
            >>> test.neuralizeModel(0.01)
608
            >>> data_struct = ['x', '', 'y']
609
            >>> test.loadData(name='example_dataset', source='path/to/data', format=data_struct)
610

611
        Example - load data from a crafted dataset:
612
            >>> x = Input('x')
613
            >>> y = Input('y')
614
            >>> out = Output('out',Fir(x.tw(0.05)))
615
            >>> test = Modely(visualizer=None)
616
            >>> test.addModel('example_model', out)
617
            >>> test.neuralizeModel(0.01)
618
            >>> data_x = np.array(range(10))
619
            >>> dataset = {'x': data_x, 'y': (2*data_x)}
620
            >>> test.loadData(name='example_dataset',source=dataset)
621
        """
622
        check(self.neuralized, ValueError, "The network is not neuralized.")
1✔
623
        check(delimiter in ['\t', '\n', ';', ',', ' '], ValueError, 'delimiter not valid!')
1✔
624

625
        json_inputs = self.model_def['Inputs'] | self.model_def['States']
1✔
626
        model_inputs = list(json_inputs.keys())
1✔
627
        ## Initialize the dictionary containing the data
628
        if name in list(self.data.keys()):
1✔
629
            log.warning(f'Dataset named {name} already loaded! Overriding the existing one..')
1✔
630
        self.data[name] = {}
1✔
631

632
        input_ns_backward = {key:value['ns'][0] for key, value in json_inputs.items()}
1✔
633
        input_ns_forward = {key:value['ns'][1] for key, value in json_inputs.items()}
1✔
634
        max_samples_backward = max(input_ns_backward.values())
1✔
635
        max_samples_forward = max(input_ns_forward.values())
1✔
636
        max_n_samples = max_samples_backward + max_samples_forward
1✔
637

638
        num_of_samples = {}
1✔
639
        if type(source) is str: ## we have a directory path containing the files
1✔
640
            ## collect column indexes
641
            format_idx = {}
1✔
642
            idx = 0
1✔
643
            for item in format:
1✔
644
                if isinstance(item, tuple):
1✔
645
                    for key in item:
×
646
                        if key not in model_inputs:
×
647
                            idx += 1
×
648
                            break
×
649
                        n_cols = json_inputs[key]['dim']
×
650
                        format_idx[key] = (idx, idx+n_cols)
×
651
                    idx += n_cols
×
652
                else:
653
                    if item not in model_inputs:
1✔
654
                        idx += 1
1✔
655
                        continue
1✔
656
                    n_cols = json_inputs[item]['dim']
1✔
657
                    format_idx[item] = (idx, idx+n_cols)
1✔
658
                    idx += n_cols
1✔
659
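            ## Illustrative mapping (example values): with format=['time', 'x', '', 'F'] and
            ## scalar model inputs 'x' and 'F', the loop above yields
            ## format_idx = {'x': (1, 2), 'F': (3, 4)}; 'time' and the empty placeholder
            ## only advance idx.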

660
            ## Initialize each input key
661
            for key in format_idx.keys():
1✔
662
                self.data[name][key] = []
1✔
663

664
            ## obtain the file names
665
            try:
1✔
666
                _,_,files = next(os.walk(source))
1✔
667
                files.sort()
1✔
668
            except StopIteration as e:
×
669
                check(False,StopIteration, f'ERROR: The path "{source}" does not exist!')
×
670
                return
×
671
            self.file_count = len(files)
1✔
672
            if self.file_count > 1: ## Multifile
1✔
673
                self.multifile[name] = []
1✔
674

675
            ## Cycle through all the files
676
            for file in files:
1✔
677
                try:
1✔
678
                    ## read the csv
679
                    df = pd.read_csv(os.path.join(source,file), skiprows=skiplines, delimiter=delimiter, header=header)
1✔
680
                except:
×
681
                    log.warning(f'Cannot read file {os.path.join(source,file)}')
×
682
                    continue
×
683
                if self.file_count > 1:
1✔
684
                    self.multifile[name].append((self.multifile[name][-1] + (len(df) - max_n_samples + 1)) if self.multifile[name] else len(df) - max_n_samples + 1)
1✔
685
                ## Cycle through all the windows
686
                for key, idxs in format_idx.items():
1✔
687
                    back, forw = input_ns_backward[key], input_ns_forward[key]
1✔
688
                    ## Save as numpy array the data
689
                    data = df.iloc[:, idxs[0]:idxs[1]].to_numpy()
1✔
690
                    self.data[name][key] += [data[i-back:i+forw] for i in range(max_samples_backward, len(df)-max_samples_forward+1)]
1✔
691

692
            ## Stack the files
693
            for key in format_idx.keys():
1✔
694
                self.data[name][key] = np.stack(self.data[name][key])
1✔
695
                num_of_samples[key] = self.data[name][key].shape[0]
1✔
696

697
        elif type(source) is dict:  ## we have a crafted dataset
1✔
698
            self.file_count = 1
1✔
699

700
            ## Check if the inputs are correct
701
            #assert set(model_inputs).issubset(source.keys()), f'The dataset is missing some inputs. Inputs needed for the model: {model_inputs}'
702

703
            # Build the windowed samples for each input provided in the crafted dataset
704
            for key in model_inputs:
1✔
705
                if key not in source.keys():
1✔
706
                    continue
1✔
707

708
                self.data[name][key] = []  ## Initialize the dataset
1✔
709

710
                back, forw = input_ns_backward[key], input_ns_forward[key]
1✔
711
                for idx in range(len(source[key]) - max_n_samples+1):
1✔
712
                    self.data[name][key].append(source[key][idx + (max_samples_backward - back):idx + (max_samples_backward + forw)])
1✔
713
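                ## Worked example (illustrative numbers): with 10 samples of a scalar 'x',
                ## back=5, forw=0 and max_n_samples=5, the loop above builds 6 overlapping
                ## windows source['x'][0:5], source['x'][1:6], ..., source['x'][5:10].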

714
            ## Stack the files
715
            for key in model_inputs:
1✔
716
                if key not in source.keys():
1✔
717
                    continue
1✔
718
                self.data[name][key] = np.stack(self.data[name][key])
1✔
719
                if self.data[name][key].ndim == 2: ## Add the sample dimension
1✔
720
                    self.data[name][key] = np.expand_dims(self.data[name][key], axis=-1)
1✔
721
                if self.data[name][key].ndim > 3:
1✔
722
                    self.data[name][key] = np.squeeze(self.data[name][key], axis=1)
×
723
                num_of_samples[key] = self.data[name][key].shape[0]
1✔
724

725
        # Check dim of the samples
726
        check(len(set(num_of_samples.values())) == 1, ValueError,
1✔
727
              f"The number of the sample of the dataset {name} are not the same for all input in the dataset: {num_of_samples}")
728
        self.num_of_samples[name] = num_of_samples[list(num_of_samples.keys())[0]]
1✔
729

730
        ## Set the Loaded flag to True
731
        self.data_loaded = True
1✔
732
        ## Update the number of datasets loaded
733
        self.n_datasets = len(self.data.keys())
1✔
734
        self.datasets_loaded.add(name)
1✔
735
        ## Show the dataset
736
        self.visualizer.showDataset(name=name)
1✔
737

738
    def filterData(self, filter_function, dataset_name = None):
1✔
739
        """
740
        Filters the data in the dataset using the provided filter function.
741

742
        Parameters
743
        ----------
744
        filter_function : Callable
745
            A function that takes a sample as input and returns True if the sample should be kept, and False if it should be removed.
746
        dataset_name : str or None, optional
747
            The name of the dataset to filter. If None, all datasets are filtered. Default is None.
748

749
        Example
750
        -------
751
        Example usage:
752
            >>> model = Modely()
753
            >>> model.loadData('dataset_name', 'path/to/data')
754
            >>> def filter_fn(sample):
755
            >>>     return sample['input1'] > 0
756
            >>> model.filterData(filter_fn, 'dataset_name')
757
        """
758
        idx_to_remove = []
×
759
        if dataset_name is None:
×
760
            for name in self.data.keys():
×
761
                dataset = self.data[name]
×
762
                n_samples = len(dataset[list(dataset.keys())[0]])
×
763

764
                data_for_filter = []
×
765
                for i in range(n_samples):
×
766
                    new_sample = {key: val[i] for key, val in dataset.items()}
×
767
                    data_for_filter.append(new_sample)
×
768

769
                for idx, sample in enumerate(data_for_filter):
×
770
                    if not filter_function(sample):
×
771
                        idx_to_remove.append(idx)
×
772

773
                for key in self.data[name].keys():
×
774
                    self.data[name][key] = np.delete(self.data[name][key], idx_to_remove, axis=0)
×
775
                    self.num_of_samples[name] = self.data[name][key].shape[0]
×
776
                self.visualizer.showDataset(name=name)
×
777

778
        else:
779
            dataset = self.data[dataset_name]
×
780
            n_samples = len(dataset[list(dataset.keys())[0]])
×
781

782
            data_for_filter = []
×
783
            for i in range(n_samples):
×
784
                new_sample = {key: val[i] for key, val in dataset.items()}
×
785
                data_for_filter.append(new_sample)
×
786

787
            for idx, sample in enumerate(data_for_filter):
×
788
                if not filter_function(sample):
×
789
                    idx_to_remove.append(idx)
×
790

791
            for key in self.data[dataset_name].keys():
×
792
                self.data[dataset_name][key] = np.delete(self.data[dataset_name][key], idx_to_remove, axis=0)
×
793
                self.num_of_samples[dataset_name] = self.data[dataset_name][key].shape[0]
×
794
            self.visualizer.showDataset(name=dataset_name)
×
795

796
    def __save_internal(self, key, value):
1✔
797
        self.internals[key] = tensor_to_list(value)
1✔
798

799
    def __get_train_parameters(self, training_params):
1✔
800
        run_train_parameters = copy.deepcopy(self.standard_train_parameters)
1✔
801
        if training_params is None:
1✔
802
            return run_train_parameters
1✔
803
        for key, value in training_params.items():
1✔
804
            check(key in run_train_parameters, KeyError, f"The parameter {key} does not exist among the standard training parameters")
1✔
805
            run_train_parameters[key] = value
1✔
806
        return run_train_parameters
1✔
807

808
    def __get_parameter(self, **parameter):
1✔
809
        assert len(parameter) == 1
1✔
810
        name = list(parameter.keys())[0]
1✔
811
        self.run_training_params[name] =  parameter[name] if parameter[name] is not None else self.run_training_params[name]
1✔
812
        return self.run_training_params[name]
1✔
813

814
    def __get_batch_sizes(self, train_batch_size, val_batch_size, test_batch_size):
1✔
815
        ## Check if the batch_size can be used for the current dataset, otherwise set the batch_size to the maximum value
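        ## Worked example (illustrative numbers): with n_samples_train=1000, prediction_samples=10
        ## and a requested train_batch_size of 2000, the recurrent branch below clamps the batch
        ## size to 1000 - 10 = 990; in the non-recurrent branch it would become 1000.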
816
        self.__get_parameter(train_batch_size = train_batch_size)
1✔
817
        self.__get_parameter(val_batch_size = val_batch_size)
1✔
818
        self.__get_parameter(test_batch_size = test_batch_size)
1✔
819

820
        if self.run_training_params['recurrent_train']:
1✔
821
            if self.run_training_params['train_batch_size'] > self.run_training_params['n_samples_train']:
1✔
822
                self.run_training_params['train_batch_size'] = self.run_training_params['n_samples_train'] - self.run_training_params['prediction_samples']
1✔
823
            if self.run_training_params['val_batch_size'] is None or self.run_training_params['val_batch_size'] > self.run_training_params['n_samples_val']:
1✔
824
                self.run_training_params['val_batch_size'] = max(0,self.run_training_params['n_samples_val'] - self.run_training_params['prediction_samples'])
1✔
825
            if self.run_training_params['test_batch_size'] is None or self.run_training_params['test_batch_size'] > self.run_training_params['n_samples_test']:
1✔
826
                self.run_training_params['test_batch_size'] = max(0,self.run_training_params['n_samples_test'] - self.run_training_params['prediction_samples'])
1✔
827
        else:
828
            if self.run_training_params['train_batch_size'] > self.run_training_params['n_samples_train']:
1✔
829
                self.run_training_params['train_batch_size'] = self.run_training_params['n_samples_train']
1✔
830
            if self.run_training_params['val_batch_size'] is None or self.run_training_params['val_batch_size'] > self.run_training_params['n_samples_val']:
1✔
831
                self.run_training_params['val_batch_size'] = self.run_training_params['n_samples_val']
1✔
832
            if self.run_training_params['test_batch_size'] is None or self.run_training_params['test_batch_size'] > self.run_training_params['n_samples_test']:
1✔
833
                self.run_training_params['test_batch_size'] = self.run_training_params['n_samples_test']
1✔
834

835
        check(self.run_training_params['train_batch_size'] > 0, ValueError, f'The auto train_batch_size ({self.run_training_params["train_batch_size"] }) = n_samples_train ({self.run_training_params["n_samples_train"]}) - prediction_samples ({self.run_training_params["prediction_samples"]}), must be greater than 0.')
1✔
836

837
        return self.run_training_params['train_batch_size'], self.run_training_params['val_batch_size'], self.run_training_params['test_batch_size']
1✔
838

839
    def __inizilize_optimizer(self, optimizer, optimizer_params, optimizer_defaults, add_optimizer_params, add_optimizer_defaults, models, lr, lr_param):
1✔
840
        # Get optimizer and initialization parameters
841
        optimizer = copy.deepcopy(self.__get_parameter(optimizer=optimizer))
1✔
842
        optimizer_params = copy.deepcopy(self.__get_parameter(optimizer_params=optimizer_params))
1✔
843
        optimizer_defaults = copy.deepcopy(self.__get_parameter(optimizer_defaults=optimizer_defaults))
1✔
844
        add_optimizer_params = copy.deepcopy(self.__get_parameter(add_optimizer_params=add_optimizer_params))
1✔
845
        add_optimizer_defaults = copy.deepcopy(self.__get_parameter(add_optimizer_defaults=add_optimizer_defaults))
1✔
846

847
        ## Get parameter to be trained
848
        json_models = []
1✔
849
        models = self.__get_parameter(models=models)
1✔
850
        if 'Models' in self.model_def:
1✔
851
            json_models = list(self.model_def['Models'].keys()) if type(self.model_def['Models']) is dict else [self.model_def['Models']]
1✔
852
        if models is None:
1✔
853
            models = json_models
1✔
854
        self.run_training_params['models'] = models
1✔
855
        params_to_train = set()
1✔
856
        if isinstance(models, str):
1✔
857
            models = [models]
1✔
858
        for model in models:
1✔
859
            check(model in json_models, ValueError, f'The model {model} is not in the model definition')
1✔
860
            if type(self.model_def['Models']) is dict:
1✔
861
                params_to_train |= set(self.model_def['Models'][model]['Parameters'])
1✔
862
            else:
863
                params_to_train |= set(self.model_def['Parameters'].keys())
1✔
864

865
        # Get the optimizer
866
        if type(optimizer) is str:
1✔
867
            if optimizer == 'SGD':
1✔
868
                optimizer = SGD({},[])
1✔
869
            elif optimizer == 'Adam':
1✔
870
                optimizer = Adam({},[])
1✔
871
        else:
872
            check(issubclass(type(optimizer), Optimizer), TypeError,
1✔
873
                  "The optimizer must be an Optimizer or str")
874

875
        optimizer.set_params_to_train(self.model.all_parameters, params_to_train)
1✔
876

877
        optimizer.add_defaults('lr', self.run_training_params['lr'])
1✔
878
        optimizer.add_option_to_params('lr', self.run_training_params['lr_param'])
1✔
879

880
        if optimizer_defaults != {}:
1✔
881
            optimizer.set_defaults(optimizer_defaults)
1✔
882
        if optimizer_params != []:
1✔
883
            optimizer.set_params(optimizer_params)
1✔
884

885
        for key, value in add_optimizer_defaults.items():
1✔
886
            optimizer.add_defaults(key, value)
1✔
887

888
        add_optimizer_params = optimizer.unfold(add_optimizer_params)
1✔
889
        for param in add_optimizer_params:
1✔
890
            par = param['params']
1✔
891
            del param['params']
1✔
892
            for key, value in param.items():
1✔
893
                optimizer.add_option_to_params(key, {par:value})
1✔
894

895
        # Modify the parameter
896
        optimizer.add_defaults('lr', lr)
1✔
897
        optimizer.add_option_to_params('lr', lr_param)
1✔
898

899
        return optimizer
1✔
900

901
    def trainModel(self,
1✔
902
                    models=None,
903
                    train_dataset = None, validation_dataset = None, test_dataset = None, splits = None,
904
                    closed_loop = None, connect = None, step = None, prediction_samples = None,
905
                    shuffle_data = None,
906
                    early_stopping = None, early_stopping_params = None,
907
                    select_model = None, select_model_params = None,
908
                    minimize_gain = None,
909
                    num_of_epochs = None,
910
                    train_batch_size = None, val_batch_size = None, test_batch_size = None,
911
                    optimizer = None,
912
                    lr = None, lr_param = None,
913
                    optimizer_params = None, optimizer_defaults = None,
914
                    training_params = None,
915
                    add_optimizer_params = None, add_optimizer_defaults = None
916
                   ):
917
        """
918
        Trains the model using the provided datasets and parameters.
919

920
        Parameters
921
        ----------
922
        models : list or None, optional
923
            A list of models to train. Default is None.
924
        train_dataset : str or None, optional
925
            The name of the training dataset. Default is None.
926
        validation_dataset : str or None, optional
927
            The name of the validation dataset. Default is None.
928
        test_dataset : str or None, optional
929
            The name of the test dataset. Default is None.
930
        splits : list or None, optional
931
            A list of 3 elements specifying the percentage of splits for training, validation, and testing. The three elements must sum up to 100!
932
            The parameter splits is only used when there is only 1 dataset loaded. Default is None.
933
        closed_loop : dict or None, optional
934
            A dictionary specifying closed loop connections. The keys are input names and the values are output names. Default is None.
935
        connect : dict or None, optional
936
            A dictionary specifying connections. The keys are input names and the values are output names. Default is None.
937
        step : int or None, optional
938
            The step size for training. A larger value results in less data being used per epoch and therefore faster training. Default is None.
939
        prediction_samples : int or None, optional
940
            The size of the prediction horizon, i.e. the number of samples in each recurrent window. Default is None.
941
        shuffle_data : bool or None, optional
942
            Whether to shuffle the data during training. Default is None.
943
        early_stopping : Callable or None, optional
944
            A callable for early stopping. Default is None.
945
        early_stopping_params : dict or None, optional
946
            A dictionary of parameters for early stopping. Default is None.
947
        select_model : Callable or None, optional
948
            A callable for selecting the best model. Default is None.
949
        select_model_params : dict or None, optional
950
            A dictionary of parameters for selecting the best model. Default is None.
951
        minimize_gain : dict or None, optional
952
            A dictionary specifying the gain for each minimization loss function. Default is None.
953
        num_of_epochs : int or None, optional
954
            The number of epochs to train the model. Default is None.
955
        train_batch_size : int or None, optional
956
            The batch size for training. Default is None.
957
        val_batch_size : int or None, optional
958
            The batch size for validation. Default is None.
959
        test_batch_size : int or None, optional
960
            The batch size for testing. Default is None.
961
        optimizer : Optimizer or None, optional
962
            The optimizer to use for training. Default is None.
963
        lr : float or None, optional
964
            The learning rate. Default is None.
965
        lr_param : dict or None, optional
966
            A dictionary of learning rate parameters. Default is None.
967
        optimizer_params : dict or None, optional
968
            A dictionary of optimizer parameters. Default is None.
969
        optimizer_defaults : dict or None, optional
970
            A dictionary of default optimizer settings. Default is None.
971
        training_params : dict or None, optional
972
            A dictionary of training parameters. Default is None.
973
        add_optimizer_params : dict or None, optional
974
            Additional optimizer parameters. Default is None.
975
        add_optimizer_defaults : dict or None, optional
976
            Additional default optimizer settings. Default is None.
977

978
        Raises
979
        ------
980
        RuntimeError
981
            If no data is loaded or if there are no modules with learnable parameters.
982
        KeyError
983
            If the sample horizon is not positive.
984
        ValueError
985
            If an input or output variable is not in the model definition.
986

987
        Example
988
        -------
989
        Example - basic feed-forward training:
990
            >>> x = Input('x')
991
            >>> F = Input('F')
992

993
            >>> xk1 = Output('x[k+1]', Fir()(x.tw(0.2))+Fir()(F.last()))
994

995
            >>> mass_spring_damper = Modely(seed=0)
996
            >>> mass_spring_damper.addModel('xk1',xk1)
997
            >>> mass_spring_damper.neuralizeModel(sample_time = 0.05) 
998

999
            >>> data_struct = ['time','x','dx','F']
1000
            >>> data_folder = os.path.join(os.path.dirname(os.path.realpath(__file__)),'dataset','data')
1001
            >>> mass_spring_damper.loadData(name='mass_spring_dataset', source=data_folder, format=data_struct, delimiter=';')
1002

1003
            >>> params = {'num_of_epochs': 100,'train_batch_size': 128,'lr':0.001}
1004
            >>> mass_spring_damper.trainModel(splits=[70,20,10], training_params = params)
1005

1006
        Example - recurrent training:
1007
            >>> x = Input('x')
1008
            >>> F = Input('F')
1009

1010
            >>> xk1 = Output('x[k+1]', Fir()(x.tw(0.2))+Fir()(F.last()))
1011

1012
            >>> mass_spring_damper = Modely(seed=0)
1013
            >>> mass_spring_damper.addModel('xk1',xk1)
1014
            >>> mass_spring_damper.addClosedLoop(xk1, x)
1015
            >>> mass_spring_damper.neuralizeModel(sample_time = 0.05) 
1016

1017
            >>> data_struct = ['time','x','dx','F']
1018
            >>> data_folder = os.path.join(os.path.dirname(os.path.realpath(__file__)),'dataset','data')
1019
            >>> mass_spring_damper.loadData(name='mass_spring_dataset', source=data_folder, format=data_struct, delimiter=';')
1020

1021
            >>> params = {'num_of_epochs': 100,'train_batch_size': 128,'lr':0.001}
1022
            >>> mass_spring_damper.trainModel(splits=[70,20,10], prediction_samples=10, training_params = params)
1023
        """
1024
        check(self.data_loaded, RuntimeError, 'There is no data loaded! The Training will stop.')
1✔
1025
        check(list(self.model.parameters()), RuntimeError, 'There are no modules with learnable parameters! The Training will stop.')
1✔
1026

1027
        ## Get running parameter from dict
1028
        self.run_training_params = copy.deepcopy(self.__get_train_parameters(training_params))
1✔
1029

1030
        ## Get connect and closed_loop
1031
        prediction_samples = self.__get_parameter(prediction_samples = prediction_samples)
1✔
1032
        check(prediction_samples >= 0, KeyError, 'The sample horizon must be positive!')
1✔
1033

1034
        ## Check close loop and connect
1035
        step = self.__get_parameter(step = step)
1✔
1036
        closed_loop = self.__get_parameter(closed_loop = closed_loop)
1✔
1037
        connect = self.__get_parameter(connect = connect)
1✔
1038
        recurrent_train = True
1✔
1039
        if closed_loop:
1✔
1040
            for input, output in closed_loop.items():
1✔
1041
                check(input in self.model_def['Inputs'], ValueError, f'the tag {input} is not an input variable.')
1✔
1042
                check(output in self.model_def['Outputs'], ValueError, f'the tag {output} is not an output of the network')
1✔
1043
                log.warning(f'Recurrent train: closing the loop between the input port {input} and the output port {output} for {prediction_samples} samples')
1✔
1044
        elif connect:
1✔
1045
            for connect_in, connect_out in connect.items():
1✔
1046
                check(connect_in in self.model_def['Inputs'], ValueError, f'the tag {connect_in} is not an input variable.')
1✔
1047
                check(connect_out in self.model_def['Outputs'], ValueError, f'the tag {connect_out} is not an output of the network')
1✔
1048
                log.warning(f'Recurrent train: connecting the input port {connect_in} with the output port {connect_out} for {prediction_samples} samples')
1✔
1049
        elif self.model_def['States']: ## if we have state variables we have to do the recurrent train
1✔
1050
            log.warning(f"Recurrent train: update States variables {list(self.model_def['States'].keys())} for {prediction_samples} samples")
1✔
1051
        else:
1052
            if prediction_samples != 0:
1✔
1053
                log.warning(
1✔
1054
                    f"The value of the prediction_samples={prediction_samples} is not used in not recursive network.")
1055
            recurrent_train = False
1✔
1056
        self.run_training_params['recurrent_train'] = recurrent_train
1✔
1057

1058
        ## Get early stopping
1059
        early_stopping = self.__get_parameter(early_stopping = early_stopping)
1✔
1060
        if early_stopping:
1✔
1061
            self.run_training_params['early_stopping'] = early_stopping.__name__
×
1062
        early_stopping_params = self.__get_parameter(early_stopping_params = early_stopping_params)
1✔
1063

1064
        ## Get dataset for training
1065
        shuffle_data = self.__get_parameter(shuffle_data = shuffle_data)
1✔
1066

1067
        ## Get the dataset name
1068
        train_dataset = self.__get_parameter(train_dataset = train_dataset)
1✔
1069
        #TODO manage multiple datasets
1070
        if train_dataset is None: ## If we use all datasets with the splits
1✔
1071
            splits = self.__get_parameter(splits = splits)
1✔
1072
            check(len(splits)==3, ValueError, '3 elements must be inserted for the dataset split in training, validation and test')
1✔
1073
            check(sum(splits)==100, ValueError, 'Training, Validation and Test splits must sum up to 100.')
1✔
1074
            check(splits[0] > 0, ValueError, 'The training split cannot be zero.')
1✔
1075

1076
            ## Get the dataset name
1077
            dataset = list(self.data.keys())[0] ## take the dataset name
1✔
1078
            train_dataset_name = val_dataset_name = test_dataset_name = dataset
1✔
1079

1080
            ## Collect the split sizes
1081
            train_size = splits[0] / 100.0
1✔
1082
            val_size = splits[1] / 100.0
1✔
1083
            test_size = 1 - (train_size + val_size)
1✔
1084
            num_of_samples = self.num_of_samples[dataset]
1✔
1085
            n_samples_train = round(num_of_samples*train_size)
1✔
1086
            if splits[1] == 0:
1✔
1087
                n_samples_test = num_of_samples-n_samples_train
1✔
1088
                n_samples_val = 0
1✔
1089
            else:
1090
                n_samples_test = round(num_of_samples*test_size)
1✔
1091
                n_samples_val = num_of_samples-n_samples_train-n_samples_test
1✔
1092
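            ## Worked example (illustrative): with splits=[70, 20, 10] and 1000 loaded samples the
            ## sizes above become train_size=0.70, val_size=0.20, test_size=0.10, so
            ## n_samples_train = round(1000*0.70) = 700, n_samples_test = round(1000*0.10) = 100
            ## and n_samples_val = 1000 - 700 - 100 = 200.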

1093
            ## Split into train, validation and test
1094
            XY_train, XY_val, XY_test = {}, {}, {}
1✔
1095
            for key, samples in self.data[dataset].items():
1✔
1096
                if val_size == 0.0 and test_size == 0.0: ## we have only training set
1✔
1097
                    XY_train[key] = torch.from_numpy(samples).to(TORCH_DTYPE)
1✔
1098
                elif val_size == 0.0 and test_size != 0.0: ## we have only training and test set
1✔
1099
                    XY_train[key] = torch.from_numpy(samples[:n_samples_train]).to(TORCH_DTYPE)
1✔
1100
                    XY_test[key] = torch.from_numpy(samples[n_samples_train:]).to(TORCH_DTYPE)
1✔
1101
                elif val_size != 0.0 and test_size == 0.0: ## we have only training and validation set
1✔
1102
                    XY_train[key] = torch.from_numpy(samples[:n_samples_train]).to(TORCH_DTYPE)
1✔
1103
                    XY_val[key] = torch.from_numpy(samples[n_samples_train:]).to(TORCH_DTYPE)
1✔
1104
                else: ## we have training, validation and test set
1105
                    XY_train[key] = torch.from_numpy(samples[:n_samples_train]).to(TORCH_DTYPE)
1✔
1106
                    XY_val[key] = torch.from_numpy(samples[n_samples_train:-n_samples_test]).to(TORCH_DTYPE)
1✔
1107
                    XY_test[key] = torch.from_numpy(samples[n_samples_train+n_samples_val:]).to(TORCH_DTYPE)
1✔
1108

1109
            ## Set name for resultsAnalysis
1110
            train_dataset = self.__get_parameter(train_dataset = f"train_{dataset}_{train_size:0.2f}")
1✔
1111
            validation_dataset = self.__get_parameter(validation_dataset =f"validation_{dataset}_{val_size:0.2f}")
1✔
1112
            test_dataset = self.__get_parameter(test_dataset = f"test_{dataset}_{test_size:0.2f}")
1✔
1113
        else: ## Multi-Dataset
1114
            ## Get the names of the datasets
1115
            datasets = list(self.data.keys())
1✔
1116
            validation_dataset = self.__get_parameter(validation_dataset=validation_dataset)
1✔
1117
            test_dataset = self.__get_parameter(test_dataset=test_dataset)
1✔
1118
            train_dataset_name, val_dataset_name, test_dataset_name = train_dataset, validation_dataset, test_dataset
1✔
1119

1120
            ## Collect the number of samples for each dataset
1121
            n_samples_train, n_samples_val, n_samples_test = 0, 0, 0
1✔
1122

1123
            check(train_dataset in datasets, KeyError, f'{train_dataset} Not Loaded!')
1✔
1124
            if validation_dataset is not None and validation_dataset not in datasets:
1✔
1125
                log.warning(f'Validation Dataset [{validation_dataset}] Not Loaded. The training will continue without validation')
×
1126
            if test_dataset is not None and test_dataset not in datasets:
1✔
1127
                log.warning(f'Test Dataset [{test_dataset}] Not Loaded. The training will continue without test')
×
1128

1129
            ## Split into train, validation and test
1130
            XY_train, XY_val, XY_test = {}, {}, {}
1✔
1131
            n_samples_train = self.num_of_samples[train_dataset]
1✔
1132
            XY_train = {key: torch.from_numpy(val).to(TORCH_DTYPE) for key, val in self.data[train_dataset].items()}
1✔
1133
            if validation_dataset in datasets:
1✔
1134
                n_samples_val = self.num_of_samples[validation_dataset]
1✔
1135
                XY_val = {key: torch.from_numpy(val).to(TORCH_DTYPE) for key, val in self.data[validation_dataset].items()}
1✔
1136
            if test_dataset in datasets:
1✔
1137
                n_samples_test = self.num_of_samples[test_dataset]
1✔
1138
                XY_test = {key: torch.from_numpy(val).to(TORCH_DTYPE) for key, val in self.data[test_dataset].items()}
1✔
1139

1140
        for key in XY_train.keys():
1✔
1141
            assert n_samples_train == XY_train[key].shape[0], f'The number of train samples does not match: {n_samples_train} != {XY_train[key].shape[0]}.'
1✔
1142
            if key in XY_val:
1✔
1143
                assert n_samples_val == XY_val[key].shape[0], f'The number of validation samples does not match: {n_samples_val} != {XY_val[key].shape[0]}.'
1✔
1144
            if key in XY_test:
1✔
1145
                assert n_samples_test == XY_test[key].shape[0], f'The number of test samples does not match: {n_samples_test} != {XY_test[key].shape[0]}.'
1✔
1146

1147
        assert n_samples_train > 0, f'There are no samples available for training (n_samples_train = {n_samples_train}).'
1✔
1148
        self.run_training_params['n_samples_train'] = n_samples_train
1✔
1149
        self.run_training_params['n_samples_val'] = n_samples_val
1✔
1150
        self.run_training_params['n_samples_test'] = n_samples_test
1✔
1151
        train_batch_size, val_batch_size, test_batch_size = self.__get_batch_sizes(train_batch_size, val_batch_size, test_batch_size)
1✔
1152

1153
        ## Define the optimizer
1154
        optimizer = self.__inizilize_optimizer(optimizer, optimizer_params, optimizer_defaults, add_optimizer_params, add_optimizer_defaults, models, lr, lr_param)
1✔
1155
        self.run_training_params['optimizer'] = optimizer.name
1✔
1156
        self.run_training_params['optimizer_params'] = optimizer.optimizer_params
1✔
1157
        self.run_training_params['optimizer_defaults'] = optimizer.optimizer_defaults
1✔
1158
        self.optimizer = optimizer.get_torch_optimizer()
1✔
1159

1160
        ## Get num_of_epochs
1161
        num_of_epochs = self.__get_parameter(num_of_epochs = num_of_epochs)
1✔
1162

1163
        ## Define the loss functions
1164
        minimize_gain = self.__get_parameter(minimize_gain = minimize_gain)
1✔
1165
        self.run_training_params['minimizers'] = {}
1✔
1166
        for name, values in self.model_def['Minimizers'].items():
1✔
1167
            self.loss_functions[name] = CustomLoss(values['loss'])
1✔
1168
            self.run_training_params['minimizers'][name] = {}
1✔
1169
            self.run_training_params['minimizers'][name]['A'] = values['A']
1✔
1170
            self.run_training_params['minimizers'][name]['B'] = values['B']
1✔
1171
            self.run_training_params['minimizers'][name]['loss'] = values['loss']
1✔
1172
            if name in minimize_gain:
1✔
1173
                self.run_training_params['minimizers'][name]['gain'] = minimize_gain[name]
1✔
1174

1175
        ## Remove the unused keys from the training parameters dict
1176
        del self.run_training_params['minimize_gain']
1✔
1177
        del self.run_training_params['lr']
1✔
1178
        del self.run_training_params['lr_param']
1✔
1179
        if not recurrent_train:
1✔
1180
            del self.run_training_params['connect']
1✔
1181
            del self.run_training_params['closed_loop']
1✔
1182
            del self.run_training_params['step']
1✔
1183
            del self.run_training_params['prediction_samples']
1✔
1184
        if early_stopping is None:
1✔
1185
            del self.run_training_params['early_stopping']
1✔
1186
            del self.run_training_params['early_stopping_params']
1✔
1187

1188
        ## Create the train, validation and test loss dictionaries
1189
        train_losses, val_losses, test_losses = {}, {}, {}
1✔
1190
        for key in self.model_def['Minimizers'].keys():
1✔
1191
            train_losses[key] = []
1✔
1192
            if n_samples_val > 0:
1✔
1193
                val_losses[key] = []
1✔
1194

1195
        ## Check the needed keys are in the datasets
1196
        keys = set(self.model_def['Inputs'].keys())
1✔
1197
        keys |= {value['A'] for value in self.model_def['Minimizers'].values()}|{value['B'] for value in self.model_def['Minimizers'].values()}
1✔
1198
        keys -= set(self.model_def['Relations'].keys())
1✔
1199
        keys -= set(self.model_def['States'].keys())
1✔
1200
        keys -= set(self.model_def['Outputs'].keys())
1✔
1201
        if 'connect' in self.run_training_params:
1✔
1202
            keys -= set(self.run_training_params['connect'].keys())
1✔
1203
        if 'closed_loop' in self.run_training_params:
1✔
1204
            keys -= set(self.run_training_params['closed_loop'].keys())
1✔
1205
        check(set(keys).issubset(set(XY_train.keys())), KeyError, f"Not all the mandatory keys {keys} are present in the training dataset {set(XY_train.keys())}.")
1✔
1206
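        ## Example (illustrative): an Input that feeds a Minimizer must be present in the dataset,
        ## while States and inputs driven through 'connect' or 'closed_loop' may be absent because
        ## their samples are produced by the network itself during the recurrent training.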

1207
        ## Evaluate the number of updates per epoch and the unused samples
1208
        if recurrent_train:
1✔
1209
            list_of_batch_indexes = range(0, (n_samples_train - train_batch_size - prediction_samples + 1), (train_batch_size + step))
1✔
1210
            check(n_samples_train - train_batch_size - prediction_samples + 1 > 0, ValueError,
1✔
1211
                  f"The number of available sample are (n_samples_train ({n_samples_train}) - train_batch_size ({train_batch_size}) - prediction_samples ({prediction_samples}) + 1) = {n_samples_train - train_batch_size - prediction_samples + 1}.")
1212
            update_per_epochs = (n_samples_train - train_batch_size - prediction_samples + 1)//(train_batch_size + step) + 1
1✔
1213
            unused_samples = n_samples_train - list_of_batch_indexes[-1] - train_batch_size - prediction_samples
1✔
1214
        else:
1215
            update_per_epochs = (n_samples_train - train_batch_size)//train_batch_size + 1 ## integer division, consistent with the batch loop in __Train
1✔
1216
            unused_samples = n_samples_train - update_per_epochs * train_batch_size
1✔
1217
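        ## Worked example (illustrative): with n_samples_train=100, train_batch_size=32,
        ## prediction_samples=0 and step=0 the recurrent branch gives batch start indexes
        ## [0, 32, 64], update_per_epochs = 69//32 + 1 = 3 and unused_samples = 100 - 64 - 32 - 0 = 4,
        ## while the non-recurrent branch gives update_per_epochs = (100 - 32)//32 + 1 = 3 and
        ## unused_samples = 100 - 3*32 = 4.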

1218
        self.run_training_params['update_per_epochs'] = update_per_epochs
1✔
1219
        self.run_training_params['unused_samples'] = unused_samples
1✔
1220

1221
        ## Select the model
1222
        select_model = self.__get_parameter(select_model = select_model)
1✔
1223
        select_model_params = self.__get_parameter(select_model_params = select_model_params)
1✔
1224
        selected_model_def = ModelDef(self.model_def.json)
1✔
1225
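        ## Illustrative sketch (hypothetical helper, not part of the library): select_model is a
        ## callable invoked below as select_model(train_losses, val_losses, select_model_params) and
        ## it returns True when the current epoch produced the best model so far, e.g.
        ##     def select_best_val(train_losses, val_losses, params):
        ##         key = list(val_losses.keys())[0]
        ##         return val_losses[key][-1] == min(val_losses[key])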

1226
        ## Show the training parameters
1227
        self.visualizer.showTrainParams()
1✔
1228

1229
        import time
1✔
1230
        ## start the train timer
1231
        start = time.time()
1✔
1232
        self.visualizer.showStartTraining()
1✔
1233

1234
        for epoch in range(num_of_epochs):
1✔
1235
            ## TRAIN
1236
            self.model.train()
1✔
1237
            if recurrent_train:
1✔
1238
                losses = self.__recurrentTrain(XY_train, n_samples_train, train_dataset_name, train_batch_size, minimize_gain, closed_loop, connect, prediction_samples, step, shuffle=shuffle_data, train=True)
1✔
1239
            else:
1240
                losses = self.__Train(XY_train, n_samples_train, train_batch_size, minimize_gain, shuffle=shuffle_data, train=True)
1✔
1241
            ## save the losses
1242
            for ind, key in enumerate(self.model_def['Minimizers'].keys()):
1✔
1243
                train_losses[key].append(torch.mean(losses[ind]).tolist())
1✔
1244

1245
            if n_samples_val > 0:
1✔
1246
                ## VALIDATION
1247
                self.model.eval()
1✔
1248
                if recurrent_train:
1✔
1249
                    losses = self.__recurrentTrain(XY_val, n_samples_val, val_dataset_name, val_batch_size, minimize_gain, closed_loop, connect, prediction_samples, step, shuffle=False, train=False)
1✔
1250
                else:
1251
                    losses = self.__Train(XY_val, n_samples_val, val_batch_size, minimize_gain, shuffle=False, train=False)
1✔
1252
                ## save the losses
1253
                for ind, key in enumerate(self.model_def['Minimizers'].keys()):
1✔
1254
                    val_losses[key].append(torch.mean(losses[ind]).tolist())
1✔
1255

1256
            ## Early-stopping
1257
            if callable(early_stopping):
1✔
1258
                if early_stopping(train_losses, val_losses, early_stopping_params):
×
1259
                    log.info(f'Stopping the training at epoch {epoch} due to early stopping.')
×
1260
                    break
×
1261

1262
            if callable(select_model):
1✔
1263
                if select_model(train_losses, val_losses, select_model_params):
×
1264
                    best_model_epoch = epoch
×
1265
                    selected_model_def.updateParameters(self.model)
×
1266

1267
            ## Visualize the training...
1268
            self.visualizer.showTraining(epoch, train_losses, val_losses)
1✔
1269
            self.visualizer.showWeightsInTrain(epoch = epoch)
1✔
1270

1271
        ## Save the training time
1272
        end = time.time()
1✔
1273
        ## Store the training and validation loss history
1274
        for key in self.model_def['Minimizers'].keys():
1✔
1275
            self.training[key] = {'train': train_losses[key]}
1✔
1276
            if n_samples_val > 0:
1✔
1277
                self.training[key]['val'] = val_losses[key]
1✔
1278
        self.visualizer.showEndTraining(num_of_epochs-1, train_losses, val_losses)
1✔
1279
        self.visualizer.showTrainingTime(end-start)
1✔
1280

1281
        ## Select the model
1282
        if callable(select_model):
1✔
1283
            log.info(f'Selected the model at epoch {best_model_epoch+1}.')
×
1284
            self.model = Model(selected_model_def)
×
1285
        else:
1286
            log.info('The selected model is the LAST model of the training.')
1✔
1287

1288
        self.resultAnalysis(train_dataset, XY_train, minimize_gain, closed_loop, connect,  prediction_samples, step, train_batch_size)
1✔
1289
        if self.run_training_params['n_samples_val'] > 0:
1✔
1290
            self.resultAnalysis(validation_dataset, XY_val, minimize_gain, closed_loop, connect,  prediction_samples, step, val_batch_size)
1✔
1291
        if self.run_training_params['n_samples_test'] > 0:
1✔
1292
            self.resultAnalysis(test_dataset, XY_test, minimize_gain, closed_loop, connect,  prediction_samples, step, test_batch_size)
1✔
1293

1294
        self.visualizer.showResults()
1✔
1295

1296
        ## Get trained model from torch and set the model_def
1297
        self.model_def.updateParameters(self.model)
1✔
1298

1299
    def __recurrentTrain(self, data, n_samples, dataset_name, batch_size, loss_gains, closed_loop, connect, prediction_samples, step, shuffle=False, train=True):
1✔
1300
        model_inputs = list(self.model_def['Inputs'].keys())
1✔
1301
        state_closed_loop = [key for key, value in self.model_def['States'].items() if 'closedLoop' in value.keys()] + list(closed_loop.keys())
1✔
1302
        state_connect = [key for key, value in self.model_def['States'].items() if 'connect' in value.keys()] + list(connect.keys())
1✔
1303
        non_mandatory_inputs = state_closed_loop + state_connect 
1✔
1304
        mandatory_inputs = list(set(model_inputs) - set(non_mandatory_inputs))
1✔
1305

1306
        n_available_samples = n_samples - prediction_samples 
1✔
1307
        list_of_batch_indexes = list(range(n_available_samples))
1✔
1308

1309
        ## Remove forbidden indexes in case of a multi-file dataset
1310
        if dataset_name in self.multifile.keys(): ## Multi-file Dataset
1✔
1311
            if n_samples == self.run_training_params['n_samples_train']: ## Training
1✔
1312
                start_idx, end_idx = 0, n_samples
1✔
1313
            elif n_samples == self.run_training_params['n_samples_val']: ## Validation
1✔
1314
                start_idx, end_idx = self.run_training_params['n_samples_train'], self.run_training_params['n_samples_train'] + n_samples
1✔
1315
            else: ## Test
NEW
1316
                start_idx, end_idx = self.run_training_params['n_samples_train'] + self.run_training_params['n_samples_val'], self.run_training_params['n_samples_train'] + self.run_training_params['n_samples_val'] + n_samples
×
1317
            forbidden_idxs = []
1✔
1318
            for i in self.multifile[dataset_name]:
1✔
1319
                if i < end_idx and i > start_idx:
1✔
1320
                    forbidden_idxs.extend(range(i-prediction_samples, i, 1))
1✔
1321
            list_of_batch_indexes = [idx for idx in list_of_batch_indexes if idx not in forbidden_idxs]
1✔
1322
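            ## Example (illustrative): if a file boundary of the multi-file dataset falls at index
            ## i=50 and prediction_samples=5, the start indexes 45..49 are removed because a
            ## prediction window starting there would cross the file boundary.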

1323
        ## Loss vector 
1324
        check((batch_size+step)>0, ValueError, f"The batch_size+step must be greater than 0.")
1✔
1325
        aux_losses = torch.zeros([len(self.model_def['Minimizers']), round(len(list_of_batch_indexes)/(batch_size+step))])
1✔
1326

1327
        ## Update with virtual states
1328
        self.model.update(closed_loop=closed_loop, connect=connect)
1✔
1329
        X = {}
1✔
1330
        batch_val = 0
1✔
1331
        while len(list_of_batch_indexes) >= batch_size:
1✔
1332
            idxs = random.sample(list_of_batch_indexes, batch_size) if shuffle else list_of_batch_indexes[:batch_size]
1✔
1333
            for num in idxs:
1✔
1334
                list_of_batch_indexes.remove(num)
1✔
1335
            if step > 0:
1✔
1336
                if len(list_of_batch_indexes) >= step:
1✔
1337
                    step_idxs = random.sample(list_of_batch_indexes, step) if shuffle else list_of_batch_indexes[:step]
1✔
1338
                    for num in step_idxs:
1✔
1339
                        list_of_batch_indexes.remove(num)
1✔
1340
            if train:
1✔
1341
                self.optimizer.zero_grad() ## Reset the gradient
1✔
1342
            ## Reset 
1343
            horizon_losses = {ind: [] for ind in range(len(self.model_def['Minimizers']))}
1✔
1344
            for key in non_mandatory_inputs:
1✔
1345
                if key in data.keys():
1✔
1346
                    ## with data
1347
                    X[key] = data[key][idxs]
1✔
1348
                else: ## with zeros
1349
                    window_size = self.input_n_samples[key]
1✔
1350
                    dim = self.model_def['Inputs'][key]['dim'] if key in model_inputs else self.model_def['States'][key]['dim']
1✔
1351
                    X[key] = torch.zeros(size=(batch_size, window_size, dim), dtype=TORCH_DTYPE, requires_grad=False)
1✔
1352
                    self.states[key] = X[key]
1✔
1353

1354
            for horizon_idx in range(prediction_samples + 1):
1✔
1355
                ## Get data 
1356
                for key in mandatory_inputs:
1✔
1357
                    X[key] = data[key][[idx+horizon_idx for idx in idxs]]
1✔
1358
                ## Forward pass
1359
                out, minimize_out, out_closed_loop, out_connect = self.model(X)
1✔
1360

1361
                if self.log_internal and train:
1✔
1362
                    internals_dict = {'XY':tensor_to_list(X),'out':out,'param':self.model.all_parameters,'closedLoop':self.model.closed_loop_update,'connect':self.model.connect_update}
1✔
1363

1364
                ## Loss Calculation
1365
                for ind, (key, value) in enumerate(self.model_def['Minimizers'].items()):
1✔
1366
                    loss = self.loss_functions[key](minimize_out[value['A']], minimize_out[value['B']])
1✔
1367
                    loss = (loss * loss_gains[key]) if key in loss_gains.keys() else loss  ## Multiply by the gain if necessary
1✔
1368
                    horizon_losses[ind].append(loss)
1✔
1369

1370
                ## Update
1371
                for key, val in out_closed_loop.items():
1✔
1372
                    shift = val.shape[1]  ## take the output time dimension
1✔
1373
                    X[key] = torch.roll(X[key], shifts=-1, dims=1) ## Roll the time window
1✔
1374
                    X[key][:, -shift:, :] = val ## substitute with the predicted value
1✔
1375
                    self.states[key] = X[key].clone()
1✔
1376
                for key, value in out_connect.items():
1✔
1377
                    X[key] = value
1✔
1378
                    self.states[key] = X[key].clone()
1✔
1379
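                ## Example (illustrative): with a 3-sample input window [x(t-2), x(t-1), x(t)] and a
                ## one-step closed-loop prediction y, the roll + substitution above turns the window
                ## into [x(t-1), x(t), y], which is fed back at the next step of the horizon.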

1380
                if self.log_internal and train:
1✔
1381
                    internals_dict['state'] = self.states
1✔
1382
                    self.__save_internal('inout_'+str(batch_val)+'_'+str(horizon_idx),internals_dict)
1✔
1383

1384
            ## Calculate the total loss
1385
            total_loss = 0
1✔
1386
            for ind in range(len(self.model_def['Minimizers'])):
1✔
1387
                loss = sum(horizon_losses[ind])/(prediction_samples+1)
1✔
1388
                aux_losses[ind][batch_val] = loss.item()
1✔
1389
                total_loss += loss
1✔
1390

1391
            ## Gradient Step
1392
            if train:
1✔
1393
                total_loss.backward() ## Backpropagate the error
1✔
1394
                self.optimizer.step()
1✔
1395
                self.visualizer.showWeightsInTrain(batch = batch_val)
1✔
1396
            batch_val += 1
1✔
1397

1398
        ## Remove virtual states
1399
        for key in (connect.keys() | closed_loop.keys()):
1✔
1400
            if key in self.states.keys():
1✔
1401
                del self.states[key]
1✔
1402

1403
        ## return the losses
1404
        return aux_losses
1✔
1405

1406
    def __Train(self, data, n_samples, batch_size, loss_gains, shuffle=True, train=True):
1✔
1407
        check((n_samples - batch_size + 1) > 0, ValueError,
1✔
1408
              f"The number of available sample are (n_samples_train - train_batch_size + 1) = {n_samples - batch_size + 1}.")
1409
        if shuffle:
1✔
1410
            randomize = torch.randperm(n_samples)
1✔
1411
            data = {key: val[randomize] for key, val in data.items()}
1✔
1412
        ## Initialize the train losses vector
1413
        aux_losses = torch.zeros([len(self.model_def['Minimizers']),n_samples//batch_size])
1✔
1414
        for idx in range(0, (n_samples - batch_size + 1), batch_size):
1✔
1415
            ## Build the input tensor
1416
            XY = {key: val[idx:idx+batch_size] for key, val in data.items()}
1✔
1417
            ## Reset gradient
1418
            if train:
1✔
1419
                self.optimizer.zero_grad()
1✔
1420
            ## Model Forward
1421
            _, minimize_out, _, _ = self.model(XY)  ## Forward pass
1✔
1422
            ## Loss Calculation
1423
            total_loss = 0
1✔
1424
            for ind, (key, value) in enumerate(self.model_def['Minimizers'].items()):
1✔
1425
                loss = self.loss_functions[key](minimize_out[value['A']], minimize_out[value['B']])
1✔
1426
                loss = (loss * loss_gains[key]) if key in loss_gains.keys() else loss  ## Multiply by the gain if necessary
1✔
1427
                aux_losses[ind][idx//batch_size] = loss.item()
1✔
1428
                total_loss += loss
1✔
1429
            ## Gradient step
1430
            if train:
1✔
1431
                total_loss.backward()
1✔
1432
                self.optimizer.step()
1✔
1433
                self.visualizer.showWeightsInTrain(batch = idx//batch_size)
1✔
1434

1435
        ## return the losses
1436
        return aux_losses
1✔
1437

1438
    def resultAnalysis(self, dataset, data = None, minimize_gain = {}, closed_loop = {}, connect = {},  prediction_samples = None, step = 0, batch_size = None):
1✔
1439
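        """
        Evaluates the model on a dataset and fills self.performance (loss, FVU and AIC for every
        minimizer) and self.prediction (the compared A and B signals) for that dataset, then shows
        the results through the visualizer.

        Parameters
        ----------
        dataset : str
            The name of the dataset to analyse; when data is None it is also used to retrieve the loaded samples.
        data : dict or None, optional
            Pre-built tensors to analyse. If None, the loaded dataset named `dataset` is used.
        minimize_gain : dict, optional
            Optional gains applied to the losses of the minimizers.
        closed_loop, connect : dict, optional
            Recurrent connections between inputs and outputs, as in trainModel.
        prediction_samples : int or None, optional
            The recurrent prediction horizon; if None the analysis is not recurrent.
        step : int, optional
            Number of samples skipped between batches in the recurrent analysis.
        batch_size : int or None, optional
            The batch size used for the analysis; if None it is derived from the dataset size.

        Example
        -------
        Example usage:
            >>> model.resultAnalysis('test_dataset')
        """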
        import warnings
1✔
1440
        with torch.inference_mode():
1✔
1441
            ## Init model for results analysis
1442
            self.model.eval()
1✔
1443
            self.performance[dataset] = {}
1✔
1444
            self.prediction[dataset] = {}
1✔
1445
            A = {}
1✔
1446
            B = {}
1✔
1447
            total_losses = {}
1✔
1448

1449
            # Create the losses
1450
            losses = {}
1✔
1451
            for name, values in self.model_def['Minimizers'].items():
1✔
1452
                losses[name] = CustomLoss(values['loss'])
1✔
1453

1454
            recurrent = False
1✔
1455
            if (closed_loop or connect or self.model_def['States']) and prediction_samples is not None:
1✔
1456
                recurrent = True
1✔
1457

1458
            if data is None:
1✔
1459
                check(dataset in self.data.keys(), ValueError, f'The dataset {dataset} is not loaded!')
1✔
1460
                data = {key: torch.from_numpy(val).to(TORCH_DTYPE) for key, val in self.data[dataset].items()}
1✔
1461
            n_samples = len(data[list(data.keys())[0]])
1✔
1462

1463
            if recurrent:
1✔
1464
                batch_size = batch_size if batch_size is not None else n_samples - prediction_samples
1✔
1465

1466
                model_inputs = list(self.model_def['Inputs'].keys())
1✔
1467

1468
                state_closed_loop = [key for key, value in self.model_def['States'].items() if 'closedLoop' in value.keys()] + list(closed_loop.keys())
1✔
1469
                state_connect = [key for key, value in self.model_def['States'].items() if 'connect' in value.keys()] + list(connect.keys())
1✔
1470

1471
                non_mandatory_inputs = state_closed_loop + state_connect 
1✔
1472
                mandatory_inputs = list(set(model_inputs) - set(non_mandatory_inputs))
1✔
1473

1474
                for key, value in self.model_def['Minimizers'].items():
1✔
1475
                    total_losses[key], A[key], B[key] = [], [], []
1✔
1476
                    for horizon_idx in range(prediction_samples + 1):
1✔
1477
                        A[key].append([])
1✔
1478
                        B[key].append([])
1✔
1479
                
1480
                list_of_batch_indexes = list(range(n_samples - prediction_samples))
1✔
1481
                ## Remove forbidden indexes in case of a multi-file dataset
1482
                if dataset in self.multifile.keys(): ## Multi-file Dataset
1✔
NEW
1483
                    if n_samples == self.run_training_params['n_samples_train']: ## Training
×
NEW
1484
                        start_idx, end_idx = 0, n_samples
×
NEW
1485
                    elif n_samples == self.run_training_params['n_samples_val']: ## Validation
×
NEW
1486
                        start_idx, end_idx = self.run_training_params['n_samples_train'], self.run_training_params['n_samples_train'] + n_samples
×
1487
                    else: ## Test
NEW
1488
                        start_idx, end_idx = self.run_training_params['n_samples_train'] + self.run_training_params['n_samples_val'], self.run_training_params['n_samples_train'] + self.run_training_params['n_samples_val'] + n_samples
×
NEW
1489
                    forbidden_idxs = []
×
NEW
1490
                    for i in self.multifile[dataset]:
×
NEW
1491
                        if i < end_idx and i > start_idx:
×
NEW
1492
                            forbidden_idxs.extend(range(i-prediction_samples, i, 1))
×
NEW
1493
                    list_of_batch_indexes = [idx for idx in list_of_batch_indexes if idx not in forbidden_idxs]
×
1494

1495
                X = {}
1✔
1496
                ## Update with virtual states
1497
                self.model.update(closed_loop=closed_loop, connect=connect)
1✔
1498
                while len(list_of_batch_indexes) >= batch_size:
1✔
1499
                    idxs = list_of_batch_indexes[:batch_size]
1✔
1500
                    for num in idxs:
1✔
1501
                        list_of_batch_indexes.remove(num)
1✔
1502
                    if step > 0:
1✔
1503
                        if len(list_of_batch_indexes) >= step:
1✔
1504
                            step_idxs = list_of_batch_indexes[:step]
1✔
1505
                            for num in step_idxs:
1✔
1506
                                list_of_batch_indexes.remove(num)
1✔
1507
                    ## Reset 
1508
                    horizon_losses = {key: [] for key in self.model_def['Minimizers'].keys()}
1✔
1509
                    for key in non_mandatory_inputs:
1✔
1510
                        if key in data.keys(): # and len(data[key]) >= (idx + self.input_n_samples[key]): 
1✔
1511
                            ## with data
1512
                            X[key] = data[key][idxs]
1✔
1513
                        else: ## with zeros
1514
                            window_size = self.input_n_samples[key]
1✔
1515
                            dim = self.model_def['Inputs'][key]['dim'] if key in model_inputs else self.model_def['States'][key]['dim']
1✔
1516
                            X[key] = torch.zeros(size=(batch_size, window_size, dim), dtype=TORCH_DTYPE, requires_grad=False)
1✔
1517
                            self.states[key] = X[key]
1✔
1518

1519
                    for horizon_idx in range(prediction_samples + 1):
1✔
1520
                        ## Get data 
1521
                        for key in mandatory_inputs:
1✔
1522
                            X[key] = data[key][[idx+horizon_idx for idx in idxs]]
1✔
1523
                        ## Forward pass
1524
                        out, minimize_out, out_closed_loop, out_connect = self.model(X)
1✔
1525

1526
                        ## Loss Calculation
1527
                        for key, value in self.model_def['Minimizers'].items():
1✔
1528
                            A[key][horizon_idx].append(minimize_out[value['A']])
1✔
1529
                            B[key][horizon_idx].append(minimize_out[value['B']])
1✔
1530
                            loss = losses[key](minimize_out[value['A']], minimize_out[value['B']])
1✔
1531
                            loss = (loss * minimize_gain[key]) if key in minimize_gain.keys() else loss  ## Multiply by the gain if necessary
1✔
1532
                            horizon_losses[key].append(loss)
1✔
1533

1534
                        ## Update
1535
                        for key, val in out_closed_loop.items():
1✔
1536
                            shift = val.shape[1]  ## take the output time dimension
1✔
1537
                            X[key] = torch.roll(X[key], shifts=-1, dims=1) ## Roll the time window
1✔
1538
                            X[key][:, -shift:, :] = val ## substitute with the predicted value
1✔
1539
                            self.states[key] = X[key].clone()
1✔
1540
                        for key, value in out_connect.items():
1✔
1541
                            X[key] = value
1✔
1542
                            self.states[key] = X[key].clone()
1✔
1543

1544
                    ## Calculate the total loss
1545
                    for key in self.model_def['Minimizers'].keys():
1✔
1546
                        loss = sum(horizon_losses[key]) / (prediction_samples + 1)
1✔
1547
                        total_losses[key].append(loss.detach().numpy())
1✔
1548

1549
                for key, value in self.model_def['Minimizers'].items():
1✔
1550
                    for horizon_idx in range(prediction_samples + 1):
1✔
1551
                        A[key][horizon_idx] = np.concatenate(A[key][horizon_idx])
1✔
1552
                        B[key][horizon_idx] = np.concatenate(B[key][horizon_idx])
1✔
1553
                    total_losses[key] = np.mean(total_losses[key])
1✔
1554

1555
            else:
1556
                if batch_size is None:
1✔
1557
                    batch_size = n_samples
1✔
1558

1559
                for key, value in self.model_def['Minimizers'].items():
1✔
1560
                    total_losses[key], A[key], B[key] = [], [], []
1✔
1561

1562
                for idx in range(0, (n_samples - batch_size + 1), batch_size):
1✔
1563
                    ## Build the input tensor
1564
                    XY = {key: val[idx:idx + batch_size] for key, val in data.items()}
1✔
1565

1566
                    ## Model Forward
1567
                    _, minimize_out, _, _ = self.model(XY)  ## Forward pass
1✔
1568
                    ## Loss Calculation
1569
                    for key, value in self.model_def['Minimizers'].items():
1✔
1570
                        A[key].append(minimize_out[value['A']].numpy())
1✔
1571
                        B[key].append(minimize_out[value['B']].numpy())
1✔
1572
                        loss = losses[key](minimize_out[value['A']], minimize_out[value['B']])
1✔
1573
                        loss = (loss * minimize_gain[key]) if key in minimize_gain.keys() else loss
1✔
1574
                        total_losses[key].append(loss.detach().numpy())
1✔
1575

1576
                for key, value in self.model_def['Minimizers'].items():
1✔
1577
                    A[key] = np.concatenate(A[key])
1✔
1578
                    B[key] = np.concatenate(B[key])
1✔
1579
                    total_losses[key] = np.mean(total_losses[key])
1✔
1580

1581
            for ind, (key, value) in enumerate(self.model_def['Minimizers'].items()):
1✔
1582
                A_np = np.array(A[key])
1✔
1583
                B_np = np.array(B[key])
1✔
1584
                self.performance[dataset][key] = {}
1✔
1585
                self.performance[dataset][key][value['loss']] = np.mean(total_losses[key]).item()
1✔
1586
                self.performance[dataset][key]['fvu'] = {}
1✔
1587
                # Compute FVU
1588
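                ## FVU (Fraction of Variance Unexplained) = Var(A - B) / Var(reference); it is
                ## computed below against both A and B and then averaged, so a value close to 0
                ## means the residual variance is small compared to the signal variance.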
                residual = A_np - B_np
1✔
1589
                error_var = np.var(residual)
1✔
1590
                error_mean = np.mean(residual)
1✔
1591
                #error_var_manual = np.sum((residual-error_mean) ** 2) / (len(self.prediction['B'][ind]) - 0)
1592
                #print(f"{key} var np:{new_error_var} and var manual:{error_var_manual}")
1593
                with warnings.catch_warnings(record=True) as w:
1✔
1594
                    self.performance[dataset][key]['fvu']['A'] = (error_var / np.var(A_np)).item()
1✔
1595
                    self.performance[dataset][key]['fvu']['B'] = (error_var / np.var(B_np)).item()
1✔
1596
                    if w and np.var(A_np) == 0.0 and  np.var(B_np) == 0.0:
1✔
1597
                        self.performance[dataset][key]['fvu']['A'] = np.nan
1✔
1598
                        self.performance[dataset][key]['fvu']['B'] = np.nan
1✔
1599
                self.performance[dataset][key]['fvu']['total'] = np.mean([self.performance[dataset][key]['fvu']['A'],self.performance[dataset][key]['fvu']['B']]).item()
1✔
1600
                # Compute AIC
1601
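                ## AIC = -2*log_likelihood + 2*total_params, where the Gaussian log-likelihood of the
                ## residuals is log L = -n/2*log(2*pi) - n/2*log(error_var) - sum(residual**2)/(2*error_var),
                ## evaluated below as p1 + p2 + p3.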
                #normal_dist = norm(0, error_var ** 0.5)
1602
                #probability_of_residual = normal_dist.pdf(residual)
1603
                #log_likelihood_first = sum(np.log(probability_of_residual))
1604
                p1 = -len(residual)/2.0*np.log(2*np.pi)
1✔
1605
                with warnings.catch_warnings(record=True) as w:
1✔
1606
                    p2 = -len(residual)/2.0*np.log(error_var)
1✔
1607
                    p3 = -1 / (2.0 * error_var) * np.sum(residual ** 2)
1✔
1608
                    if w and p2 == np.float32(np.inf) and p3 == np.float32(-np.inf):
1✔
1609
                        p2 = p3 = 0.0
1✔
1610
                log_likelihood = p1+p2+p3
1✔
1611
                #print(f"{key} log likelihood second mode:{log_likelihood} = {p1}+{p2}+{p3} first mode: {log_likelihood_first}")
1612
                total_params = sum(p.numel() for p in self.model.parameters() if p.requires_grad) #TODO: check whether the parameter count is doubled
1✔
1613
                #print(f"{key} total_params:{total_params}")
1614
                aic = - 2 * log_likelihood + 2 * total_params
1✔
1615
                #print(f"{key} aic:{aic}")
1616
                self.performance[dataset][key]['aic'] = {'value':aic,'total_params':total_params,'log_likelihood':log_likelihood}
1✔
1617
                # Prediction and target
1618
                self.prediction[dataset][key] = {}
1✔
1619
                self.prediction[dataset][key]['A'] = A_np.tolist()
1✔
1620
                self.prediction[dataset][key]['B'] = B_np.tolist()
1✔
1621

1622
            ## Remove virtual states
1623
            for key in (connect.keys() | closed_loop.keys()):
1✔
1624
                if key in self.states.keys():
1✔
1625
                    del self.states[key]
1✔
1626

1627
            self.performance[dataset]['total'] = {}
1✔
1628
            self.performance[dataset]['total']['mean_error'] = np.mean([value for key,value in total_losses.items()])
1✔
1629
            self.performance[dataset]['total']['fvu'] = np.mean([self.performance[dataset][key]['fvu']['total'] for key in self.model_def['Minimizers'].keys()])
1✔
1630
            self.performance[dataset]['total']['aic'] = np.mean([self.performance[dataset][key]['aic']['value']for key in self.model_def['Minimizers'].keys()])
1✔
1631

1632
        self.visualizer.showResult(dataset)
1✔
1633

1634
    def getWorkspace(self):
1✔
1635
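        """
        Returns the path of the workspace folder used by the exporter.

        Example
        -------
        Example usage:
            >>> model = Modely(workspace='path/to/workspace')
            >>> model.getWorkspace()
        """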
        return self.exporter.getWorkspace()
1✔
1636

1637
    def saveTorchModel(self, name = 'net', model_folder = None, models = None):
1✔
1638
        """
1639
        Saves the neural network model in PyTorch format.
1640

1641
        Parameters
1642
        ----------
1643
        name : str, optional
1644
            The name of the saved model file. Default is 'net'.
1645
        model_folder : str or None, optional
1646
            The folder to save the model file in. Default is None.
1647
        models : list or None, optional
1648
            A list of model names to save. If None, the entire model is saved. Default is None.
1649

1650
        Raises
1651
        ------
1652
        RuntimeError
1653
            If the model is not neuralized.
1654

1655
        Example
1656
        -------
1657
        Example usage:
1658
            >>> model = Modely()
1659
            >>> model.neuralizeModel()
1660
            >>> model.saveTorchModel(name='example_model', model_folder='path/to/save')
1661
        """
1662
        check(self.neuralized == True, RuntimeError, 'The model is not neuralized yet!')
1✔
1663
        if models is not None:
1✔
1664
            if name == 'net':
×
1665
                name += '_' + '_'.join(models)
×
1666
            model_def = ModelDef()
×
1667
            model_def.update(model_dict = {key: self.model_dict[key] for key in models if key in self.model_dict})
×
1668
            model_def.setBuildWindow(self.model_def['Info']['SampleTime'])
×
1669
            model_def.updateParameters(self.model)
×
1670
            model = Model(model_def.json)
×
1671
        else:
1672
            model = self.model
1✔
1673
        self.exporter.saveTorchModel(model, name, model_folder)
1✔
1674

1675
    def loadTorchModel(self, name = 'net', model_folder = None):
1✔
1676
        """
1677
        Loads a neural network model from a PyTorch format file.
1678

1679
        Parameters
1680
        ----------
1681
        name : str, optional
1682
            The name of the model file to load. Default is 'net'.
1683
        model_folder : str or None, optional
1684
            The folder to load the model file from. Default is None.
1685

1686
        Raises
1687
        ------
1688
        RuntimeError
1689
            If the model is not neuralized.
1690

1691
        Example
1692
        -------
1693
        Example usage:
1694
            >>> model = Modely()
1695
            >>> model.neuralizeModel()
1696
            >>> model.loadTorchModel(name='example_model', model_folder='path/to/load')
1697
        """
1698
        check(self.neuralized == True, RuntimeError, 'The model is not neuralized yet.')
1✔
1699
        self.exporter.loadTorchModel(self.model, name, model_folder)
1✔
1700

1701
    def saveModel(self, name = 'net', model_path = None, models = None):
1✔
1702
        """
1703
        Saves the neural network model definition in a json file.
1704

1705
        Parameters
1706
        ----------
1707
        name : str, optional
1708
            The name of the saved model file. Default is 'net'.
1709
        model_path : str or None, optional
1710
            The path to save the model file. Default is None.
1711
        models : list or None, optional
1712
            A list of model names to save. If None, the entire model is saved. Default is None.
1713

1714
        Raises
1715
        ------
1716
        RuntimeError
1717
            If the network has not been defined.
1718

1719
        Example
1720
        -------
1721
        Example usage:
1722
            >>> model = Modely()
1723
            >>> model.neuralizeModel()
1724
            >>> model.saveModel(name='example_model', model_path='path/to/save')
1725
        """
1726
        if models is not None:
1✔
1727
            if name == 'net':
×
1728
                name += '_' + '_'.join(models)
×
1729
            model_def = ModelDef()
×
1730
            model_def.update(model_dict = {key: self.model_dict[key] for key in models if key in self.model_dict})
×
1731
            model_def.setBuildWindow(self.model_def['Info']['SampleTime'])
×
1732
            model_def.updateParameters(self.model)
×
1733
        else:
1734
            model_def = self.model_def
1✔
1735
        check(model_def.isDefined(), RuntimeError, "The network has not been defined.")
1✔
1736
        self.exporter.saveModel(model_def.json, name, model_path)
1✔
1737

1738
    def loadModel(self, name = None, model_folder = None):
1✔
1739
        """
1740
        Loads a neural network model from a json file containing the model definition.
1741

1742
        Parameters
1743
        ----------
1744
        name : str or None, optional
1745
            The name of the model file to load. Default is 'net'.
1746
        model_folder : str or None, optional
1747
            The folder to load the model file from. Default is None.
1748

1749
        Raises
1750
        ------
1751
        RuntimeError
1752
            If there is an error loading the network.
1753

1754
        Example
1755
        -------
1756
        Example usage:
1757
            >>> model = Modely()
1758
            >>> model.loadModel(name='example_model', model_folder='path/to/load')
1759
        """
1760
        if name is None:
1✔
1761
            name = 'net'
1✔
1762
        model_def = self.exporter.loadModel(name, model_folder)
1✔
1763
        check(model_def, RuntimeError, "Error to load the network.")
1✔
1764
        self.model_def = ModelDef(model_def)
1✔
1765
        self.model = None
1✔
1766
        self.neuralized = False
1✔
1767
        self.traced = False
1✔
1768

1769
    def exportPythonModel(self, name = 'net', model_path = None, models = None):
1✔
1770
        """
1771
        Exports the neural network model as a standalone PyTorch Module class.
1772

1773
        Parameters
1774
        ----------
1775
        name : str, optional
1776
            The name of the exported model file. Default is 'net'.
1777
        model_path : str or None, optional
1778
            The path to save the exported model file. Default is None.
1779
        models : list or None, optional
1780
            A list of model names to export. If None, the entire model is exported. Default is None.
1781

1782
        Raises
1783
        ------
1784
        RuntimeError
1785
            If the network has not been defined.
1786
            If the model is traced and cannot be exported to Python.
1787
            If the model is not neuralized.
1788

1789
        Example
1790
        -------
1791
        Example usage:
1792
            >>> model = Modely(name='example_model')
1793
            >>> model.neuralizeModel()
1794
            >>> model.exportPythonModel(name='example_model', model_path='path/to/export')
1795
        """
1796
        if models is not None:
1✔
1797
            if name == 'net':
×
1798
                name += '_' + '_'.join(models)
×
1799
            model_def = ModelDef()
×
1800
            model_def.update(model_dict = {key: self.model_dict[key] for key in models if key in self.model_dict})
×
1801
            model_def.setBuildWindow(self.model_def['Info']['SampleTime'])
×
1802
            model_def.updateParameters(self.model)
×
1803
            model = Model(model_def.json)
×
1804
        else:
1805
            model_def = self.model_def
1✔
1806
            model = self.model
1✔
1807
        #check(model_def['States'] == {}, TypeError, "The network has state variables. The export to python is not possible.")
1808
        check(model_def.isDefined(), RuntimeError, "The network has not been defined.")
1✔
1809
        check(self.traced == False, RuntimeError,
1✔
1810
                  'The model is traced and cannot be exported to Python.\n Run neuralizeModel() to recreate a standard model.')
1811
        check(self.neuralized == True, RuntimeError, 'The model is not neuralized yet.')
1✔
1812
        self.exporter.saveModel(model_def.json, name, model_path)
1✔
1813
        self.exporter.exportPythonModel(model_def, model, name, model_path)
1✔
1814

1815
    def importPythonModel(self, name = None, model_folder = None):
1✔
1816
        """
1817
        Imports a neural network model from a standalone PyTorch Module class.
1818

1819
        Parameters
1820
        ----------
1821
        name : str or None, optional
1822
            The name of the model file to import. Default is 'net'.
1823
        model_folder : str or None, optional
1824
            The folder to import the model file from. Default is None.
1825

1826
        Raises
1827
        ------
1828
        RuntimeError
1829
            If there is an error loading the network.
1830

1831
        Example
1832
        -------
1833
        Example usage:
1834
            >>> model = Modely()
1835
            >>> model.importPythonModel(name='example_model', model_folder='path/to/import')
1836
        """
1837
        if name is None:
1✔
1838
            name = 'net'
1✔
1839
        model_def = self.exporter.loadModel(name, model_folder)
1✔
1840
        check(model_def is not None, RuntimeError, "Error to load the network.")
1✔
1841
        self.neuralizeModel(model_def=model_def)
1✔
1842
        self.model = self.exporter.importPythonModel(name, model_folder)
1✔
1843
        self.traced = True
1✔
1844
        self.model_def.updateParameters(self.model)
1✔
1845

1846
    def exportONNX(self, inputs_order, outputs_order,  models = None, name = 'net', model_folder = None):
1✔
1847
        """
1848
        Exports the neural network model to an ONNX file.
1849

1850
        -----
1851
        .. note::
1852
            The inputs_order must contain all the inputs and states of the model in the order in which you want to export them.
1853

1854
        Parameters
1855
        ----------
1856
        inputs_order : list
1857
            The order of the input and state variables.
1858
        outputs_order : list
1859
            The order of the output variables.
1860
        models : list or None, optional
1861
            A list of model names to export. If None, the entire model is exported. Default is None.
1862
        name : str, optional
1863
            The name of the exported ONNX file. Default is 'net'.
1864
        model_folder : str or None, optional
1865
            The folder to save the exported ONNX file. Default is None.
1866

1867
        Raises
1868
        ------
1869
        RuntimeError
1870
            If the network has not been defined.
1871
            If the model is traced and cannot be exported to ONNX.
1872
            If the model is not neuralized.
1873
            If the model is loaded and not created.
1874

1875
        Example
1876
        -------
1877
        Example usage:
1878
            >>> input1 = Input('input1').last()
1879
            >>> input2 = Input('input2').last()
1880
            >>> out = Output('output1', input1+input2)
1881
 
1882
            >>> model = Modely()
1883
            >>> model.neuralizeModel()
1884
            >>> model.exportONNX(inputs_order=['input1', 'input2'], outputs_order=['output1'], name='example_model', model_folder='path/to/export')
1885
        """
1886
        check(self.model_def.isDefined(), RuntimeError, "The network has not been defined.")
1✔
1887
        check(self.traced == False, RuntimeError, 'The model is traced and cannot be exported to ONNX.\n Run neuralizeModel() to recreate a standard model.')
1✔
1888
        check(self.neuralized == True, RuntimeError, 'The model is not neuralized yet.')
1✔
1889
        check(self.model_def.model_dict != {}, RuntimeError, 'The model is loaded and not created.')
1✔
1890
        model_def = ModelDef()
1✔
1891
        if models is not None:
1✔
1892
            if name == 'net':
1✔
1893
                name += '_' + '_'.join(models)
1✔
1894
            model_def.update(model_dict = {key: self.model_def.model_dict[key] for key in models if key in self.model_def.model_dict})
1✔
1895
        else:
1896
            model_def.update(model_dict = self.model_def.model_dict)
1✔
1897
        model_def.setBuildWindow(self.model_def['Info']['SampleTime'])
1✔
1898
        model_def.updateParameters(self.model)
1✔
1899
        model = Model(model_def.json)
1✔
1900
        model.update()
1✔
1901
        self.exporter.exportONNX(model_def, model, inputs_order, outputs_order, name, model_folder)
1✔
1902

1903
    def onnxInference(self, inputs:dict, path:str):
1✔
1904
        """
1905
        Run an inference session using an onnx model previously exported using the nnodely framework. 
1906

1907
        -----
1908
        .. note:: Feed-Forward ONNX model
1909
            For feed-forward models, the onnx model expects all the inputs and states to have 3 dimensions. The first dimension is the batch size, the second is the time window and the third is the feature dimension.
1910
        .. note:: Recurrent ONNX model
1911
            For recurrent models, the onnx model expects all the inputs to have 4 dimensions. The first dimension is the prediction horizon, the second is the batch size, the third is the time window and the fourth is the feature dimension.
1912
            For recurrent models, the onnx model expects all the States to have 3 dimensions. The first dimension is the batch size, the second is the time window and the third is the feature dimension.
1913

1914
        Parameters
1915
        ----------
1916
        inputs : dict
1917
            A dictionary containing the input and state variables to be used to make the inference. 
1918
            State variables are mandatory and are used to initialize the states of the model.
1919
        path : str
1920
            The path to the ONNX file to use.
1921

1922
        Raises
1923
        ------
1924
        RuntimeError
1925
            If the shapes of the inputs are not equal to the ones defined in the onnx model.
1926
            If the batch size is not equal for all the inputs and states.
1927

1928
        Example
1929
        -------
1930
        Feed-forward Example:
1931
            >>> x = Input('x')
1932
 
1933
            >>> onnx_model_path = 'path/to/net.onnx'
1934
            >>> dummy_input = {'x':np.ones(shape=(3, 1, 1)).astype(np.float32)}
1935
            >>> predictions = Modely().onnxInference(dummy_input, onnx_model_path)
1936
        Recurrent Example:
1937
            >>> x = Input('x')
1938
            >>> y = State('y')
1939
 
1940
            >>> onnx_model_path = 'path/to/net.onnx'
1941
            >>> dummy_input = {'x':np.ones(shape=(3, 1, 1, 1)).astype(np.float32),
1942
                                'y':np.ones(shape=(1, 1, 1)).astype(np.float32)}
1943
            >>> predictions = Modely().onnxInference(dummy_input, onnx_model_path)
1944
        """
1945
        return self.exporter.onnxInference(inputs, path)
1✔
1946

1947
    def exportReport(self, name = 'net', model_folder = None):
1✔
1948
        """
1949
        Generates a PDF report with plots containing the results of the training and validation of the neural network.
1950

1951
        Parameters
1952
        ----------
1953
        name : str, optional
1954
            The name of the exported report file. Default is 'net'.
1955
        model_folder : str or None, optional
1956
            The folder to save the exported report file. Default is None.
1957

1958
        Example
1959
        -------
1960
        Example usage:
1961
            >>> model = Modely()
1962
            >>> model.neuralizeModel()
1963
            >>> model.trainModel(train_dataset='train_dataset', validation_dataset='val_dataset', num_of_epochs=10)
1964
            >>> model.exportReport(name='example_model', model_folder='path/to/export')
1965
        """
1966
        self.exporter.exportReport(self, name, model_folder)
1✔
1967

1968
nnodely = Modely
1✔